From 6f5771a1253c5a47d6d09a164da1376a452746cb Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Wed, 17 Sep 2025 09:32:04 +0200 Subject: [PATCH 01/86] chore: remove unused TypeSignature::empty_buffer(), #6467 --- clarity-types/src/types/signatures.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index e2778447fe..3475993b19 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -878,14 +878,6 @@ impl TupleTypeSignature { } impl TypeSignature { - pub fn empty_buffer() -> Result { - Ok(SequenceType(SequenceSubtype::BufferType( - 0_u32.try_into().map_err(|_| { - CheckErrors::Expects("FAIL: Empty clarity value size is not realizable".into()) - })?, - ))) - } - pub fn min_buffer() -> Result { Ok(SequenceType(SequenceSubtype::BufferType( 1_u32.try_into().map_err(|_| { From 2e0cad6fa75fa8d8594d5c04f43d3c4d991a6789 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Wed, 17 Sep 2025 10:14:05 +0200 Subject: [PATCH 02/86] test: add TypeSignature::min_buffer() unit test, #6467 --- clarity-types/src/tests/types/signatures.rs | 12 +++++++++++- clarity-types/src/types/signatures.rs | 2 +- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index bb8fa5231a..ddc9937b91 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -18,10 +18,20 @@ use crate::errors::CheckErrors; use crate::types::TypeSignature::{BoolType, IntType, ListUnionType, UIntType}; use crate::types::signatures::{CallableSubtype, TypeSignature}; use crate::types::{ - MAX_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype, TraitIdentifier, + BufferLength, MAX_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype, TraitIdentifier, TupleTypeSignature, }; +#[test] +fn test_min_buffer() { + let expected = 
TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength(1))); + let actual = TypeSignature::min_buffer().unwrap(); + assert_eq!(expected, actual); + assert_eq!(5, actual.size().unwrap(), "size should be 5"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 3475993b19..f09011aecf 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -102,7 +102,7 @@ mod tuple_type_map_serde { } #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] -pub struct BufferLength(u32); +pub struct BufferLength(pub(crate) u32); #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct StringUTF8Length(u32); From bd5ef8a665c39c9043624e93f2d212a8bf945ae7 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Wed, 17 Sep 2025 11:02:11 +0200 Subject: [PATCH 03/86] chore: convert TypeSignature::min_buffer() to const, #6467 --- clarity-types/src/tests/types/signatures.rs | 12 ++++++------ clarity-types/src/types/mod.rs | 4 ++-- clarity-types/src/types/signatures.rs | 10 +++------- .../analysis/type_checker/v2_05/natives/sequences.rs | 2 +- .../src/vm/analysis/type_checker/v2_05/tests/mod.rs | 2 +- .../analysis/type_checker/v2_1/natives/sequences.rs | 2 +- .../src/vm/analysis/type_checker/v2_1/tests/mod.rs | 4 ++-- clarity/src/vm/tests/sequences.rs | 2 +- 8 files changed, 17 insertions(+), 21 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index ddc9937b91..c9a9aa3c93 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -25,7 +25,7 @@ use crate::types::{ #[test] fn test_min_buffer() { let expected = 
TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength(1))); - let actual = TypeSignature::min_buffer().unwrap(); + let actual = TypeSignature::min_buffer(); assert_eq!(expected, actual); assert_eq!(5, actual.size().unwrap(), "size should be 5"); assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); @@ -78,8 +78,8 @@ fn test_least_supertype() { TypeSignature::BoolType, ), ( - (TypeSignature::NoType, TypeSignature::min_buffer().unwrap()), - TypeSignature::min_buffer().unwrap(), + (TypeSignature::NoType, TypeSignature::min_buffer()), + TypeSignature::min_buffer(), ), ( ( @@ -288,7 +288,7 @@ fn test_least_supertype() { ( ( TypeSignature::max_buffer().unwrap(), - TypeSignature::min_buffer().unwrap(), + TypeSignature::min_buffer(), ), TypeSignature::max_buffer().unwrap(), ), @@ -364,7 +364,7 @@ fn test_least_supertype() { 5, ) .unwrap(), - TypeSignature::list_of(TypeSignature::min_buffer().unwrap(), 3).unwrap(), + TypeSignature::list_of(TypeSignature::min_buffer(), 3).unwrap(), ), TypeSignature::list_of( TypeSignature::SequenceType(SequenceSubtype::BufferType( @@ -451,7 +451,7 @@ fn test_least_supertype() { ), ( TypeSignature::min_string_utf8().unwrap(), - TypeSignature::min_buffer().unwrap(), + TypeSignature::min_buffer(), ), ( TypeSignature::TupleType( diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index cc892a517a..eba85d281f 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -343,7 +343,7 @@ impl SequenceData { pub fn element_size(&self) -> Result { let out = match self { - SequenceData::Buffer(..) => TypeSignature::min_buffer()?.size(), + SequenceData::Buffer(..) 
=> TypeSignature::min_buffer().size(), SequenceData::List(data) => data.type_signature.get_list_item_type().size(), SequenceData::String(CharType::ASCII(..)) => TypeSignature::min_string_ascii()?.size(), SequenceData::String(CharType::UTF8(..)) => TypeSignature::min_string_utf8()?.size(), @@ -455,7 +455,7 @@ impl SequenceData { Ok(None) } } else { - Err(CheckErrors::TypeValueError(TypeSignature::min_buffer()?, to_find).into()) + Err(CheckErrors::TypeValueError(TypeSignature::min_buffer(), to_find).into()) } } SequenceData::List(data) => { diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index f09011aecf..eb2e43f98a 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -153,7 +153,7 @@ impl SequenceSubtype { pub fn unit_type(&self) -> Result { match &self { SequenceSubtype::ListType(list_data) => Ok(list_data.clone().destruct().0), - SequenceSubtype::BufferType(_) => TypeSignature::min_buffer(), + SequenceSubtype::BufferType(_) => Ok(TypeSignature::min_buffer()), SequenceSubtype::StringType(StringSubtype::ASCII(_)) => { TypeSignature::min_string_ascii() } @@ -878,12 +878,8 @@ impl TupleTypeSignature { } impl TypeSignature { - pub fn min_buffer() -> Result { - Ok(SequenceType(SequenceSubtype::BufferType( - 1_u32.try_into().map_err(|_| { - CheckErrors::Expects("FAIL: Min clarity value size is not realizable".into()) - })?, - ))) + pub const fn min_buffer() -> TypeSignature { + SequenceType(SequenceSubtype::BufferType(BufferLength(1))) } pub fn min_string_ascii() -> Result { diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs index 136d5bd81e..93693c2012 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs @@ -81,7 +81,7 @@ pub fn check_special_map( TypeSignature::SequenceType(sequence) => { let 
(entry_type, len) = match sequence { ListType(list_data) => list_data.destruct(), - BufferType(buffer_data) => (TypeSignature::min_buffer()?, buffer_data.into()), + BufferType(buffer_data) => (TypeSignature::min_buffer(), buffer_data.into()), StringType(ASCII(ascii_data)) => { (TypeSignature::min_string_ascii()?, ascii_data.into()) } diff --git a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs index 7c326bb5c4..4c66ddf6db 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs @@ -703,7 +703,7 @@ fn test_index_of() { CheckErrors::ExpectedSequence(TypeSignature::IntType), CheckErrors::TypeError(TypeSignature::IntType, TypeSignature::UIntType), CheckErrors::TypeError( - TypeSignature::min_buffer().unwrap(), + TypeSignature::min_buffer(), TypeSignature::min_string_ascii().unwrap(), ), CheckErrors::TypeError( diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs index d53209fbc2..46aa3717e5 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs @@ -87,7 +87,7 @@ pub fn check_special_map( TypeSignature::SequenceType(sequence) => { let (entry_type, len) = match sequence { ListType(list_data) => list_data.destruct(), - BufferType(buffer_data) => (TypeSignature::min_buffer()?, buffer_data.into()), + BufferType(buffer_data) => (TypeSignature::min_buffer(), buffer_data.into()), StringType(ASCII(ascii_data)) => { (TypeSignature::min_string_ascii()?, ascii_data.into()) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 6d29d01420..84137642d1 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -1055,7 
+1055,7 @@ fn test_index_of() { CheckErrors::ExpectedSequence(TypeSignature::IntType), CheckErrors::TypeError(TypeSignature::IntType, TypeSignature::UIntType), CheckErrors::TypeError( - TypeSignature::min_buffer().unwrap(), + TypeSignature::min_buffer(), TypeSignature::min_string_ascii().unwrap(), ), CheckErrors::TypeError( @@ -1073,7 +1073,7 @@ fn test_index_of() { CheckErrors::ExpectedSequence(TypeSignature::IntType), CheckErrors::TypeError(TypeSignature::IntType, TypeSignature::UIntType), CheckErrors::TypeError( - TypeSignature::min_buffer().unwrap(), + TypeSignature::min_buffer(), TypeSignature::min_string_ascii().unwrap(), ), CheckErrors::TypeError( diff --git a/clarity/src/vm/tests/sequences.rs b/clarity/src/vm/tests/sequences.rs index d89a2460b2..b6c5506bf3 100644 --- a/clarity/src/vm/tests/sequences.rs +++ b/clarity/src/vm/tests/sequences.rs @@ -114,7 +114,7 @@ fn test_index_of() { let bad_expected = [ CheckErrors::ExpectedSequence(TypeSignature::IntType), CheckErrors::TypeValueError( - TypeSignature::min_buffer().unwrap(), + TypeSignature::min_buffer(), execute("\"a\"").unwrap().unwrap(), ), CheckErrors::TypeValueError( From 87918a8262160f41e7875ec2a1bd0c59db01b4b4 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Wed, 17 Sep 2025 11:13:33 +0200 Subject: [PATCH 04/86] chore: convert TypeSignature::min_buffer() to TypeSignature::BUFFER_MIN, #6467 --- clarity-types/src/tests/types/signatures.rs | 14 +++++++------- clarity-types/src/types/mod.rs | 4 ++-- clarity-types/src/types/signatures.rs | 7 +++---- .../type_checker/v2_05/natives/sequences.rs | 2 +- .../vm/analysis/type_checker/v2_05/tests/mod.rs | 2 +- .../type_checker/v2_1/natives/sequences.rs | 2 +- .../src/vm/analysis/type_checker/v2_1/tests/mod.rs | 4 ++-- clarity/src/vm/tests/sequences.rs | 2 +- 8 files changed, 18 insertions(+), 19 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index c9a9aa3c93..db1cb266d6 100644 --- 
a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -23,9 +23,9 @@ use crate::types::{ }; #[test] -fn test_min_buffer() { +fn test_type_buffer_min() { let expected = TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength(1))); - let actual = TypeSignature::min_buffer(); + let actual = TypeSignature::BUFFER_MIN; assert_eq!(expected, actual); assert_eq!(5, actual.size().unwrap(), "size should be 5"); assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); @@ -78,8 +78,8 @@ fn test_least_supertype() { TypeSignature::BoolType, ), ( - (TypeSignature::NoType, TypeSignature::min_buffer()), - TypeSignature::min_buffer(), + (TypeSignature::NoType, TypeSignature::BUFFER_MIN), + TypeSignature::BUFFER_MIN, ), ( ( @@ -288,7 +288,7 @@ fn test_least_supertype() { ( ( TypeSignature::max_buffer().unwrap(), - TypeSignature::min_buffer(), + TypeSignature::BUFFER_MIN, ), TypeSignature::max_buffer().unwrap(), ), @@ -364,7 +364,7 @@ fn test_least_supertype() { 5, ) .unwrap(), - TypeSignature::list_of(TypeSignature::min_buffer(), 3).unwrap(), + TypeSignature::list_of(TypeSignature::BUFFER_MIN, 3).unwrap(), ), TypeSignature::list_of( TypeSignature::SequenceType(SequenceSubtype::BufferType( @@ -451,7 +451,7 @@ fn test_least_supertype() { ), ( TypeSignature::min_string_utf8().unwrap(), - TypeSignature::min_buffer(), + TypeSignature::BUFFER_MIN, ), ( TypeSignature::TupleType( diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index eba85d281f..b3b84b103d 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -343,7 +343,7 @@ impl SequenceData { pub fn element_size(&self) -> Result { let out = match self { - SequenceData::Buffer(..) => TypeSignature::min_buffer().size(), + SequenceData::Buffer(..) 
=> TypeSignature::BUFFER_MIN.size(), SequenceData::List(data) => data.type_signature.get_list_item_type().size(), SequenceData::String(CharType::ASCII(..)) => TypeSignature::min_string_ascii()?.size(), SequenceData::String(CharType::UTF8(..)) => TypeSignature::min_string_utf8()?.size(), @@ -455,7 +455,7 @@ impl SequenceData { Ok(None) } } else { - Err(CheckErrors::TypeValueError(TypeSignature::min_buffer(), to_find).into()) + Err(CheckErrors::TypeValueError(TypeSignature::BUFFER_MIN, to_find).into()) } } SequenceData::List(data) => { diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index eb2e43f98a..e8b8b08612 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -153,7 +153,7 @@ impl SequenceSubtype { pub fn unit_type(&self) -> Result { match &self { SequenceSubtype::ListType(list_data) => Ok(list_data.clone().destruct().0), - SequenceSubtype::BufferType(_) => Ok(TypeSignature::min_buffer()), + SequenceSubtype::BufferType(_) => Ok(TypeSignature::BUFFER_MIN), SequenceSubtype::StringType(StringSubtype::ASCII(_)) => { TypeSignature::min_string_ascii() } @@ -878,9 +878,8 @@ impl TupleTypeSignature { } impl TypeSignature { - pub const fn min_buffer() -> TypeSignature { - SequenceType(SequenceSubtype::BufferType(BufferLength(1))) - } + pub const BUFFER_MIN: TypeSignature = + SequenceType(SequenceSubtype::BufferType(BufferLength(1))); pub fn min_string_ascii() -> Result { Ok(SequenceType(SequenceSubtype::StringType( diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs index 93693c2012..d9e3cbaeba 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs @@ -81,7 +81,7 @@ pub fn check_special_map( TypeSignature::SequenceType(sequence) => { let (entry_type, len) = match sequence { ListType(list_data) => 
list_data.destruct(), - BufferType(buffer_data) => (TypeSignature::min_buffer(), buffer_data.into()), + BufferType(buffer_data) => (TypeSignature::BUFFER_MIN, buffer_data.into()), StringType(ASCII(ascii_data)) => { (TypeSignature::min_string_ascii()?, ascii_data.into()) } diff --git a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs index 4c66ddf6db..5667ad1559 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs @@ -703,7 +703,7 @@ fn test_index_of() { CheckErrors::ExpectedSequence(TypeSignature::IntType), CheckErrors::TypeError(TypeSignature::IntType, TypeSignature::UIntType), CheckErrors::TypeError( - TypeSignature::min_buffer(), + TypeSignature::BUFFER_MIN, TypeSignature::min_string_ascii().unwrap(), ), CheckErrors::TypeError( diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs index 46aa3717e5..04df6fc2b1 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs @@ -87,7 +87,7 @@ pub fn check_special_map( TypeSignature::SequenceType(sequence) => { let (entry_type, len) = match sequence { ListType(list_data) => list_data.destruct(), - BufferType(buffer_data) => (TypeSignature::min_buffer(), buffer_data.into()), + BufferType(buffer_data) => (TypeSignature::BUFFER_MIN, buffer_data.into()), StringType(ASCII(ascii_data)) => { (TypeSignature::min_string_ascii()?, ascii_data.into()) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 84137642d1..b6de12d015 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -1055,7 +1055,7 @@ fn test_index_of() { 
CheckErrors::ExpectedSequence(TypeSignature::IntType), CheckErrors::TypeError(TypeSignature::IntType, TypeSignature::UIntType), CheckErrors::TypeError( - TypeSignature::min_buffer(), + TypeSignature::BUFFER_MIN, TypeSignature::min_string_ascii().unwrap(), ), CheckErrors::TypeError( @@ -1073,7 +1073,7 @@ fn test_index_of() { CheckErrors::ExpectedSequence(TypeSignature::IntType), CheckErrors::TypeError(TypeSignature::IntType, TypeSignature::UIntType), CheckErrors::TypeError( - TypeSignature::min_buffer(), + TypeSignature::BUFFER_MIN, TypeSignature::min_string_ascii().unwrap(), ), CheckErrors::TypeError( diff --git a/clarity/src/vm/tests/sequences.rs b/clarity/src/vm/tests/sequences.rs index b6c5506bf3..6f8a38d4e4 100644 --- a/clarity/src/vm/tests/sequences.rs +++ b/clarity/src/vm/tests/sequences.rs @@ -114,7 +114,7 @@ fn test_index_of() { let bad_expected = [ CheckErrors::ExpectedSequence(TypeSignature::IntType), CheckErrors::TypeValueError( - TypeSignature::min_buffer(), + TypeSignature::BUFFER_MIN, execute("\"a\"").unwrap().unwrap(), ), CheckErrors::TypeValueError( From 936300ed58df2d5b1b97e1a751d2c499b94e5d9f Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Wed, 17 Sep 2025 11:36:26 +0200 Subject: [PATCH 05/86] refactor: replace lazy static BUFF_1 with TypeSignature::BUFFER_1 const, #6467 --- clarity-types/src/tests/types/signatures.rs | 9 +++++++-- clarity-types/src/types/mod.rs | 2 +- clarity-types/src/types/signatures.rs | 12 ++++-------- .../analysis/type_checker/v2_05/natives/sequences.rs | 8 ++++---- .../src/vm/analysis/type_checker/v2_1/natives/mod.rs | 6 +++--- .../analysis/type_checker/v2_1/natives/sequences.rs | 8 ++++---- .../src/vm/analysis/type_checker/v2_1/tests/mod.rs | 8 ++++---- clarity/src/vm/functions/principals.rs | 6 +++--- clarity/src/vm/tests/principals.rs | 6 +++--- clarity/src/vm/types/mod.rs | 6 +++--- clarity/src/vm/types/signatures.rs | 4 ++-- 11 files changed, 38 insertions(+), 37 deletions(-) diff --git 
a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index db1cb266d6..68c4d7a309 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -23,9 +23,14 @@ use crate::types::{ }; #[test] -fn test_type_buffer_min() { +fn test_type_buffer_min_to_be_buffer_1() { + assert_eq!(TypeSignature::BUFFER_1, TypeSignature::BUFFER_MIN); +} + +#[test] +fn test_type_buffer_1() { let expected = TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength(1))); - let actual = TypeSignature::BUFFER_MIN; + let actual = TypeSignature::BUFFER_1; assert_eq!(expected, actual); assert_eq!(5, actual.size().unwrap(), "size should be 5"); assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index b3b84b103d..765e62abd3 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -34,7 +34,7 @@ use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::util::hash; pub use self::signatures::{ - AssetIdentifier, BUFF_1, BUFF_20, BUFF_21, BUFF_32, BUFF_33, BUFF_64, BUFF_65, BufferLength, + AssetIdentifier, BUFF_20, BUFF_21, BUFF_32, BUFF_33, BUFF_64, BUFF_65, BufferLength, ListTypeData, SequenceSubtype, StringSubtype, StringUTF8Length, TupleTypeSignature, TypeSignature, }; diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index e8b8b08612..c595128cf3 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -228,12 +228,7 @@ lazy_static! 
{ BufferLength::try_from(21u32).expect("BUG: Legal Clarity buffer length marked invalid"), )) }; - pub static ref BUFF_1: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(1u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; + pub static ref BUFF_16: TypeSignature = { #[allow(clippy::expect_used)] SequenceType(SequenceSubtype::BufferType( @@ -878,8 +873,9 @@ impl TupleTypeSignature { } impl TypeSignature { - pub const BUFFER_MIN: TypeSignature = - SequenceType(SequenceSubtype::BufferType(BufferLength(1))); + pub const BUFFER_MIN: TypeSignature = TypeSignature::BUFFER_1; + + pub const BUFFER_1: TypeSignature = SequenceType(SequenceSubtype::BufferType(BufferLength(1))); pub fn min_string_ascii() -> Result { Ok(SequenceType(SequenceSubtype::StringType( diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs index d9e3cbaeba..f2ca26101c 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs @@ -25,7 +25,7 @@ use crate::vm::costs::cost_functions::ClarityCostFunction; use crate::vm::costs::{analysis_typecheck_cost, runtime_cost}; use crate::vm::functions::NativeFunctions; use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType}; -pub use crate::vm::types::signatures::{BufferLength, ListTypeData, StringUTF8Length, BUFF_1}; +pub use crate::vm::types::signatures::{BufferLength, ListTypeData, StringUTF8Length}; use crate::vm::types::SequenceSubtype::*; use crate::vm::types::StringSubtype::*; use crate::vm::types::{FunctionType, TypeSignature, Value}; @@ -376,9 +376,9 @@ pub fn check_special_element_at( let (entry_type, _) = list.destruct(); TypeSignature::new_option(entry_type).map_err(|e| e.into()) } - TypeSignature::SequenceType(BufferType(_)) => { - 
Ok(TypeSignature::OptionalType(Box::new(BUFF_1.clone()))) - } + TypeSignature::SequenceType(BufferType(_)) => Ok(TypeSignature::OptionalType(Box::new( + TypeSignature::BUFFER_1, + ))), TypeSignature::SequenceType(StringType(ASCII(_))) => Ok(TypeSignature::OptionalType( Box::new(TypeSignature::SequenceType(StringType(ASCII( BufferLength::try_from(1u32) diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index fb0aaeec9e..ee85f1dd46 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -32,7 +32,7 @@ use crate::vm::types::signatures::{ use crate::vm::types::{ BlockInfoProperty, BufferLength, BurnBlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, StacksBlockInfoProperty, TenureInfoProperty, - TupleTypeSignature, TypeSignature, Value, BUFF_1, BUFF_20, BUFF_32, BUFF_33, BUFF_64, BUFF_65, + TupleTypeSignature, TypeSignature, Value, BUFF_20, BUFF_32, BUFF_33, BUFF_64, BUFF_65, MAX_VALUE_SIZE, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -672,7 +672,7 @@ fn check_principal_construct( ) -> Result { check_arguments_at_least(2, args)?; check_arguments_at_most(3, args)?; - checker.type_check_expects(&args[0], context, &BUFF_1)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_1)?; checker.type_check_expects(&args[1], context, &BUFF_20)?; if args.len() > 2 { checker.type_check_expects( @@ -1030,7 +1030,7 @@ impl TypedNativeFunction { /// and error types are the same. 
fn parse_principal_basic_type() -> Result { TupleTypeSignature::try_from(vec![ - ("version".into(), BUFF_1.clone()), + ("version".into(), TypeSignature::BUFFER_1), ("hash-bytes".into(), BUFF_20.clone()), ( "name".into(), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs index 04df6fc2b1..9b2d8e2916 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs @@ -26,7 +26,7 @@ use crate::vm::costs::{analysis_typecheck_cost, runtime_cost, CostTracker}; use crate::vm::diagnostic::Diagnostic; use crate::vm::functions::NativeFunctions; use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType}; -pub use crate::vm::types::signatures::{BufferLength, ListTypeData, StringUTF8Length, BUFF_1}; +pub use crate::vm::types::signatures::{BufferLength, ListTypeData, StringUTF8Length}; use crate::vm::types::SequenceSubtype::*; use crate::vm::types::StringSubtype::*; use crate::vm::types::{FunctionType, TypeSignature, Value}; @@ -423,9 +423,9 @@ pub fn check_special_element_at( let (entry_type, _) = list.destruct(); TypeSignature::new_option(entry_type).map_err(|e| e.into()) } - TypeSignature::SequenceType(BufferType(_)) => { - Ok(TypeSignature::OptionalType(Box::new(BUFF_1.clone()))) - } + TypeSignature::SequenceType(BufferType(_)) => Ok(TypeSignature::OptionalType(Box::new( + TypeSignature::BUFFER_1, + ))), TypeSignature::SequenceType(StringType(ASCII(_))) => Ok(TypeSignature::OptionalType( Box::new(TypeSignature::SequenceType(StringType(ASCII( BufferLength::try_from(1u32) diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index b6de12d015..510c4d7b09 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -33,7 +33,7 @@ use 
crate::vm::types::StringSubtype::*; use crate::vm::types::TypeSignature::{BoolType, IntType, PrincipalType, SequenceType, UIntType}; use crate::vm::types::{ BufferLength, FixedFunction, FunctionType, QualifiedContractIdentifier, TraitIdentifier, - TypeSignature, TypeSignatureExt as _, BUFF_1, BUFF_20, BUFF_21, BUFF_32, BUFF_64, + TypeSignature, TypeSignatureExt as _, BUFF_20, BUFF_21, BUFF_32, BUFF_64, }; use crate::vm::{execute_v2, ClarityName, ClarityVersion}; @@ -3241,7 +3241,7 @@ fn test_principal_destruct() { let bad_expected = [ CheckErrors::IncorrectArgumentCount(1, 2), CheckErrors::IncorrectArgumentCount(1, 0), - CheckErrors::TypeError(TypeSignature::PrincipalType, BUFF_1.clone()), + CheckErrors::TypeError(TypeSignature::PrincipalType, TypeSignature::BUFFER_1), ]; for (good_test, expected) in good.iter().zip(expected.iter()) { @@ -3302,7 +3302,7 @@ fn test_principal_construct() { // The first buffer is too long, should be `(buff 1)`. ( r#"(principal-construct? 0xfa6bf38ed557fe417333710d6033e9419391a320 0xfa6bf38ed557fe417333710d6033e9419391a320)"#, - CheckErrors::TypeError(BUFF_1.clone(), BUFF_20.clone()), + CheckErrors::TypeError(TypeSignature::BUFFER_1, BUFF_20.clone()), ), // The second buffer is too long, should be `(buff 20)`. ( @@ -3312,7 +3312,7 @@ fn test_principal_construct() { // `int` argument instead of `(buff 1)` for version. ( r#"(principal-construct? 
22 0xfa6bf38ed557fe417333710d6033e9419391a320)"#, - CheckErrors::TypeError(BUFF_1.clone(), IntType), + CheckErrors::TypeError(TypeSignature::BUFFER_1, IntType), ), // `name` argument is too long ( diff --git a/clarity/src/vm/functions/principals.rs b/clarity/src/vm/functions/principals.rs index c3600e6654..5bcc8aa13a 100644 --- a/clarity/src/vm/functions/principals.rs +++ b/clarity/src/vm/functions/principals.rs @@ -13,7 +13,7 @@ use crate::vm::errors::{ use crate::vm::representations::{ SymbolicExpression, CONTRACT_MAX_NAME_LENGTH, CONTRACT_MIN_NAME_LENGTH, }; -use crate::vm::types::signatures::{BUFF_1, BUFF_20}; +use crate::vm::types::signatures::BUFF_20; use crate::vm::types::{ ASCIIData, BuffData, CharType, OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, SequenceData, StandardPrincipalData, TupleData, TypeSignature, Value, @@ -203,14 +203,14 @@ pub fn special_principal_construct( _ => { return { // This is an aborting error because this should have been caught in analysis pass. 
- Err(CheckErrors::TypeValueError(BUFF_1.clone(), version).into()) + Err(CheckErrors::TypeValueError(TypeSignature::BUFFER_1, version).into()) }; } }; let version_byte = if verified_version.len() > 1 { // should have been caught by the type-checker - return Err(CheckErrors::TypeValueError(BUFF_1.clone(), version).into()); + return Err(CheckErrors::TypeValueError(TypeSignature::BUFFER_1, version).into()); } else if verified_version.is_empty() { // the type checker does not check the actual length of the buffer, but a 0-length buffer // will type-check to (buff 1) diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs index aec00b3173..184b59cb2d 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -11,7 +11,7 @@ use crate::vm::{ errors::CheckErrors, functions::principals::PrincipalConstructErrorCode, types::TypeSignature::PrincipalType, - types::{ResponseData, TypeSignature, BUFF_1, BUFF_20}, + types::{ResponseData, TypeSignature, BUFF_20}, }; use crate::vm::{execute_with_parameters, ClarityVersion}; @@ -952,7 +952,7 @@ fn test_principal_construct_check_errors() { let input = r#"(principal-construct? 0x590493 0x0102030405060708091011121314151617181920)"#; assert_eq!( Err(CheckErrors::TypeValueError( - BUFF_1.clone(), + TypeSignature::BUFFER_1, Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("590493").unwrap() })) @@ -971,7 +971,7 @@ fn test_principal_construct_check_errors() { // `CheckErrors`. let input = r#"(principal-construct? 
u22 0x0102030405060708091011121314151617181920)"#; assert_eq!( - Err(CheckErrors::TypeValueError(BUFF_1.clone(), Value::UInt(22)).into()), + Err(CheckErrors::TypeValueError(TypeSignature::BUFFER_1, Value::UInt(22)).into()), execute_with_parameters( input, ClarityVersion::Clarity2, diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 5d18da01fa..d5db81dfdf 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -32,8 +32,8 @@ use crate::vm::errors::CheckErrors; pub use crate::vm::types::signatures::{ parse_name_type_pairs, AssetIdentifier, BufferLength, FixedFunction, FunctionArg, FunctionSignature, FunctionType, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_1, BUFF_20, - BUFF_21, BUFF_32, BUFF_33, BUFF_64, BUFF_65, + StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_20, BUFF_21, + BUFF_32, BUFF_33, BUFF_64, BUFF_65, }; use crate::vm::ClarityVersion; @@ -98,7 +98,7 @@ impl BurnBlockInfoProperty { TypeSignature::list_of( TypeSignature::TupleType( TupleTypeSignature::try_from(vec![ - ("version".into(), BUFF_1.clone()), + ("version".into(), TypeSignature::BUFFER_1), ("hashbytes".into(), BUFF_32.clone()), ]) .map_err(|_| { diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 09e0c74be4..e96ccb01ef 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -19,8 +19,8 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_1, BUFF_16, BUFF_20, - BUFF_21, BUFF_32, BUFF_33, BUFF_64, BUFF_65, MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, + StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_16, BUFF_20, BUFF_21, + BUFF_32, BUFF_33, BUFF_64, BUFF_65, MAX_TO_ASCII_BUFFER_LEN, 
TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, }; pub use clarity_types::types::Value; From 0610ce3487a42291978ef6ec53b28bf70858e17a Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 18 Sep 2025 11:11:03 +0200 Subject: [PATCH 06/86] refactor: improve proposal, #6467 --- clarity-types/src/tests/types/signatures.rs | 50 ++++++++++++++++++- clarity-types/src/types/signatures.rs | 55 +++++++++++++++++---- clarity/src/vm/functions/principals.rs | 14 ++++-- clarity/src/vm/tests/principals.rs | 9 ++-- 4 files changed, 110 insertions(+), 18 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 68c4d7a309..321c7264d1 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -22,6 +22,53 @@ use crate::types::{ TupleTypeSignature, }; +#[test] +fn test_buffer_length_from_u32_runtime() { + let buffer = BufferLength::try_from_u32(0).unwrap(); + assert_eq!(0, buffer.get_value()); + + let buffer = BufferLength::try_from_u32(MAX_VALUE_SIZE).unwrap(); + assert_eq!(MAX_VALUE_SIZE, buffer.get_value()); + + let err = BufferLength::try_from_u32(MAX_VALUE_SIZE + 1).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); +} + +#[test] +fn test_buffer_length_from_u32_compile_time() { + const B_0: BufferLength = BufferLength::from_const_u32::<0>(); + assert_eq!(0, B_0.get_value()); + + const B_MAX: BufferLength = BufferLength::from_const_u32::(); + assert_eq!(MAX_VALUE_SIZE, B_MAX.get_value()); + + //moved to runtime check, otherwise it emits a compilation error + std::panic::catch_unwind(|| { + const OUT_RANGE: u32 = MAX_VALUE_SIZE + 1; + let _ = BufferLength::from_const_u32::(); + }) + .expect_err("panic"); +} + +#[test] +fn test_buffer_length_try_from_u32_trait() { + let buffer = BufferLength::try_from(0u32).unwrap(); + assert_eq!(0, buffer.get_value()); + + let buffer = BufferLength::try_from(MAX_VALUE_SIZE).unwrap(); + assert_eq!(MAX_VALUE_SIZE, 
buffer.get_value()); + + let err = BufferLength::try_from(MAX_VALUE_SIZE + 1).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); +} + +#[test] +fn test_buffer_length_to_u32_with_from_trait() { + let buffer = BufferLength::new_unsafe(0); + assert_eq!(0, u32::from(&buffer)); + assert_eq!(0, u32::from(buffer)); +} + #[test] fn test_type_buffer_min_to_be_buffer_1() { assert_eq!(TypeSignature::BUFFER_1, TypeSignature::BUFFER_MIN); @@ -29,7 +76,8 @@ fn test_type_buffer_min_to_be_buffer_1() { #[test] fn test_type_buffer_1() { - let expected = TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength(1))); + let expected = + TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(1))); let actual = TypeSignature::BUFFER_1; assert_eq!(expected, actual); assert_eq!(5, actual.size().unwrap(), "size should be 5"); diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 9bfb275b6b..ca4fe3d2ac 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -102,7 +102,39 @@ mod tuple_type_map_serde { } #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] -pub struct BufferLength(pub(crate) u32); +pub struct BufferLength(u32); + +impl BufferLength { + /// Returns the internal `u32` value of this [`BufferLength`]. + pub fn get_value(&self) -> u32 { + self.0 + } + + /// Attempts to create a [`BufferLength`] from a `u32` at runtime. + pub fn try_from_u32(value: u32) -> Result { + if value > MAX_VALUE_SIZE { + Err(CheckErrors::ValueTooLarge) + } else { + Ok(BufferLength(value)) + } + } + + /// Creates a [`BufferLength`] from a `u32` constant at compile time. + pub const fn from_const_u32() -> Self { + assert!(VALUE <= MAX_VALUE_SIZE, "Value Too Large"); + BufferLength(VALUE) + } +} + +/// Test-only utilities for [`BufferLength`]. 
+#[cfg(test)] +impl BufferLength { + /// Allow to create a [`BufferLength`] in unsafe way, + /// allowing direct write-access to its internal state. + pub fn new_unsafe(value: u32) -> Self { + Self(value) + } +} #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct StringUTF8Length(u32); @@ -280,24 +312,20 @@ impl From for TypeSignature { impl From<&BufferLength> for u32 { fn from(v: &BufferLength) -> u32 { - v.0 + v.get_value() } } impl From for u32 { fn from(v: BufferLength) -> u32 { - v.0 + v.get_value() } } impl TryFrom for BufferLength { type Error = CheckErrors; fn try_from(data: u32) -> Result { - if data > MAX_VALUE_SIZE { - Err(CheckErrors::ValueTooLarge) - } else { - Ok(BufferLength(data)) - } + BufferLength::try_from_u32(data) } } @@ -873,9 +901,18 @@ impl TupleTypeSignature { } impl TypeSignature { + /// Buffer type with minimum size. Alias for [`TypeSignature::BUFFER_1`] pub const BUFFER_MIN: TypeSignature = TypeSignature::BUFFER_1; - pub const BUFFER_1: TypeSignature = SequenceType(SequenceSubtype::BufferType(BufferLength(1))); + /// Buffer type with size 1. + pub const BUFFER_1: TypeSignature = Self::type_buffer_of_size::<1>(); + + /// Creates a buffer type with a given size at compile time. + const fn type_buffer_of_size() -> Self { + SequenceType(SequenceSubtype::BufferType(BufferLength::from_const_u32::< + VALUE, + >())) + } pub fn min_string_ascii() -> Result { Ok(SequenceType(SequenceSubtype::StringType( diff --git a/clarity/src/vm/functions/principals.rs b/clarity/src/vm/functions/principals.rs index 20c565e607..c8d3776ca3 100644 --- a/clarity/src/vm/functions/principals.rs +++ b/clarity/src/vm/functions/principals.rs @@ -211,16 +211,22 @@ pub fn special_principal_construct( _ => { return { // This is an aborting error because this should have been caught in analysis pass. 
- Err(CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_1), Box::new(version)).into()) + Err(CheckErrors::TypeValueError( + Box::new(TypeSignature::BUFFER_1), + Box::new(version), + ) + .into()) }; } }; let version_byte = if verified_version.len() > 1 { // should have been caught by the type-checker - return Err( - CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_1), Box::new(version)).into(), - ); + return Err(CheckErrors::TypeValueError( + Box::new(TypeSignature::BUFFER_1), + Box::new(version), + ) + .into()); } else if verified_version.is_empty() { // the type checker does not check the actual length of the buffer, but a 0-length buffer // will type-check to (buff 1) diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs index 661d6094d7..cb6a5b2aa2 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -971,10 +971,11 @@ fn test_principal_construct_check_errors() { // `CheckErrors`. let input = r#"(principal-construct? 
u22 0x0102030405060708091011121314151617181920)"#; assert_eq!( - Err( - CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_1), Box::new(Value::UInt(22)),) - .into() - ), + Err(CheckErrors::TypeValueError( + Box::new(TypeSignature::BUFFER_1), + Box::new(Value::UInt(22)), + ) + .into()), execute_with_parameters( input, ClarityVersion::Clarity2, From 95178d6ce2f6f264f7d4274d6bedd50fbd9f36bf Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 18 Sep 2025 07:02:50 -0700 Subject: [PATCH 07/86] fix: prevent miner from reorging itself --- CHANGELOG.md | 1 + stacks-node/src/nakamoto_node/relayer.rs | 102 +++++++++++++++-- stacks-node/src/tests/signer/v0.rs | 139 +++++++++++++++++++++++ stacks-signer/src/chainstate/v1.rs | 1 + 4 files changed, 231 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index da077c5051..24bd95e72c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `block-time` - `to-ascii?` - Added `contract_cost_limit_percentage` to the miner config file — sets the percentage of a block’s execution cost at which, if a large non-boot contract call would cause a BlockTooBigError, the miner will stop adding further non-boot contract calls and only include STX transfers and boot contract calls for the remainder of the block. +- Fixed a bug caused by a miner winning a sortition with a block commit that pointed to a previous tip, which would cause the miner to try and reorg itself. 
[#6481](https://github.com/stacks-network/stacks-core/issues/6481) ### Changed diff --git a/stacks-node/src/nakamoto_node/relayer.rs b/stacks-node/src/nakamoto_node/relayer.rs index 117163df10..2f7d10733d 100644 --- a/stacks-node/src/nakamoto_node/relayer.rs +++ b/stacks-node/src/nakamoto_node/relayer.rs @@ -50,6 +50,7 @@ use stacks::net::db::LocalPeer; use stacks::net::p2p::NetworkHandle; use stacks::net::relay::Relayer; use stacks::net::NetworkResult; +use stacks::util_lib::db::Error as DbError; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksPublicKey, VRFSeed, }; @@ -82,6 +83,11 @@ pub static TEST_MINER_THREAD_STALL: LazyLock> = LazyLock::new(Tes pub static TEST_MINER_THREAD_START_STALL: LazyLock> = LazyLock::new(TestFlag::default); +#[cfg(test)] +/// Test flag to set the tip for the miner to commit to +pub static TEST_MINER_COMMIT_TIP: LazyLock>> = + LazyLock::new(TestFlag::default); + /// Command types for the Nakamoto relayer thread, issued to it by other threads #[allow(clippy::large_enum_variant)] pub enum RelayerDirective { @@ -616,7 +622,10 @@ impl RelayerThread { /// Specifically: /// /// If we won the given sortition `sn`, then we can start mining immediately with a `BlockFound` - /// tenure-change. Otherwise, if we won the tenure which started the ongoing Stacks tenure + /// tenure-change. The exception is if we won the sortition, but the sortition's winning commit + /// does not commit to the ongoing tenure. In this case, we instead extend the current tenure. + /// + /// Otherwise, if we did not win `sn`, if we won the tenure which started the ongoing Stacks tenure /// (i.e. we're the active miner), then we _may_ start mining after a timeout _if_ the winning /// miner (not us) fails to submit a `BlockFound` tenure-change block for `sn`. 
fn choose_directive_sortition_with_winner( @@ -626,6 +635,60 @@ impl RelayerThread { committed_index_hash: StacksBlockId, ) -> Option { let won_sortition = sn.miner_pk_hash.as_ref() == Some(mining_pkh); + + let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) + .expect("FATAL: failed to query sortition DB for stacks tip"); + let canonical_stacks_snapshot = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) + .expect("FATAL: failed to query sortiiton DB for epoch") + .expect("FATAL: no sortition for canonical stacks tip"); + + // If we won the sortition, ensure that the sortition's winning commit actually commits to + // the ongoing tenure. If it does not (i.e. commit is "stale" and points to N-1 when we are + // currently in N), and if we are also the ongoing tenure's miner, then we must not attempt + // a tenure change (which would reorg our own signed blocks). Instead, we should immediately + // extend the tenure. + if won_sortition && !self.config.get_node_config(false).mock_mining { + let canonical_stacks_tip = + StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); + + let commits_to_tip_tenure = match Self::sortition_commits_to_stacks_tip_tenure( + &mut self.chainstate, + &canonical_stacks_tip, + &canonical_stacks_snapshot, + &sn, + ) { + Ok(b) => b, + Err(e) => { + warn!( + "Relayer: Failed to determine if winning sortition commits to current tenure: {e:?}"; + "sortition_ch" => %sn.consensus_hash, + "stacks_tip_ch" => %canonical_stacks_tip_ch + ); + false + } + }; + + if !commits_to_tip_tenure { + let won_ongoing_tenure_sortition = + canonical_stacks_snapshot.miner_pk_hash.as_ref() == Some(mining_pkh); + + if won_ongoing_tenure_sortition { + info!( + "Relayer: Won sortition, but commit does not target ongoing tenure. 
Will extend instead of starting a new tenure."; + "winning_sortition" => %sn.consensus_hash, + "ongoing_tenure" => %canonical_stacks_snapshot.consensus_hash, + "commits_to_tip_tenure?" => commits_to_tip_tenure + ); + // Extend tenure to the new burn view instead of attempting BlockFound + return Some(MinerDirective::ContinueTenure { + new_burn_view: sn.consensus_hash, + }); + } + } + } + if won_sortition || self.config.get_node_config(false).mock_mining { // a sortition happenend, and we won info!("Won sortition; begin tenure."; @@ -643,13 +706,6 @@ impl RelayerThread { "Relayer: did not win sortition {}, so stopping tenure", &sn.sortition ); - let (canonical_stacks_tip_ch, _) = - SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) - .expect("FATAL: failed to query sortition DB for stacks tip"); - let canonical_stacks_snapshot = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) - .expect("FATAL: failed to query sortiiton DB for epoch") - .expect("FATAL: no sortition for canonical stacks tip"); let won_ongoing_tenure_sortition = canonical_stacks_snapshot.miner_pk_hash.as_ref() == Some(mining_pkh); @@ -1637,6 +1693,31 @@ impl RelayerThread { false } + /// Get the canonical tip for the miner to commit to. + /// This is provided as a separate function so that it can be overridden for testing. 
+ #[cfg(not(test))] + fn fault_injection_get_tip_for_commit(&self) -> Option<(ConsensusHash, BlockHeaderHash)> { + None + } + + #[cfg(test)] + fn fault_injection_get_tip_for_commit(&self) -> Option<(ConsensusHash, BlockHeaderHash)> { + TEST_MINER_COMMIT_TIP.get() + } + + fn get_commit_for_tip(&mut self) -> Result<(ConsensusHash, BlockHeaderHash), DbError> { + if let Some((consensus_hash, block_header_hash)) = self.fault_injection_get_tip_for_commit() + { + info!("Relayer: using test tip for commit"; + "consensus_hash" => %consensus_hash, + "block_header_hash" => %block_header_hash, + ); + Ok((consensus_hash, block_header_hash)) + } else { + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) + } + } + /// Generate and submit the next block-commit, and record it locally fn issue_block_commit(&mut self) -> Result<(), NakamotoNodeError> { if self.fault_injection_skip_block_commit() { @@ -1645,10 +1726,7 @@ impl RelayerThread { ); return Ok(()); } - let (tip_block_ch, tip_block_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash( - self.sortdb.conn(), - ) - .unwrap_or_else(|e| { + let (tip_block_ch, tip_block_bh) = self.get_commit_for_tip().unwrap_or_else(|e| { panic!("Failed to load canonical stacks tip: {e:?}"); }); let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?; diff --git a/stacks-node/src/tests/signer/v0.rs b/stacks-node/src/tests/signer/v0.rs index c2e6e7a4cb..a7d8dd5a9b 100644 --- a/stacks-node/src/tests/signer/v0.rs +++ b/stacks-node/src/tests/signer/v0.rs @@ -103,6 +103,7 @@ use stacks_signer::v0::tests::{ TEST_SKIP_SIGNER_CLEANUP, TEST_STALL_BLOCK_VALIDATION_SUBMISSION, }; use stacks_signer::v0::SpawnedSigner; +use stdext::prelude::DurationExt; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -114,6 +115,7 @@ use crate::nakamoto_node::miner::{ fault_injection_stall_miner, fault_injection_unstall_miner, TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_PROPOSAL_STALL, TEST_MINE_SKIP, 
TEST_P2P_BROADCAST_STALL, }; +use crate::nakamoto_node::relayer::TEST_MINER_COMMIT_TIP; use crate::nakamoto_node::stackerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; @@ -192,6 +194,11 @@ impl SignerTest { }) .expect("Timed out waiting for network to restart after 3.0 boundary reached"); + if self.snapshot_path.is_some() { + info!("Booted to epoch 3.0, ready for snapshot."); + return; + } + // Wait until we see the first block of epoch 3.0. // Note, we don't use `nakamoto_blocks_mined` counter, because there // could be other miners mining blocks. @@ -18874,3 +18881,135 @@ fn signers_treat_signatures_as_precommits() { info!("------------------------- Shutdown -------------------------"); signer_test.shutdown(); } + +#[test] +#[ignore] +/// Scenario: same miner extends tenure when the block-commit for the next tenure still confirms N-1 +/// +/// Flow: +/// - Miner A wins tenure N +/// - Miner A submits a block-commit confirming N-1 (commit submitted before N's block gets approved) +/// - Miner A mines at least 2 blocks in tenure N +/// - Miner A wins tenure N+1 with the stale commit (confirming N-1) +/// - Miner A cannot mine a normal tenure-change + coinbase in N+1 (would reorg its own N blocks) +/// - Miner A should issue a TenureExtend on top of tenure N +fn tenure_extend_after_stale_commit_same_miner() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::from_seed("sender".as_bytes()); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 180; + + let signer_test: SignerTest = + SignerTest::new_with_config_modifications_and_snapshot( + num_signers, + vec![(sender_addr.clone(), (send_amt + send_fee) * 10)], + |signer_cfg| { + signer_cfg.block_proposal_timeout = 
Duration::from_minutes(60); + }, + |node_cfg| { + node_cfg.miner.block_commit_delay = Duration::from_secs(0); + }, + None, + None, + Some(function_name!()), + ); + + if signer_test.bootstrap_snapshot() { + signer_test.shutdown_and_snapshot(); + return; + } + + let conf = &signer_test.running_nodes.conf; + let miner_pk = + StacksPublicKey::from_private(&conf.miner.mining_key.clone().expect("Missing mining key")); + let miner_pkh = Hash160::from_node_public_key(&miner_pk); + let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); + + let pre_test_tenures = 4; + for i in 1..=pre_test_tenures { + info!("Mining pre-test tenure {i} of {pre_test_tenures}"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + } + + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + // We are now in "N-1" + let prev_tip = get_chain_info(&signer_test.running_nodes.conf); + + info!("---- Waiting for block-commit to N-1 ----"; + "Current height" => prev_tip.burn_block_height, + ); + + let Counters { + naka_skip_commit_op: skip_commit_op, + naka_submitted_commit_last_burn_height: last_commit_burn_height, + .. 
+ } = signer_test.running_nodes.counters.clone(); + + wait_for(30, || { + let last_height = last_commit_burn_height.get(); + Ok(last_height == prev_tip.burn_block_height) + }) + .expect("Timed out waiting for block-commit to N-1"); + + skip_commit_op.set(true); + + let prev_tip = get_chain_info(&signer_test.running_nodes.conf); + + signer_test.mine_nakamoto_block_without_commit(Duration::from_secs(30), true); + + TEST_MINER_COMMIT_TIP.set(Some((prev_tip.pox_consensus, prev_tip.stacks_tip))); + + // Now in tenure N + + // Mine a second block in tenure N to ensure that + // signers will reject a reorg attempt + let (_, transfer_nonce) = signer_test + .submit_transfer_tx(&sender_sk, send_fee, send_amt) + .unwrap(); + + signer_test + .wait_for_nonce_increase(&sender_addr, transfer_nonce) + .unwrap(); + + skip_commit_op.set(false); + + info!("---- Waiting for block commit to N-1 ----"); + + wait_for(30, || { + let last_height = last_commit_burn_height.get(); + Ok(last_height == prev_tip.burn_block_height) + }) + .expect("Timed out waiting for block commit to N-1"); + + // Start a new tenure (N+1) + + let info_before = get_chain_info(conf); + let stacks_height_before = info_before.stacks_tip_height; + + signer_test.mine_bitcoin_block(); + + verify_sortition_winner(&sortdb, &miner_pkh); + + info!("---- Waiting for a tenure extend block in tenure N+1 ----"; + "stacks_height_before" => stacks_height_before, + ); + + wait_for_block_proposal(30, stacks_height_before + 1, &miner_pk) + .expect("Timed out waiting for block proposal in tenure N+1"); + + // Verify that the next block is a TenureExtend at the expected height + wait_for_tenure_change_tx(30, TenureChangeCause::Extended, stacks_height_before + 1) + .expect("Timed out waiting for a TenureExtend block atop tenure N in tenure N+1"); + + signer_test.shutdown(); +} diff --git a/stacks-signer/src/chainstate/v1.rs b/stacks-signer/src/chainstate/v1.rs index a550efa32d..3cf6773353 100644 --- 
a/stacks-signer/src/chainstate/v1.rs +++ b/stacks-signer/src/chainstate/v1.rs @@ -286,6 +286,7 @@ impl SortitionsView { "Current miner behaved improperly, this signer views the miner as invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "signer_signature_hash" => %block.header.signer_signature_hash(), + "current_sortition_miner_status" => ?sortition.miner_status, ); return Err(RejectReason::InvalidMiner); } From 8c828f658d90946360c7932d117222263c6ed12b Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 18 Sep 2025 17:49:28 -0700 Subject: [PATCH 08/86] fix: compiler error from return type change --- stacks-node/src/nakamoto_node/relayer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-node/src/nakamoto_node/relayer.rs b/stacks-node/src/nakamoto_node/relayer.rs index b4cc78b77e..b1ca555651 100644 --- a/stacks-node/src/nakamoto_node/relayer.rs +++ b/stacks-node/src/nakamoto_node/relayer.rs @@ -682,9 +682,9 @@ impl RelayerThread { "commits_to_tip_tenure?" 
=> commits_to_tip_tenure ); // Extend tenure to the new burn view instead of attempting BlockFound - return Some(MinerDirective::ContinueTenure { + return MinerDirective::ContinueTenure { new_burn_view: sn.consensus_hash, - }); + }; } } } From a5c784be1401ab750c1b1a7a5087db6cba8b3763 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 19 Sep 2025 10:47:16 +0200 Subject: [PATCH 09/86] chore: proposal revision, #6467 --- clarity-types/src/tests/types/signatures.rs | 32 +++----------- clarity-types/src/types/signatures.rs | 48 +++++++++++---------- 2 files changed, 31 insertions(+), 49 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 321c7264d1..9710ebcc7c 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -23,36 +23,13 @@ use crate::types::{ }; #[test] -fn test_buffer_length_from_u32_runtime() { - let buffer = BufferLength::try_from_u32(0).unwrap(); - assert_eq!(0, buffer.get_value()); - - let buffer = BufferLength::try_from_u32(MAX_VALUE_SIZE).unwrap(); - assert_eq!(MAX_VALUE_SIZE, buffer.get_value()); - - let err = BufferLength::try_from_u32(MAX_VALUE_SIZE + 1).unwrap_err(); - assert_eq!(CheckErrors::ValueTooLarge, err); -} - -#[test] -fn test_buffer_length_from_u32_compile_time() { - const B_0: BufferLength = BufferLength::from_const_u32::<0>(); - assert_eq!(0, B_0.get_value()); - - const B_MAX: BufferLength = BufferLength::from_const_u32::(); - assert_eq!(MAX_VALUE_SIZE, B_MAX.get_value()); - - //moved to runtime check, otherwise it emits a compilation error - std::panic::catch_unwind(|| { - const OUT_RANGE: u32 = MAX_VALUE_SIZE + 1; - let _ = BufferLength::from_const_u32::(); - }) - .expect_err("panic"); +fn test_max_value_size() { + assert_eq!(1024 * 1024, MAX_VALUE_SIZE); } #[test] fn test_buffer_length_try_from_u32_trait() { - let buffer = BufferLength::try_from(0u32).unwrap(); + let buffer = 
BufferLength::try_from(0_u32).unwrap(); assert_eq!(0, buffer.get_value()); let buffer = BufferLength::try_from(MAX_VALUE_SIZE).unwrap(); @@ -63,7 +40,7 @@ fn test_buffer_length_try_from_u32_trait() { } #[test] -fn test_buffer_length_to_u32_with_from_trait() { +fn test_buffer_length_to_u32_using_from_trait() { let buffer = BufferLength::new_unsafe(0); assert_eq!(0, u32::from(&buffer)); assert_eq!(0, u32::from(buffer)); @@ -79,6 +56,7 @@ fn test_type_buffer_1() { let expected = TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(1))); let actual = TypeSignature::BUFFER_1; + assert_eq!(expected, actual); assert_eq!(5, actual.size().unwrap(), "size should be 5"); assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index ca4fe3d2ac..e2afcf1e97 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -105,25 +105,20 @@ mod tuple_type_map_serde { pub struct BufferLength(u32); impl BufferLength { - /// Returns the internal `u32` value of this [`BufferLength`]. - pub fn get_value(&self) -> u32 { - self.0 - } - - /// Attempts to create a [`BufferLength`] from a `u32` at runtime. - pub fn try_from_u32(value: u32) -> Result { + /// Attempts to create a [`BufferLength`] from a [`u32`] as an [`Option`]. + /// + /// This function is primarily intended for internal use when defining + /// `const` values, since it returns an [`Option`] that can be unwrapped + /// with [`Option::expect`] in a `const fn`. + /// + /// It can also be reused in a runtime context + const fn try_from_u32_as_opt(value: u32) -> Option { if value > MAX_VALUE_SIZE { - Err(CheckErrors::ValueTooLarge) + None } else { - Ok(BufferLength(value)) + Some(BufferLength(value)) } } - - /// Creates a [`BufferLength`] from a `u32` constant at compile time. 
- pub const fn from_const_u32() -> Self { - assert!(VALUE <= MAX_VALUE_SIZE, "Value Too Large"); - BufferLength(VALUE) - } } /// Test-only utilities for [`BufferLength`]. @@ -134,6 +129,12 @@ impl BufferLength { pub fn new_unsafe(value: u32) -> Self { Self(value) } + + /// Returns the underlying [`u32`] value of this [`BufferLength`]. + /// This to have an easy read-access to its internal state. + pub fn get_value(&self) -> u32 { + self.0 + } } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -312,20 +313,20 @@ impl From for TypeSignature { impl From<&BufferLength> for u32 { fn from(v: &BufferLength) -> u32 { - v.get_value() + v.0 } } impl From for u32 { fn from(v: BufferLength) -> u32 { - v.get_value() + v.0 } } impl TryFrom for BufferLength { type Error = CheckErrors; fn try_from(data: u32) -> Result { - BufferLength::try_from_u32(data) + Self::try_from_u32_as_opt(data).ok_or(CheckErrors::ValueTooLarge) } } @@ -907,11 +908,14 @@ impl TypeSignature { /// Buffer type with size 1. pub const BUFFER_1: TypeSignature = Self::type_buffer_of_size::<1>(); - /// Creates a buffer type with a given size at compile time. + /// Creates a buffer type with a given size known at compile time. + /// + /// This function is intended for defining constant buffer types + /// type aliases (e.g., [`TypeSignature::BUFFER_1`]) without repeating logic. 
const fn type_buffer_of_size() -> Self { - SequenceType(SequenceSubtype::BufferType(BufferLength::from_const_u32::< - VALUE, - >())) + SequenceType(SequenceSubtype::BufferType( + BufferLength::try_from_u32_as_opt(VALUE).expect("Invalid buffer size!"), + )) } pub fn min_string_ascii() -> Result { From 181adcc70b56e89f23840b7a4592fab7fb0f6d6b Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 19 Sep 2025 11:57:09 +0200 Subject: [PATCH 10/86] chore: remove unused BUFF_16, #6467 --- clarity-types/src/types/signatures.rs | 6 ------ clarity/src/vm/types/signatures.rs | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index e2afcf1e97..10034201eb 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -262,12 +262,6 @@ lazy_static! { )) }; - pub static ref BUFF_16: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(16u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; /// Maximum-sized buffer allowed for `to-ascii?` call. 
pub static ref TO_ASCII_MAX_BUFF: TypeSignature = { #[allow(clippy::expect_used)] diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index e96ccb01ef..5b1307b930 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -19,7 +19,7 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_16, BUFF_20, BUFF_21, + StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_20, BUFF_21, BUFF_32, BUFF_33, BUFF_64, BUFF_65, MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, }; From 0a9ad44cf7cfb00aaf77f3272ab13890732c8f65 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 19 Sep 2025 12:22:30 +0200 Subject: [PATCH 11/86] chore: porting BUFFER_21 for tests, #6467 --- clarity-types/src/types/mod.rs | 5 ++--- clarity-types/src/types/signatures.rs | 9 +++------ clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs | 7 +++++-- clarity/src/vm/types/mod.rs | 4 ++-- clarity/src/vm/types/signatures.rs | 6 +++--- 5 files changed, 15 insertions(+), 16 deletions(-) diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index 9e8c95c78c..d126043797 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -33,9 +33,8 @@ use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::util::hash; pub use self::signatures::{ - AssetIdentifier, BUFF_20, BUFF_21, BUFF_32, BUFF_33, BUFF_64, BUFF_65, BufferLength, - ListTypeData, SequenceSubtype, StringSubtype, StringUTF8Length, TupleTypeSignature, - TypeSignature, + AssetIdentifier, BUFF_20, BUFF_32, BUFF_33, BUFF_64, BUFF_65, BufferLength, ListTypeData, + SequenceSubtype, StringSubtype, StringUTF8Length, TupleTypeSignature, TypeSignature, }; use crate::errors::{CheckErrors, InterpreterError, 
InterpreterResult as Result, RuntimeErrorType}; use crate::representations::{ClarityName, ContractName, SymbolicExpression}; diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 10034201eb..2f33eb197b 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -255,12 +255,6 @@ lazy_static! { BufferLength::try_from(20u32).expect("BUG: Legal Clarity buffer length marked invalid"), )) }; - pub static ref BUFF_21: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(21u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; /// Maximum-sized buffer allowed for `to-ascii?` call. pub static ref TO_ASCII_MAX_BUFF: TypeSignature = { @@ -912,6 +906,9 @@ impl TypeSignature { )) } + #[cfg(any(test, feature = "testing"))] + pub const BUFFER_21: TypeSignature = Self::type_buffer_of_size::<21>(); + pub fn min_string_ascii() -> Result { Ok(SequenceType(SequenceSubtype::StringType( StringSubtype::ASCII(1_u32.try_into().map_err(|_| { diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index c1afc3e830..79c1df9592 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -33,7 +33,7 @@ use crate::vm::types::StringSubtype::*; use crate::vm::types::TypeSignature::{BoolType, IntType, PrincipalType, SequenceType, UIntType}; use crate::vm::types::{ BufferLength, FixedFunction, FunctionType, QualifiedContractIdentifier, TraitIdentifier, - TypeSignature, TypeSignatureExt as _, BUFF_20, BUFF_21, BUFF_32, BUFF_64, + TypeSignature, TypeSignatureExt as _, BUFF_20, BUFF_32, BUFF_64, }; use crate::vm::{execute_v2, ClarityName, ClarityVersion}; @@ -3413,7 +3413,10 @@ fn test_principal_construct() { // The second buffer is too long, should be `(buff 20)`. ( r#"(principal-construct? 
0x22 0xfa6bf38ed557fe417333710d6033e9419391a32009)"#, - CheckErrors::TypeError(Box::new(BUFF_20.clone()), Box::new(BUFF_21.clone())), + CheckErrors::TypeError( + Box::new(BUFF_20.clone()), + Box::new(TypeSignature::BUFFER_21), + ), ), // `int` argument instead of `(buff 1)` for version. ( diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index d5db81dfdf..1868aca818 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -32,8 +32,8 @@ use crate::vm::errors::CheckErrors; pub use crate::vm::types::signatures::{ parse_name_type_pairs, AssetIdentifier, BufferLength, FixedFunction, FunctionArg, FunctionSignature, FunctionType, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_20, BUFF_21, - BUFF_32, BUFF_33, BUFF_64, BUFF_65, + StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_20, BUFF_32, + BUFF_33, BUFF_64, BUFF_65, }; use crate::vm::ClarityVersion; diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 5b1307b930..bc3937b6b8 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -19,9 +19,9 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_20, BUFF_21, - BUFF_32, BUFF_33, BUFF_64, BUFF_65, MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, - TO_ASCII_RESPONSE_STRING, UTF8_40, + StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_20, BUFF_32, BUFF_33, + BUFF_64, BUFF_65, MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, + UTF8_40, }; pub use clarity_types::types::Value; use stacks_common::types::StacksEpochId; From b0a541f84550cc408591d05605ad3d97263504d5 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 19 Sep 2025 12:36:02 +0200 Subject: 
[PATCH 12/86] chore: ported BUFF_20, #6467 --- clarity-types/src/tests/types/signatures.rs | 12 ++++++++++++ clarity-types/src/types/mod.rs | 2 +- clarity-types/src/types/signatures.rs | 8 ++------ .../vm/analysis/type_checker/v2_05/natives/mod.rs | 4 ++-- .../src/vm/analysis/type_checker/v2_1/natives/mod.rs | 9 ++++----- .../src/vm/analysis/type_checker/v2_1/tests/mod.rs | 9 ++++++--- clarity/src/vm/functions/principals.rs | 11 ++++++----- clarity/src/vm/tests/principals.rs | 4 ++-- clarity/src/vm/types/mod.rs | 4 ++-- clarity/src/vm/types/signatures.rs | 5 ++--- 10 files changed, 39 insertions(+), 29 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 9710ebcc7c..8443b40f5f 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -63,6 +63,18 @@ fn test_type_buffer_1() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_buffer_20() { + let expected = + TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(20))); + let actual = TypeSignature::BUFFER_20; + + assert_eq!(expected, actual); + assert_eq!(24, actual.size().unwrap(), "size should be 24"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index d126043797..9e294992b7 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -33,7 +33,7 @@ use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::util::hash; pub use self::signatures::{ - AssetIdentifier, BUFF_20, BUFF_32, BUFF_33, BUFF_64, BUFF_65, BufferLength, ListTypeData, + AssetIdentifier, BUFF_32, BUFF_33, BUFF_64, BUFF_65, BufferLength, ListTypeData, SequenceSubtype, StringSubtype, StringUTF8Length, TupleTypeSignature, 
TypeSignature, }; use crate::errors::{CheckErrors, InterpreterError, InterpreterResult as Result, RuntimeErrorType}; diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 2f33eb197b..93fa7b6b55 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -249,12 +249,6 @@ lazy_static! { BufferLength::try_from(33u32).expect("BUG: Legal Clarity buffer length marked invalid"), )) }; - pub static ref BUFF_20: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(20u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; /// Maximum-sized buffer allowed for `to-ascii?` call. pub static ref TO_ASCII_MAX_BUFF: TypeSignature = { @@ -895,6 +889,8 @@ impl TypeSignature { /// Buffer type with size 1. pub const BUFFER_1: TypeSignature = Self::type_buffer_of_size::<1>(); + /// Buffer type with size 20. + pub const BUFFER_20: TypeSignature = Self::type_buffer_of_size::<20>(); /// Creates a buffer type with a given size known at compile time. 
/// diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs index 08297f7440..059bc130af 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs @@ -24,7 +24,7 @@ use crate::vm::diagnostic::DiagnosableError; use crate::vm::functions::{handle_binding_list, NativeFunctions}; use crate::vm::types::{ BlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, - TupleTypeSignature, TypeSignature, Value, BUFF_20, BUFF_32, BUFF_33, BUFF_64, BUFF_65, + TupleTypeSignature, TypeSignature, Value, BUFF_32, BUFF_33, BUFF_64, BUFF_65, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -614,7 +614,7 @@ impl TypedNativeFunction { TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_20.clone(), + TypeSignature::BUFFER_20, ))), Sha256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index 60e3036870..88cfebb45e 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -32,8 +32,7 @@ use crate::vm::types::signatures::{ use crate::vm::types::{ BlockInfoProperty, BufferLength, BurnBlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, StacksBlockInfoProperty, TenureInfoProperty, - TupleTypeSignature, TypeSignature, Value, BUFF_20, BUFF_32, BUFF_33, BUFF_64, BUFF_65, - MAX_VALUE_SIZE, + TupleTypeSignature, TypeSignature, Value, BUFF_32, BUFF_33, BUFF_64, BUFF_65, MAX_VALUE_SIZE, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -677,7 +676,7 @@ fn check_principal_construct( check_arguments_at_least(2, args)?; check_arguments_at_most(3, args)?; 
checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_1)?; - checker.type_check_expects(&args[1], context, &BUFF_20)?; + checker.type_check_expects(&args[1], context, &TypeSignature::BUFFER_20)?; if args.len() > 2 { checker.type_check_expects( &args[2], @@ -972,7 +971,7 @@ impl TypedNativeFunction { TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_20.clone(), + TypeSignature::BUFFER_20, ))), Sha256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ @@ -1035,7 +1034,7 @@ impl TypedNativeFunction { fn parse_principal_basic_type() -> Result { TupleTypeSignature::try_from(vec![ ("version".into(), TypeSignature::BUFFER_1), - ("hash-bytes".into(), BUFF_20.clone()), + ("hash-bytes".into(), TypeSignature::BUFFER_20), ( "name".into(), TypeSignature::new_option( diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 79c1df9592..a086b2a6ab 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -33,7 +33,7 @@ use crate::vm::types::StringSubtype::*; use crate::vm::types::TypeSignature::{BoolType, IntType, PrincipalType, SequenceType, UIntType}; use crate::vm::types::{ BufferLength, FixedFunction, FunctionType, QualifiedContractIdentifier, TraitIdentifier, - TypeSignature, TypeSignatureExt as _, BUFF_20, BUFF_32, BUFF_64, + TypeSignature, TypeSignatureExt as _, BUFF_32, BUFF_64, }; use crate::vm::{execute_v2, ClarityName, ClarityVersion}; @@ -3408,13 +3408,16 @@ fn test_principal_construct() { // The first buffer is too long, should be `(buff 1)`. ( r#"(principal-construct? 
0xfa6bf38ed557fe417333710d6033e9419391a320 0xfa6bf38ed557fe417333710d6033e9419391a320)"#, - CheckErrors::TypeError(Box::new(TypeSignature::BUFFER_1), Box::new(BUFF_20.clone())), + CheckErrors::TypeError( + Box::new(TypeSignature::BUFFER_1), + Box::new(TypeSignature::BUFFER_20), + ), ), // The second buffer is too long, should be `(buff 20)`. ( r#"(principal-construct? 0x22 0xfa6bf38ed557fe417333710d6033e9419391a32009)"#, CheckErrors::TypeError( - Box::new(BUFF_20.clone()), + Box::new(TypeSignature::BUFFER_20), Box::new(TypeSignature::BUFFER_21), ), ), diff --git a/clarity/src/vm/functions/principals.rs b/clarity/src/vm/functions/principals.rs index c8d3776ca3..c510ffbb71 100644 --- a/clarity/src/vm/functions/principals.rs +++ b/clarity/src/vm/functions/principals.rs @@ -13,7 +13,6 @@ use crate::vm::errors::{ use crate::vm::representations::{ SymbolicExpression, CONTRACT_MAX_NAME_LENGTH, CONTRACT_MIN_NAME_LENGTH, }; -use crate::vm::types::signatures::BUFF_20; use crate::vm::types::{ ASCIIData, BuffData, CharType, OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, SequenceData, StandardPrincipalData, TupleData, TypeSignature, Value, @@ -251,7 +250,7 @@ pub fn special_principal_construct( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => data, _ => { return Err(CheckErrors::TypeValueError( - Box::new(BUFF_20.clone()), + Box::new(TypeSignature::BUFFER_20), Box::new(hash_bytes), ) .into()) @@ -261,9 +260,11 @@ pub fn special_principal_construct( // This must have been a (buff 20). // This is an aborting error because this should have been caught in analysis pass. 
if verified_hash_bytes.len() > 20 { - return Err( - CheckErrors::TypeValueError(Box::new(BUFF_20.clone()), Box::new(hash_bytes)).into(), - ); + return Err(CheckErrors::TypeValueError( + Box::new(TypeSignature::BUFFER_20), + Box::new(hash_bytes), + ) + .into()); } // If the hash-bytes buffer has less than 20 bytes, this is a runtime error, because it diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs index cb6a5b2aa2..cb45e32f92 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -11,7 +11,7 @@ use crate::vm::{ errors::CheckErrors, functions::principals::PrincipalConstructErrorCode, types::TypeSignature::PrincipalType, - types::{ResponseData, TypeSignature, BUFF_20}, + types::{ResponseData, TypeSignature}, }; use crate::vm::{execute_with_parameters, ClarityVersion}; @@ -998,7 +998,7 @@ fn test_principal_construct_check_errors() { ) .unwrap_err(), CheckErrors::TypeValueError( - Box::new(BUFF_20.clone()), + Box::new(TypeSignature::BUFFER_20), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("010203040506070809101112131415161718192021").unwrap() }))), diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 1868aca818..3bc2ca7251 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -32,8 +32,8 @@ use crate::vm::errors::CheckErrors; pub use crate::vm::types::signatures::{ parse_name_type_pairs, AssetIdentifier, BufferLength, FixedFunction, FunctionArg, FunctionSignature, FunctionType, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_20, BUFF_32, - BUFF_33, BUFF_64, BUFF_65, + StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_32, BUFF_33, + BUFF_64, BUFF_65, }; use crate::vm::ClarityVersion; diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index bc3937b6b8..7b62a8efe1 100644 --- 
a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -19,9 +19,8 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_20, BUFF_32, BUFF_33, - BUFF_64, BUFF_65, MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, - UTF8_40, + StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_32, BUFF_33, BUFF_64, + BUFF_65, MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, }; pub use clarity_types::types::Value; use stacks_common::types::StacksEpochId; From d350e761327142a653dc861f55e124a8b88066e7 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 19 Sep 2025 12:46:04 +0200 Subject: [PATCH 13/86] chore: ported BUFF_33, #6467 --- clarity-types/src/tests/types/signatures.rs | 12 ++++++++++++ clarity-types/src/types/mod.rs | 2 +- clarity-types/src/types/signatures.rs | 10 +++------- .../vm/analysis/type_checker/v2_05/natives/mod.rs | 8 ++++---- .../src/vm/analysis/type_checker/v2_1/natives/mod.rs | 8 ++++---- clarity/src/vm/functions/crypto.rs | 10 +++++----- clarity/src/vm/tests/simple_apply_eval.rs | 2 +- clarity/src/vm/types/mod.rs | 2 +- clarity/src/vm/types/signatures.rs | 2 +- 9 files changed, 32 insertions(+), 24 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 8443b40f5f..cdb7e8b047 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -75,6 +75,18 @@ fn test_type_buffer_20() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_buffer_33() { + let expected = + TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(33))); + let actual = TypeSignature::BUFFER_33; + + assert_eq!(expected, actual); + assert_eq!(37, 
actual.size().unwrap(), "size should be 37"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index 9e294992b7..4f2cc07efc 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -33,7 +33,7 @@ use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::util::hash; pub use self::signatures::{ - AssetIdentifier, BUFF_32, BUFF_33, BUFF_64, BUFF_65, BufferLength, ListTypeData, + AssetIdentifier, BUFF_32, BUFF_64, BUFF_65, BufferLength, ListTypeData, SequenceSubtype, StringSubtype, StringUTF8Length, TupleTypeSignature, TypeSignature, }; use crate::errors::{CheckErrors, InterpreterError, InterpreterResult as Result, RuntimeErrorType}; diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 93fa7b6b55..050fe30bb3 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -243,13 +243,7 @@ lazy_static! { BufferLength::try_from(32u32).expect("BUG: Legal Clarity buffer length marked invalid"), )) }; - pub static ref BUFF_33: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(33u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; - + /// Maximum-sized buffer allowed for `to-ascii?` call. pub static ref TO_ASCII_MAX_BUFF: TypeSignature = { #[allow(clippy::expect_used)] @@ -891,6 +885,8 @@ impl TypeSignature { pub const BUFFER_1: TypeSignature = Self::type_buffer_of_size::<1>(); /// Buffer type with size 20. pub const BUFFER_20: TypeSignature = Self::type_buffer_of_size::<20>(); + /// Buffer type with size 33. + pub const BUFFER_33: TypeSignature = Self::type_buffer_of_size::<33>(); /// Creates a buffer type with a given size known at compile time. 
/// diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs index 059bc130af..8635504394 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs @@ -24,7 +24,7 @@ use crate::vm::diagnostic::DiagnosableError; use crate::vm::functions::{handle_binding_list, NativeFunctions}; use crate::vm::types::{ BlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, - TupleTypeSignature, TypeSignature, Value, BUFF_32, BUFF_33, BUFF_64, BUFF_65, + TupleTypeSignature, TypeSignature, Value, BUFF_32, BUFF_64, BUFF_65, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -482,7 +482,7 @@ fn check_principal_of( context: &TypingContext, ) -> Result { check_argument_count(1, args)?; - checker.type_check_expects(&args[0], context, &BUFF_33)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_33)?; Ok( TypeSignature::new_response(TypeSignature::PrincipalType, TypeSignature::UIntType) .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, @@ -498,7 +498,7 @@ fn check_secp256k1_recover( checker.type_check_expects(&args[0], context, &BUFF_32)?; checker.type_check_expects(&args[1], context, &BUFF_65)?; Ok( - TypeSignature::new_response(BUFF_33.clone(), TypeSignature::UIntType) + TypeSignature::new_response(TypeSignature::BUFFER_33, TypeSignature::UIntType) .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, ) } @@ -511,7 +511,7 @@ fn check_secp256k1_verify( check_argument_count(3, args)?; checker.type_check_expects(&args[0], context, &BUFF_32)?; checker.type_check_expects(&args[1], context, &BUFF_65)?; - checker.type_check_expects(&args[2], context, &BUFF_33)?; + checker.type_check_expects(&args[2], context, &TypeSignature::BUFFER_33)?; Ok(TypeSignature::BoolType) } diff --git 
a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index 88cfebb45e..5f0ae0b56b 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -32,7 +32,7 @@ use crate::vm::types::signatures::{ use crate::vm::types::{ BlockInfoProperty, BufferLength, BurnBlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, StacksBlockInfoProperty, TenureInfoProperty, - TupleTypeSignature, TypeSignature, Value, BUFF_32, BUFF_33, BUFF_64, BUFF_65, MAX_VALUE_SIZE, + TupleTypeSignature, TypeSignature, Value, BUFF_32, BUFF_64, BUFF_65, MAX_VALUE_SIZE, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -655,7 +655,7 @@ fn check_principal_of( context: &TypingContext, ) -> Result { check_argument_count(1, args)?; - checker.type_check_expects(&args[0], context, &BUFF_33)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_33)?; Ok( TypeSignature::new_response(TypeSignature::PrincipalType, TypeSignature::UIntType) .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, @@ -709,7 +709,7 @@ fn check_secp256k1_recover( checker.type_check_expects(&args[0], context, &BUFF_32)?; checker.type_check_expects(&args[1], context, &BUFF_65)?; Ok( - TypeSignature::new_response(BUFF_33.clone(), TypeSignature::UIntType) + TypeSignature::new_response(TypeSignature::BUFFER_33, TypeSignature::UIntType) .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, ) } @@ -722,7 +722,7 @@ fn check_secp256k1_verify( check_argument_count(3, args)?; checker.type_check_expects(&args[0], context, &BUFF_32)?; checker.type_check_expects(&args[1], context, &BUFF_65)?; - checker.type_check_expects(&args[2], context, &BUFF_33)?; + checker.type_check_expects(&args[2], context, &TypeSignature::BUFFER_33)?; Ok(TypeSignature::BoolType) } diff --git 
a/clarity/src/vm/functions/crypto.rs b/clarity/src/vm/functions/crypto.rs index 3b5223bd72..c80507e029 100644 --- a/clarity/src/vm/functions/crypto.rs +++ b/clarity/src/vm/functions/crypto.rs @@ -27,7 +27,7 @@ use crate::vm::errors::{ check_argument_count, CheckErrors, InterpreterError, InterpreterResult as Result, }; use crate::vm::representations::SymbolicExpression; -use crate::vm::types::{BuffData, SequenceData, TypeSignature, Value, BUFF_32, BUFF_33, BUFF_65}; +use crate::vm::types::{BuffData, SequenceData, TypeSignature, Value, BUFF_32, BUFF_65}; use crate::vm::{eval, ClarityVersion, Environment, LocalContext}; macro_rules! native_hash_func { @@ -103,7 +103,7 @@ pub fn special_principal_of( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => { if data.len() != 33 { return Err(CheckErrors::TypeValueError( - Box::new(BUFF_33.clone()), + Box::new(TypeSignature::BUFFER_33), Box::new(param0), ) .into()); @@ -112,7 +112,7 @@ pub fn special_principal_of( } _ => { return Err( - CheckErrors::TypeValueError(Box::new(BUFF_33.clone()), Box::new(param0)).into(), + CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_33), Box::new(param0)).into(), ) } }; @@ -255,7 +255,7 @@ pub fn special_secp256k1_verify( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => { if data.len() != 33 { return Err(CheckErrors::TypeValueError( - Box::new(BUFF_33.clone()), + Box::new(TypeSignature::BUFFER_33), Box::new(param2), ) .into()); @@ -264,7 +264,7 @@ pub fn special_secp256k1_verify( } _ => { return Err( - CheckErrors::TypeValueError(Box::new(BUFF_33.clone()), Box::new(param2)).into(), + CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_33), Box::new(param2)).into(), ) } }; diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index 53558e3548..b01bc6da59 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -588,7 +588,7 @@ fn test_secp256k1_errors() { 
CheckErrors::TypeValueError(Box::new(BUFF_32.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("de5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f").unwrap() })))).into(), CheckErrors::TypeValueError(Box::new(BUFF_65.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a130111").unwrap() })))).into(), - CheckErrors::TypeValueError(Box::new(BUFF_33.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("03adb8de4bfb65db2cfd6120d55c6526ae9c52e675db7e47308636534ba7").unwrap() })))).into(), + CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_33), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("03adb8de4bfb65db2cfd6120d55c6526ae9c52e675db7e47308636534ba7").unwrap() })))).into(), CheckErrors::IncorrectArgumentCount(3, 2).into(), CheckErrors::IncorrectArgumentCount(1, 2).into(), diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 3bc2ca7251..3036a4e25f 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -32,7 +32,7 @@ use crate::vm::errors::CheckErrors; pub use crate::vm::types::signatures::{ parse_name_type_pairs, AssetIdentifier, BufferLength, FixedFunction, FunctionArg, FunctionSignature, FunctionType, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_32, BUFF_33, + StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_32, BUFF_64, BUFF_65, }; use crate::vm::ClarityVersion; diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 7b62a8efe1..39a1da7c96 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -19,7 +19,7 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, 
BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_32, BUFF_33, BUFF_64, + StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_32, BUFF_64, BUFF_65, MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, }; pub use clarity_types::types::Value; From 6e7846edd09f27d8cf6458edba67f50d621a68d3 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 19 Sep 2025 12:52:40 +0200 Subject: [PATCH 14/86] chore: ported BUFF_32, #6467 --- clarity-types/src/tests/types/signatures.rs | 12 ++++++++ clarity-types/src/types/mod.rs | 4 +-- clarity-types/src/types/signatures.rs | 10 ++----- .../type_checker/v2_05/natives/mod.rs | 14 ++++----- .../analysis/type_checker/v2_05/tests/mod.rs | 7 +++-- .../analysis/type_checker/v2_1/natives/mod.rs | 21 +++++++------ .../analysis/type_checker/v2_1/tests/mod.rs | 7 +++-- clarity/src/vm/functions/crypto.rs | 30 ++++++++++++------- clarity/src/vm/functions/database.rs | 8 +++-- clarity/src/vm/tests/simple_apply_eval.rs | 4 +-- clarity/src/vm/types/mod.rs | 15 +++++----- clarity/src/vm/types/signatures.rs | 4 +-- 12 files changed, 83 insertions(+), 53 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index cdb7e8b047..60246a06d3 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -75,6 +75,18 @@ fn test_type_buffer_20() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_buffer_32() { + let expected = + TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(32))); + let actual = TypeSignature::BUFFER_32; + + assert_eq!(expected, actual); + assert_eq!(36, actual.size().unwrap(), "size should be 36"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] 
fn test_type_buffer_33() { let expected = diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index 4f2cc07efc..2ad2b11cd1 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -33,8 +33,8 @@ use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::util::hash; pub use self::signatures::{ - AssetIdentifier, BUFF_32, BUFF_64, BUFF_65, BufferLength, ListTypeData, - SequenceSubtype, StringSubtype, StringUTF8Length, TupleTypeSignature, TypeSignature, + AssetIdentifier, BUFF_64, BUFF_65, BufferLength, ListTypeData, SequenceSubtype, StringSubtype, + StringUTF8Length, TupleTypeSignature, TypeSignature, }; use crate::errors::{CheckErrors, InterpreterError, InterpreterResult as Result, RuntimeErrorType}; use crate::representations::{ClarityName, ContractName, SymbolicExpression}; diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 050fe30bb3..6ed7d3da58 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -237,13 +237,7 @@ lazy_static! { BufferLength::try_from(65u32).expect("BUG: Legal Clarity buffer length marked invalid"), )) }; - pub static ref BUFF_32: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(32u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; - + /// Maximum-sized buffer allowed for `to-ascii?` call. pub static ref TO_ASCII_MAX_BUFF: TypeSignature = { #[allow(clippy::expect_used)] @@ -885,6 +879,8 @@ impl TypeSignature { pub const BUFFER_1: TypeSignature = Self::type_buffer_of_size::<1>(); /// Buffer type with size 20. pub const BUFFER_20: TypeSignature = Self::type_buffer_of_size::<20>(); + /// Buffer type with size 32. + pub const BUFFER_32: TypeSignature = Self::type_buffer_of_size::<32>(); /// Buffer type with size 33. 
pub const BUFFER_33: TypeSignature = Self::type_buffer_of_size::<33>(); diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs index 8635504394..f82c855d57 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs @@ -24,7 +24,7 @@ use crate::vm::diagnostic::DiagnosableError; use crate::vm::functions::{handle_binding_list, NativeFunctions}; use crate::vm::types::{ BlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, - TupleTypeSignature, TypeSignature, Value, BUFF_32, BUFF_64, BUFF_65, + TupleTypeSignature, TypeSignature, Value, BUFF_64, BUFF_65, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -90,7 +90,7 @@ fn check_special_at_block( context: &TypingContext, ) -> Result { check_argument_count(2, args)?; - checker.type_check_expects(&args[0], context, &BUFF_32)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; checker.type_check(&args[1], context) } @@ -495,7 +495,7 @@ fn check_secp256k1_recover( context: &TypingContext, ) -> Result { check_argument_count(2, args)?; - checker.type_check_expects(&args[0], context, &BUFF_32)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; checker.type_check_expects(&args[1], context, &BUFF_65)?; Ok( TypeSignature::new_response(TypeSignature::BUFFER_33, TypeSignature::UIntType) @@ -509,7 +509,7 @@ fn check_secp256k1_verify( context: &TypingContext, ) -> Result { check_argument_count(3, args)?; - checker.type_check_expects(&args[0], context, &BUFF_32)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; checker.type_check_expects(&args[1], context, &BUFF_65)?; checker.type_check_expects(&args[2], context, &TypeSignature::BUFFER_33)?; Ok(TypeSignature::BoolType) @@ -622,7 +622,7 @@ impl TypedNativeFunction { 
TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_32.clone(), + TypeSignature::BUFFER_32, ))), Sha512Trunc256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ @@ -630,7 +630,7 @@ impl TypedNativeFunction { TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_32.clone(), + TypeSignature::BUFFER_32, ))), Sha512 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ @@ -646,7 +646,7 @@ impl TypedNativeFunction { TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_32.clone(), + TypeSignature::BUFFER_32, ))), Secp256k1Recover => Special(SpecialNativeFunction(&check_secp256k1_recover)), Secp256k1Verify => Special(SpecialNativeFunction(&check_secp256k1_verify)), diff --git a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs index a7357b0d8a..3f97b141a4 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs @@ -25,7 +25,7 @@ use crate::vm::types::StringSubtype::*; use crate::vm::types::TypeSignature::{BoolType, IntType, PrincipalType, UIntType}; use crate::vm::types::{ FixedFunction, FunctionType, QualifiedContractIdentifier, TypeSignature, TypeSignatureExt as _, - BUFF_32, BUFF_64, + BUFF_64, }; use crate::vm::ClarityVersion; mod assets; @@ -452,7 +452,10 @@ fn test_at_block() { let bad = [ ( "(at-block (sha512 u0) u1)", - CheckErrors::TypeError(Box::new(BUFF_32.clone()), Box::new(BUFF_64.clone())), + CheckErrors::TypeError( + Box::new(TypeSignature::BUFFER_32), + Box::new(BUFF_64.clone()), + ), ), ( "(at-block (sha256 u0) u1 u2)", diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index 5f0ae0b56b..fca0952aca 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -32,7 +32,7 @@ use crate::vm::types::signatures::{ use crate::vm::types::{ 
BlockInfoProperty, BufferLength, BurnBlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, StacksBlockInfoProperty, TenureInfoProperty, - TupleTypeSignature, TypeSignature, Value, BUFF_32, BUFF_64, BUFF_65, MAX_VALUE_SIZE, + TupleTypeSignature, TypeSignature, Value, BUFF_64, BUFF_65, MAX_VALUE_SIZE, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -129,7 +129,7 @@ fn check_special_at_block( context: &TypingContext, ) -> Result { check_argument_count(2, args)?; - checker.type_check_expects(&args[0], context, &BUFF_32)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; checker.type_check(&args[1], context) } @@ -706,7 +706,7 @@ fn check_secp256k1_recover( context: &TypingContext, ) -> Result { check_argument_count(2, args)?; - checker.type_check_expects(&args[0], context, &BUFF_32)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; checker.type_check_expects(&args[1], context, &BUFF_65)?; Ok( TypeSignature::new_response(TypeSignature::BUFFER_33, TypeSignature::UIntType) @@ -720,7 +720,7 @@ fn check_secp256k1_verify( context: &TypingContext, ) -> Result { check_argument_count(3, args)?; - checker.type_check_expects(&args[0], context, &BUFF_32)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; checker.type_check_expects(&args[1], context, &BUFF_65)?; checker.type_check_expects(&args[2], context, &TypeSignature::BUFFER_33)?; Ok(TypeSignature::BoolType) @@ -979,7 +979,7 @@ impl TypedNativeFunction { TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_32.clone(), + TypeSignature::BUFFER_32, ))), Sha512Trunc256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ @@ -987,7 +987,7 @@ impl TypedNativeFunction { TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_32.clone(), + TypeSignature::BUFFER_32, ))), Sha512 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ @@ 
-1003,7 +1003,7 @@ impl TypedNativeFunction { TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_32.clone(), + TypeSignature::BUFFER_32, ))), Secp256k1Recover => Special(SpecialNativeFunction(&check_secp256k1_recover)), Secp256k1Verify => Special(SpecialNativeFunction(&check_secp256k1_verify)), @@ -1188,8 +1188,11 @@ impl TypedNativeFunction { ) })?, )], - returns: TypeSignature::new_response(BUFF_32.clone(), TypeSignature::UIntType) - .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, + returns: TypeSignature::new_response( + TypeSignature::BUFFER_32, + TypeSignature::UIntType, + ) + .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, }))), ToAscii => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index a086b2a6ab..1fff6c192b 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -33,7 +33,7 @@ use crate::vm::types::StringSubtype::*; use crate::vm::types::TypeSignature::{BoolType, IntType, PrincipalType, SequenceType, UIntType}; use crate::vm::types::{ BufferLength, FixedFunction, FunctionType, QualifiedContractIdentifier, TraitIdentifier, - TypeSignature, TypeSignatureExt as _, BUFF_32, BUFF_64, + TypeSignature, TypeSignatureExt as _, BUFF_64, }; use crate::vm::{execute_v2, ClarityName, ClarityVersion}; @@ -746,7 +746,10 @@ fn test_at_block() { let bad = [ ( "(at-block (sha512 u0) u1)", - CheckErrors::TypeError(Box::new(BUFF_32.clone()), Box::new(BUFF_64.clone())), + CheckErrors::TypeError( + Box::new(TypeSignature::BUFFER_32), + Box::new(BUFF_64.clone()), + ), ), ( "(at-block (sha256 u0) u1 u2)", diff --git a/clarity/src/vm/functions/crypto.rs b/clarity/src/vm/functions/crypto.rs index c80507e029..57a1c90d5c 100644 --- a/clarity/src/vm/functions/crypto.rs +++ b/clarity/src/vm/functions/crypto.rs @@ -27,7 +27,7 @@ use 
crate::vm::errors::{ check_argument_count, CheckErrors, InterpreterError, InterpreterResult as Result, }; use crate::vm::representations::SymbolicExpression; -use crate::vm::types::{BuffData, SequenceData, TypeSignature, Value, BUFF_32, BUFF_65}; +use crate::vm::types::{BuffData, SequenceData, TypeSignature, Value, BUFF_65}; use crate::vm::{eval, ClarityVersion, Environment, LocalContext}; macro_rules! native_hash_func { @@ -111,9 +111,11 @@ pub fn special_principal_of( data } _ => { - return Err( - CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_33), Box::new(param0)).into(), + return Err(CheckErrors::TypeValueError( + Box::new(TypeSignature::BUFFER_33), + Box::new(param0), ) + .into()) } }; @@ -149,7 +151,7 @@ pub fn special_secp256k1_recover( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => { if data.len() != 32 { return Err(CheckErrors::TypeValueError( - Box::new(BUFF_32.clone()), + Box::new(TypeSignature::BUFFER_32), Box::new(param0), ) .into()); @@ -157,9 +159,11 @@ pub fn special_secp256k1_recover( data } _ => { - return Err( - CheckErrors::TypeValueError(Box::new(BUFF_32.clone()), Box::new(param0)).into(), + return Err(CheckErrors::TypeValueError( + Box::new(TypeSignature::BUFFER_32), + Box::new(param0), ) + .into()) } }; @@ -211,7 +215,7 @@ pub fn special_secp256k1_verify( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => { if data.len() != 32 { return Err(CheckErrors::TypeValueError( - Box::new(BUFF_32.clone()), + Box::new(TypeSignature::BUFFER_32), Box::new(param0), ) .into()); @@ -219,9 +223,11 @@ pub fn special_secp256k1_verify( data } _ => { - return Err( - CheckErrors::TypeValueError(Box::new(BUFF_32.clone()), Box::new(param0)).into(), + return Err(CheckErrors::TypeValueError( + Box::new(TypeSignature::BUFFER_32), + Box::new(param0), ) + .into()) } }; @@ -263,9 +269,11 @@ pub fn special_secp256k1_verify( data } _ => { - return Err( - CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_33), 
Box::new(param2)).into(), + return Err(CheckErrors::TypeValueError( + Box::new(TypeSignature::BUFFER_33), + Box::new(param2), ) + .into()) } }; diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index 93fbef0258..a9e759fd80 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -28,7 +28,7 @@ use crate::vm::errors::{ use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType}; use crate::vm::types::{ BlockInfoProperty, BuffData, BurnBlockInfoProperty, PrincipalData, SequenceData, - StacksBlockInfoProperty, TenureInfoProperty, TupleData, TypeSignature, Value, BUFF_32, + StacksBlockInfoProperty, TenureInfoProperty, TupleData, TypeSignature, Value, }; use crate::vm::{eval, ClarityVersion, Environment, LocalContext}; @@ -452,7 +452,11 @@ pub fn special_at_block( } } x => { - return Err(CheckErrors::TypeValueError(Box::new(BUFF_32.clone()), Box::new(x)).into()) + return Err(CheckErrors::TypeValueError( + Box::new(TypeSignature::BUFFER_32), + Box::new(x), + ) + .into()) } }; diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index b01bc6da59..bcefc5081d 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -581,12 +581,12 @@ fn test_secp256k1_errors() { ]; let expectations: &[Error] = &[ - CheckErrors::TypeValueError(Box::new(BUFF_32.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("de5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f").unwrap() })))).into(), + CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_32), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("de5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f").unwrap() })))).into(), CheckErrors::TypeValueError(Box::new(BUFF_65.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: 
hex_bytes("8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a130100").unwrap() })))).into(), CheckErrors::IncorrectArgumentCount(2, 1).into(), CheckErrors::IncorrectArgumentCount(2, 3).into(), - CheckErrors::TypeValueError(Box::new(BUFF_32.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("de5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f").unwrap() })))).into(), + CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_32), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("de5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f").unwrap() })))).into(), CheckErrors::TypeValueError(Box::new(BUFF_65.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a130111").unwrap() })))).into(), CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_33), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("03adb8de4bfb65db2cfd6120d55c6526ae9c52e675db7e47308636534ba7").unwrap() })))).into(), CheckErrors::IncorrectArgumentCount(3, 2).into(), diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 3036a4e25f..c812ab5455 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -32,8 +32,7 @@ use crate::vm::errors::CheckErrors; pub use crate::vm::types::signatures::{ parse_name_type_pairs, AssetIdentifier, BufferLength, FixedFunction, FunctionArg, FunctionSignature, FunctionType, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_32, - BUFF_64, BUFF_65, + StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_64, BUFF_65, }; use crate::vm::ClarityVersion; @@ -81,7 +80,9 @@ impl BlockInfoProperty { use 
self::BlockInfoProperty::*; match self { Time | MinerSpendWinner | MinerSpendTotal | BlockReward => TypeSignature::UIntType, - IdentityHeaderHash | VrfSeed | HeaderHash | BurnchainHeaderHash => BUFF_32.clone(), + IdentityHeaderHash | VrfSeed | HeaderHash | BurnchainHeaderHash => { + TypeSignature::BUFFER_32 + } MinerAddress => TypeSignature::PrincipalType, } } @@ -91,7 +92,7 @@ impl BurnBlockInfoProperty { pub fn type_result(&self) -> std::result::Result { use self::BurnBlockInfoProperty::*; let result = match self { - HeaderHash => BUFF_32.clone(), + HeaderHash => TypeSignature::BUFFER_32, PoxAddrs => TupleTypeSignature::try_from(vec![ ( "addrs".into(), @@ -99,7 +100,7 @@ impl BurnBlockInfoProperty { TypeSignature::TupleType( TupleTypeSignature::try_from(vec![ ("version".into(), TypeSignature::BUFFER_1), - ("hashbytes".into(), BUFF_32.clone()), + ("hashbytes".into(), TypeSignature::BUFFER_32), ]) .map_err(|_| { CheckErrors::Expects( @@ -125,7 +126,7 @@ impl StacksBlockInfoProperty { use self::StacksBlockInfoProperty::*; match self { Time => TypeSignature::UIntType, - IndexHeaderHash | HeaderHash => BUFF_32.clone(), + IndexHeaderHash | HeaderHash => TypeSignature::BUFFER_32, } } } @@ -135,7 +136,7 @@ impl TenureInfoProperty { use self::TenureInfoProperty::*; match self { Time | MinerSpendWinner | MinerSpendTotal | BlockReward => TypeSignature::UIntType, - VrfSeed | BurnchainHeaderHash => BUFF_32.clone(), + VrfSeed | BurnchainHeaderHash => TypeSignature::BUFFER_32, MinerAddress => TypeSignature::PrincipalType, } } diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 39a1da7c96..4b37d1cc91 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -19,8 +19,8 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_32, BUFF_64, - 
BUFF_65, MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, + StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_64, BUFF_65, + MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, }; pub use clarity_types::types::Value; use stacks_common::types::StacksEpochId; From 4542fce5fb6c97628f4a2a7155fee1843690f6ee Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 19 Sep 2025 16:22:57 +0200 Subject: [PATCH 15/86] chore: ported BUFF_64, #6467 --- clarity-types/src/tests/types/signatures.rs | 12 ++++++++++++ clarity-types/src/types/mod.rs | 2 +- clarity-types/src/types/signatures.rs | 8 ++------ .../vm/analysis/type_checker/v2_05/natives/mod.rs | 4 ++-- .../src/vm/analysis/type_checker/v2_05/tests/mod.rs | 3 +-- .../src/vm/analysis/type_checker/v2_1/natives/mod.rs | 4 ++-- .../src/vm/analysis/type_checker/v2_1/tests/mod.rs | 4 ++-- clarity/src/vm/types/mod.rs | 2 +- clarity/src/vm/types/signatures.rs | 2 +- 9 files changed, 24 insertions(+), 17 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 60246a06d3..1d00da40bb 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -99,6 +99,18 @@ fn test_type_buffer_33() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_buffer_64() { + let expected = + TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(64))); + let actual = TypeSignature::BUFFER_64; + + assert_eq!(expected, actual); + assert_eq!(68, actual.size().unwrap(), "size should be 68"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index 2ad2b11cd1..0c6a7711a6 100644 --- a/clarity-types/src/types/mod.rs 
+++ b/clarity-types/src/types/mod.rs @@ -33,7 +33,7 @@ use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::util::hash; pub use self::signatures::{ - AssetIdentifier, BUFF_64, BUFF_65, BufferLength, ListTypeData, SequenceSubtype, StringSubtype, + AssetIdentifier, BUFF_65, BufferLength, ListTypeData, SequenceSubtype, StringSubtype, StringUTF8Length, TupleTypeSignature, TypeSignature, }; use crate::errors::{CheckErrors, InterpreterError, InterpreterResult as Result, RuntimeErrorType}; diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 6ed7d3da58..1afa2dac7b 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -225,12 +225,6 @@ const MAX_TO_ASCII_RESULT_LEN: u32 = MAX_VALUE_SIZE - 5; pub const MAX_TO_ASCII_BUFFER_LEN: u32 = (MAX_TO_ASCII_RESULT_LEN - 2) / 2; lazy_static! { - pub static ref BUFF_64: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(64u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; pub static ref BUFF_65: TypeSignature = { #[allow(clippy::expect_used)] SequenceType(SequenceSubtype::BufferType( @@ -883,6 +877,8 @@ impl TypeSignature { pub const BUFFER_32: TypeSignature = Self::type_buffer_of_size::<32>(); /// Buffer type with size 33. pub const BUFFER_33: TypeSignature = Self::type_buffer_of_size::<33>(); + /// Buffer type with size 64. + pub const BUFFER_64: TypeSignature = Self::type_buffer_of_size::<64>(); /// Creates a buffer type with a given size known at compile time. 
/// diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs index f82c855d57..82e75158d4 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs @@ -24,7 +24,7 @@ use crate::vm::diagnostic::DiagnosableError; use crate::vm::functions::{handle_binding_list, NativeFunctions}; use crate::vm::types::{ BlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, - TupleTypeSignature, TypeSignature, Value, BUFF_64, BUFF_65, + TupleTypeSignature, TypeSignature, Value, BUFF_65, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -638,7 +638,7 @@ impl TypedNativeFunction { TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_64.clone(), + TypeSignature::BUFFER_64, ))), Keccak256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ diff --git a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs index 3f97b141a4..56c8b0b461 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs @@ -25,7 +25,6 @@ use crate::vm::types::StringSubtype::*; use crate::vm::types::TypeSignature::{BoolType, IntType, PrincipalType, UIntType}; use crate::vm::types::{ FixedFunction, FunctionType, QualifiedContractIdentifier, TypeSignature, TypeSignatureExt as _, - BUFF_64, }; use crate::vm::ClarityVersion; mod assets; @@ -454,7 +453,7 @@ fn test_at_block() { "(at-block (sha512 u0) u1)", CheckErrors::TypeError( Box::new(TypeSignature::BUFFER_32), - Box::new(BUFF_64.clone()), + Box::new(TypeSignature::BUFFER_64), ), ), ( diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index fca0952aca..e712e129cc 100644 --- 
a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -32,7 +32,7 @@ use crate::vm::types::signatures::{ use crate::vm::types::{ BlockInfoProperty, BufferLength, BurnBlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, StacksBlockInfoProperty, TenureInfoProperty, - TupleTypeSignature, TypeSignature, Value, BUFF_64, BUFF_65, MAX_VALUE_SIZE, + TupleTypeSignature, TypeSignature, Value, BUFF_65, MAX_VALUE_SIZE, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -995,7 +995,7 @@ impl TypedNativeFunction { TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_64.clone(), + TypeSignature::BUFFER_64, ))), Keccak256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 1fff6c192b..c77730021e 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -33,7 +33,7 @@ use crate::vm::types::StringSubtype::*; use crate::vm::types::TypeSignature::{BoolType, IntType, PrincipalType, SequenceType, UIntType}; use crate::vm::types::{ BufferLength, FixedFunction, FunctionType, QualifiedContractIdentifier, TraitIdentifier, - TypeSignature, TypeSignatureExt as _, BUFF_64, + TypeSignature, TypeSignatureExt as _, }; use crate::vm::{execute_v2, ClarityName, ClarityVersion}; @@ -748,7 +748,7 @@ fn test_at_block() { "(at-block (sha512 u0) u1)", CheckErrors::TypeError( Box::new(TypeSignature::BUFFER_32), - Box::new(BUFF_64.clone()), + Box::new(TypeSignature::BUFFER_64), ), ), ( diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index c812ab5455..2694c6faa4 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -32,7 +32,7 @@ use crate::vm::errors::CheckErrors; pub use 
crate::vm::types::signatures::{ parse_name_type_pairs, AssetIdentifier, BufferLength, FixedFunction, FunctionArg, FunctionSignature, FunctionType, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_64, BUFF_65, + StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_65, }; use crate::vm::ClarityVersion; diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 4b37d1cc91..1fdb4daf7c 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -19,7 +19,7 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_64, BUFF_65, + StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_65, MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, }; pub use clarity_types::types::Value; From 87d22970ab21c06cb6badda8fd84d014ada08dc3 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 19 Sep 2025 16:28:59 +0200 Subject: [PATCH 16/86] chore: ported BUFF_65, #6467 --- clarity-types/src/tests/types/signatures.rs | 12 ++++++++++++ clarity-types/src/types/mod.rs | 4 ++-- clarity-types/src/types/signatures.rs | 9 ++------- .../analysis/type_checker/v2_05/natives/mod.rs | 6 +++--- .../analysis/type_checker/v2_1/natives/mod.rs | 6 +++--- clarity/src/vm/functions/crypto.rs | 18 +++++++++++------- clarity/src/vm/tests/simple_apply_eval.rs | 4 ++-- clarity/src/vm/types/mod.rs | 2 +- clarity/src/vm/types/signatures.rs | 4 ++-- 9 files changed, 38 insertions(+), 27 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 1d00da40bb..edce808795 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -111,6 
+111,18 @@ fn test_type_buffer_64() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_buffer_65() { + let expected = + TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(65))); + let actual = TypeSignature::BUFFER_65; + + assert_eq!(expected, actual); + assert_eq!(69, actual.size().unwrap(), "size should be 69"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index 0c6a7711a6..0693ce84cc 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -33,8 +33,8 @@ use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::util::hash; pub use self::signatures::{ - AssetIdentifier, BUFF_65, BufferLength, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, + AssetIdentifier, BufferLength, ListTypeData, SequenceSubtype, StringSubtype, StringUTF8Length, + TupleTypeSignature, TypeSignature, }; use crate::errors::{CheckErrors, InterpreterError, InterpreterResult as Result, RuntimeErrorType}; use crate::representations::{ClarityName, ContractName, SymbolicExpression}; diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 1afa2dac7b..a943f6a369 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -225,13 +225,6 @@ const MAX_TO_ASCII_RESULT_LEN: u32 = MAX_VALUE_SIZE - 5; pub const MAX_TO_ASCII_BUFFER_LEN: u32 = (MAX_TO_ASCII_RESULT_LEN - 2) / 2; lazy_static! { - pub static ref BUFF_65: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(65u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; - /// Maximum-sized buffer allowed for `to-ascii?` call. 
pub static ref TO_ASCII_MAX_BUFF: TypeSignature = { #[allow(clippy::expect_used)] @@ -879,6 +872,8 @@ impl TypeSignature { pub const BUFFER_33: TypeSignature = Self::type_buffer_of_size::<33>(); /// Buffer type with size 64. pub const BUFFER_64: TypeSignature = Self::type_buffer_of_size::<64>(); + /// Buffer type with size 65. + pub const BUFFER_65: TypeSignature = Self::type_buffer_of_size::<65>(); /// Creates a buffer type with a given size known at compile time. /// diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs index 82e75158d4..ec3151fc2c 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs @@ -24,7 +24,7 @@ use crate::vm::diagnostic::DiagnosableError; use crate::vm::functions::{handle_binding_list, NativeFunctions}; use crate::vm::types::{ BlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, - TupleTypeSignature, TypeSignature, Value, BUFF_65, + TupleTypeSignature, TypeSignature, Value, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -496,7 +496,7 @@ fn check_secp256k1_recover( ) -> Result { check_argument_count(2, args)?; checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; - checker.type_check_expects(&args[1], context, &BUFF_65)?; + checker.type_check_expects(&args[1], context, &TypeSignature::BUFFER_65)?; Ok( TypeSignature::new_response(TypeSignature::BUFFER_33, TypeSignature::UIntType) .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, @@ -510,7 +510,7 @@ fn check_secp256k1_verify( ) -> Result { check_argument_count(3, args)?; checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; - checker.type_check_expects(&args[1], context, &BUFF_65)?; + checker.type_check_expects(&args[1], context, &TypeSignature::BUFFER_65)?; 
checker.type_check_expects(&args[2], context, &TypeSignature::BUFFER_33)?; Ok(TypeSignature::BoolType) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index e712e129cc..487cc1b4b1 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -32,7 +32,7 @@ use crate::vm::types::signatures::{ use crate::vm::types::{ BlockInfoProperty, BufferLength, BurnBlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, StacksBlockInfoProperty, TenureInfoProperty, - TupleTypeSignature, TypeSignature, Value, BUFF_65, MAX_VALUE_SIZE, + TupleTypeSignature, TypeSignature, Value, MAX_VALUE_SIZE, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -707,7 +707,7 @@ fn check_secp256k1_recover( ) -> Result { check_argument_count(2, args)?; checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; - checker.type_check_expects(&args[1], context, &BUFF_65)?; + checker.type_check_expects(&args[1], context, &TypeSignature::BUFFER_65)?; Ok( TypeSignature::new_response(TypeSignature::BUFFER_33, TypeSignature::UIntType) .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, @@ -721,7 +721,7 @@ fn check_secp256k1_verify( ) -> Result { check_argument_count(3, args)?; checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; - checker.type_check_expects(&args[1], context, &BUFF_65)?; + checker.type_check_expects(&args[1], context, &TypeSignature::BUFFER_65)?; checker.type_check_expects(&args[2], context, &TypeSignature::BUFFER_33)?; Ok(TypeSignature::BoolType) } diff --git a/clarity/src/vm/functions/crypto.rs b/clarity/src/vm/functions/crypto.rs index 57a1c90d5c..5ca6cd8ef2 100644 --- a/clarity/src/vm/functions/crypto.rs +++ b/clarity/src/vm/functions/crypto.rs @@ -27,7 +27,7 @@ use crate::vm::errors::{ 
check_argument_count, CheckErrors, InterpreterError, InterpreterResult as Result, }; use crate::vm::representations::SymbolicExpression; -use crate::vm::types::{BuffData, SequenceData, TypeSignature, Value, BUFF_65}; +use crate::vm::types::{BuffData, SequenceData, TypeSignature, Value}; use crate::vm::{eval, ClarityVersion, Environment, LocalContext}; macro_rules! native_hash_func { @@ -172,7 +172,7 @@ pub fn special_secp256k1_recover( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => { if data.len() > 65 { return Err(CheckErrors::TypeValueError( - Box::new(BUFF_65.clone()), + Box::new(TypeSignature::BUFFER_65), Box::new(param1), ) .into()); @@ -183,9 +183,11 @@ pub fn special_secp256k1_recover( data } _ => { - return Err( - CheckErrors::TypeValueError(Box::new(BUFF_65.clone()), Box::new(param1)).into(), + return Err(CheckErrors::TypeValueError( + Box::new(TypeSignature::BUFFER_65), + Box::new(param1), ) + .into()) } }; @@ -236,7 +238,7 @@ pub fn special_secp256k1_verify( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => { if data.len() > 65 { return Err(CheckErrors::TypeValueError( - Box::new(BUFF_65.clone()), + Box::new(TypeSignature::BUFFER_65), Box::new(param1), ) .into()); @@ -250,9 +252,11 @@ pub fn special_secp256k1_verify( data } _ => { - return Err( - CheckErrors::TypeValueError(Box::new(BUFF_65.clone()), Box::new(param1)).into(), + return Err(CheckErrors::TypeValueError( + Box::new(TypeSignature::BUFFER_65), + Box::new(param1), ) + .into()) } }; diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index bcefc5081d..858cfb3cd4 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -582,12 +582,12 @@ fn test_secp256k1_errors() { let expectations: &[Error] = &[ CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_32), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: 
hex_bytes("de5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f").unwrap() })))).into(), - CheckErrors::TypeValueError(Box::new(BUFF_65.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a130100").unwrap() })))).into(), + CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_65), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a130100").unwrap() })))).into(), CheckErrors::IncorrectArgumentCount(2, 1).into(), CheckErrors::IncorrectArgumentCount(2, 3).into(), CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_32), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("de5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f").unwrap() })))).into(), - CheckErrors::TypeValueError(Box::new(BUFF_65.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a130111").unwrap() })))).into(), + CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_65), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a130111").unwrap() })))).into(), CheckErrors::TypeValueError(Box::new(TypeSignature::BUFFER_33), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("03adb8de4bfb65db2cfd6120d55c6526ae9c52e675db7e47308636534ba7").unwrap() })))).into(), CheckErrors::IncorrectArgumentCount(3, 2).into(), diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 2694c6faa4..620d7f5d99 100644 --- 
a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -32,7 +32,7 @@ use crate::vm::errors::CheckErrors; pub use crate::vm::types::signatures::{ parse_name_type_pairs, AssetIdentifier, BufferLength, FixedFunction, FunctionArg, FunctionSignature, FunctionType, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_65, + StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, }; use crate::vm::ClarityVersion; diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 1fdb4daf7c..d60f20078c 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -19,8 +19,8 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_65, - MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, + StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, MAX_TO_ASCII_BUFFER_LEN, + TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, }; pub use clarity_types::types::Value; use stacks_common::types::StacksEpochId; From 276386c4736068aaa89919b13cfc79ed9e332655 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 19 Sep 2025 16:36:52 +0200 Subject: [PATCH 17/86] chore: move BUFFER_21 to test local, 6467 --- clarity-types/src/types/signatures.rs | 3 --- clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs | 5 ++++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index a943f6a369..c705628000 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -885,9 +885,6 @@ impl TypeSignature { )) } - #[cfg(any(test, feature = "testing"))] - pub const BUFFER_21: TypeSignature = 
Self::type_buffer_of_size::<21>(); - pub fn min_string_ascii() -> Result { Ok(SequenceType(SequenceSubtype::StringType( StringSubtype::ASCII(1_u32.try_into().map_err(|_| { diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index c77730021e..8efb04b973 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use clarity_types::types::SequenceSubtype; #[cfg(test)] use rstest::rstest; #[cfg(test)] @@ -3421,7 +3422,9 @@ fn test_principal_construct() { r#"(principal-construct? 0x22 0xfa6bf38ed557fe417333710d6033e9419391a32009)"#, CheckErrors::TypeError( Box::new(TypeSignature::BUFFER_20), - Box::new(TypeSignature::BUFFER_21), + Box::new(TypeSignature::SequenceType(SequenceSubtype::BufferType( + 21_u32.try_into().unwrap(), + ))), ), ), // `int` argument instead of `(buff 1)` for version. 
From 0cdea7c9aa750bfde8e19d9825fa29af24bd28bd Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 19 Sep 2025 16:46:04 +0200 Subject: [PATCH 18/86] chore: convert max_buffer() to BUFFER_MAX, 6467 --- clarity-types/src/tests/types/signatures.rs | 33 ++++++++++++------- clarity-types/src/types/signatures.rs | 13 ++------ .../type_checker/v2_05/natives/mod.rs | 10 +++--- .../src/vm/analysis/type_checker/v2_1/mod.rs | 2 +- .../type_checker/v2_1/natives/conversions.rs | 2 +- .../analysis/type_checker/v2_1/natives/mod.rs | 10 +++--- .../analysis/type_checker/v2_1/tests/mod.rs | 2 +- clarity/src/vm/functions/arithmetic.rs | 2 +- clarity/src/vm/functions/conversions.rs | 2 +- clarity/src/vm/functions/crypto.rs | 2 +- clarity/src/vm/tests/simple_apply_eval.rs | 18 +++++----- 11 files changed, 49 insertions(+), 47 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index edce808795..9bf7b7006b 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -51,6 +51,23 @@ fn test_type_buffer_min_to_be_buffer_1() { assert_eq!(TypeSignature::BUFFER_1, TypeSignature::BUFFER_MIN); } +#[test] +fn test_type_buffer_max() { + let expected = TypeSignature::SequenceType(SequenceSubtype::BufferType( + BufferLength::new_unsafe(MAX_VALUE_SIZE), + )); + let actual = TypeSignature::BUFFER_MAX; + + assert_eq!(expected, actual); + assert_eq!( + MAX_VALUE_SIZE + 4, + actual.size().unwrap(), + "size should be 1_048_580" + ); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_type_buffer_1() { let expected = @@ -270,11 +287,8 @@ fn test_least_supertype() { ((UIntType, UIntType), UIntType), ((BoolType, BoolType), BoolType), ( - ( - TypeSignature::max_buffer().unwrap(), - TypeSignature::max_buffer().unwrap(), - ), - TypeSignature::max_buffer().unwrap(), + (TypeSignature::BUFFER_MAX, 
TypeSignature::BUFFER_MAX), + TypeSignature::BUFFER_MAX, ), ( ( @@ -377,11 +391,8 @@ fn test_least_supertype() { let matched_pairs = [ ( - ( - TypeSignature::max_buffer().unwrap(), - TypeSignature::BUFFER_MIN, - ), - TypeSignature::max_buffer().unwrap(), + (TypeSignature::BUFFER_MAX, TypeSignature::BUFFER_MIN), + TypeSignature::BUFFER_MAX, ), ( ( @@ -529,7 +540,7 @@ fn test_least_supertype() { (IntType, UIntType), (BoolType, IntType), ( - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, TypeSignature::max_string_ascii().unwrap(), ), ( diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index c705628000..353106301f 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -861,7 +861,8 @@ impl TupleTypeSignature { impl TypeSignature { /// Buffer type with minimum size. Alias for [`TypeSignature::BUFFER_1`] pub const BUFFER_MIN: TypeSignature = TypeSignature::BUFFER_1; - + /// Buffer type with maximum size. Depends on [`MAX_VALUE_SIZE`]. + pub const BUFFER_MAX: TypeSignature = Self::type_buffer_of_size::(); /// Buffer type with size 1. pub const BUFFER_1: TypeSignature = Self::type_buffer_of_size::<1>(); /// Buffer type with size 20. 
@@ -921,16 +922,6 @@ impl TypeSignature { ))) } - pub fn max_buffer() -> Result { - Ok(SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(MAX_VALUE_SIZE).map_err(|_| { - CheckErrors::Expects( - "FAIL: Max Clarity Value Size is no longer realizable in Buffer Type".into(), - ) - })?, - ))) - } - pub fn contract_name_string_ascii_type() -> Result { TypeSignature::bound_string_ascii_type(CONTRACT_MAX_NAME_LENGTH.try_into().map_err( |_| CheckErrors::Expects("FAIL: contract name max length exceeds u32 space".into()), diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs index ec3151fc2c..4cd38adb1c 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs @@ -610,7 +610,7 @@ impl TypedNativeFunction { }))), Hash160 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -618,7 +618,7 @@ impl TypedNativeFunction { ))), Sha256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -626,7 +626,7 @@ impl TypedNativeFunction { ))), Sha512Trunc256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -634,7 +634,7 @@ impl TypedNativeFunction { ))), Sha512 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -642,7 +642,7 @@ impl TypedNativeFunction { ))), Keccak256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, 
TypeSignature::IntType, ], diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index 131186958c..3f3519ccdb 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -409,7 +409,7 @@ impl FunctionType { TypeSignature::UIntType, TypeSignature::max_string_ascii()?, TypeSignature::max_string_utf8()?, - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, ], Box::new(first.clone()), ) diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs index 88f5470d53..2b429d5621 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs @@ -38,6 +38,6 @@ pub fn check_special_from_consensus_buff( ) -> Result { check_argument_count(2, args)?; let result_type = TypeSignature::parse_type_repr(StacksEpochId::Epoch21, &args[0], checker)?; - checker.type_check_expects(&args[1], context, &TypeSignature::max_buffer()?)?; + checker.type_check_expects(&args[1], context, &TypeSignature::BUFFER_MAX)?; TypeSignature::new_option(result_type).map_err(CheckError::from) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index 487cc1b4b1..fda694b30f 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -967,7 +967,7 @@ impl TypedNativeFunction { }))), Hash160 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -975,7 +975,7 @@ impl TypedNativeFunction { ))), Sha256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, 
TypeSignature::IntType, ], @@ -983,7 +983,7 @@ impl TypedNativeFunction { ))), Sha512Trunc256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -991,7 +991,7 @@ impl TypedNativeFunction { ))), Sha512 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], @@ -999,7 +999,7 @@ impl TypedNativeFunction { ))), Keccak256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 8efb04b973..5b08be8459 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -101,7 +101,7 @@ fn test_from_consensus_buff() { ( "(from-consensus-buff? int u6)", CheckErrors::TypeError( - Box::new(TypeSignature::max_buffer().unwrap()), + Box::new(TypeSignature::BUFFER_MAX), Box::new(TypeSignature::UIntType), ), ), diff --git a/clarity/src/vm/functions/arithmetic.rs b/clarity/src/vm/functions/arithmetic.rs index 2d5c2aff3a..83bb4b64fb 100644 --- a/clarity/src/vm/functions/arithmetic.rs +++ b/clarity/src/vm/functions/arithmetic.rs @@ -125,7 +125,7 @@ macro_rules! 
type_force_binary_comparison_v2 { TypeSignature::UIntType, TypeSignature::max_string_ascii()?, TypeSignature::max_string_utf8()?, - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, ], Box::new(x), ) diff --git a/clarity/src/vm/functions/conversions.rs b/clarity/src/vm/functions/conversions.rs index bd7834a552..47fd47ad52 100644 --- a/clarity/src/vm/functions/conversions.rs +++ b/clarity/src/vm/functions/conversions.rs @@ -325,7 +325,7 @@ pub fn from_consensus_buff( Ok(buff_data.data) } else { Err(CheckErrors::TypeValueError( - Box::new(TypeSignature::max_buffer()?), + Box::new(TypeSignature::BUFFER_MAX), Box::new(value), )) }?; diff --git a/clarity/src/vm/functions/crypto.rs b/clarity/src/vm/functions/crypto.rs index 5ca6cd8ef2..9277860fae 100644 --- a/clarity/src/vm/functions/crypto.rs +++ b/clarity/src/vm/functions/crypto.rs @@ -41,7 +41,7 @@ macro_rules! native_hash_func { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, ], Box::new(input), )), diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index 858cfb3cd4..32557821a9 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -1016,7 +1016,7 @@ fn test_sequence_comparisons_mismatched_types() { TypeSignature::UIntType, TypeSignature::max_string_ascii().unwrap(), TypeSignature::max_string_utf8().unwrap(), - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Int(0)), ) @@ -1027,7 +1027,7 @@ fn test_sequence_comparisons_mismatched_types() { TypeSignature::UIntType, TypeSignature::max_string_ascii().unwrap(), TypeSignature::max_string_utf8().unwrap(), - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Int(0)), ) @@ -1050,7 +1050,7 @@ fn test_sequence_comparisons_mismatched_types() { TypeSignature::UIntType, TypeSignature::max_string_ascii().unwrap(), 
TypeSignature::max_string_utf8().unwrap(), - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Sequence(SequenceData::String(CharType::ASCII( ASCIIData { @@ -1065,7 +1065,7 @@ fn test_sequence_comparisons_mismatched_types() { TypeSignature::UIntType, TypeSignature::max_string_ascii().unwrap(), TypeSignature::max_string_utf8().unwrap(), - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Sequence(SequenceData::String(CharType::ASCII( ASCIIData { @@ -1480,7 +1480,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Bool(true)), ) @@ -1489,7 +1489,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Bool(true)), ) @@ -1498,7 +1498,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Bool(true)), ) @@ -1507,7 +1507,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Bool(true)), ) @@ -1517,7 +1517,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Bool(true)), ) From 87fbbe982720fe1d90c029866643913b98d5dd3f Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 19 Sep 2025 17:10:25 +0200 Subject: [PATCH 19/86] chore: move try_from traits next to BufferLength struct, #6467 --- clarity-types/src/tests/types/signatures.rs | 27 +++++++ clarity-types/src/types/signatures.rs | 86 ++++++++++----------- 2 files changed, 70 insertions(+), 43 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs 
b/clarity-types/src/tests/types/signatures.rs index 9bf7b7006b..7343bb87e8 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -39,6 +39,33 @@ fn test_buffer_length_try_from_u32_trait() { assert_eq!(CheckErrors::ValueTooLarge, err); } +#[test] +fn test_buffer_length_try_from_usize_trait() { + let buffer = BufferLength::try_from(0_usize).unwrap(); + assert_eq!(0, buffer.get_value()); + + let buffer = BufferLength::try_from(MAX_VALUE_SIZE as usize).unwrap(); + assert_eq!(MAX_VALUE_SIZE, buffer.get_value()); + + let err = BufferLength::try_from(MAX_VALUE_SIZE as usize + 1).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); +} + +#[test] +fn test_buffer_length_try_from_i128_trait() { + let buffer = BufferLength::try_from(0_i128).unwrap(); + assert_eq!(0, buffer.get_value()); + + let buffer = BufferLength::try_from(MAX_VALUE_SIZE as i128).unwrap(); + assert_eq!(MAX_VALUE_SIZE, buffer.get_value()); + + let err = BufferLength::try_from(MAX_VALUE_SIZE as i128 + 1).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); + + let err = BufferLength::try_from(-1_i128).unwrap_err(); + assert_eq!(CheckErrors::ValueOutOfBounds, err); +} + #[test] fn test_buffer_length_to_u32_using_from_trait() { let buffer = BufferLength::new_unsafe(0); diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 353106301f..3ec5570b5e 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -137,6 +137,49 @@ impl BufferLength { } } +impl From<&BufferLength> for u32 { + fn from(v: &BufferLength) -> u32 { + v.0 + } +} + +impl From for u32 { + fn from(v: BufferLength) -> u32 { + v.0 + } +} + +impl TryFrom for BufferLength { + type Error = CheckErrors; + fn try_from(data: u32) -> Result { + Self::try_from_u32_as_opt(data).ok_or(CheckErrors::ValueTooLarge) + } +} + +impl TryFrom for BufferLength { + type Error = CheckErrors; + fn try_from(data: usize) 
-> Result { + if data > (MAX_VALUE_SIZE as usize) { + Err(CheckErrors::ValueTooLarge) + } else { + Ok(BufferLength(data as u32)) + } + } +} + +impl TryFrom for BufferLength { + type Error = CheckErrors; + fn try_from(data: i128) -> Result { + if data > (MAX_VALUE_SIZE as i128) { + Err(CheckErrors::ValueTooLarge) + } else if data < 0 { + Err(CheckErrors::ValueOutOfBounds) + } else { + Ok(BufferLength(data as u32)) + } + } +} + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct StringUTF8Length(u32); @@ -268,49 +311,6 @@ impl From for TypeSignature { } } -impl From<&BufferLength> for u32 { - fn from(v: &BufferLength) -> u32 { - v.0 - } -} - -impl From for u32 { - fn from(v: BufferLength) -> u32 { - v.0 - } -} - -impl TryFrom for BufferLength { - type Error = CheckErrors; - fn try_from(data: u32) -> Result { - Self::try_from_u32_as_opt(data).ok_or(CheckErrors::ValueTooLarge) - } -} - -impl TryFrom for BufferLength { - type Error = CheckErrors; - fn try_from(data: usize) -> Result { - if data > (MAX_VALUE_SIZE as usize) { - Err(CheckErrors::ValueTooLarge) - } else { - Ok(BufferLength(data as u32)) - } - } -} - -impl TryFrom for BufferLength { - type Error = CheckErrors; - fn try_from(data: i128) -> Result { - if data > (MAX_VALUE_SIZE as i128) { - Err(CheckErrors::ValueTooLarge) - } else if data < 0 { - Err(CheckErrors::ValueOutOfBounds) - } else { - Ok(BufferLength(data as u32)) - } - } -} - impl From<&StringUTF8Length> for u32 { fn from(v: &StringUTF8Length) -> u32 { v.0 From 6a1f41583e6cb0a59646da2ce67592e633103596 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 22 Sep 2025 10:08:10 +0200 Subject: [PATCH 20/86] refactor: avoid TryFrom code duplication with BufferLength::try_from_i128, #6467 --- clarity-types/src/types/signatures.rs | 32 ++++++++++++++------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 3ec5570b5e..e86eb09465 
100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -110,8 +110,6 @@ impl BufferLength { /// This function is primarily intended for internal use when defining /// `const` values, since it returns an [`Option`] that can be unwrapped /// with [`Option::expect`] in a `const fn`. - /// - /// It can also be reused in a runtime context const fn try_from_u32_as_opt(value: u32) -> Option { if value > MAX_VALUE_SIZE { None @@ -119,6 +117,20 @@ impl BufferLength { Some(BufferLength(value)) } } + + /// Attempts to create a [`BufferLength`] from a [`i128`] as a [`Result`]. + /// + /// This function is primarily intended for internal runtime use, + /// and serves as the central place for all integer validation logic. + fn try_from_i128(data: i128) -> Result { + if data > (MAX_VALUE_SIZE as i128) { + Err(CheckErrors::ValueTooLarge) + } else if data < 0 { + Err(CheckErrors::ValueOutOfBounds) + } else { + Ok(BufferLength(data as u32)) + } + } } /// Test-only utilities for [`BufferLength`]. 
@@ -152,31 +164,21 @@ impl From for u32 { impl TryFrom for BufferLength { type Error = CheckErrors; fn try_from(data: u32) -> Result { - Self::try_from_u32_as_opt(data).ok_or(CheckErrors::ValueTooLarge) + Self::try_from(data as usize) } } impl TryFrom for BufferLength { type Error = CheckErrors; fn try_from(data: usize) -> Result { - if data > (MAX_VALUE_SIZE as usize) { - Err(CheckErrors::ValueTooLarge) - } else { - Ok(BufferLength(data as u32)) - } + Self::try_from(data as i128) } } impl TryFrom for BufferLength { type Error = CheckErrors; fn try_from(data: i128) -> Result { - if data > (MAX_VALUE_SIZE as i128) { - Err(CheckErrors::ValueTooLarge) - } else if data < 0 { - Err(CheckErrors::ValueOutOfBounds) - } else { - Ok(BufferLength(data as u32)) - } + Self::try_from_i128(data) } } From 7ecd45ae4ea7a9261098fef6dde854ea3df5bcfb Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 22 Sep 2025 10:19:37 +0200 Subject: [PATCH 21/86] chore: remove unused TypeSignature::new_string_ascii(), #6467 --- clarity-types/src/types/signatures.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index e86eb09465..94eb8a1a63 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -440,13 +440,6 @@ impl TypeSignature { } } - pub fn new_string_ascii(len: usize) -> Result { - let len = BufferLength::try_from(len)?; - Ok(TypeSignature::SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(len), - ))) - } - pub fn new_string_utf8(len: usize) -> Result { let len = StringUTF8Length::try_from(len)?; Ok(TypeSignature::SequenceType(SequenceSubtype::StringType( From 93a646bb8865eef8eb8825bc9fd9b9dc5954e1c5 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 22 Sep 2025 10:54:09 +0200 Subject: [PATCH 22/86] chore: ported min_string_ascii() to STRING_ASCII_MIN, #6467 --- clarity-types/src/tests/types/signatures.rs | 31 +++++++++++++------ 
clarity-types/src/types/mod.rs | 4 +-- clarity-types/src/types/signatures.rs | 30 ++++++++---------- .../type_checker/v2_05/natives/sequences.rs | 2 +- .../analysis/type_checker/v2_05/tests/mod.rs | 6 ++-- .../type_checker/v2_1/natives/sequences.rs | 2 +- .../analysis/type_checker/v2_1/tests/mod.rs | 12 +++---- clarity/src/vm/tests/sequences.rs | 2 +- 8 files changed, 49 insertions(+), 40 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 7343bb87e8..8c5561ba17 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -18,8 +18,8 @@ use crate::errors::CheckErrors; use crate::types::TypeSignature::{BoolType, IntType, ListUnionType, UIntType}; use crate::types::signatures::{CallableSubtype, TypeSignature}; use crate::types::{ - BufferLength, MAX_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype, TraitIdentifier, - TupleTypeSignature, + BufferLength, MAX_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype, StringSubtype, + TraitIdentifier, TupleTypeSignature, }; #[test] @@ -167,6 +167,19 @@ fn test_type_buffer_65() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_string_ascii_min() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::new_unsafe(1), + ))); + let actual = TypeSignature::STRING_ASCII_MIN; + + assert_eq!(expected, actual); + assert_eq!(5, actual.size().unwrap(), "size should be 5"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ @@ -430,7 +443,7 @@ fn test_least_supertype() { ), ( ( - TypeSignature::min_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MIN, TypeSignature::bound_string_ascii_type(17).unwrap(), ), TypeSignature::bound_string_ascii_type(17).unwrap(), @@ -508,7 +521,7 @@ fn 
test_least_supertype() { TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::min_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MIN, )]) .unwrap(), ), @@ -530,7 +543,7 @@ fn test_least_supertype() { ), ( ( - TypeSignature::new_option(TypeSignature::min_string_ascii().unwrap()).unwrap(), + TypeSignature::new_option(TypeSignature::STRING_ASCII_MIN).unwrap(), TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17).unwrap()) .unwrap(), ), @@ -613,7 +626,7 @@ fn test_least_supertype() { ), (list_union.clone(), TypeSignature::PrincipalType), ( - TypeSignature::min_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MIN, list_union_principals, ), ( @@ -624,13 +637,13 @@ fn test_least_supertype() { 5, ) .unwrap(), - TypeSignature::list_of(TypeSignature::min_string_ascii().unwrap(), 3).unwrap(), + TypeSignature::list_of(TypeSignature::STRING_ASCII_MIN, 3).unwrap(), ), ( TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::min_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MIN, )]) .unwrap(), ), @@ -639,7 +652,7 @@ fn test_least_supertype() { ), ), ( - TypeSignature::new_option(TypeSignature::min_string_ascii().unwrap()).unwrap(), + TypeSignature::new_option(TypeSignature::STRING_ASCII_MIN).unwrap(), TypeSignature::new_option(TypeSignature::min_string_utf8().unwrap()).unwrap(), ), ( diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index 0693ce84cc..b8fce796a3 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -343,7 +343,7 @@ impl SequenceData { let out = match self { SequenceData::Buffer(..) 
=> TypeSignature::BUFFER_MIN.size(), SequenceData::List(data) => data.type_signature.get_list_item_type().size(), - SequenceData::String(CharType::ASCII(..)) => TypeSignature::min_string_ascii()?.size(), + SequenceData::String(CharType::ASCII(..)) => TypeSignature::STRING_ASCII_MIN.size(), SequenceData::String(CharType::UTF8(..)) => TypeSignature::min_string_utf8()?.size(), }?; Ok(out) @@ -483,7 +483,7 @@ impl SequenceData { } } else { Err(CheckErrors::TypeValueError( - Box::new(TypeSignature::min_string_ascii()?), + Box::new(TypeSignature::STRING_ASCII_MIN), Box::new(to_find), ) .into()) diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 94eb8a1a63..3ca5fd42ce 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -232,9 +232,7 @@ impl SequenceSubtype { match &self { SequenceSubtype::ListType(list_data) => Ok(list_data.clone().destruct().0), SequenceSubtype::BufferType(_) => Ok(TypeSignature::BUFFER_MIN), - SequenceSubtype::StringType(StringSubtype::ASCII(_)) => { - TypeSignature::min_string_ascii() - } + SequenceSubtype::StringType(StringSubtype::ASCII(_)) => Ok(TypeSignature::STRING_ASCII_MIN), SequenceSubtype::StringType(StringSubtype::UTF8(_)) => TypeSignature::min_string_utf8(), } } @@ -440,13 +438,6 @@ impl TypeSignature { } } - pub fn new_string_utf8(len: usize) -> Result { - let len = StringUTF8Length::try_from(len)?; - Ok(TypeSignature::SequenceType(SequenceSubtype::StringType( - StringSubtype::UTF8(len), - ))) - } - pub fn is_response_type(&self) -> bool { matches!(self, TypeSignature::ResponseType(_)) } @@ -871,21 +862,26 @@ impl TypeSignature { /// Buffer type with size 65. pub const BUFFER_65: TypeSignature = Self::type_buffer_of_size::<65>(); + /// String ASCII type with size 1 + pub const STRING_ASCII_MIN: TypeSignature = Self::type_string_ascii::<1>(); + /// Creates a buffer type with a given size known at compile time. 
/// - /// This function is intended for defining constant buffer types - /// type aliases (e.g., [`TypeSignature::BUFFER_1`]) without repeating logic. + /// This function is intended for defining constant buffer type + /// aliases (e.g., [`TypeSignature::BUFFER_1`]) without repeating logic. const fn type_buffer_of_size() -> Self { SequenceType(SequenceSubtype::BufferType( BufferLength::try_from_u32_as_opt(VALUE).expect("Invalid buffer size!"), )) } - pub fn min_string_ascii() -> Result { - Ok(SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(1_u32.try_into().map_err(|_| { - CheckErrors::Expects("FAIL: Min clarity value size is not realizable".into()) - })?), + /// Creates a string ASCII type with a given size known at compile time. + /// + /// This function is intended for defining constant string type + /// aliases (e.g., [`TypeSignature::STRING_ASCII_MIN`]) without repeating logic. + const fn type_string_ascii() -> Self { + SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::try_from_u32_as_opt(VALUE).expect("Invalid buffer size!"), ))) } diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs index 0fd61d6327..9cab80d2b4 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs @@ -83,7 +83,7 @@ pub fn check_special_map( ListType(list_data) => list_data.destruct(), BufferType(buffer_data) => (TypeSignature::BUFFER_MIN, buffer_data.into()), StringType(ASCII(ascii_data)) => { - (TypeSignature::min_string_ascii()?, ascii_data.into()) + (TypeSignature::STRING_ASCII_MIN, ascii_data.into()) } StringType(UTF8(utf8_data)) => { (TypeSignature::min_string_utf8()?, utf8_data.into()) diff --git a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs index 56c8b0b461..19f6833f5d 100644 --- 
a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs @@ -720,14 +720,14 @@ fn test_index_of() { ), CheckErrors::TypeError( Box::new(TypeSignature::BUFFER_MIN), - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrors::TypeError( Box::new(TypeSignature::min_string_utf8().unwrap()), - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrors::TypeError( - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), Box::new(TypeSignature::min_string_utf8().unwrap()), ), CheckErrors::CouldNotDetermineType, diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs index 9648820e69..06bbebf286 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs @@ -89,7 +89,7 @@ pub fn check_special_map( ListType(list_data) => list_data.destruct(), BufferType(buffer_data) => (TypeSignature::BUFFER_MIN, buffer_data.into()), StringType(ASCII(ascii_data)) => { - (TypeSignature::min_string_ascii()?, ascii_data.into()) + (TypeSignature::STRING_ASCII_MIN, ascii_data.into()) } StringType(UTF8(utf8_data)) => { (TypeSignature::min_string_utf8()?, utf8_data.into()) diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 5b08be8459..2f13be3f74 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -1090,14 +1090,14 @@ fn test_index_of() { ), CheckErrors::TypeError( Box::new(TypeSignature::BUFFER_MIN), - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrors::TypeError( 
Box::new(TypeSignature::min_string_utf8().unwrap()), - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrors::TypeError( - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), Box::new(TypeSignature::min_string_utf8().unwrap()), ), CheckErrors::TypeError( @@ -1111,14 +1111,14 @@ fn test_index_of() { ), CheckErrors::TypeError( Box::new(TypeSignature::BUFFER_MIN), - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrors::TypeError( Box::new(TypeSignature::min_string_utf8().unwrap()), - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrors::TypeError( - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), Box::new(TypeSignature::min_string_utf8().unwrap()), ), CheckErrors::CouldNotDetermineType, diff --git a/clarity/src/vm/tests/sequences.rs b/clarity/src/vm/tests/sequences.rs index c2e8e2831a..efabf0f37d 100644 --- a/clarity/src/vm/tests/sequences.rs +++ b/clarity/src/vm/tests/sequences.rs @@ -122,7 +122,7 @@ fn test_index_of() { Box::new(execute("\"a\"").unwrap().unwrap()), ), CheckErrors::TypeValueError( - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), Box::new(execute("u\"a\"").unwrap().unwrap()), ), ]; From 5bba38ee33af57f0ce745a4b6cd30fc69710c76a Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 22 Sep 2025 11:06:52 +0200 Subject: [PATCH 23/86] chore: ported max_string_ascii() to STRING_ASCII_MAX, #6467 --- clarity-types/src/tests/types/signatures.rs | 34 ++++++++++++------- clarity-types/src/types/signatures.rs | 20 ++++------- .../src/vm/analysis/type_checker/v2_1/mod.rs | 2 +- .../analysis/type_checker/v2_1/natives/mod.rs | 4 +-- .../analysis/type_checker/v2_1/tests/mod.rs | 8 ++--- clarity/src/vm/functions/arithmetic.rs | 2 +- 
clarity/src/vm/functions/conversions.rs | 2 +- clarity/src/vm/tests/conversions.rs | 4 +-- clarity/src/vm/tests/simple_apply_eval.rs | 8 ++--- 9 files changed, 43 insertions(+), 41 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 8c5561ba17..c6c89960ba 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -180,6 +180,23 @@ fn test_type_string_ascii_min() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_string_ascii_max() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::new_unsafe(MAX_VALUE_SIZE), + ))); + let actual = TypeSignature::STRING_ASCII_MAX; + + assert_eq!(expected, actual); + assert_eq!( + MAX_VALUE_SIZE + 4, + actual.size().unwrap(), + "size should be 1_048_580" + ); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ @@ -579,10 +596,7 @@ fn test_least_supertype() { let bad_pairs = [ (IntType, UIntType), (BoolType, IntType), - ( - TypeSignature::BUFFER_MAX, - TypeSignature::max_string_ascii().unwrap(), - ), + (TypeSignature::BUFFER_MAX, TypeSignature::STRING_ASCII_MAX), ( TypeSignature::list_of(TypeSignature::UIntType, 42).unwrap(), TypeSignature::list_of(TypeSignature::IntType, 42).unwrap(), @@ -625,10 +639,7 @@ fn test_least_supertype() { TypeSignature::PrincipalType, ), (list_union.clone(), TypeSignature::PrincipalType), - ( - TypeSignature::STRING_ASCII_MIN, - list_union_principals, - ), + (TypeSignature::STRING_ASCII_MIN, list_union_principals), ( TypeSignature::list_of( TypeSignature::SequenceType(SequenceSubtype::BufferType( @@ -641,11 +652,8 @@ fn test_least_supertype() { ), ( TypeSignature::TupleType( - TupleTypeSignature::try_from(vec![( - "b".into(), - TypeSignature::STRING_ASCII_MIN, - )]) - 
.unwrap(), + TupleTypeSignature::try_from(vec![("b".into(), TypeSignature::STRING_ASCII_MIN)]) + .unwrap(), ), TypeSignature::TupleType( TupleTypeSignature::try_from(vec![("b".into(), TypeSignature::UIntType)]).unwrap(), diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 3ca5fd42ce..b3b0549d41 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -232,7 +232,9 @@ impl SequenceSubtype { match &self { SequenceSubtype::ListType(list_data) => Ok(list_data.clone().destruct().0), SequenceSubtype::BufferType(_) => Ok(TypeSignature::BUFFER_MIN), - SequenceSubtype::StringType(StringSubtype::ASCII(_)) => Ok(TypeSignature::STRING_ASCII_MIN), + SequenceSubtype::StringType(StringSubtype::ASCII(_)) => { + Ok(TypeSignature::STRING_ASCII_MIN) + } SequenceSubtype::StringType(StringSubtype::UTF8(_)) => TypeSignature::min_string_utf8(), } } @@ -845,7 +847,7 @@ impl TupleTypeSignature { } impl TypeSignature { - /// Buffer type with minimum size. Alias for [`TypeSignature::BUFFER_1`] + /// Buffer type with minimum size. Alias for [`TypeSignature::BUFFER_1`]. pub const BUFFER_MIN: TypeSignature = TypeSignature::BUFFER_1; /// Buffer type with maximum size. Depends on [`MAX_VALUE_SIZE`]. pub const BUFFER_MAX: TypeSignature = Self::type_buffer_of_size::(); @@ -862,8 +864,10 @@ impl TypeSignature { /// Buffer type with size 65. pub const BUFFER_65: TypeSignature = Self::type_buffer_of_size::<65>(); - /// String ASCII type with size 1 + /// String ASCII type with minimum size (`1`). pub const STRING_ASCII_MIN: TypeSignature = Self::type_string_ascii::<1>(); + /// String ASCII type with maximum size. Depends on [`MAX_VALUE_SIZE`]. + pub const STRING_ASCII_MAX: TypeSignature = Self::type_string_ascii::(); /// Creates a buffer type with a given size known at compile time. 
/// @@ -893,16 +897,6 @@ impl TypeSignature { ))) } - pub fn max_string_ascii() -> Result { - Ok(SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(BufferLength::try_from(MAX_VALUE_SIZE).map_err(|_| { - CheckErrors::Expects( - "FAIL: Max Clarity Value Size is no longer realizable in ASCII Type".into(), - ) - })?), - ))) - } - pub fn max_string_utf8() -> Result { Ok(SequenceType(SequenceSubtype::StringType( StringSubtype::UTF8(StringUTF8Length::try_from(MAX_VALUE_SIZE / 4).map_err(|_| { diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index 3f3519ccdb..4debaf2563 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -407,7 +407,7 @@ impl FunctionType { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii()?, + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8()?, TypeSignature::BUFFER_MAX, ], diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index fda694b30f..5add4db193 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -932,14 +932,14 @@ impl TypedNativeFunction { } StringToInt => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_string_ascii()?, + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8()?, ], TypeSignature::OptionalType(Box::new(TypeSignature::IntType)), ))), StringToUInt => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_string_ascii()?, + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8()?, ], TypeSignature::OptionalType(Box::new(TypeSignature::UIntType)), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 2f13be3f74..6b84210013 
100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -2231,7 +2231,7 @@ fn test_string_to_ints() { CheckErrors::IncorrectArgumentCount(1, 0), CheckErrors::UnionTypeError( vec![ - TypeSignature::max_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8().unwrap(), ], Box::new(SequenceType(BufferType( @@ -2240,7 +2240,7 @@ fn test_string_to_ints() { ), CheckErrors::UnionTypeError( vec![ - TypeSignature::max_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8().unwrap(), ], Box::new(IntType), @@ -2249,7 +2249,7 @@ fn test_string_to_ints() { CheckErrors::IncorrectArgumentCount(1, 0), CheckErrors::UnionTypeError( vec![ - TypeSignature::max_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8().unwrap(), ], Box::new(SequenceType(BufferType( @@ -2258,7 +2258,7 @@ fn test_string_to_ints() { ), CheckErrors::UnionTypeError( vec![ - TypeSignature::max_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8().unwrap(), ], Box::new(IntType), diff --git a/clarity/src/vm/functions/arithmetic.rs b/clarity/src/vm/functions/arithmetic.rs index 83bb4b64fb..df7e4ebabd 100644 --- a/clarity/src/vm/functions/arithmetic.rs +++ b/clarity/src/vm/functions/arithmetic.rs @@ -123,7 +123,7 @@ macro_rules! 
type_force_binary_comparison_v2 { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii()?, + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8()?, TypeSignature::BUFFER_MAX, ], diff --git a/clarity/src/vm/functions/conversions.rs b/clarity/src/vm/functions/conversions.rs index 47fd47ad52..c5f883b0d1 100644 --- a/clarity/src/vm/functions/conversions.rs +++ b/clarity/src/vm/functions/conversions.rs @@ -151,7 +151,7 @@ pub fn native_string_to_int_generic( } _ => Err(CheckErrors::UnionTypeValueError( vec![ - TypeSignature::max_string_ascii()?, + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8()?, ], Box::new(value), diff --git a/clarity/src/vm/tests/conversions.rs b/clarity/src/vm/tests/conversions.rs index c6835efaa4..e8a07fe4ea 100644 --- a/clarity/src/vm/tests/conversions.rs +++ b/clarity/src/vm/tests/conversions.rs @@ -314,7 +314,7 @@ fn test_simple_string_to_int() { execute_v2(wrong_type_error_test).unwrap_err(), CheckErrors::UnionTypeValueError( vec![ - TypeSignature::max_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8().unwrap(), ], Box::new(Value::Int(1)) @@ -379,7 +379,7 @@ fn test_simple_string_to_uint() { execute_v2(wrong_type_error_test).unwrap_err(), CheckErrors::UnionTypeValueError( vec![ - TypeSignature::max_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8().unwrap(), ], Box::new(Value::Int(1)) diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index 32557821a9..471ed1721c 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -1014,7 +1014,7 @@ fn test_sequence_comparisons_mismatched_types() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8().unwrap(), TypeSignature::BUFFER_MAX, ], @@ 
-1025,7 +1025,7 @@ fn test_sequence_comparisons_mismatched_types() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8().unwrap(), TypeSignature::BUFFER_MAX, ], @@ -1048,7 +1048,7 @@ fn test_sequence_comparisons_mismatched_types() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8().unwrap(), TypeSignature::BUFFER_MAX, ], @@ -1063,7 +1063,7 @@ fn test_sequence_comparisons_mismatched_types() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MAX, TypeSignature::max_string_utf8().unwrap(), TypeSignature::BUFFER_MAX, ], From a0f7ae2f3831f324686b3d6dafa900c2997f0768 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 22 Sep 2025 11:16:03 +0200 Subject: [PATCH 24/86] chore: ported ASCII_40 to STRING_ASCII_40, #6467 --- clarity-types/src/tests/types/signatures.rs | 13 +++++++++++++ clarity-types/src/types/signatures.rs | 5 ++--- .../vm/analysis/type_checker/v2_1/natives/mod.rs | 4 ++-- clarity/src/vm/docs/mod.rs | 9 ++++++--- clarity/src/vm/types/signatures.rs | 2 +- 5 files changed, 24 insertions(+), 9 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index c6c89960ba..b6fb47fdf8 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -197,6 +197,19 @@ fn test_type_string_ascii_max() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_string_ascii_40() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::new_unsafe(40), + ))); + let actual = TypeSignature::STRING_ASCII_40; + + assert_eq!(expected, actual); + assert_eq!(44, actual.size().unwrap(), 
"size should be 44"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index b3b0549d41..0641d2f018 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -288,9 +288,6 @@ lazy_static! { }; } -pub const ASCII_40: TypeSignature = SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(BufferLength(40)), -)); pub const UTF8_40: TypeSignature = SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( StringUTF8Length(40), ))); @@ -868,6 +865,8 @@ impl TypeSignature { pub const STRING_ASCII_MIN: TypeSignature = Self::type_string_ascii::<1>(); /// String ASCII type with maximum size. Depends on [`MAX_VALUE_SIZE`]. pub const STRING_ASCII_MAX: TypeSignature = Self::type_string_ascii::(); + /// String ASCII type with size 40. + pub const STRING_ASCII_40: TypeSignature = Self::type_string_ascii::<40>(); /// Creates a buffer type with a given size known at compile time. 
/// diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index 5add4db193..32d03104c8 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -26,7 +26,7 @@ use crate::vm::costs::{analysis_typecheck_cost, runtime_cost, CostErrors, CostTr use crate::vm::diagnostic::DiagnosableError; use crate::vm::functions::{handle_binding_list, NativeFunctions}; use crate::vm::types::signatures::{ - CallableSubtype, FunctionArgSignature, FunctionReturnsSignature, SequenceSubtype, ASCII_40, + CallableSubtype, FunctionArgSignature, FunctionReturnsSignature, SequenceSubtype, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, }; use crate::vm::types::{ @@ -947,7 +947,7 @@ impl TypedNativeFunction { IntToAscii => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![TypeSignature::IntType, TypeSignature::UIntType], // 40 is the longest string one can get from int->string conversion. 
- ASCII_40, + TypeSignature::STRING_ASCII_40, ))), IntToUtf8 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![TypeSignature::IntType, TypeSignature::UIntType], diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 68e995bd7d..2fa54647e4 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2807,7 +2807,7 @@ mod test { BurnStateDB, ClarityDatabase, HeadersDB, MemoryBackingStore, STXBalance, }; use crate::vm::docs::get_output_type_string; - use crate::vm::types::signatures::{FunctionArgSignature, FunctionReturnsSignature, ASCII_40}; + use crate::vm::types::signatures::{FunctionArgSignature, FunctionReturnsSignature}; use crate::vm::types::{ FunctionType, PrincipalData, QualifiedContractIdentifier, TupleData, TypeSignature, }; @@ -3344,7 +3344,10 @@ mod test { function_type = FunctionType::Binary( FunctionArgSignature::Single(TypeSignature::IntType), - FunctionArgSignature::Union(vec![ASCII_40, TypeSignature::IntType]), + FunctionArgSignature::Union(vec![ + TypeSignature::STRING_ASCII_40, + TypeSignature::IntType, + ]), ret.clone(), ); result = get_input_type_string(&function_type); @@ -3488,7 +3491,7 @@ mod test { TypeSignature::IntType, TypeSignature::UIntType, TypeSignature::PrincipalType, - ASCII_40, + TypeSignature::STRING_ASCII_40, ]), FunctionReturnsSignature::TypeOfArgAtPosition(1), ); diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index d60f20078c..75274c19e1 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -19,7 +19,7 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, MAX_TO_ASCII_BUFFER_LEN, + StringUTF8Length, TupleTypeSignature, TypeSignature, MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, }; pub use 
clarity_types::types::Value; From 649af14227d7cfdae0f3691dcb0577304fe91312 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 22 Sep 2025 11:43:09 +0200 Subject: [PATCH 25/86] refactor: avoid TryFrom code duplication with StringUTF8Length::try_from_i128, #6467 --- clarity-types/src/tests/types/signatures.rs | 87 +++++++++++- clarity-types/src/types/signatures.rs | 143 ++++++++++++-------- 2 files changed, 170 insertions(+), 60 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index b6fb47fdf8..9bd4e7692a 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -19,7 +19,7 @@ use crate::types::TypeSignature::{BoolType, IntType, ListUnionType, UIntType}; use crate::types::signatures::{CallableSubtype, TypeSignature}; use crate::types::{ BufferLength, MAX_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype, StringSubtype, - TraitIdentifier, TupleTypeSignature, + StringUTF8Length, TraitIdentifier, TupleTypeSignature, }; #[test] @@ -210,6 +210,91 @@ fn test_type_string_ascii_40() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_string_utf8_length_try_from_u32_trait() { + let string = StringUTF8Length::try_from(0_u32).unwrap(); + assert_eq!(0, string.get_value()); + + let string = StringUTF8Length::try_from(1_u32).unwrap(); + assert_eq!(1, string.get_value()); + + let string = StringUTF8Length::try_from(MAX_VALUE_SIZE / 4).unwrap(); + assert_eq!(MAX_VALUE_SIZE / 4, string.get_value()); + + let err = StringUTF8Length::try_from((MAX_VALUE_SIZE / 4) + 1).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); + + let err = StringUTF8Length::try_from((MAX_VALUE_SIZE / 4) + 4).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); +} + +#[test] +fn test_string_utf8_length_try_from_usize_trait() { + let string = StringUTF8Length::try_from(0_usize).unwrap(); + assert_eq!(0, string.get_value()); + + let string = 
StringUTF8Length::try_from(1_usize).unwrap(); + assert_eq!(1, string.get_value()); + + let string = StringUTF8Length::try_from(MAX_VALUE_SIZE as usize / 4).unwrap(); + assert_eq!(MAX_VALUE_SIZE / 4, string.get_value()); + + let err = StringUTF8Length::try_from((MAX_VALUE_SIZE as usize / 4) + 1).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); + + let err = StringUTF8Length::try_from((MAX_VALUE_SIZE as usize / 4) + 4).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); +} + +#[test] +fn test_string_utf8_length_try_from_i128_trait() { + let string = StringUTF8Length::try_from(0_i128).unwrap(); + assert_eq!(0, string.get_value()); + + let string = StringUTF8Length::try_from(1_i128).unwrap(); + assert_eq!(1, string.get_value()); + + let string = StringUTF8Length::try_from(MAX_VALUE_SIZE as i128 / 4).unwrap(); + assert_eq!(MAX_VALUE_SIZE / 4, string.get_value()); + + let err = StringUTF8Length::try_from((MAX_VALUE_SIZE as i128 / 4) + 1).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); + + let err = StringUTF8Length::try_from((MAX_VALUE_SIZE as i128 / 4) + 4).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); + + let err = StringUTF8Length::try_from(-1_i128).unwrap_err(); + assert_eq!(CheckErrors::ValueOutOfBounds, err); +} + +/* +#[test] +fn test_string_utf8_length_try_from_usize_trait() { + let buffer = BufferLength::try_from(0_usize).unwrap(); + assert_eq!(0, buffer.get_value()); + + let buffer = BufferLength::try_from(MAX_VALUE_SIZE as usize).unwrap(); + assert_eq!(MAX_VALUE_SIZE, buffer.get_value()); + + let err = BufferLength::try_from(MAX_VALUE_SIZE as usize + 1).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); +} + +#[test] +fn test_buffer_length_try_from_i128_trait() { + let buffer = BufferLength::try_from(0_i128).unwrap(); + assert_eq!(0, buffer.get_value()); + + let buffer = BufferLength::try_from(MAX_VALUE_SIZE as i128).unwrap(); + assert_eq!(MAX_VALUE_SIZE, buffer.get_value()); + + let err = 
BufferLength::try_from(MAX_VALUE_SIZE as i128 + 1).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); + + let err = BufferLength::try_from(-1_i128).unwrap_err(); + assert_eq!(CheckErrors::ValueOutOfBounds, err); +} +*/ #[test] fn test_least_supertype() { let callables = [ diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 0641d2f018..145ddb6a67 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -122,7 +122,7 @@ impl BufferLength { /// /// This function is primarily intended for internal runtime use, /// and serves as the central place for all integer validation logic. - fn try_from_i128(data: i128) -> Result { + fn try_from_i128(data: i128) -> Result { if data > (MAX_VALUE_SIZE as i128) { Err(CheckErrors::ValueTooLarge) } else if data < 0 { @@ -185,6 +185,74 @@ impl TryFrom for BufferLength { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct StringUTF8Length(u32); +impl StringUTF8Length { + /// Attempts to create a [`StringUTF8Length`] from a [`i128`] as a [`Result`]. + /// + /// This function is primarily intended for internal runtime use, + /// and serves as the central place for all integer validation logic. + fn try_from_i128(data: i128) -> Result { + let len = data + .checked_mul(4) + .ok_or_else(|| CheckErrors::ValueTooLarge)?; + if len > (MAX_VALUE_SIZE as i128) { + Err(CheckErrors::ValueTooLarge) + } else if data < 0 { + Err(CheckErrors::ValueOutOfBounds) + } else { + Ok(StringUTF8Length(data as u32)) + } + } +} + +/// Test-only utilities for [`StringUTF8Length`]. +#[cfg(test)] +impl StringUTF8Length { + /// Allow to create a [`StringUTF8Length`] in unsafe way, + /// allowing direct write-access to its internal state. + pub fn new_unsafe(value: u32) -> Self { + Self(value) + } + + /// Returns the underlying [`u32`] value of this [`StringUTF8Length`]. + /// This to have an easy read-access to its internal state. 
+ pub fn get_value(&self) -> u32 { + self.0 + } +} + +impl From<&StringUTF8Length> for u32 { + fn from(v: &StringUTF8Length) -> u32 { + v.0 + } +} + +impl From for u32 { + fn from(v: StringUTF8Length) -> u32 { + v.0 + } +} + +impl TryFrom for StringUTF8Length { + type Error = CheckErrors; + fn try_from(data: u32) -> Result { + Self::try_from(data as usize) + } +} + +impl TryFrom for StringUTF8Length { + type Error = CheckErrors; + fn try_from(data: usize) -> Result { + Self::try_from(data as i128) + } +} + +impl TryFrom for StringUTF8Length { + type Error = CheckErrors; + fn try_from(data: i128) -> Result { + Self::try_from_i128(data) + } +} + // INVARIANTS enforced by the Type Signatures. // 1. A TypeSignature constructor will always fail rather than construct a // type signature for a too large or invalid type. This is why any variable length @@ -310,62 +378,6 @@ impl From for TypeSignature { } } -impl From<&StringUTF8Length> for u32 { - fn from(v: &StringUTF8Length) -> u32 { - v.0 - } -} - -impl From for u32 { - fn from(v: StringUTF8Length) -> u32 { - v.0 - } -} - -impl TryFrom for StringUTF8Length { - type Error = CheckErrors; - fn try_from(data: u32) -> Result { - let len = data - .checked_mul(4) - .ok_or_else(|| CheckErrors::ValueTooLarge)?; - if len > MAX_VALUE_SIZE { - Err(CheckErrors::ValueTooLarge) - } else { - Ok(StringUTF8Length(data)) - } - } -} - -impl TryFrom for StringUTF8Length { - type Error = CheckErrors; - fn try_from(data: usize) -> Result { - let len = data - .checked_mul(4) - .ok_or_else(|| CheckErrors::ValueTooLarge)?; - if len > (MAX_VALUE_SIZE as usize) { - Err(CheckErrors::ValueTooLarge) - } else { - Ok(StringUTF8Length(data as u32)) - } - } -} - -impl TryFrom for StringUTF8Length { - type Error = CheckErrors; - fn try_from(data: i128) -> Result { - let len = data - .checked_mul(4) - .ok_or_else(|| CheckErrors::ValueTooLarge)?; - if len > (MAX_VALUE_SIZE as i128) { - Err(CheckErrors::ValueTooLarge) - } else if data < 0 { - 
Err(CheckErrors::ValueOutOfBounds) - } else { - Ok(StringUTF8Length(data as u32)) - } - } -} - impl ListTypeData { pub fn new_list(entry_type: TypeSignature, max_len: u32) -> Result { let would_be_depth = 1 + entry_type.depth(); @@ -868,6 +880,9 @@ impl TypeSignature { /// String ASCII type with size 40. pub const STRING_ASCII_40: TypeSignature = Self::type_string_ascii::<40>(); + /// String UTF8 type with minimum size (`1`). + //pub const STRING_UTF8_MIN: TypeSignature = Self::type_string_utf8::<1>(); + /// Creates a buffer type with a given size known at compile time. /// /// This function is intended for defining constant buffer type @@ -880,14 +895,24 @@ impl TypeSignature { /// Creates a string ASCII type with a given size known at compile time. /// - /// This function is intended for defining constant string type + /// This function is intended for defining constant ASCII type /// aliases (e.g., [`TypeSignature::STRING_ASCII_MIN`]) without repeating logic. const fn type_string_ascii() -> Self { SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( BufferLength::try_from_u32_as_opt(VALUE).expect("Invalid buffer size!"), ))) } - + /* + /// Creates a string UTF8 type with a given size known at compile time. + /// + /// This function is intended for defining constant UFT8 type + /// aliases (e.g., [`TypeSignature::STRING_UTF8_MIN`]) without repeating logic. 
+ const fn type_string_utf8() -> Self { + SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( + BufferLength::try_from_u32_as_opt(VALUE).expect("Invalid buffer size!"), + ))) + } + */ pub fn min_string_utf8() -> Result { Ok(SequenceType(SequenceSubtype::StringType( StringSubtype::UTF8(1_u32.try_into().map_err(|_| { From 7ebbea78d26fca6582eb4234cec2cec730da44bc Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 22 Sep 2025 12:57:52 +0200 Subject: [PATCH 26/86] chore: ported min_string_utf8() to STRING_UTF8_MIN, #6467 --- clarity-types/src/tests/types/signatures.rs | 46 ++++++----------- clarity-types/src/types/mod.rs | 4 +- clarity-types/src/types/signatures.rs | 49 ++++++++++++------- .../type_checker/v2_05/natives/sequences.rs | 2 +- .../analysis/type_checker/v2_05/tests/mod.rs | 4 +- .../type_checker/v2_1/natives/sequences.rs | 2 +- .../analysis/type_checker/v2_1/tests/mod.rs | 8 +-- clarity/src/vm/tests/sequences.rs | 2 +- 8 files changed, 55 insertions(+), 62 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 9bd4e7692a..34e2bea683 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -267,34 +267,19 @@ fn test_string_utf8_length_try_from_i128_trait() { assert_eq!(CheckErrors::ValueOutOfBounds, err); } -/* #[test] -fn test_string_utf8_length_try_from_usize_trait() { - let buffer = BufferLength::try_from(0_usize).unwrap(); - assert_eq!(0, buffer.get_value()); - - let buffer = BufferLength::try_from(MAX_VALUE_SIZE as usize).unwrap(); - assert_eq!(MAX_VALUE_SIZE, buffer.get_value()); +fn test_type_string_utf8_min() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( + StringUTF8Length::new_unsafe(1), + ))); + let actual = TypeSignature::STRING_UTF8_MIN; - let err = BufferLength::try_from(MAX_VALUE_SIZE as usize + 1).unwrap_err(); - assert_eq!(CheckErrors::ValueTooLarge, err); 
+ assert_eq!(expected, actual); + assert_eq!(8, actual.size().unwrap(), "size should be 8"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); } -#[test] -fn test_buffer_length_try_from_i128_trait() { - let buffer = BufferLength::try_from(0_i128).unwrap(); - assert_eq!(0, buffer.get_value()); - - let buffer = BufferLength::try_from(MAX_VALUE_SIZE as i128).unwrap(); - assert_eq!(MAX_VALUE_SIZE, buffer.get_value()); - - let err = BufferLength::try_from(MAX_VALUE_SIZE as i128 + 1).unwrap_err(); - assert_eq!(CheckErrors::ValueTooLarge, err); - - let err = BufferLength::try_from(-1_i128).unwrap_err(); - assert_eq!(CheckErrors::ValueOutOfBounds, err); -} -*/ #[test] fn test_least_supertype() { let callables = [ @@ -565,7 +550,7 @@ fn test_least_supertype() { ), ( ( - TypeSignature::min_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MIN, TypeSignature::max_string_utf8().unwrap(), ), TypeSignature::max_string_utf8().unwrap(), @@ -700,13 +685,10 @@ fn test_least_supertype() { TypeSignature::list_of(TypeSignature::IntType, 42).unwrap(), ), ( - TypeSignature::min_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MIN, TypeSignature::bound_string_ascii_type(17).unwrap(), ), - ( - TypeSignature::min_string_utf8().unwrap(), - TypeSignature::BUFFER_MIN, - ), + (TypeSignature::STRING_UTF8_MIN, TypeSignature::BUFFER_MIN), ( TypeSignature::TupleType( TupleTypeSignature::try_from(vec![("a".into(), TypeSignature::IntType)]).unwrap(), @@ -717,7 +699,7 @@ fn test_least_supertype() { ), ( TypeSignature::new_option(TypeSignature::IntType).unwrap(), - TypeSignature::new_option(TypeSignature::min_string_utf8().unwrap()).unwrap(), + TypeSignature::new_option(TypeSignature::STRING_UTF8_MIN).unwrap(), ), ( TypeSignature::new_response(TypeSignature::IntType, TypeSignature::BoolType).unwrap(), @@ -759,7 +741,7 @@ fn test_least_supertype() { ), ( TypeSignature::new_option(TypeSignature::STRING_ASCII_MIN).unwrap(), 
- TypeSignature::new_option(TypeSignature::min_string_utf8().unwrap()).unwrap(), + TypeSignature::new_option(TypeSignature::STRING_UTF8_MIN).unwrap(), ), ( TypeSignature::new_response(TypeSignature::PrincipalType, list_union).unwrap(), diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index b8fce796a3..a63021a66e 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -344,7 +344,7 @@ impl SequenceData { SequenceData::Buffer(..) => TypeSignature::BUFFER_MIN.size(), SequenceData::List(data) => data.type_signature.get_list_item_type().size(), SequenceData::String(CharType::ASCII(..)) => TypeSignature::STRING_ASCII_MIN.size(), - SequenceData::String(CharType::UTF8(..)) => TypeSignature::min_string_utf8()?.size(), + SequenceData::String(CharType::UTF8(..)) => TypeSignature::STRING_UTF8_MIN.size(), }?; Ok(out) } @@ -504,7 +504,7 @@ impl SequenceData { } } else { Err(CheckErrors::TypeValueError( - Box::new(TypeSignature::min_string_utf8()?), + Box::new(TypeSignature::STRING_UTF8_MIN), Box::new(to_find), ) .into()) diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 145ddb6a67..5616ab6e0b 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -186,6 +186,23 @@ impl TryFrom for BufferLength { pub struct StringUTF8Length(u32); impl StringUTF8Length { + /// Attempts to create a [`StringUTF8Length`] from a [`u32`] as an [`Option`]. + /// + /// This function is primarily intended for internal use when defining + /// `const` values, since it returns an [`Option`] that can be unwrapped + /// with [`Option::expect`] in a `const fn`. + const fn try_from_u32_as_opt(value: u32) -> Option { + let len = match value.checked_mul(4) { + Some(v) => v, + None => return None, + }; + if len > MAX_VALUE_SIZE { + None + } else { + Some(StringUTF8Length(value)) + } + } + /// Attempts to create a [`StringUTF8Length`] from a [`i128`] as a [`Result`]. 
/// /// This function is primarily intended for internal runtime use, @@ -303,7 +320,9 @@ impl SequenceSubtype { SequenceSubtype::StringType(StringSubtype::ASCII(_)) => { Ok(TypeSignature::STRING_ASCII_MIN) } - SequenceSubtype::StringType(StringSubtype::UTF8(_)) => TypeSignature::min_string_utf8(), + SequenceSubtype::StringType(StringSubtype::UTF8(_)) => { + Ok(TypeSignature::STRING_UTF8_MIN) + } } } @@ -881,7 +900,7 @@ impl TypeSignature { pub const STRING_ASCII_40: TypeSignature = Self::type_string_ascii::<40>(); /// String UTF8 type with minimum size (`1`). - //pub const STRING_UTF8_MIN: TypeSignature = Self::type_string_utf8::<1>(); + pub const STRING_UTF8_MIN: TypeSignature = Self::type_string_utf8::<1>(); /// Creates a buffer type with a given size known at compile time. /// @@ -899,25 +918,17 @@ impl TypeSignature { /// aliases (e.g., [`TypeSignature::STRING_ASCII_MIN`]) without repeating logic. const fn type_string_ascii() -> Self { SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( - BufferLength::try_from_u32_as_opt(VALUE).expect("Invalid buffer size!"), + BufferLength::try_from_u32_as_opt(VALUE).expect("Invalid ascii size!"), ))) } - /* - /// Creates a string UTF8 type with a given size known at compile time. - /// - /// This function is intended for defining constant UFT8 type - /// aliases (e.g., [`TypeSignature::STRING_UTF8_MIN`]) without repeating logic. - const fn type_string_utf8() -> Self { - SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( - BufferLength::try_from_u32_as_opt(VALUE).expect("Invalid buffer size!"), - ))) - } - */ - pub fn min_string_utf8() -> Result { - Ok(SequenceType(SequenceSubtype::StringType( - StringSubtype::UTF8(1_u32.try_into().map_err(|_| { - CheckErrors::Expects("FAIL: Min clarity value size is not realizable".into()) - })?), + + /// Creates a string UTF8 type with a given size known at compile time. 
+ /// + /// This function is intended for defining constant UFT8 type + /// aliases (e.g., [`TypeSignature::STRING_UTF8_MIN`]) without repeating logic. + const fn type_string_utf8() -> Self { + SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( + StringUTF8Length::try_from_u32_as_opt(VALUE).expect("Invalid utf8 size!"), ))) } diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs index 9cab80d2b4..f0cc12b2a1 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs @@ -86,7 +86,7 @@ pub fn check_special_map( (TypeSignature::STRING_ASCII_MIN, ascii_data.into()) } StringType(UTF8(utf8_data)) => { - (TypeSignature::min_string_utf8()?, utf8_data.into()) + (TypeSignature::STRING_UTF8_MIN, utf8_data.into()) } }; min_args = min_args.min(len); diff --git a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs index 19f6833f5d..f0cfe1068a 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs @@ -723,12 +723,12 @@ fn test_index_of() { Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrors::TypeError( - Box::new(TypeSignature::min_string_utf8().unwrap()), + Box::new(TypeSignature::STRING_UTF8_MIN), Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrors::TypeError( Box::new(TypeSignature::STRING_ASCII_MIN), - Box::new(TypeSignature::min_string_utf8().unwrap()), + Box::new(TypeSignature::STRING_UTF8_MIN), ), CheckErrors::CouldNotDetermineType, CheckErrors::CouldNotDetermineType, diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs index 06bbebf286..2ce71a5d38 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs +++ 
b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs @@ -92,7 +92,7 @@ pub fn check_special_map( (TypeSignature::STRING_ASCII_MIN, ascii_data.into()) } StringType(UTF8(utf8_data)) => { - (TypeSignature::min_string_utf8()?, utf8_data.into()) + (TypeSignature::STRING_UTF8_MIN, utf8_data.into()) } }; min_args = min_args.min(len); diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 6b84210013..94eb174615 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -1093,12 +1093,12 @@ fn test_index_of() { Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrors::TypeError( - Box::new(TypeSignature::min_string_utf8().unwrap()), + Box::new(TypeSignature::STRING_UTF8_MIN), Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrors::TypeError( Box::new(TypeSignature::STRING_ASCII_MIN), - Box::new(TypeSignature::min_string_utf8().unwrap()), + Box::new(TypeSignature::STRING_UTF8_MIN), ), CheckErrors::TypeError( Box::new(TypeSignature::list_of(TypeSignature::IntType, 1).unwrap()), @@ -1114,12 +1114,12 @@ fn test_index_of() { Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrors::TypeError( - Box::new(TypeSignature::min_string_utf8().unwrap()), + Box::new(TypeSignature::STRING_UTF8_MIN), Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrors::TypeError( Box::new(TypeSignature::STRING_ASCII_MIN), - Box::new(TypeSignature::min_string_utf8().unwrap()), + Box::new(TypeSignature::STRING_UTF8_MIN), ), CheckErrors::CouldNotDetermineType, CheckErrors::CouldNotDetermineType, diff --git a/clarity/src/vm/tests/sequences.rs b/clarity/src/vm/tests/sequences.rs index efabf0f37d..85675f0ad5 100644 --- a/clarity/src/vm/tests/sequences.rs +++ b/clarity/src/vm/tests/sequences.rs @@ -118,7 +118,7 @@ fn test_index_of() { Box::new(execute("\"a\"").unwrap().unwrap()), ), CheckErrors::TypeValueError( - 
Box::new(TypeSignature::min_string_utf8().unwrap()), + Box::new(TypeSignature::STRING_UTF8_MIN), Box::new(execute("\"a\"").unwrap().unwrap()), ), CheckErrors::TypeValueError( From b6316d1adb66f370c8ffe56eb1a253faca32801e Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 22 Sep 2025 13:27:36 +0200 Subject: [PATCH 27/86] chore: ported max_string_utf8() to STRING_UTF8_MAX, #6467 --- clarity-types/src/tests/types/signatures.rs | 66 ++++++++++++------- clarity-types/src/types/mod.rs | 2 +- clarity-types/src/types/signatures.rs | 35 +++------- .../src/vm/analysis/type_checker/v2_1/mod.rs | 2 +- .../analysis/type_checker/v2_1/natives/mod.rs | 6 +- .../type_checker/v2_1/tests/contracts.rs | 2 +- .../analysis/type_checker/v2_1/tests/mod.rs | 8 +-- clarity/src/vm/functions/arithmetic.rs | 2 +- clarity/src/vm/functions/conversions.rs | 4 +- clarity/src/vm/tests/conversions.rs | 4 +- clarity/src/vm/tests/simple_apply_eval.rs | 8 +-- 11 files changed, 72 insertions(+), 67 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 34e2bea683..af5333ac77 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -18,7 +18,7 @@ use crate::errors::CheckErrors; use crate::types::TypeSignature::{BoolType, IntType, ListUnionType, UIntType}; use crate::types::signatures::{CallableSubtype, TypeSignature}; use crate::types::{ - BufferLength, MAX_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype, StringSubtype, + BufferLength, MAX_VALUE_SIZE, MAX_UTF8_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype, StringSubtype, StringUTF8Length, TraitIdentifier, TupleTypeSignature, }; @@ -27,6 +27,11 @@ fn test_max_value_size() { assert_eq!(1024 * 1024, MAX_VALUE_SIZE); } +#[test] +fn test_max_utf8_value_size() { + assert_eq!(MAX_VALUE_SIZE / 4, MAX_UTF8_VALUE_SIZE); +} + #[test] fn test_buffer_length_try_from_u32_trait() { let buffer = 
BufferLength::try_from(0_u32).unwrap(); @@ -218,13 +223,13 @@ fn test_string_utf8_length_try_from_u32_trait() { let string = StringUTF8Length::try_from(1_u32).unwrap(); assert_eq!(1, string.get_value()); - let string = StringUTF8Length::try_from(MAX_VALUE_SIZE / 4).unwrap(); - assert_eq!(MAX_VALUE_SIZE / 4, string.get_value()); + let string = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE).unwrap(); + assert_eq!(MAX_UTF8_VALUE_SIZE, string.get_value()); - let err = StringUTF8Length::try_from((MAX_VALUE_SIZE / 4) + 1).unwrap_err(); + let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE + 1).unwrap_err(); assert_eq!(CheckErrors::ValueTooLarge, err); - let err = StringUTF8Length::try_from((MAX_VALUE_SIZE / 4) + 4).unwrap_err(); + let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE + 4).unwrap_err(); assert_eq!(CheckErrors::ValueTooLarge, err); } @@ -236,13 +241,13 @@ fn test_string_utf8_length_try_from_usize_trait() { let string = StringUTF8Length::try_from(1_usize).unwrap(); assert_eq!(1, string.get_value()); - let string = StringUTF8Length::try_from(MAX_VALUE_SIZE as usize / 4).unwrap(); - assert_eq!(MAX_VALUE_SIZE / 4, string.get_value()); + let string = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE as usize).unwrap(); + assert_eq!(MAX_UTF8_VALUE_SIZE, string.get_value()); - let err = StringUTF8Length::try_from((MAX_VALUE_SIZE as usize / 4) + 1).unwrap_err(); + let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE as usize + 1).unwrap_err(); assert_eq!(CheckErrors::ValueTooLarge, err); - let err = StringUTF8Length::try_from((MAX_VALUE_SIZE as usize / 4) + 4).unwrap_err(); + let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE as usize + 4).unwrap_err(); assert_eq!(CheckErrors::ValueTooLarge, err); } @@ -254,13 +259,13 @@ fn test_string_utf8_length_try_from_i128_trait() { let string = StringUTF8Length::try_from(1_i128).unwrap(); assert_eq!(1, string.get_value()); - let string = StringUTF8Length::try_from(MAX_VALUE_SIZE as i128 / 4).unwrap(); - 
assert_eq!(MAX_VALUE_SIZE / 4, string.get_value()); + let string = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE as i128).unwrap(); + assert_eq!(MAX_UTF8_VALUE_SIZE, string.get_value()); - let err = StringUTF8Length::try_from((MAX_VALUE_SIZE as i128 / 4) + 1).unwrap_err(); + let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE + 1).unwrap_err(); assert_eq!(CheckErrors::ValueTooLarge, err); - let err = StringUTF8Length::try_from((MAX_VALUE_SIZE as i128 / 4) + 4).unwrap_err(); + let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE + 4).unwrap_err(); assert_eq!(CheckErrors::ValueTooLarge, err); let err = StringUTF8Length::try_from(-1_i128).unwrap_err(); @@ -280,6 +285,24 @@ fn test_type_string_utf8_min() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_string_utf8_max() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( + StringUTF8Length::new_unsafe(MAX_UTF8_VALUE_SIZE), + ))); + let actual = TypeSignature::STRING_UTF8_MAX; + + assert_eq!(expected, actual); + assert_eq!(TypeSignature::STRING_UTF8_MAX, actual); + assert_eq!( + MAX_VALUE_SIZE + 4, + actual.size().unwrap(), + "size should be 1048580" + ); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ @@ -344,11 +367,8 @@ fn test_least_supertype() { TypeSignature::bound_string_ascii_type(17).unwrap(), ), ( - ( - TypeSignature::NoType, - TypeSignature::max_string_utf8().unwrap(), - ), - TypeSignature::max_string_utf8().unwrap(), + (TypeSignature::NoType, TypeSignature::STRING_UTF8_MAX), + TypeSignature::STRING_UTF8_MAX, ), ( (TypeSignature::NoType, TypeSignature::PrincipalType), @@ -446,10 +466,10 @@ fn test_least_supertype() { ), ( ( - TypeSignature::max_string_utf8().unwrap(), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, + TypeSignature::STRING_UTF8_MAX, ), - 
TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, ), ( (TypeSignature::PrincipalType, TypeSignature::PrincipalType), @@ -551,9 +571,9 @@ fn test_least_supertype() { ( ( TypeSignature::STRING_UTF8_MIN, - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, ), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, ), ( ( diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index a63021a66e..cd357b1f17 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -38,11 +38,11 @@ pub use self::signatures::{ }; use crate::errors::{CheckErrors, InterpreterError, InterpreterResult as Result, RuntimeErrorType}; use crate::representations::{ClarityName, ContractName, SymbolicExpression}; -// use crate::vm::ClarityVersion; pub const MAX_VALUE_SIZE: u32 = 1024 * 1024; // 1MB pub const BOUND_VALUE_SERIALIZATION_BYTES: u32 = MAX_VALUE_SIZE * 2; pub const BOUND_VALUE_SERIALIZATION_HEX: u32 = BOUND_VALUE_SERIALIZATION_BYTES * 2; +pub const MAX_UTF8_VALUE_SIZE: u32 = MAX_VALUE_SIZE / 4; pub const MAX_TYPE_DEPTH: u8 = 32; // this is the charged size for wrapped values, i.e., response or optionals diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 5616ab6e0b..bf417dca53 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -26,9 +26,9 @@ use stacks_common::types::StacksEpochId; use crate::errors::CheckErrors; use crate::representations::{CONTRACT_MAX_NAME_LENGTH, ClarityName, ContractName}; use crate::types::{ - CharType, MAX_TYPE_DEPTH, MAX_VALUE_SIZE, PrincipalData, QualifiedContractIdentifier, - SequenceData, SequencedValue, StandardPrincipalData, TraitIdentifier, Value, - WRAPPER_VALUE_SIZE, + CharType, MAX_TYPE_DEPTH, MAX_UTF8_VALUE_SIZE, MAX_VALUE_SIZE, PrincipalData, + QualifiedContractIdentifier, SequenceData, SequencedValue, StandardPrincipalData, + TraitIdentifier, 
Value, WRAPPER_VALUE_SIZE, }; #[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Serialize, Deserialize, Hash)] @@ -192,11 +192,7 @@ impl StringUTF8Length { /// `const` values, since it returns an [`Option`] that can be unwrapped /// with [`Option::expect`] in a `const fn`. const fn try_from_u32_as_opt(value: u32) -> Option { - let len = match value.checked_mul(4) { - Some(v) => v, - None => return None, - }; - if len > MAX_VALUE_SIZE { + if value > MAX_UTF8_VALUE_SIZE { None } else { Some(StringUTF8Length(value)) @@ -207,16 +203,13 @@ impl StringUTF8Length { /// /// This function is primarily intended for internal runtime use, /// and serves as the central place for all integer validation logic. - fn try_from_i128(data: i128) -> Result { - let len = data - .checked_mul(4) - .ok_or_else(|| CheckErrors::ValueTooLarge)?; - if len > (MAX_VALUE_SIZE as i128) { + fn try_from_i128(value: i128) -> Result { + if value > MAX_UTF8_VALUE_SIZE as i128 { Err(CheckErrors::ValueTooLarge) - } else if data < 0 { + } else if value < 0 { Err(CheckErrors::ValueOutOfBounds) } else { - Ok(StringUTF8Length(data as u32)) + Ok(StringUTF8Length(value as u32)) } } } @@ -901,6 +894,8 @@ impl TypeSignature { /// String UTF8 type with minimum size (`1`). pub const STRING_UTF8_MIN: TypeSignature = Self::type_string_utf8::<1>(); + /// String UTF8 type with maximum size. Depends on [`MAX_UTF8_VALUE_SIZE`]. + pub const STRING_UTF8_MAX: TypeSignature = Self::type_string_utf8::(); /// Creates a buffer type with a given size known at compile time. 
/// @@ -932,16 +927,6 @@ impl TypeSignature { ))) } - pub fn max_string_utf8() -> Result { - Ok(SequenceType(SequenceSubtype::StringType( - StringSubtype::UTF8(StringUTF8Length::try_from(MAX_VALUE_SIZE / 4).map_err(|_| { - CheckErrors::Expects( - "FAIL: Max Clarity Value Size is no longer realizable in UTF8 Type".into(), - ) - })?), - ))) - } - pub fn contract_name_string_ascii_type() -> Result { TypeSignature::bound_string_ascii_type(CONTRACT_MAX_NAME_LENGTH.try_into().map_err( |_| CheckErrors::Expects("FAIL: contract name max length exceeds u32 space".into()), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index 4debaf2563..fa1656601d 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -408,7 +408,7 @@ impl FunctionType { TypeSignature::IntType, TypeSignature::UIntType, TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8()?, + TypeSignature::STRING_UTF8_MAX, TypeSignature::BUFFER_MAX, ], Box::new(first.clone()), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index 32d03104c8..25355a9933 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -933,14 +933,14 @@ impl TypedNativeFunction { StringToInt => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8()?, + TypeSignature::STRING_UTF8_MAX, ], TypeSignature::OptionalType(Box::new(TypeSignature::IntType)), ))), StringToUInt => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8()?, + TypeSignature::STRING_UTF8_MAX, ], TypeSignature::OptionalType(Box::new(TypeSignature::UIntType)), ))), @@ -1201,7 +1201,7 @@ impl TypedNativeFunction { TypeSignature::BoolType, 
TypeSignature::PrincipalType, TO_ASCII_MAX_BUFF.clone(), - TypeSignature::max_string_utf8()?, + TypeSignature::STRING_UTF8_MAX, ], TypeSignature::new_response( TO_ASCII_RESPONSE_STRING.clone(), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index 82dd67ce0e..0a002d41ac 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -3512,7 +3512,7 @@ fn test_to_ascii(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) TypeSignature::BoolType, TypeSignature::PrincipalType, TO_ASCII_MAX_BUFF.clone(), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, ]; let test_cases = [ ( diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 94eb174615..461aacc953 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -2232,7 +2232,7 @@ fn test_string_to_ints() { CheckErrors::UnionTypeError( vec![ TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, ], Box::new(SequenceType(BufferType( BufferLength::try_from(17_u32).unwrap(), @@ -2241,7 +2241,7 @@ fn test_string_to_ints() { CheckErrors::UnionTypeError( vec![ TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, ], Box::new(IntType), ), @@ -2250,7 +2250,7 @@ fn test_string_to_ints() { CheckErrors::UnionTypeError( vec![ TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, ], Box::new(SequenceType(BufferType( BufferLength::try_from(17_u32).unwrap(), @@ -2259,7 +2259,7 @@ fn test_string_to_ints() { CheckErrors::UnionTypeError( vec![ TypeSignature::STRING_ASCII_MAX, - 
TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, ], Box::new(IntType), ), diff --git a/clarity/src/vm/functions/arithmetic.rs b/clarity/src/vm/functions/arithmetic.rs index df7e4ebabd..fcbbc5ab48 100644 --- a/clarity/src/vm/functions/arithmetic.rs +++ b/clarity/src/vm/functions/arithmetic.rs @@ -124,7 +124,7 @@ macro_rules! type_force_binary_comparison_v2 { TypeSignature::IntType, TypeSignature::UIntType, TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8()?, + TypeSignature::STRING_UTF8_MAX, TypeSignature::BUFFER_MAX, ], Box::new(x), diff --git a/clarity/src/vm/functions/conversions.rs b/clarity/src/vm/functions/conversions.rs index c5f883b0d1..01ddc89aa0 100644 --- a/clarity/src/vm/functions/conversions.rs +++ b/clarity/src/vm/functions/conversions.rs @@ -152,7 +152,7 @@ pub fn native_string_to_int_generic( _ => Err(CheckErrors::UnionTypeValueError( vec![ TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8()?, + TypeSignature::STRING_UTF8_MAX, ], Box::new(value), ) @@ -278,7 +278,7 @@ pub fn special_to_ascii( TypeSignature::BoolType, TypeSignature::PrincipalType, TO_ASCII_MAX_BUFF.clone(), - TypeSignature::max_string_utf8()?, + TypeSignature::STRING_UTF8_MAX, ], Box::new(value), ) diff --git a/clarity/src/vm/tests/conversions.rs b/clarity/src/vm/tests/conversions.rs index e8a07fe4ea..565f4e09dc 100644 --- a/clarity/src/vm/tests/conversions.rs +++ b/clarity/src/vm/tests/conversions.rs @@ -315,7 +315,7 @@ fn test_simple_string_to_int() { CheckErrors::UnionTypeValueError( vec![ TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, ], Box::new(Value::Int(1)) ) @@ -380,7 +380,7 @@ fn test_simple_string_to_uint() { CheckErrors::UnionTypeValueError( vec![ TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, ], Box::new(Value::Int(1)) ) diff --git a/clarity/src/vm/tests/simple_apply_eval.rs 
b/clarity/src/vm/tests/simple_apply_eval.rs index 471ed1721c..ac584a98e8 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -1015,7 +1015,7 @@ fn test_sequence_comparisons_mismatched_types() { TypeSignature::IntType, TypeSignature::UIntType, TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, TypeSignature::BUFFER_MAX, ], Box::new(Value::Int(0)), @@ -1026,7 +1026,7 @@ fn test_sequence_comparisons_mismatched_types() { TypeSignature::IntType, TypeSignature::UIntType, TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, TypeSignature::BUFFER_MAX, ], Box::new(Value::Int(0)), @@ -1049,7 +1049,7 @@ fn test_sequence_comparisons_mismatched_types() { TypeSignature::IntType, TypeSignature::UIntType, TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, TypeSignature::BUFFER_MAX, ], Box::new(Value::Sequence(SequenceData::String(CharType::ASCII( @@ -1064,7 +1064,7 @@ fn test_sequence_comparisons_mismatched_types() { TypeSignature::IntType, TypeSignature::UIntType, TypeSignature::STRING_ASCII_MAX, - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, TypeSignature::BUFFER_MAX, ], Box::new(Value::Sequence(SequenceData::String(CharType::ASCII( From f6e870b45c38611888b712829f5e3afafb2d174d Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 22 Sep 2025 13:35:01 +0200 Subject: [PATCH 28/86] chore: ported UTF8_40 to STRING_UTF8_40, #6467 --- clarity-types/src/tests/types/signatures.rs | 28 +++++++++++-------- clarity-types/src/types/signatures.rs | 12 ++++---- .../analysis/type_checker/v2_1/natives/mod.rs | 4 +-- clarity/src/vm/types/signatures.rs | 2 +- 4 files changed, 24 insertions(+), 22 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 
af5333ac77..da8c0c7041 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -18,8 +18,8 @@ use crate::errors::CheckErrors; use crate::types::TypeSignature::{BoolType, IntType, ListUnionType, UIntType}; use crate::types::signatures::{CallableSubtype, TypeSignature}; use crate::types::{ - BufferLength, MAX_VALUE_SIZE, MAX_UTF8_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype, StringSubtype, - StringUTF8Length, TraitIdentifier, TupleTypeSignature, + BufferLength, MAX_UTF8_VALUE_SIZE, MAX_VALUE_SIZE, QualifiedContractIdentifier, + SequenceSubtype, StringSubtype, StringUTF8Length, TraitIdentifier, TupleTypeSignature, }; #[test] @@ -228,9 +228,6 @@ fn test_string_utf8_length_try_from_u32_trait() { let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE + 1).unwrap_err(); assert_eq!(CheckErrors::ValueTooLarge, err); - - let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE + 4).unwrap_err(); - assert_eq!(CheckErrors::ValueTooLarge, err); } #[test] @@ -246,9 +243,6 @@ fn test_string_utf8_length_try_from_usize_trait() { let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE as usize + 1).unwrap_err(); assert_eq!(CheckErrors::ValueTooLarge, err); - - let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE as usize + 4).unwrap_err(); - assert_eq!(CheckErrors::ValueTooLarge, err); } #[test] @@ -262,10 +256,7 @@ fn test_string_utf8_length_try_from_i128_trait() { let string = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE as i128).unwrap(); assert_eq!(MAX_UTF8_VALUE_SIZE, string.get_value()); - let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE + 1).unwrap_err(); - assert_eq!(CheckErrors::ValueTooLarge, err); - - let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE + 4).unwrap_err(); + let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE as i128 + 1).unwrap_err(); assert_eq!(CheckErrors::ValueTooLarge, err); let err = StringUTF8Length::try_from(-1_i128).unwrap_err(); @@ -303,6 +294,19 @@ fn 
test_type_string_utf8_max() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_string_utf8_40() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( + StringUTF8Length::new_unsafe(40), + ))); + let actual = TypeSignature::STRING_UTF8_40; + + assert_eq!(expected, actual); + assert_eq!(164, actual.size().unwrap(), "size should be 164"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index bf417dca53..6a6457a387 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -368,10 +368,6 @@ lazy_static! { }; } -pub const UTF8_40: TypeSignature = SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( - StringUTF8Length(40), -))); - #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct ListTypeData { max_len: u32, @@ -870,7 +866,7 @@ impl TupleTypeSignature { impl TypeSignature { /// Buffer type with minimum size. Alias for [`TypeSignature::BUFFER_1`]. pub const BUFFER_MIN: TypeSignature = TypeSignature::BUFFER_1; - /// Buffer type with maximum size. Depends on [`MAX_VALUE_SIZE`]. + /// Buffer type with maximum size ([`MAX_VALUE_SIZE`]). pub const BUFFER_MAX: TypeSignature = Self::type_buffer_of_size::(); /// Buffer type with size 1. pub const BUFFER_1: TypeSignature = Self::type_buffer_of_size::<1>(); @@ -887,15 +883,17 @@ impl TypeSignature { /// String ASCII type with minimum size (`1`). pub const STRING_ASCII_MIN: TypeSignature = Self::type_string_ascii::<1>(); - /// String ASCII type with maximum size. Depends on [`MAX_VALUE_SIZE`]. + /// String ASCII type with maximum size ([`MAX_VALUE_SIZE`]). pub const STRING_ASCII_MAX: TypeSignature = Self::type_string_ascii::(); /// String ASCII type with size 40. 
pub const STRING_ASCII_40: TypeSignature = Self::type_string_ascii::<40>(); /// String UTF8 type with minimum size (`1`). pub const STRING_UTF8_MIN: TypeSignature = Self::type_string_utf8::<1>(); - /// String UTF8 type with maximum size. Depends on [`MAX_UTF8_VALUE_SIZE`]. + /// String UTF8 type with maximum size ([`MAX_UTF8_VALUE_SIZE`]). pub const STRING_UTF8_MAX: TypeSignature = Self::type_string_utf8::(); + /// String UTF8 type with size 40. + pub const STRING_UTF8_40: TypeSignature = Self::type_string_utf8::<40>(); /// Creates a buffer type with a given size known at compile time. /// diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index 25355a9933..72c37937f0 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -27,7 +27,7 @@ use crate::vm::diagnostic::DiagnosableError; use crate::vm::functions::{handle_binding_list, NativeFunctions}; use crate::vm::types::signatures::{ CallableSubtype, FunctionArgSignature, FunctionReturnsSignature, SequenceSubtype, - TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, + TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, }; use crate::vm::types::{ BlockInfoProperty, BufferLength, BurnBlockInfoProperty, FixedFunction, FunctionArg, @@ -952,7 +952,7 @@ impl TypedNativeFunction { IntToUtf8 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![TypeSignature::IntType, TypeSignature::UIntType], // 40 is the longest string one can get from int->string conversion. 
- UTF8_40, + TypeSignature::STRING_UTF8_40, ))), Not => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction { args: vec![FunctionArg::new( diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 75274c19e1..d75dcf835d 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -20,7 +20,7 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, StringUTF8Length, TupleTypeSignature, TypeSignature, MAX_TO_ASCII_BUFFER_LEN, - TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, + TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, }; pub use clarity_types::types::Value; use stacks_common::types::StacksEpochId; From bc8061c6be49efbc754d9e8452a987969594194f Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 22 Sep 2025 14:18:45 +0200 Subject: [PATCH 29/86] chore: ported TO_ASCII_MAX_BUFF to TO_ASCII_BUFFER_MAX, #6467 --- clarity-types/src/tests/types/signatures.rs | 18 +++++++++++-- clarity-types/src/types/mod.rs | 9 +++++++ clarity-types/src/types/signatures.rs | 26 +++++-------------- .../analysis/type_checker/v2_1/natives/mod.rs | 4 +-- .../type_checker/v2_1/tests/contracts.rs | 4 +-- clarity/src/vm/functions/conversions.rs | 3 +-- clarity/src/vm/tests/conversions.rs | 2 +- clarity/src/vm/types/signatures.rs | 3 +-- 8 files changed, 39 insertions(+), 30 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index da8c0c7041..4a6ababfba 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -18,8 +18,9 @@ use crate::errors::CheckErrors; use crate::types::TypeSignature::{BoolType, IntType, ListUnionType, UIntType}; use crate::types::signatures::{CallableSubtype, TypeSignature}; use crate::types::{ - BufferLength, MAX_UTF8_VALUE_SIZE, MAX_VALUE_SIZE, QualifiedContractIdentifier, - 
SequenceSubtype, StringSubtype, StringUTF8Length, TraitIdentifier, TupleTypeSignature, + BufferLength, MAX_TO_ASCII_BUFFER_LEN, MAX_UTF8_VALUE_SIZE, MAX_VALUE_SIZE, + QualifiedContractIdentifier, SequenceSubtype, StringSubtype, StringUTF8Length, TraitIdentifier, + TupleTypeSignature, }; #[test] @@ -307,6 +308,19 @@ fn test_type_string_utf8_40() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_buffer_for_to_ascii_call() { + let expected = TypeSignature::SequenceType(SequenceSubtype::BufferType( + BufferLength::new_unsafe(MAX_TO_ASCII_BUFFER_LEN), + )); + let actual = TypeSignature::TO_ASCII_BUFFER_MAX; + + assert_eq!(expected, actual); + assert_eq!(524_288, actual.size().unwrap(), "size should be 524_288"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index cd357b1f17..b9cfc3685a 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -42,8 +42,17 @@ use crate::representations::{ClarityName, ContractName, SymbolicExpression}; pub const MAX_VALUE_SIZE: u32 = 1024 * 1024; // 1MB pub const BOUND_VALUE_SERIALIZATION_BYTES: u32 = MAX_VALUE_SIZE * 2; pub const BOUND_VALUE_SERIALIZATION_HEX: u32 = BOUND_VALUE_SERIALIZATION_BYTES * 2; + +/// Maximum length for UFT8 string. pub const MAX_UTF8_VALUE_SIZE: u32 = MAX_VALUE_SIZE / 4; +/// Maximum string length returned from `to-ascii?`. +/// 5 bytes reserved for embedding in response. +const MAX_TO_ASCII_RESULT_LEN: u32 = MAX_VALUE_SIZE - 5; +/// Maximum buffer length returned from `to-ascii?`. +/// 2 bytes reserved for "0x" prefix and 2 characters per byte. 
+pub const MAX_TO_ASCII_BUFFER_LEN: u32 = (MAX_TO_ASCII_RESULT_LEN - 2) / 2; + pub const MAX_TYPE_DEPTH: u8 = 32; // this is the charged size for wrapped values, i.e., response or optionals pub const WRAPPER_VALUE_SIZE: u32 = 1; diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 6a6457a387..45389e033a 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -26,9 +26,9 @@ use stacks_common::types::StacksEpochId; use crate::errors::CheckErrors; use crate::representations::{CONTRACT_MAX_NAME_LENGTH, ClarityName, ContractName}; use crate::types::{ - CharType, MAX_TYPE_DEPTH, MAX_UTF8_VALUE_SIZE, MAX_VALUE_SIZE, PrincipalData, - QualifiedContractIdentifier, SequenceData, SequencedValue, StandardPrincipalData, - TraitIdentifier, Value, WRAPPER_VALUE_SIZE, + CharType, MAX_TO_ASCII_BUFFER_LEN, MAX_TO_ASCII_RESULT_LEN, MAX_TYPE_DEPTH, + MAX_UTF8_VALUE_SIZE, MAX_VALUE_SIZE, PrincipalData, QualifiedContractIdentifier, SequenceData, + SequencedValue, StandardPrincipalData, TraitIdentifier, Value, WRAPPER_VALUE_SIZE, }; #[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Serialize, Deserialize, Hash)] @@ -341,23 +341,7 @@ use self::TypeSignature::{ ResponseType, SequenceType, TraitReferenceType, TupleType, UIntType, }; -/// Maximum string length returned from `to-ascii?`. -/// 5 bytes reserved for embedding in response. -const MAX_TO_ASCII_RESULT_LEN: u32 = MAX_VALUE_SIZE - 5; - -/// Maximum buffer length returned from `to-ascii?`. -/// 2 bytes reserved for "0x" prefix and 2 characters per byte. -pub const MAX_TO_ASCII_BUFFER_LEN: u32 = (MAX_TO_ASCII_RESULT_LEN - 2) / 2; - lazy_static! { - /// Maximum-sized buffer allowed for `to-ascii?` call. 
- pub static ref TO_ASCII_MAX_BUFF: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(MAX_TO_ASCII_BUFFER_LEN) - .expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; /// Maximum-length string returned from `to-ascii?` pub static ref TO_ASCII_RESPONSE_STRING: TypeSignature = { #[allow(clippy::expect_used)] @@ -895,6 +879,10 @@ impl TypeSignature { /// String UTF8 type with size 40. pub const STRING_UTF8_40: TypeSignature = Self::type_string_utf8::<40>(); + /// Maximum-sized ([`MAX_TO_ASCII_BUFFER_LEN`]) buffer allowed for `to-ascii?` call. + pub const TO_ASCII_BUFFER_MAX: TypeSignature = + Self::type_buffer_of_size::(); + /// Creates a buffer type with a given size known at compile time. /// /// This function is intended for defining constant buffer type diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index 72c37937f0..7cb715380b 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -27,7 +27,7 @@ use crate::vm::diagnostic::DiagnosableError; use crate::vm::functions::{handle_binding_list, NativeFunctions}; use crate::vm::types::signatures::{ CallableSubtype, FunctionArgSignature, FunctionReturnsSignature, SequenceSubtype, - TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, + TO_ASCII_RESPONSE_STRING, }; use crate::vm::types::{ BlockInfoProperty, BufferLength, BurnBlockInfoProperty, FixedFunction, FunctionArg, @@ -1200,7 +1200,7 @@ impl TypedNativeFunction { TypeSignature::UIntType, TypeSignature::BoolType, TypeSignature::PrincipalType, - TO_ASCII_MAX_BUFF.clone(), + TypeSignature::TO_ASCII_BUFFER_MAX, TypeSignature::STRING_UTF8_MAX, ], TypeSignature::new_response( diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index 0a002d41ac..03e78bbf7d 
100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -31,7 +31,7 @@ use crate::vm::ast::parse; use crate::vm::costs::LimitedCostTracker; use crate::vm::database::MemoryBackingStore; use crate::vm::tests::test_clarity_versions; -use crate::vm::types::signatures::{CallableSubtype, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING}; +use crate::vm::types::signatures::{CallableSubtype, TO_ASCII_RESPONSE_STRING}; use crate::vm::types::{ BufferLength, ListTypeData, QualifiedContractIdentifier, SequenceSubtype, StringSubtype, StringUTF8Length, TypeSignature, @@ -3511,7 +3511,7 @@ fn test_to_ascii(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) TypeSignature::UIntType, TypeSignature::BoolType, TypeSignature::PrincipalType, - TO_ASCII_MAX_BUFF.clone(), + TypeSignature::TO_ASCII_BUFFER_MAX, TypeSignature::STRING_UTF8_MAX, ]; let test_cases = [ diff --git a/clarity/src/vm/functions/conversions.rs b/clarity/src/vm/functions/conversions.rs index 01ddc89aa0..12ceca6aec 100644 --- a/clarity/src/vm/functions/conversions.rs +++ b/clarity/src/vm/functions/conversions.rs @@ -22,7 +22,6 @@ use crate::vm::errors::{ check_argument_count, CheckErrors, InterpreterError, InterpreterResult as Result, }; use crate::vm::representations::SymbolicExpression; -use crate::vm::types::signatures::TO_ASCII_MAX_BUFF; use crate::vm::types::SequenceSubtype::BufferType; use crate::vm::types::TypeSignature::SequenceType; use crate::vm::types::{ @@ -277,7 +276,7 @@ pub fn special_to_ascii( TypeSignature::UIntType, TypeSignature::BoolType, TypeSignature::PrincipalType, - TO_ASCII_MAX_BUFF.clone(), + TypeSignature::TO_ASCII_BUFFER_MAX, TypeSignature::STRING_UTF8_MAX, ], Box::new(value), diff --git a/clarity/src/vm/tests/conversions.rs b/clarity/src/vm/tests/conversions.rs index 565f4e09dc..34381479b0 100644 --- a/clarity/src/vm/tests/conversions.rs +++ b/clarity/src/vm/tests/conversions.rs @@ -14,11 +14,11 @@ 
// You should have received a copy of the GNU General Public License // along with this program. If not, see . +use clarity_types::types::MAX_TO_ASCII_BUFFER_LEN; use stacks_common::types::StacksEpochId; pub use crate::vm::analysis::errors::CheckErrors; use crate::vm::tests::test_clarity_versions; -use crate::vm::types::signatures::MAX_TO_ASCII_BUFFER_LEN; use crate::vm::types::SequenceSubtype::BufferType; use crate::vm::types::TypeSignature::SequenceType; use crate::vm::types::{ diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index d75dcf835d..1cc90cca50 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -19,8 +19,7 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, MAX_TO_ASCII_BUFFER_LEN, - TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, + StringUTF8Length, TupleTypeSignature, TypeSignature, TO_ASCII_RESPONSE_STRING, }; pub use clarity_types::types::Value; use stacks_common::types::StacksEpochId; From 01a680a62f6075f86ba11437afbb8e7b8a4f7cc1 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 22 Sep 2025 14:39:38 +0200 Subject: [PATCH 30/86] chore: ported TO_ASCII_RESPONSE_STRING to TO_ASCII_STRING_ASCII_MAX, #6467 --- clarity-types/src/tests/types/signatures.rs | 45 ++++++++++++------- clarity-types/src/types/mod.rs | 2 +- clarity-types/src/types/signatures.rs | 16 ++----- .../analysis/type_checker/v2_1/natives/mod.rs | 3 +- .../type_checker/v2_1/tests/contracts.rs | 9 ++-- clarity/src/vm/types/signatures.rs | 2 +- 6 files changed, 43 insertions(+), 34 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 4a6ababfba..c5064038e6 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -18,19 +18,17 @@ use 
crate::errors::CheckErrors; use crate::types::TypeSignature::{BoolType, IntType, ListUnionType, UIntType}; use crate::types::signatures::{CallableSubtype, TypeSignature}; use crate::types::{ - BufferLength, MAX_TO_ASCII_BUFFER_LEN, MAX_UTF8_VALUE_SIZE, MAX_VALUE_SIZE, - QualifiedContractIdentifier, SequenceSubtype, StringSubtype, StringUTF8Length, TraitIdentifier, - TupleTypeSignature, + BufferLength, MAX_TO_ASCII_BUFFER_LEN, MAX_TO_ASCII_RESULT_LEN, MAX_UTF8_VALUE_SIZE, + MAX_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype, StringSubtype, StringUTF8Length, + TraitIdentifier, TupleTypeSignature, }; #[test] -fn test_max_value_size() { - assert_eq!(1024 * 1024, MAX_VALUE_SIZE); -} - -#[test] -fn test_max_utf8_value_size() { - assert_eq!(MAX_VALUE_SIZE / 4, MAX_UTF8_VALUE_SIZE); +fn test_core_constants() { + assert_eq!(1_048_576, MAX_VALUE_SIZE); + assert_eq!(262_144, MAX_UTF8_VALUE_SIZE); + assert_eq!(1_048_571, MAX_TO_ASCII_RESULT_LEN); + assert_eq!(524_284, MAX_TO_ASCII_BUFFER_LEN); } #[test] @@ -93,7 +91,7 @@ fn test_type_buffer_max() { assert_eq!(expected, actual); assert_eq!( - MAX_VALUE_SIZE + 4, + 1_048_580, actual.size().unwrap(), "size should be 1_048_580" ); @@ -195,7 +193,7 @@ fn test_type_string_ascii_max() { assert_eq!(expected, actual); assert_eq!( - MAX_VALUE_SIZE + 4, + 1_048_580, actual.size().unwrap(), "size should be 1_048_580" ); @@ -287,9 +285,9 @@ fn test_type_string_utf8_max() { assert_eq!(expected, actual); assert_eq!(TypeSignature::STRING_UTF8_MAX, actual); assert_eq!( - MAX_VALUE_SIZE + 4, + 1_048_580, actual.size().unwrap(), - "size should be 1048580" + "size should be 1_048_580" ); assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); assert_eq!(1, actual.depth(), "depth should be 1"); @@ -309,7 +307,7 @@ fn test_type_string_utf8_40() { } #[test] -fn test_type_buffer_for_to_ascii_call() { +fn test_type_buffer_max_for_to_ascii_call() { let expected = TypeSignature::SequenceType(SequenceSubtype::BufferType( 
BufferLength::new_unsafe(MAX_TO_ASCII_BUFFER_LEN), )); @@ -321,6 +319,23 @@ fn test_type_buffer_for_to_ascii_call() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_string_max_ascii_for_to_ascii_call() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::new_unsafe(MAX_TO_ASCII_RESULT_LEN), + ))); + let actual = TypeSignature::TO_ASCII_STRING_ASCII_MAX; + + assert_eq!(expected, actual); + assert_eq!( + 1_048_575, + actual.size().unwrap(), + "size should be 1_048_575" + ); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index b9cfc3685a..8910c44a98 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -48,7 +48,7 @@ pub const MAX_UTF8_VALUE_SIZE: u32 = MAX_VALUE_SIZE / 4; /// Maximum string length returned from `to-ascii?`. /// 5 bytes reserved for embedding in response. -const MAX_TO_ASCII_RESULT_LEN: u32 = MAX_VALUE_SIZE - 5; +pub const MAX_TO_ASCII_RESULT_LEN: u32 = MAX_VALUE_SIZE - 5; /// Maximum buffer length returned from `to-ascii?`. /// 2 bytes reserved for "0x" prefix and 2 characters per byte. pub const MAX_TO_ASCII_BUFFER_LEN: u32 = (MAX_TO_ASCII_RESULT_LEN - 2) / 2; diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 45389e033a..83c51cdc07 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -19,7 +19,6 @@ use std::hash::Hash; use std::sync::Arc; use std::{cmp, fmt}; -use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use stacks_common::types::StacksEpochId; @@ -341,17 +340,6 @@ use self::TypeSignature::{ ResponseType, SequenceType, TraitReferenceType, TupleType, UIntType, }; -lazy_static! 
{ - /// Maximum-length string returned from `to-ascii?` - pub static ref TO_ASCII_RESPONSE_STRING: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(BufferLength::try_from(MAX_TO_ASCII_RESULT_LEN) - .expect("BUG: Legal Clarity buffer length marked invalid")), - )) - }; -} - #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct ListTypeData { max_len: u32, @@ -883,6 +871,10 @@ impl TypeSignature { pub const TO_ASCII_BUFFER_MAX: TypeSignature = Self::type_buffer_of_size::(); + /// Maximum-sized ([`MAX_TO_ASCII_RESULT_LEN`]) string allowed for `to-ascii?` call. + pub const TO_ASCII_STRING_ASCII_MAX: TypeSignature = + Self::type_string_ascii::(); + /// Creates a buffer type with a given size known at compile time. /// /// This function is intended for defining constant buffer type diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index 7cb715380b..ab65fc71c6 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -27,7 +27,6 @@ use crate::vm::diagnostic::DiagnosableError; use crate::vm::functions::{handle_binding_list, NativeFunctions}; use crate::vm::types::signatures::{ CallableSubtype, FunctionArgSignature, FunctionReturnsSignature, SequenceSubtype, - TO_ASCII_RESPONSE_STRING, }; use crate::vm::types::{ BlockInfoProperty, BufferLength, BurnBlockInfoProperty, FixedFunction, FunctionArg, @@ -1204,7 +1203,7 @@ impl TypedNativeFunction { TypeSignature::STRING_UTF8_MAX, ], TypeSignature::new_response( - TO_ASCII_RESPONSE_STRING.clone(), + TypeSignature::TO_ASCII_STRING_ASCII_MAX, TypeSignature::UIntType, ) .map_err(|_| { diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index 03e78bbf7d..78e52eab85 100644 --- 
a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -31,7 +31,7 @@ use crate::vm::ast::parse; use crate::vm::costs::LimitedCostTracker; use crate::vm::database::MemoryBackingStore; use crate::vm::tests::test_clarity_versions; -use crate::vm::types::signatures::{CallableSubtype, TO_ASCII_RESPONSE_STRING}; +use crate::vm::types::signatures::CallableSubtype; use crate::vm::types::{ BufferLength, ListTypeData, QualifiedContractIdentifier, SequenceSubtype, StringSubtype, StringUTF8Length, TypeSignature, @@ -3503,8 +3503,11 @@ fn test_contract_hash(#[case] version: ClarityVersion, #[case] epoch: StacksEpoc #[apply(test_clarity_versions)] fn test_to_ascii(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let to_ascii_response_type = Some( - TypeSignature::new_response(TO_ASCII_RESPONSE_STRING.clone(), TypeSignature::UIntType) - .unwrap(), + TypeSignature::new_response( + TypeSignature::TO_ASCII_STRING_ASCII_MAX, + TypeSignature::UIntType, + ) + .unwrap(), ); let to_ascii_expected_types = vec![ TypeSignature::IntType, diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 1cc90cca50..70867cc5c6 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -19,7 +19,7 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, TO_ASCII_RESPONSE_STRING, + StringUTF8Length, TupleTypeSignature, TypeSignature, }; pub use clarity_types::types::Value; use stacks_common::types::StacksEpochId; From 7e7ef190d88bd06d2289d8ef4ed47e61cf32b093 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 22 Sep 2025 16:27:57 +0200 Subject: [PATCH 31/86] chore: rename type_buffer_of_size() to type_buffer(), #6467 --- clarity-types/src/types/signatures.rs | 18 +++++++++--------- 1 
file changed, 9 insertions(+), 9 deletions(-) diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 83c51cdc07..b196faff2f 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -839,19 +839,19 @@ impl TypeSignature { /// Buffer type with minimum size. Alias for [`TypeSignature::BUFFER_1`]. pub const BUFFER_MIN: TypeSignature = TypeSignature::BUFFER_1; /// Buffer type with maximum size ([`MAX_VALUE_SIZE`]). - pub const BUFFER_MAX: TypeSignature = Self::type_buffer_of_size::(); + pub const BUFFER_MAX: TypeSignature = Self::type_buffer::(); /// Buffer type with size 1. - pub const BUFFER_1: TypeSignature = Self::type_buffer_of_size::<1>(); + pub const BUFFER_1: TypeSignature = Self::type_buffer::<1>(); /// Buffer type with size 20. - pub const BUFFER_20: TypeSignature = Self::type_buffer_of_size::<20>(); + pub const BUFFER_20: TypeSignature = Self::type_buffer::<20>(); /// Buffer type with size 32. - pub const BUFFER_32: TypeSignature = Self::type_buffer_of_size::<32>(); + pub const BUFFER_32: TypeSignature = Self::type_buffer::<32>(); /// Buffer type with size 33. - pub const BUFFER_33: TypeSignature = Self::type_buffer_of_size::<33>(); + pub const BUFFER_33: TypeSignature = Self::type_buffer::<33>(); /// Buffer type with size 64. - pub const BUFFER_64: TypeSignature = Self::type_buffer_of_size::<64>(); + pub const BUFFER_64: TypeSignature = Self::type_buffer::<64>(); /// Buffer type with size 65. - pub const BUFFER_65: TypeSignature = Self::type_buffer_of_size::<65>(); + pub const BUFFER_65: TypeSignature = Self::type_buffer::<65>(); /// String ASCII type with minimum size (`1`). pub const STRING_ASCII_MIN: TypeSignature = Self::type_string_ascii::<1>(); @@ -869,7 +869,7 @@ impl TypeSignature { /// Maximum-sized ([`MAX_TO_ASCII_BUFFER_LEN`]) buffer allowed for `to-ascii?` call. 
pub const TO_ASCII_BUFFER_MAX: TypeSignature = - Self::type_buffer_of_size::(); + Self::type_buffer::(); /// Maximum-sized ([`MAX_TO_ASCII_RESULT_LEN`]) string allowed for `to-ascii?` call. pub const TO_ASCII_STRING_ASCII_MAX: TypeSignature = @@ -879,7 +879,7 @@ impl TypeSignature { /// /// This function is intended for defining constant buffer type /// aliases (e.g., [`TypeSignature::BUFFER_1`]) without repeating logic. - const fn type_buffer_of_size() -> Self { + const fn type_buffer() -> Self { SequenceType(SequenceSubtype::BufferType( BufferLength::try_from_u32_as_opt(VALUE).expect("Invalid buffer size!"), )) From 339b3b38e13753da380ff9e15265f3fc5ba1fd60 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Tue, 23 Sep 2025 09:41:37 +0200 Subject: [PATCH 32/86] chore: ported contract_name_string_ascii_type() to CONTRACT_NAME_STRING_ASCII_MAX, #6467 --- clarity-types/src/tests/types/signatures.rs | 49 ++++++++++--------- clarity-types/src/types/signatures.rs | 26 ++++------ .../analysis/type_checker/v2_1/natives/mod.rs | 4 +- .../analysis/type_checker/v2_1/tests/mod.rs | 6 +-- clarity/src/vm/functions/principals.rs | 4 +- clarity/src/vm/tests/principals.rs | 2 +- 6 files changed, 44 insertions(+), 47 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index c5064038e6..8155591607 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -15,6 +15,7 @@ use std::collections::HashSet; use crate::errors::CheckErrors; +use crate::representations::CONTRACT_MAX_NAME_LENGTH; use crate::types::TypeSignature::{BoolType, IntType, ListUnionType, UIntType}; use crate::types::signatures::{CallableSubtype, TypeSignature}; use crate::types::{ @@ -336,6 +337,19 @@ fn test_type_string_max_ascii_for_to_ascii_call() { assert_eq!(1, actual.depth(), "depth should be 1"); } +#[test] +fn test_type_string_max_ascii_for_contract_name() { + let expected = 
TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::new_unsafe(CONTRACT_MAX_NAME_LENGTH as u32), + ))); + let actual = TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX; + + assert_eq!(expected, actual); + assert_eq!(44, actual.size().unwrap(), "size should be 44"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ @@ -395,9 +409,9 @@ fn test_least_supertype() { ( ( TypeSignature::NoType, - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::bound_string_ascii_type(17), ), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::bound_string_ascii_type(17), ), ( (TypeSignature::NoType, TypeSignature::STRING_UTF8_MAX), @@ -492,10 +506,10 @@ fn test_least_supertype() { ), ( ( - TypeSignature::bound_string_ascii_type(17).unwrap(), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::bound_string_ascii_type(17), + TypeSignature::bound_string_ascii_type(17), ), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::bound_string_ascii_type(17), ), ( ( @@ -597,9 +611,9 @@ fn test_least_supertype() { ( ( TypeSignature::STRING_ASCII_MIN, - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::bound_string_ascii_type(17), ), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::bound_string_ascii_type(17), ), ( ( @@ -681,7 +695,7 @@ fn test_least_supertype() { TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::bound_string_ascii_type(17), )]) .unwrap(), ), @@ -689,7 +703,7 @@ fn test_least_supertype() { TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::bound_string_ascii_type(17), )]) .unwrap(), ), @@ -697,10 +711,10 
@@ fn test_least_supertype() { ( ( TypeSignature::new_option(TypeSignature::STRING_ASCII_MIN).unwrap(), - TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17).unwrap()) + TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17)) .unwrap(), ), - TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17).unwrap()).unwrap(), + TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17)).unwrap(), ), ( ( @@ -739,7 +753,7 @@ fn test_least_supertype() { ), ( TypeSignature::STRING_UTF8_MIN, - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::bound_string_ascii_type(17), ), (TypeSignature::STRING_UTF8_MIN, TypeSignature::BUFFER_MIN), ( @@ -819,14 +833,3 @@ fn test_least_supertype() { ); } } - -#[test] -fn test_type_signature_bound_string_ascii_type_returns_check_errors() { - let err = TypeSignature::bound_string_ascii_type(MAX_VALUE_SIZE + 1).unwrap_err(); - assert_eq!( - CheckErrors::Expects( - "FAIL: Max Clarity Value Size is no longer realizable in ASCII Type".to_string() - ), - err - ); -} diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index b196faff2f..0226fa65fb 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -868,13 +868,15 @@ impl TypeSignature { pub const STRING_UTF8_40: TypeSignature = Self::type_string_utf8::<40>(); /// Maximum-sized ([`MAX_TO_ASCII_BUFFER_LEN`]) buffer allowed for `to-ascii?` call. - pub const TO_ASCII_BUFFER_MAX: TypeSignature = - Self::type_buffer::(); - + pub const TO_ASCII_BUFFER_MAX: TypeSignature = Self::type_buffer::(); /// Maximum-sized ([`MAX_TO_ASCII_RESULT_LEN`]) string allowed for `to-ascii?` call. pub const TO_ASCII_STRING_ASCII_MAX: TypeSignature = Self::type_string_ascii::(); + /// Maximum-sized ([`CONTRACT_MAX_NAME_LENGTH`]) string allowed for `contract-name`. 
+ pub const CONTRACT_NAME_STRING_ASCII_MAX: TypeSignature = + Self::type_string_ascii::<{ CONTRACT_MAX_NAME_LENGTH as u32 }>(); + /// Creates a buffer type with a given size known at compile time. /// /// This function is intended for defining constant buffer type @@ -905,19 +907,11 @@ impl TypeSignature { ))) } - pub fn contract_name_string_ascii_type() -> Result { - TypeSignature::bound_string_ascii_type(CONTRACT_MAX_NAME_LENGTH.try_into().map_err( - |_| CheckErrors::Expects("FAIL: contract name max length exceeds u32 space".into()), - )?) - } - - pub fn bound_string_ascii_type(max_len: u32) -> Result { - Ok(SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(BufferLength::try_from(max_len).map_err(|_| { - CheckErrors::Expects( - "FAIL: Max Clarity Value Size is no longer realizable in ASCII Type".into(), - ) - })?), + /// Create a string ASCII type with a given len. + #[cfg(any(test, feature = "testing"))] + pub const fn bound_string_ascii_type(len: u32) -> Self { + SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::try_from_u32_as_opt(len).expect("Invalid ascii size!"), ))) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index ab65fc71c6..397d4eb323 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -680,7 +680,7 @@ fn check_principal_construct( checker.type_check_expects( &args[2], context, - &TypeSignature::contract_name_string_ascii_type()?, + &TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX, )?; } Ok(TypeSignature::new_response( @@ -1037,7 +1037,7 @@ impl TypedNativeFunction { ( "name".into(), TypeSignature::new_option( - TypeSignature::contract_name_string_ascii_type()?, + TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX, ) .map_err(|_| CheckErrors::Expects("Bad constructor".into()))?, ), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs 
b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 461aacc953..e391264b8c 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -3436,15 +3436,15 @@ fn test_principal_construct() { ( r#"(principal-construct? 0x22 0xfa6bf38ed557fe417333710d6033e9419391a320 "foooooooooooooooooooooooooooooooooooooooo")"#, CheckErrors::TypeError( - Box::new(TypeSignature::contract_name_string_ascii_type().unwrap()), - Box::new(TypeSignature::bound_string_ascii_type(41).unwrap()), + Box::new(TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX), + Box::new(TypeSignature::bound_string_ascii_type(41)), ), ), // bad argument type for `name` ( r#"(principal-construct? 0x22 0xfa6bf38ed557fe417333710d6033e9419391a320 u123)"#, CheckErrors::TypeError( - Box::new(TypeSignature::contract_name_string_ascii_type().unwrap()), + Box::new(TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX), Box::new(UIntType), ), ), diff --git a/clarity/src/vm/functions/principals.rs b/clarity/src/vm/functions/principals.rs index c510ffbb71..c2511881e7 100644 --- a/clarity/src/vm/functions/principals.rs +++ b/clarity/src/vm/functions/principals.rs @@ -285,7 +285,7 @@ pub fn special_principal_construct( Value::Sequence(SequenceData::String(CharType::ASCII(ascii_data))) => ascii_data, _ => { return Err(CheckErrors::TypeValueError( - Box::new(TypeSignature::contract_name_string_ascii_type()?), + Box::new(TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX), Box::new(name), ) .into()) @@ -302,7 +302,7 @@ pub fn special_principal_construct( // if it's too long, then this should have been caught by the type-checker if name_bytes.data.len() > CONTRACT_MAX_NAME_LENGTH { return Err(CheckErrors::TypeValueError( - Box::new(TypeSignature::contract_name_string_ascii_type()?), + Box::new(TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX), Box::new(Value::from(name_bytes)), ) .into()); diff --git a/clarity/src/vm/tests/principals.rs 
b/clarity/src/vm/tests/principals.rs index cb45e32f92..b76d944b89 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -1010,7 +1010,7 @@ fn test_principal_construct_check_errors() { let input = r#"(principal-construct? 0x16 0x0102030405060708091011121314151617181920 "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")"#; assert_eq!( Err(CheckErrors::TypeValueError( - Box::new(TypeSignature::contract_name_string_ascii_type().unwrap()), + Box::new(TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX), Box::new(Value::Sequence(SequenceData::String(CharType::ASCII( ASCIIData { data: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" From 426ec21dc26bf4aea97fcdbc93dd92f82ec3b51c Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Tue, 23 Sep 2025 09:45:35 +0200 Subject: [PATCH 33/86] chore: improve doc, #6467 --- clarity-types/src/types/signatures.rs | 34 +++++++++++++-------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 0226fa65fb..3890ebf591 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -836,44 +836,44 @@ impl TupleTypeSignature { } impl TypeSignature { - /// Buffer type with minimum size. Alias for [`TypeSignature::BUFFER_1`]. + /// Buffer type with minimum length. Alias for [`TypeSignature::BUFFER_1`]. pub const BUFFER_MIN: TypeSignature = TypeSignature::BUFFER_1; - /// Buffer type with maximum size ([`MAX_VALUE_SIZE`]). + /// Buffer type with maximum length ([`MAX_VALUE_SIZE`]). pub const BUFFER_MAX: TypeSignature = Self::type_buffer::(); - /// Buffer type with size 1. + /// Buffer type with length 1. pub const BUFFER_1: TypeSignature = Self::type_buffer::<1>(); - /// Buffer type with size 20. + /// Buffer type with length 20. pub const BUFFER_20: TypeSignature = Self::type_buffer::<20>(); - /// Buffer type with size 32. + /// Buffer type with length 32. 
pub const BUFFER_32: TypeSignature = Self::type_buffer::<32>(); - /// Buffer type with size 33. + /// Buffer type with length 33. pub const BUFFER_33: TypeSignature = Self::type_buffer::<33>(); - /// Buffer type with size 64. + /// Buffer type with length 64. pub const BUFFER_64: TypeSignature = Self::type_buffer::<64>(); - /// Buffer type with size 65. + /// Buffer type with length 65. pub const BUFFER_65: TypeSignature = Self::type_buffer::<65>(); - /// String ASCII type with minimum size (`1`). + /// String ASCII type with minimum length (`1`). pub const STRING_ASCII_MIN: TypeSignature = Self::type_string_ascii::<1>(); - /// String ASCII type with maximum size ([`MAX_VALUE_SIZE`]). + /// String ASCII type with maximum length ([`MAX_VALUE_SIZE`]). pub const STRING_ASCII_MAX: TypeSignature = Self::type_string_ascii::(); - /// String ASCII type with size 40. + /// String ASCII type with length 40. pub const STRING_ASCII_40: TypeSignature = Self::type_string_ascii::<40>(); - /// String UTF8 type with minimum size (`1`). + /// String UTF8 type with minimum length (`1`). pub const STRING_UTF8_MIN: TypeSignature = Self::type_string_utf8::<1>(); - /// String UTF8 type with maximum size ([`MAX_UTF8_VALUE_SIZE`]). + /// String UTF8 type with maximum length ([`MAX_UTF8_VALUE_SIZE`]). pub const STRING_UTF8_MAX: TypeSignature = Self::type_string_utf8::(); - /// String UTF8 type with size 40. + /// String UTF8 type with length 40. pub const STRING_UTF8_40: TypeSignature = Self::type_string_utf8::<40>(); - /// Maximum-sized ([`MAX_TO_ASCII_BUFFER_LEN`]) buffer allowed for `to-ascii?` call. + /// Longest ([`MAX_TO_ASCII_BUFFER_LEN`]) buffer allowed for `to-ascii?` call. pub const TO_ASCII_BUFFER_MAX: TypeSignature = Self::type_buffer::(); - /// Maximum-sized ([`MAX_TO_ASCII_RESULT_LEN`]) string allowed for `to-ascii?` call. + /// Longest ([`MAX_TO_ASCII_RESULT_LEN`]) string allowed for `to-ascii?` call. 
pub const TO_ASCII_STRING_ASCII_MAX: TypeSignature = Self::type_string_ascii::(); - /// Maximum-sized ([`CONTRACT_MAX_NAME_LENGTH`]) string allowed for `contract-name`. + /// Longest ([`CONTRACT_MAX_NAME_LENGTH`]) string allowed for `contract-name`. pub const CONTRACT_NAME_STRING_ASCII_MAX: TypeSignature = Self::type_string_ascii::<{ CONTRACT_MAX_NAME_LENGTH as u32 }>(); From c75264c15353ccb7fdc75e5ffbd1dea12a1d0156 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Tue, 23 Sep 2025 10:10:15 +0200 Subject: [PATCH 34/86] chore: improve clarity test for TO_ASCII_BUFFER_MAX, #6467 --- clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index 78e52eab85..9a694ada7b 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -3548,6 +3548,11 @@ fn test_to_ascii(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) "buffer type", Ok(to_ascii_response_type.clone()), ), + ( + &format!("(to-ascii? 0x{})", "ff".repeat(524284)), + "max len buffer type", + Ok(to_ascii_response_type.clone()), + ), ( &format!("(to-ascii? 
0x{})", "ff".repeat(524285)), "oversized buffer type", From 146d71cd00c24082c2ab901288b7f14d19683706 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Tue, 23 Sep 2025 11:19:36 +0200 Subject: [PATCH 35/86] chore: remove Result form unit_type() signature, #6467 --- clarity-types/src/tests/types/signatures.rs | 3 +-- clarity-types/src/types/signatures.rs | 14 +++++--------- .../type_checker/v2_05/natives/sequences.rs | 6 +++--- .../type_checker/v2_1/natives/sequences.rs | 8 ++++---- clarity/src/vm/functions/sequences.rs | 2 +- 5 files changed, 14 insertions(+), 19 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 8155591607..615725ba02 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -711,8 +711,7 @@ fn test_least_supertype() { ( ( TypeSignature::new_option(TypeSignature::STRING_ASCII_MIN).unwrap(), - TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17)) - .unwrap(), + TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17)).unwrap(), ), TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17)).unwrap(), ), diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 3890ebf591..fc7621ec98 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -305,16 +305,12 @@ pub enum SequenceSubtype { } impl SequenceSubtype { - pub fn unit_type(&self) -> Result { + pub fn unit_type(&self) -> TypeSignature { match &self { - SequenceSubtype::ListType(list_data) => Ok(list_data.clone().destruct().0), - SequenceSubtype::BufferType(_) => Ok(TypeSignature::BUFFER_MIN), - SequenceSubtype::StringType(StringSubtype::ASCII(_)) => { - Ok(TypeSignature::STRING_ASCII_MIN) - } - SequenceSubtype::StringType(StringSubtype::UTF8(_)) => { - Ok(TypeSignature::STRING_UTF8_MIN) - } + SequenceSubtype::ListType(list_data) => 
list_data.clone().destruct().0, + SequenceSubtype::BufferType(_) => TypeSignature::BUFFER_MIN, + SequenceSubtype::StringType(StringSubtype::ASCII(_)) => TypeSignature::STRING_ASCII_MIN, + SequenceSubtype::StringType(StringSubtype::UTF8(_)) => TypeSignature::STRING_UTF8_MIN, } } diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs index f0cc12b2a1..9881bf9448 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs @@ -129,7 +129,7 @@ pub fn check_special_filter( { let input_type = match argument_type { - TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()?), + TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()), _ => Err(CheckErrors::ExpectedSequence(Box::new( argument_type.clone(), ))), @@ -172,7 +172,7 @@ pub fn check_special_fold( let argument_type = checker.type_check(&args[1], context)?; let input_type = match argument_type { - TypeSignature::SequenceType(sequence_type) => Ok(sequence_type.unit_type()?), + TypeSignature::SequenceType(sequence_type) => Ok(sequence_type.unit_type()), _ => Err(CheckErrors::ExpectedSequence(Box::new(argument_type))), }?; @@ -420,7 +420,7 @@ pub fn check_special_index_of( let list_type = checker.type_check(&args[0], context)?; let expected_input_type = match list_type { - TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()?), + TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()), _ => Err(CheckErrors::ExpectedSequence(Box::new(list_type))), }?; diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs index 2ce71a5d38..c55656b9bf 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs +++ 
b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs @@ -178,7 +178,7 @@ pub fn check_special_filter( { let input_type = match argument_type { - TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()?), + TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()), _ => Err(CheckErrors::ExpectedSequence(Box::new( argument_type.clone(), ))), @@ -221,7 +221,7 @@ pub fn check_special_fold( let argument_type = checker.type_check(&args[1], context)?; let input_type = match argument_type { - TypeSignature::SequenceType(sequence_type) => Ok(sequence_type.unit_type()?), + TypeSignature::SequenceType(sequence_type) => Ok(sequence_type.unit_type()), _ => Err(CheckErrors::ExpectedSequence(Box::new(argument_type))), }?; @@ -469,7 +469,7 @@ pub fn check_special_index_of( let list_type = checker.type_check(&args[0], context)?; let expected_input_type = match list_type { - TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()?), + TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()), _ => Err(CheckErrors::ExpectedSequence(Box::new(list_type))), }?; @@ -519,7 +519,7 @@ pub fn check_special_replace_at( TypeSignature::SequenceType(seq) => seq, _ => return Err(CheckErrors::ExpectedSequence(Box::new(input_type)).into()), }; - let unit_seq = seq_type.unit_type()?; + let unit_seq = seq_type.unit_type(); // Check index argument checker.type_check_expects(&args[1], context, &TypeSignature::UIntType)?; // Check element argument diff --git a/clarity/src/vm/functions/sequences.rs b/clarity/src/vm/functions/sequences.rs index cafc795d36..f15fb7811e 100644 --- a/clarity/src/vm/functions/sequences.rs +++ b/clarity/src/vm/functions/sequences.rs @@ -437,7 +437,7 @@ pub fn special_replace_at( runtime_cost(ClarityCostFunction::ReplaceAt, env, seq_type.size()?)?; let expected_elem_type = if let TypeSignature::SequenceType(seq_subtype) = &seq_type { - seq_subtype.unit_type()? 
+ seq_subtype.unit_type() } else { return Err(CheckErrors::ExpectedSequence(Box::new(seq_type)).into()); }; From d222bfde266ac4425c3f2cb848c32701fa1986d9 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Tue, 23 Sep 2025 11:46:03 +0200 Subject: [PATCH 36/86] chore: improve const doc, #6467 --- clarity-types/src/tests/types/signatures.rs | 8 +++++--- clarity-types/src/types/mod.rs | 9 +++++---- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 615725ba02..84395f7feb 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -19,9 +19,9 @@ use crate::representations::CONTRACT_MAX_NAME_LENGTH; use crate::types::TypeSignature::{BoolType, IntType, ListUnionType, UIntType}; use crate::types::signatures::{CallableSubtype, TypeSignature}; use crate::types::{ - BufferLength, MAX_TO_ASCII_BUFFER_LEN, MAX_TO_ASCII_RESULT_LEN, MAX_UTF8_VALUE_SIZE, - MAX_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype, StringSubtype, StringUTF8Length, - TraitIdentifier, TupleTypeSignature, + BufferLength, MAX_TO_ASCII_BUFFER_LEN, MAX_TO_ASCII_RESULT_LEN, MAX_TYPE_DEPTH, + MAX_UTF8_VALUE_SIZE, MAX_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype, + StringSubtype, StringUTF8Length, TraitIdentifier, TupleTypeSignature, WRAPPER_VALUE_SIZE, }; #[test] @@ -30,6 +30,8 @@ fn test_core_constants() { assert_eq!(262_144, MAX_UTF8_VALUE_SIZE); assert_eq!(1_048_571, MAX_TO_ASCII_RESULT_LEN); assert_eq!(524_284, MAX_TO_ASCII_BUFFER_LEN); + assert_eq!(32, MAX_TYPE_DEPTH); + assert_eq!(1, WRAPPER_VALUE_SIZE); } #[test] diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs index 8910c44a98..83deaab146 100644 --- a/clarity-types/src/types/mod.rs +++ b/clarity-types/src/types/mod.rs @@ -39,22 +39,23 @@ pub use self::signatures::{ use crate::errors::{CheckErrors, InterpreterError, InterpreterResult as Result, 
RuntimeErrorType}; use crate::representations::{ClarityName, ContractName, SymbolicExpression}; +/// Maximum size in bytes allowed for types. pub const MAX_VALUE_SIZE: u32 = 1024 * 1024; // 1MB +/// Bytes serialization upper limit. pub const BOUND_VALUE_SERIALIZATION_BYTES: u32 = MAX_VALUE_SIZE * 2; +/// Hex serialization upper limit. pub const BOUND_VALUE_SERIALIZATION_HEX: u32 = BOUND_VALUE_SERIALIZATION_BYTES * 2; - /// Maximum length for UFT8 string. pub const MAX_UTF8_VALUE_SIZE: u32 = MAX_VALUE_SIZE / 4; - /// Maximum string length returned from `to-ascii?`. /// 5 bytes reserved for embedding in response. pub const MAX_TO_ASCII_RESULT_LEN: u32 = MAX_VALUE_SIZE - 5; /// Maximum buffer length returned from `to-ascii?`. /// 2 bytes reserved for "0x" prefix and 2 characters per byte. pub const MAX_TO_ASCII_BUFFER_LEN: u32 = (MAX_TO_ASCII_RESULT_LEN - 2) / 2; - +/// Maximum allowed nesting depth of types. pub const MAX_TYPE_DEPTH: u8 = 32; -// this is the charged size for wrapped values, i.e., response or optionals +/// this is the charged size for wrapped values, i.e., response or optionals pub const WRAPPER_VALUE_SIZE: u32 = 1; #[derive(Debug, Clone, Eq, Serialize, Deserialize)] From 6a1f0e951f9e136d57b5195213c9505a203028f8 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Tue, 23 Sep 2025 12:07:58 +0200 Subject: [PATCH 37/86] chore: limit cfg test to crate, #6467 --- clarity-types/src/types/signatures.rs | 2 +- clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index fc7621ec98..6fff144ff1 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -904,7 +904,7 @@ impl TypeSignature { } /// Create a string ASCII type with a given len. 
- #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub const fn bound_string_ascii_type(len: u32) -> Self { SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( BufferLength::try_from_u32_as_opt(len).expect("Invalid ascii size!"), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index e391264b8c..413ae80f29 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -3437,7 +3437,7 @@ fn test_principal_construct() { r#"(principal-construct? 0x22 0xfa6bf38ed557fe417333710d6033e9419391a320 "foooooooooooooooooooooooooooooooooooooooo")"#, CheckErrors::TypeError( Box::new(TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX), - Box::new(TypeSignature::bound_string_ascii_type(41)), + Box::new(SequenceType(StringType(ASCII(41_u32.try_into().unwrap())))), ), ), // bad argument type for `name` From 3b8963b5fdb18b989c4adcf51acf30a8a1eb7740 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Tue, 23 Sep 2025 13:15:35 +0200 Subject: [PATCH 38/86] chore: improve const type function to be used in test context, #6467 --- clarity-types/src/tests/types/signatures.rs | 24 +++---- clarity-types/src/types/signatures.rs | 74 +++++++++++---------- 2 files changed, 50 insertions(+), 48 deletions(-) diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs index 84395f7feb..dc516fde61 100644 --- a/clarity-types/src/tests/types/signatures.rs +++ b/clarity-types/src/tests/types/signatures.rs @@ -411,9 +411,9 @@ fn test_least_supertype() { ( ( TypeSignature::NoType, - TypeSignature::bound_string_ascii_type(17), + TypeSignature::new_ascii_type_checked(17), ), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::new_ascii_type_checked(17), ), ( (TypeSignature::NoType, TypeSignature::STRING_UTF8_MAX), @@ -508,10 +508,10 @@ fn test_least_supertype() { ), ( ( - 
TypeSignature::bound_string_ascii_type(17), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::new_ascii_type_checked(17), + TypeSignature::new_ascii_type_checked(17), ), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::new_ascii_type_checked(17), ), ( ( @@ -613,9 +613,9 @@ fn test_least_supertype() { ( ( TypeSignature::STRING_ASCII_MIN, - TypeSignature::bound_string_ascii_type(17), + TypeSignature::new_ascii_type_checked(17), ), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::new_ascii_type_checked(17), ), ( ( @@ -697,7 +697,7 @@ fn test_least_supertype() { TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::new_ascii_type_checked(17), )]) .unwrap(), ), @@ -705,7 +705,7 @@ fn test_least_supertype() { TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::bound_string_ascii_type(17), + TypeSignature::new_ascii_type_checked(17), )]) .unwrap(), ), @@ -713,9 +713,9 @@ fn test_least_supertype() { ( ( TypeSignature::new_option(TypeSignature::STRING_ASCII_MIN).unwrap(), - TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17)).unwrap(), + TypeSignature::new_option(TypeSignature::new_ascii_type_checked(17)).unwrap(), ), - TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17)).unwrap(), + TypeSignature::new_option(TypeSignature::new_ascii_type_checked(17)).unwrap(), ), ( ( @@ -754,7 +754,7 @@ fn test_least_supertype() { ), ( TypeSignature::STRING_UTF8_MIN, - TypeSignature::bound_string_ascii_type(17), + TypeSignature::new_ascii_type_checked(17), ), (TypeSignature::STRING_UTF8_MIN, TypeSignature::BUFFER_MIN), ( diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index 6fff144ff1..c30e5b046b 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -835,80 +835,82 @@ impl TypeSignature { /// Buffer type with 
minimum length. Alias for [`TypeSignature::BUFFER_1`]. pub const BUFFER_MIN: TypeSignature = TypeSignature::BUFFER_1; /// Buffer type with maximum length ([`MAX_VALUE_SIZE`]). - pub const BUFFER_MAX: TypeSignature = Self::type_buffer::(); + pub const BUFFER_MAX: TypeSignature = Self::type_buffer_const(MAX_VALUE_SIZE); /// Buffer type with length 1. - pub const BUFFER_1: TypeSignature = Self::type_buffer::<1>(); + pub const BUFFER_1: TypeSignature = Self::type_buffer_const(1); /// Buffer type with length 20. - pub const BUFFER_20: TypeSignature = Self::type_buffer::<20>(); + pub const BUFFER_20: TypeSignature = Self::type_buffer_const(20); /// Buffer type with length 32. - pub const BUFFER_32: TypeSignature = Self::type_buffer::<32>(); + pub const BUFFER_32: TypeSignature = Self::type_buffer_const(32); /// Buffer type with length 33. - pub const BUFFER_33: TypeSignature = Self::type_buffer::<33>(); + pub const BUFFER_33: TypeSignature = Self::type_buffer_const(33); /// Buffer type with length 64. - pub const BUFFER_64: TypeSignature = Self::type_buffer::<64>(); + pub const BUFFER_64: TypeSignature = Self::type_buffer_const(64); /// Buffer type with length 65. - pub const BUFFER_65: TypeSignature = Self::type_buffer::<65>(); + pub const BUFFER_65: TypeSignature = Self::type_buffer_const(65); /// String ASCII type with minimum length (`1`). - pub const STRING_ASCII_MIN: TypeSignature = Self::type_string_ascii::<1>(); + pub const STRING_ASCII_MIN: TypeSignature = Self::type_ascii_const(1); /// String ASCII type with maximum length ([`MAX_VALUE_SIZE`]). - pub const STRING_ASCII_MAX: TypeSignature = Self::type_string_ascii::(); + pub const STRING_ASCII_MAX: TypeSignature = Self::type_ascii_const(MAX_VALUE_SIZE); /// String ASCII type with length 40. - pub const STRING_ASCII_40: TypeSignature = Self::type_string_ascii::<40>(); + pub const STRING_ASCII_40: TypeSignature = Self::type_ascii_const(40); /// String UTF8 type with minimum length (`1`). 
- pub const STRING_UTF8_MIN: TypeSignature = Self::type_string_utf8::<1>(); + pub const STRING_UTF8_MIN: TypeSignature = Self::type_string_utf8(1); /// String UTF8 type with maximum length ([`MAX_UTF8_VALUE_SIZE`]). - pub const STRING_UTF8_MAX: TypeSignature = Self::type_string_utf8::(); + pub const STRING_UTF8_MAX: TypeSignature = Self::type_string_utf8(MAX_UTF8_VALUE_SIZE); /// String UTF8 type with length 40. - pub const STRING_UTF8_40: TypeSignature = Self::type_string_utf8::<40>(); + pub const STRING_UTF8_40: TypeSignature = Self::type_string_utf8(40); /// Longest ([`MAX_TO_ASCII_BUFFER_LEN`]) buffer allowed for `to-ascii?` call. - pub const TO_ASCII_BUFFER_MAX: TypeSignature = Self::type_buffer::(); + pub const TO_ASCII_BUFFER_MAX: TypeSignature = Self::type_buffer_const(MAX_TO_ASCII_BUFFER_LEN); /// Longest ([`MAX_TO_ASCII_RESULT_LEN`]) string allowed for `to-ascii?` call. pub const TO_ASCII_STRING_ASCII_MAX: TypeSignature = - Self::type_string_ascii::(); + Self::type_ascii_const(MAX_TO_ASCII_RESULT_LEN); /// Longest ([`CONTRACT_MAX_NAME_LENGTH`]) string allowed for `contract-name`. pub const CONTRACT_NAME_STRING_ASCII_MAX: TypeSignature = - Self::type_string_ascii::<{ CONTRACT_MAX_NAME_LENGTH as u32 }>(); + Self::type_ascii_const(CONTRACT_MAX_NAME_LENGTH as u32); - /// Creates a buffer type with a given size known at compile time. + /// Creates a buffer type with the specified length. /// - /// This function is intended for defining constant buffer type - /// aliases (e.g., [`TypeSignature::BUFFER_1`]) without repeating logic. - const fn type_buffer() -> Self { + /// # Note + /// This function is intended for use in constant contexts or for testing purposes. + /// It may panic if the provided length is invalid. 
+ const fn type_buffer_const(len: u32) -> Self { SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from_u32_as_opt(VALUE).expect("Invalid buffer size!"), + BufferLength::try_from_u32_as_opt(len).expect("Invalid buffer length!"), )) } - /// Creates a string ASCII type with a given size known at compile time. + /// Creates a string ASCII type with the specified length. /// - /// This function is intended for defining constant ASCII type - /// aliases (e.g., [`TypeSignature::STRING_ASCII_MIN`]) without repeating logic. - const fn type_string_ascii() -> Self { + /// # Note + /// This function is intended for use in constant contexts or for testing purposes. + /// It may panic if the provided length is invalid. + const fn type_ascii_const(len: u32) -> Self { SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( - BufferLength::try_from_u32_as_opt(VALUE).expect("Invalid ascii size!"), + BufferLength::try_from_u32_as_opt(len).expect("Invalid ascii length!"), ))) } - /// Creates a string UTF8 type with a given size known at compile time. + /// Creates a string UTF8 type with the specified length. /// - /// This function is intended for defining constant UFT8 type - /// aliases (e.g., [`TypeSignature::STRING_UTF8_MIN`]) without repeating logic. - const fn type_string_utf8() -> Self { + /// # Note + /// This function is intended for use in constant contexts or for testing purposes. + /// It may panic if the provided length is invalid. + const fn type_string_utf8(len: u32) -> Self { SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( - StringUTF8Length::try_from_u32_as_opt(VALUE).expect("Invalid utf8 size!"), + StringUTF8Length::try_from_u32_as_opt(len).expect("Invalid utf8 length!"), ))) } - /// Create a string ASCII type with a given len. + /// Creates a string ASCII type with the specified length. + /// It may panic if the provided length is invalid. 
#[cfg(test)] - pub const fn bound_string_ascii_type(len: u32) -> Self { - SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( - BufferLength::try_from_u32_as_opt(len).expect("Invalid ascii size!"), - ))) + pub const fn new_ascii_type_checked(len: u32) -> Self { + Self::type_ascii_const(len) } /// If one of the types is a NoType, return Ok(the other type), otherwise return least_supertype(a, b) From b242c82074b04576212faaae8796d61b80d26bc7 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 23 Sep 2025 11:57:10 -0700 Subject: [PATCH 39/86] fix: integration tests as a result of miner changes --- .../src/tests/nakamoto_integrations.rs | 70 ++++---- stacks-node/src/tests/signer/v0.rs | 149 +++++++----------- 2 files changed, 98 insertions(+), 121 deletions(-) diff --git a/stacks-node/src/tests/nakamoto_integrations.rs b/stacks-node/src/tests/nakamoto_integrations.rs index e784728fb6..649c27dfbf 100644 --- a/stacks-node/src/tests/nakamoto_integrations.rs +++ b/stacks-node/src/tests/nakamoto_integrations.rs @@ -5495,27 +5495,46 @@ fn forked_tenure_is_ignored() { .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); - let block_in_tenure = get_last_block_in_current_tenure(&sortdb, &chainstate).is_some(); + // We don't expect a block in this tenure, because the miner should instead be building off + // of a previous tenure + let block_in_tenure = get_last_block_in_current_tenure(&sortdb, &chainstate).is_none(); Ok(commits_count > commits_before && blocks_count > blocks_before && blocks_processed > blocks_processed_before && block_in_tenure) }) - .unwrap(); + .unwrap_or_else(|_| { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = mined_blocks.load(Ordering::SeqCst); + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + let block_in_tenure = get_last_block_in_current_tenure(&sortdb, &chainstate).is_none(); + error!("Tenure C failed to produce a block"; + 
"commits_count" => commits_count, + "commits_before" => commits_before, + "blocks_count" => blocks_count, + "blocks_before" => blocks_before, + "blocks_processed" => blocks_processed, + "blocks_processed_before" => blocks_processed_before, + "block_in_tenure" => block_in_tenure, + ); + panic!("Tenure C failed to produce a block"); + }); info!("Tenure C produced a block!"); - let block_tenure_c = get_last_block_in_current_tenure(&sortdb, &chainstate).unwrap(); + let block_tenure_c = get_last_block_in_current_tenure(&sortdb, &chainstate); + assert!(block_tenure_c.is_none()); let blocks = test_observer::get_mined_nakamoto_blocks(); let block_c = blocks.last().unwrap(); - info!("Tenure C tip block: {}", &block_tenure_c.index_block_hash()); info!("Tenure C last block: {}", &block_c.block_id); - assert_ne!(block_tenure_b.block_id(), block_tenure_c.index_block_hash()); // Block C was built AFTER Block B was built, but BEFORE it was broadcasted (processed), so it should be built off of Block A assert_eq!( - block_tenure_c.stacks_block_height, - block_tenure_a.stacks_block_height + 1 + block_c.stacks_height, + block_tenure_b.header.chain_length + 1 ); // Now let's produce a second block for tenure C and ensure it builds off of block C. 
@@ -5558,14 +5577,11 @@ fn forked_tenure_is_ignored() { info!("Tenure C produced a second block!"); - let block_2_tenure_c = get_last_block_in_current_tenure(&sortdb, &chainstate).unwrap(); + let block_2_tenure_c = get_last_block_in_current_tenure(&sortdb, &chainstate); + assert!(block_2_tenure_c.is_none()); let blocks = test_observer::get_mined_nakamoto_blocks(); let block_2_c = blocks.last().unwrap(); - info!( - "Tenure C tip block: {}", - &block_2_tenure_c.index_block_hash() - ); info!("Tenure C last block: {}", &block_2_c.block_id); info!("Starting tenure D."); @@ -5597,8 +5613,6 @@ fn forked_tenure_is_ignored() { info!("Tenure D last block: {}", block_d.block_id); assert_ne!(block_tenure_b.block_id(), block_tenure_a.index_block_hash()); - assert_ne!(block_tenure_b.block_id(), block_tenure_c.index_block_hash()); - assert_ne!(block_tenure_c, block_tenure_a); // Block B was built atop block A assert_eq!( @@ -5610,39 +5624,29 @@ fn forked_tenure_is_ignored() { block_tenure_a.index_block_hash().to_string() ); - // Block C was built AFTER Block B was built, but BEFORE it was broadcasted, so it should be built off of Block A + // Block C was built AFTER Block B was built, but BEFORE it was broadcasted, so it should be built off of Block B assert_eq!( - block_tenure_c.stacks_block_height, - block_tenure_a.stacks_block_height + 1 + block_c.stacks_height, + block_tenure_b.header.chain_length + 1 ); assert_eq!( block_c.parent_block_id, - block_tenure_a.index_block_hash().to_string() + block_tenure_b.header.block_id().to_string() ); - assert_ne!(block_tenure_c, block_2_tenure_c); - assert_ne!(block_2_tenure_c, block_tenure_d); - assert_ne!(block_tenure_c, block_tenure_d); - // Second block of tenure C builds off of block C assert_eq!( - block_2_tenure_c.stacks_block_height, - block_tenure_c.stacks_block_height + 1, - ); - assert_eq!( - block_2_c.parent_block_id, - block_tenure_c.index_block_hash().to_string() + block_2_c.stacks_height, + block_tenure_b.header.chain_length 
+ 2, ); + assert_eq!(block_2_c.parent_block_id, block_c.block_id); // Tenure D builds off of the second block of tenure C assert_eq!( block_tenure_d.stacks_block_height, - block_2_tenure_c.stacks_block_height + 1, - ); - assert_eq!( - block_d.parent_block_id, - block_2_tenure_c.index_block_hash().to_string() + block_2_c.stacks_height + 1, ); + assert_eq!(block_d.parent_block_id, block_2_c.block_id); coord_channel .lock() diff --git a/stacks-node/src/tests/signer/v0.rs b/stacks-node/src/tests/signer/v0.rs index a7d8dd5a9b..60658306c9 100644 --- a/stacks-node/src/tests/signer/v0.rs +++ b/stacks-node/src/tests/signer/v0.rs @@ -2422,15 +2422,21 @@ fn forked_tenure_invalid() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let result = forked_tenure_testing(Duration::from_secs(5), None, Duration::from_secs(7), false); + let Some(result) = forked_tenure_testing(Duration::from_secs(5), Duration::from_secs(7), false) + else { + warn!("Snapshot created. Run test again."); + return; + }; assert_ne!( result.tip_b.index_block_hash(), - result.tip_a.index_block_hash() + result.tip_a.index_block_hash(), + "Tip B should not be the same as tip A" ); - assert_eq!( + assert_ne!( result.tip_b.index_block_hash(), - result.tip_c.index_block_hash() + result.tip_c.index_block_hash(), + "Tip B should not be the same as tip C" ); assert_ne!(result.tip_c, result.tip_a); @@ -2444,12 +2450,13 @@ fn forked_tenure_invalid() { result.tip_a.index_block_hash().to_string() ); - // Block C was built AFTER Block B was built, but BEFORE it was broadcasted, so it should be built off of Block A + // Block C was built AFTER Block B was built, but BEFORE it was broadcasted, + // but it should still be extended from block B assert_eq!( result.mined_c.parent_block_id, - result.tip_a.index_block_hash().to_string() + result.tip_b.index_block_hash().to_string() ); - assert_ne!( + assert_eq!( result .tip_c .anchored_header @@ -2457,7 +2464,7 @@ fn forked_tenure_invalid() { .unwrap() 
.signer_signature_hash(), result.mined_c.signer_signature_hash, - "Mined block during tenure C should not have become the chain tip" + "Mined block during tenure C should have become the chain tip" ); assert!(result.tip_c_2.is_none()); @@ -2471,14 +2478,14 @@ fn forked_tenure_invalid() { ); assert_ne!(result.tip_a, result.tip_d); - // Tenure D builds off of Tenure B + // Tenure D builds off of Tenure c assert_eq!( result.tip_d.stacks_block_height, - result.tip_b.stacks_block_height + 1, + result.tip_c.stacks_block_height + 1, ); assert_eq!( result.mined_d.parent_block_id, - result.tip_b.index_block_hash().to_string() + result.tip_c.index_block_hash().to_string() ); } @@ -2489,8 +2496,12 @@ fn forked_tenure_okay() { return; } - let result = - forked_tenure_testing(Duration::from_secs(360), None, Duration::from_secs(0), true); + let Some(result) = + forked_tenure_testing(Duration::from_secs(360), Duration::from_secs(0), true) + else { + warn!("Snapshot created. Run test again."); + return; + }; assert_ne!(result.tip_b, result.tip_a); assert_ne!(result.tip_b, result.tip_c); @@ -2676,54 +2687,49 @@ fn reloads_signer_set_in() { /// * tenure C ignores b_0, and correctly builds off of block a_x. 
fn forked_tenure_testing( proposal_limit: Duration, - odd_proposal_limit: Option, post_btc_block_pause: Duration, expect_tenure_c: bool, -) -> TenureForkingResult { +) -> Option { tracing_subscriber::registry() .with(fmt::layer()) .with(EnvFilter::from_default_env()) .init(); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::random(); + let sender_sk = Secp256k1PrivateKey::from_seed("sender".as_bytes()); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let signer_test: SignerTest = SignerTest::new_with_config_modifications( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - |config| { - // make the duration long enough that the reorg attempt will definitely be accepted - config.first_proposal_burn_block_timing = odd_proposal_limit - .map(|limit| { - if config.endpoint.port() % 2 == 1 { - // 2/5 or 40% of signers will have this seperate limit - limit - } else { - // 3/5 or 60% of signers will have this original limit - proposal_limit - } - }) - .unwrap_or(proposal_limit); - // don't allow signers to post signed blocks (limits the amount of fault injection we - // need) - TEST_SKIP_BLOCK_BROADCAST.set(true); - }, - |config| { - config.miner.tenure_cost_limit_per_block_percentage = None; - // this test relies on the miner submitting these timed out commits. 
- // the test still passes without this override, but the default timeout - // makes the test take longer than strictly necessary - config.miner.block_commit_delay = Duration::from_secs(10); - }, - None, - None, - ); + let signer_test: SignerTest = + SignerTest::new_with_config_modifications_and_snapshot( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + |config| { + // make the duration long enough that the reorg attempt will definitely be accepted + config.first_proposal_burn_block_timing = proposal_limit; + // don't allow signers to post signed blocks (limits the amount of fault injection we + // need) + TEST_SKIP_BLOCK_BROADCAST.set(true); + }, + |config| { + config.miner.tenure_cost_limit_per_block_percentage = None; + // this test relies on the miner submitting these timed out commits. + // the test still passes without this override, but the default timeout + // makes the test take longer than strictly necessary + config.miner.block_commit_delay = Duration::from_secs(10); + }, + None, + None, + Some(format!("forked_tenure_testing_{expect_tenure_c}").as_str()), + ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - signer_test.boot_to_epoch_3(); + if signer_test.bootstrap_snapshot() { + signer_test.shutdown_and_snapshot(); + return None; + } + sleep_ms(1000); info!("------------------------- Reached Epoch 3.0 -------------------------"); @@ -2744,7 +2750,6 @@ fn forked_tenure_testing( naka_submitted_commits: commits_submitted, naka_mined_blocks: mined_blocks, naka_proposed_blocks: proposed_blocks, - naka_rejected_blocks: rejected_blocks, naka_skip_commit_op: skip_commit_op, .. 
} = signer_test.running_nodes.counters.clone(); @@ -2877,7 +2882,6 @@ fn forked_tenure_testing( } else { proposed_blocks.load(Ordering::SeqCst) }; - let rejected_before = rejected_blocks.load(Ordering::SeqCst); skip_commit_op.set(false); next_block_and( @@ -2889,49 +2893,22 @@ fn forked_tenure_testing( // now allow block B to process if it hasn't already. TEST_BLOCK_ANNOUNCE_STALL.set(false); } - let rejected_count = rejected_blocks.load(Ordering::SeqCst); - let (blocks_count, rbf_count, has_reject_count) = if expect_tenure_c { - // if tenure C is going to be canonical, then we expect the miner to RBF its commit - // once (i.e. for the block it mines and gets signed), and we expect zero - // rejections. - (mined_blocks.load(Ordering::SeqCst), 1, true) - } else { - // if tenure C is NOT going to be canonical, then we expect no RBFs (since the - // miner can't get its block signed), and we expect at least one rejection - ( - proposed_blocks.load(Ordering::SeqCst), - 0, - rejected_count > rejected_before, - ) - }; + let blocks_count = mined_blocks.load(Ordering::SeqCst); + let rbf_count = if expect_tenure_c { 1 } else { 0 }; - Ok(commits_count > commits_before + rbf_count - && blocks_count > blocks_before - && has_reject_count) + Ok(commits_count > commits_before + rbf_count && blocks_count > blocks_before) }, ) .unwrap_or_else(|_| { let commits_count = commits_submitted.load(Ordering::SeqCst); - let rejected_count = rejected_blocks.load(Ordering::SeqCst); - // see above for comments - let (blocks_count, rbf_count, has_reject_count) = if expect_tenure_c { - (mined_blocks.load(Ordering::SeqCst), 1, true) - } else { - ( - proposed_blocks.load(Ordering::SeqCst), - 0, - rejected_count > rejected_before, - ) - }; + let blocks_count = mined_blocks.load(Ordering::SeqCst); + let rbf_count = if expect_tenure_c { 1 } else { 0 }; error!("Tenure C failed to produce a block"; "commits_count" => commits_count, "commits_before" => commits_before, "rbf_count" => rbf_count as u64, 
"blocks_count" => blocks_count, "blocks_before" => blocks_before, - "rejected_count" => rejected_count, - "rejected_before" => rejected_before, - "has_reject_count" => has_reject_count, ); panic!(); }); @@ -2954,11 +2931,7 @@ fn forked_tenure_testing( let blocks = test_observer::get_mined_nakamoto_blocks(); let mined_c = blocks.last().unwrap().clone(); - if expect_tenure_c { - assert_ne!(tip_b.index_block_hash(), tip_c.index_block_hash()); - } else { - assert_eq!(tip_b.index_block_hash(), tip_c.index_block_hash()); - } + assert_ne!(tip_b.index_block_hash(), tip_c.index_block_hash()); assert_ne!(tip_c, tip_a); let (tip_c_2, mined_c_2) = if !expect_tenure_c { @@ -3022,7 +2995,7 @@ fn forked_tenure_testing( let blocks = test_observer::get_mined_nakamoto_blocks(); let mined_d = blocks.last().unwrap().clone(); signer_test.shutdown(); - TenureForkingResult { + Some(TenureForkingResult { tip_a, tip_b, tip_c, @@ -3032,7 +3005,7 @@ fn forked_tenure_testing( mined_c, mined_c_2, mined_d, - } + }) } #[test] From 9e7ebfa15b3b2e3dd2aa3b552a6b83669fcd8800 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 23 Sep 2025 12:11:53 -0700 Subject: [PATCH 40/86] Implement TestChainstate Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/mod.rs | 2 + .../src/chainstate/nakamoto/tests/node.rs | 149 +-- stackslib/src/chainstate/tests/mod.rs | 1044 +++++++++++++++++ 3 files changed, 1124 insertions(+), 71 deletions(-) create mode 100644 stackslib/src/chainstate/tests/mod.rs diff --git a/stackslib/src/chainstate/mod.rs b/stackslib/src/chainstate/mod.rs index 3887650d8e..0d848acf63 100644 --- a/stackslib/src/chainstate/mod.rs +++ b/stackslib/src/chainstate/mod.rs @@ -24,3 +24,5 @@ pub mod burn; pub mod coordinator; pub mod nakamoto; pub mod stacks; +#[cfg(test)] +pub mod tests; diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 2b657ccf27..1c9a64b959 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs 
+++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -696,6 +696,7 @@ impl TestStacksNode { mut after_block: G, malleablize: bool, mined_canonical: bool, + timestamp: Option, ) -> Result)>, ChainstateError> where S: FnMut(&mut NakamotoBlockBuilder), @@ -804,6 +805,10 @@ impl TestStacksNode { &coinbase.clone().unwrap(), ) }; + // Optionally overwrite the timestamp to enable predictable blocks. + if let Some(timestamp) = timestamp { + builder.header.timestamp = timestamp; + } miner_setup(&mut builder); tenure_change = None; @@ -1060,82 +1065,82 @@ impl TestStacksNode { } } -impl TestPeer<'_> { - /// Get the Nakamoto parent linkage data for building atop the last-produced tenure or - /// Stacks 2.x block. - /// Returns (last-tenure-id, epoch2-parent, nakamoto-parent-tenure, parent-sortition) - fn get_nakamoto_parent( - miner: &TestMiner, - stacks_node: &TestStacksNode, - sortdb: &SortitionDB, - ) -> ( - StacksBlockId, - Option, - Option>, - ) { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - if let Some(parent_blocks) = stacks_node.get_last_nakamoto_tenure(miner) { - debug!("Parent will be a Nakamoto block"); - - // parent is an epoch 3 nakamoto block - let first_parent = parent_blocks.first().unwrap(); - debug!("First parent is {:?}", first_parent); +/// Get the Nakamoto parent linkage data for building atop the last-produced tenure or +/// Stacks 2.x block. 
+/// Returns (last-tenure-id, epoch2-parent, nakamoto-parent-tenure, parent-sortition) +pub fn get_nakamoto_parent( + miner: &TestMiner, + stacks_node: &TestStacksNode, + sortdb: &SortitionDB, +) -> ( + StacksBlockId, + Option, + Option>, +) { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + if let Some(parent_blocks) = stacks_node.get_last_nakamoto_tenure(miner) { + debug!("Parent will be a Nakamoto block"); + + // parent is an epoch 3 nakamoto block + let first_parent = parent_blocks.first().unwrap(); + debug!("First parent is {:?}", first_parent); + + // sanity check -- this parent must correspond to a sortition + assert!( + SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &first_parent.header.consensus_hash, + ) + .unwrap() + .unwrap() + .sortition + ); - // sanity check -- this parent must correspond to a sortition - assert!( - SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &first_parent.header.consensus_hash, - ) - .unwrap() - .unwrap() - .sortition + let last_tenure_id = StacksBlockId::new( + &first_parent.header.consensus_hash, + &first_parent.header.block_hash(), + ); + (last_tenure_id, None, Some(parent_blocks)) + } else { + // parent may be an epoch 2.x block + let (parent_opt, parent_sortition_opt) = if let Some(parent_block) = + stacks_node.get_last_anchored_block(miner) + { + debug!("Parent will be a Stacks 2.x block"); + let ic = sortdb.index_conn(); + let sort_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &parent_block.block_hash(), + ) + .unwrap(); + if sort_opt.is_none() { + warn!("No parent sortition in epoch2: tip.sortition_id = {}, parent_block.block_hash() = {}", &tip.sortition_id, &parent_block.block_hash()); + } + (Some(parent_block), sort_opt) + } else { + warn!( + "No parent sortition in epoch2: tip.sortition_id = {}", + &tip.sortition_id ); + (None, None) + }; - let last_tenure_id = StacksBlockId::new( - 
&first_parent.header.consensus_hash, - &first_parent.header.block_hash(), - ); - (last_tenure_id, None, Some(parent_blocks)) + let last_tenure_id = if let Some(last_epoch2_block) = parent_opt.as_ref() { + let parent_sort = parent_sortition_opt.as_ref().unwrap(); + StacksBlockId::new( + &parent_sort.consensus_hash, + &last_epoch2_block.header.block_hash(), + ) } else { - // parent may be an epoch 2.x block - let (parent_opt, parent_sortition_opt) = if let Some(parent_block) = - stacks_node.get_last_anchored_block(miner) - { - debug!("Parent will be a Stacks 2.x block"); - let ic = sortdb.index_conn(); - let sort_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &parent_block.block_hash(), - ) - .unwrap(); - if sort_opt.is_none() { - warn!("No parent sortition in epoch2: tip.sortition_id = {}, parent_block.block_hash() = {}", &tip.sortition_id, &parent_block.block_hash()); - } - (Some(parent_block), sort_opt) - } else { - warn!( - "No parent sortition in epoch2: tip.sortition_id = {}", - &tip.sortition_id - ); - (None, None) - }; - - let last_tenure_id = if let Some(last_epoch2_block) = parent_opt.as_ref() { - let parent_sort = parent_sortition_opt.as_ref().unwrap(); - StacksBlockId::new( - &parent_sort.consensus_hash, - &last_epoch2_block.header.block_hash(), - ) - } else { - // must be a genesis block (testing only!) - StacksBlockId(BOOT_BLOCK_HASH.0) - }; - (last_tenure_id, parent_opt, None) - } + // must be a genesis block (testing only!) + StacksBlockId(BOOT_BLOCK_HASH.0) + }; + (last_tenure_id, parent_opt, None) } +} +impl TestPeer<'_> { /// Start the next Nakamoto tenure. 
/// This generates the VRF key and block-commit txs, as well as the TenureChange and /// leader key this commit references @@ -1161,7 +1166,7 @@ impl TestPeer<'_> { Some(nakamoto_parent_tenure.clone()), ) } else { - Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb) + get_nakamoto_parent(&self.miner, &stacks_node, &sortdb) }; // find the VRF leader key register tx to use. @@ -1464,6 +1469,7 @@ impl TestPeer<'_> { after_block, peer.mine_malleablized_blocks, peer.nakamoto_parent_tenure_opt.is_none(), + None, )?; let just_blocks = blocks @@ -1552,6 +1558,7 @@ impl TestPeer<'_> { |_| true, self.mine_malleablized_blocks, self.nakamoto_parent_tenure_opt.is_none(), + None, ) .unwrap(); diff --git a/stackslib/src/chainstate/tests/mod.rs b/stackslib/src/chainstate/tests/mod.rs new file mode 100644 index 0000000000..ce38b60bad --- /dev/null +++ b/stackslib/src/chainstate/tests/mod.rs @@ -0,0 +1,1044 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+use std::fs; + +use clarity::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; +use clarity::vm::ast::parser::v1::CONTRACT_MAX_NAME_LENGTH; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::database::STXBalance; +use clarity::vm::types::*; +use clarity::vm::ContractName; +use rand; +use rand::{thread_rng, Rng}; +use stacks_common::address::*; +use stacks_common::deps_common::bitcoin::network::serialize::BitcoinHash; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::*; +use stacks_common::util::vrf::*; + +use self::nakamoto::test_signers::TestSigners; +use super::*; +use crate::burnchains::bitcoin::indexer::BitcoinIndexer; +use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; +use crate::burnchains::tests::*; +use crate::burnchains::*; +use crate::chainstate::burn::db::sortdb::*; +use crate::chainstate::burn::operations::*; +use crate::chainstate::burn::*; +use crate::chainstate::coordinator::tests::*; +use crate::chainstate::coordinator::*; +use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; +use crate::chainstate::nakamoto::tests::get_account; +use crate::chainstate::nakamoto::tests::node::{get_nakamoto_parent, TestStacker}; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, StacksDBIndexed}; +use crate::chainstate::stacks::address::PoxAddress; +use crate::chainstate::stacks::boot::test::get_parent_tip; +use crate::chainstate::stacks::db::{StacksChainState, *}; +use crate::chainstate::stacks::tests::*; +use crate::chainstate::stacks::{Error as ChainstateError, StacksMicroblockHeader, *}; +use crate::core::{EpochList, StacksEpoch, StacksEpochExtension, BOOT_BLOCK_HASH}; +use crate::net::test::{TestEventObserver, TestPeerConfig}; +use crate::util_lib::boot::{boot_code_test_addr, boot_code_tx_auth}; +use crate::util_lib::strings::*; + +// describes a chainstate's initial configuration +#[derive(Debug, Clone)] +pub struct TestChainstateConfig { + pub 
network_id: u32, + pub current_block: u64, + pub burnchain: Burnchain, + pub test_name: String, + pub initial_balances: Vec<(PrincipalData, u64)>, + pub initial_lockups: Vec, + pub spending_account: TestMiner, + pub setup_code: String, + pub epochs: Option, + pub test_stackers: Option>, + pub test_signers: Option, + /// aggregate public key to use + /// (NOTE: will be used post-Nakamoto) + pub aggregate_public_key: Option>, + pub txindex: bool, +} + +impl Default for TestChainstateConfig { + fn default() -> Self { + let chain_config = TestPeerConfig::default(); + Self::from(chain_config) + } +} +impl TestChainstateConfig { + pub fn new(test_name: &str) -> Self { + Self { + test_name: test_name.into(), + ..Self::default() + } + } +} +pub struct TestChainstate<'a> { + pub config: TestChainstateConfig, + pub sortdb: Option, + pub miner: TestMiner, + pub stacks_node: Option, + pub chainstate_path: String, + pub indexer: Option, + pub coord: ChainsCoordinator< + 'a, + TestEventObserver, + (), + OnChainRewardSetProvider<'a, TestEventObserver>, + (), + (), + BitcoinIndexer, + >, + pub nakamoto_parent_tenure_opt: Option>, + /// list of malleablized blocks produced when mining. 
+ pub malleablized_blocks: Vec, + pub mine_malleablized_blocks: bool, +} + +impl From for TestChainstateConfig { + fn from(chain_config: TestPeerConfig) -> Self { + Self { + network_id: chain_config.network_id, + current_block: chain_config.current_block, + burnchain: chain_config.burnchain, + test_name: chain_config.test_name, + initial_balances: chain_config.initial_balances, + initial_lockups: chain_config.initial_lockups, + spending_account: chain_config.spending_account, + setup_code: chain_config.setup_code, + epochs: chain_config.epochs, + test_stackers: chain_config.test_stackers, + test_signers: chain_config.test_signers, + aggregate_public_key: chain_config.aggregate_public_key, + txindex: chain_config.txindex, + } + } +} +impl<'a> TestChainstate<'a> { + pub fn new(config: TestChainstateConfig) -> TestChainstate<'a> { + Self::new_with_observer(config, None) + } + + pub fn test_path(config: &TestChainstateConfig) -> String { + let random = thread_rng().gen::(); + let random_bytes = to_hex(&random.to_be_bytes()); + let cleaned_config_test_name = config.test_name.replace("::", "_"); + format!( + "/tmp/stacks-node-tests/units-test-consensus/{cleaned_config_test_name}-{random_bytes}" + ) + } + + pub fn make_test_path(config: &TestChainstateConfig) -> String { + let test_path = Self::test_path(config); + if fs::metadata(&test_path).is_ok() { + fs::remove_dir_all(&test_path).unwrap(); + }; + + fs::create_dir_all(&test_path).unwrap(); + test_path + } + + pub fn new_with_observer( + mut config: TestChainstateConfig, + observer: Option<&'a TestEventObserver>, + ) -> TestChainstate<'a> { + let test_path = Self::test_path(&config); + let chainstate_path = get_chainstate_path_str(&test_path); + let mut miner_factory = TestMinerFactory::new(); + miner_factory.chain_id = config.network_id; + let mut miner = miner_factory.next_miner( + config.burnchain.clone(), + 1, + 1, + AddressHashMode::SerializeP2PKH, + ); + // manually set fees + miner.test_with_tx_fees = false; + + 
config.burnchain.working_dir = get_burnchain(&test_path, None).working_dir; + + let epochs = config.epochs.clone().unwrap_or_else(|| { + StacksEpoch::unit_test_pre_2_05(config.burnchain.first_block_height) + }); + + let mut sortdb = SortitionDB::connect( + &config.burnchain.get_db_path(), + config.burnchain.first_block_height, + &config.burnchain.first_block_hash, + 0, + &epochs, + config.burnchain.pox_constants.clone(), + None, + true, + ) + .unwrap(); + + let first_burnchain_block_height = config.burnchain.first_block_height; + let first_burnchain_block_hash = config.burnchain.first_block_hash.clone(); + + let _burnchain_blocks_db = BurnchainDB::connect( + &config.burnchain.get_burnchaindb_path(), + &config.burnchain, + true, + ) + .unwrap(); + + let agg_pub_key_opt = config.aggregate_public_key.clone(); + + let conf = config.clone(); + let post_flight_callback = move |clarity_tx: &mut ClarityTx| { + let mut receipts = vec![]; + + if let Some(agg_pub_key) = agg_pub_key_opt { + debug!("Setting aggregate public key to {}", &to_hex(&agg_pub_key)); + NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, agg_pub_key); + } else { + debug!("Not setting aggregate public key"); + } + // add test-specific boot code + if !conf.setup_code.is_empty() { + let receipt = clarity_tx.connection().as_transaction(|clarity| { + let boot_code_addr = boot_code_test_addr(); + let boot_code_account = StacksAccount { + principal: boot_code_addr.to_account_principal(), + nonce: 0, + stx_balance: STXBalance::zero(), + }; + + let boot_code_auth = boot_code_tx_auth(boot_code_addr.clone()); + + debug!( + "Instantiate test-specific boot code contract '{}.{}' ({} bytes)...", + &boot_code_addr.to_string(), + &conf.test_name, + conf.setup_code.len() + ); + + let smart_contract = TransactionPayload::SmartContract( + TransactionSmartContract { + name: ContractName::try_from( + conf.test_name + .replace("::", "-") + .chars() + .skip( + conf.test_name + .len() + 
.saturating_sub(CONTRACT_MAX_NAME_LENGTH), + ) + .collect::() + .trim_start_matches(|c: char| !c.is_alphabetic()) + .to_string(), + ) + .expect("FATAL: invalid boot-code contract name"), + code_body: StacksString::from_str(&conf.setup_code) + .expect("FATAL: invalid boot code body"), + }, + None, + ); + + let boot_code_smart_contract = StacksTransaction::new( + TransactionVersion::Testnet, + boot_code_auth, + smart_contract, + ); + StacksChainState::process_transaction_payload( + clarity, + &boot_code_smart_contract, + &boot_code_account, + None, + ) + .unwrap() + }); + receipts.push(receipt); + } + debug!("Bootup receipts: {receipts:?}"); + }; + + let mut boot_data = ChainStateBootData::new( + &config.burnchain, + config.initial_balances.clone(), + Some(Box::new(post_flight_callback)), + ); + + if !config.initial_lockups.is_empty() { + let lockups = config.initial_lockups.clone(); + boot_data.get_bulk_initial_lockups = + Some(Box::new(move || Box::new(lockups.into_iter()))); + } + + let (chainstate, _) = StacksChainState::open_and_exec( + false, + config.network_id, + &chainstate_path, + Some(&mut boot_data), + None, + ) + .unwrap(); + + let indexer = BitcoinIndexer::new_unit_test(&config.burnchain.working_dir); + let mut coord = ChainsCoordinator::test_new_full( + &config.burnchain, + config.network_id, + &test_path, + OnChainRewardSetProvider(observer), + observer, + indexer, + None, + config.txindex, + ); + coord.handle_new_burnchain_block().unwrap(); + + let mut stacks_node = TestStacksNode::from_chainstate(chainstate); + + { + // pre-populate burnchain, if running on bitcoin + let prev_snapshot = SortitionDB::get_first_block_snapshot(sortdb.conn()).unwrap(); + let mut fork = TestBurnchainFork::new( + prev_snapshot.block_height, + &prev_snapshot.burn_header_hash, + &prev_snapshot.index_root, + 0, + ); + for i in prev_snapshot.block_height..config.current_block { + let burn_block = { + let ic = sortdb.index_conn(); + let mut burn_block = fork.next_block(&ic); + 
stacks_node.add_key_register(&mut burn_block, &mut miner); + burn_block + }; + fork.append_block(burn_block); + + fork.mine_pending_blocks_pox(&mut sortdb, &config.burnchain, &mut coord); + } + } + + let indexer = BitcoinIndexer::new_unit_test(&config.burnchain.working_dir); + + TestChainstate { + config, + sortdb: Some(sortdb), + miner, + stacks_node: Some(stacks_node), + chainstate_path, + coord, + indexer: Some(indexer), + nakamoto_parent_tenure_opt: None, + malleablized_blocks: vec![], + mine_malleablized_blocks: true, + } + } + + pub fn next_burnchain_block( + &mut self, + blockstack_ops: Vec, + ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true, false); + (x.0, x.1, x.2) + } + + pub fn set_ops_consensus_hash( + blockstack_ops: &mut [BlockstackOperationType], + ch: &ConsensusHash, + ) { + for op in blockstack_ops.iter_mut() { + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = op { + data.consensus_hash = (*ch).clone(); + } + } + } + + pub fn set_ops_burn_header_hash( + blockstack_ops: &mut [BlockstackOperationType], + bhh: &BurnchainHeaderHash, + ) { + for op in blockstack_ops.iter_mut() { + op.set_burn_header_hash(bhh.clone()); + } + } + + pub fn make_next_burnchain_block( + burnchain: &Burnchain, + tip_block_height: u64, + tip_block_hash: &BurnchainHeaderHash, + num_ops: u64, + ops_determine_block_header: bool, + ) -> BurnchainBlockHeader { + test_debug!( + "make_next_burnchain_block: tip_block_height={tip_block_height} tip_block_hash={tip_block_hash} num_ops={num_ops}" + ); + let indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir); + let parent_hdr = indexer + .read_burnchain_header(tip_block_height) + .unwrap() + .unwrap(); + + test_debug!("parent hdr ({tip_block_height}): {parent_hdr:?}"); + assert_eq!(&parent_hdr.block_hash, tip_block_hash); + + let now = BURNCHAIN_TEST_BLOCK_TIME; + let block_header_hash = BurnchainHeaderHash::from_bitcoin_hash( + 
&BitcoinIndexer::mock_bitcoin_header( + &parent_hdr.block_hash, + (now as u32) + + if ops_determine_block_header { + num_ops as u32 + } else { + 0 + }, + ) + .bitcoin_hash(), + ); + test_debug!( + "Block header hash at {} is {block_header_hash}", + tip_block_height + 1 + ); + + BurnchainBlockHeader { + block_height: tip_block_height + 1, + block_hash: block_header_hash.clone(), + parent_block_hash: parent_hdr.block_hash.clone(), + num_txs: num_ops, + timestamp: now, + } + } + + pub fn add_burnchain_block( + burnchain: &Burnchain, + block_header: &BurnchainBlockHeader, + blockstack_ops: Vec, + ) { + let mut burnchain_db = BurnchainDB::open(&burnchain.get_burnchaindb_path(), true).unwrap(); + + let mut indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir); + + test_debug!( + "Store header and block ops for {}-{} ({})", + &block_header.block_hash, + &block_header.parent_block_hash, + block_header.block_height + ); + indexer.raw_store_header(block_header.clone()).unwrap(); + burnchain_db + .raw_store_burnchain_block(burnchain, &indexer, block_header.clone(), blockstack_ops) + .unwrap(); + } + + /// Generate and commit the next burnchain block with the given block operations. + /// * if `set_consensus_hash` is true, then each op's consensus_hash field will be set to + /// that of the resulting block snapshot. + /// * if `set_burn_hash` is true, then each op's burnchain header hash field will be set to + /// that of the resulting block snapshot. 
+ /// + /// Returns ( + /// burnchain tip block height, + /// burnchain tip block hash, + /// burnchain tip consensus hash, + /// Option + /// ) + fn inner_next_burnchain_block( + &mut self, + mut blockstack_ops: Vec, + set_consensus_hash: bool, + set_burn_hash: bool, + update_burnchain: bool, + ops_determine_block_header: bool, + ) -> ( + u64, + BurnchainHeaderHash, + ConsensusHash, + Option, + ) { + let sortdb = self.sortdb.take().unwrap(); + let (block_height, block_hash, epoch_id) = { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), tip.block_height + 1) + .unwrap() + .unwrap() + .epoch_id; + + if set_consensus_hash { + Self::set_ops_consensus_hash(&mut blockstack_ops, &tip.consensus_hash); + } + + let block_header = Self::make_next_burnchain_block( + &self.config.burnchain, + tip.block_height, + &tip.burn_header_hash, + blockstack_ops.len() as u64, + ops_determine_block_header, + ); + + if set_burn_hash { + Self::set_ops_burn_header_hash(&mut blockstack_ops, &block_header.block_hash); + } + + if update_burnchain { + Self::add_burnchain_block( + &self.config.burnchain, + &block_header, + blockstack_ops.clone(), + ); + } + (block_header.block_height, block_header.block_hash, epoch_id) + }; + + let missing_pox_anchor_block_hash_opt = if epoch_id < StacksEpochId::Epoch30 { + self.coord + .handle_new_burnchain_block() + .unwrap() + .into_missing_block_hash() + } else if self.coord.handle_new_nakamoto_burnchain_block().unwrap() { + None + } else { + Some(BlockHeaderHash([0x00; 32])) + }; + + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); + sortdb_reader.get_pox_id().unwrap() + }; + + test_debug!("\n\nafter burn block {block_hash:?}, tip PoX ID is {pox_id:?}\n\n"); + + let tip = 
SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + self.sortdb = Some(sortdb); + ( + block_height, + block_hash, + tip.consensus_hash, + missing_pox_anchor_block_hash_opt, + ) + } + + /// Store the given epoch 2.x Stacks block and microblock to staging, and then try and + /// process them. + pub fn process_stacks_epoch_at_tip( + &mut self, + block: &StacksBlock, + microblocks: &[StacksMicroblock], + ) { + let sortdb = self.sortdb.take().unwrap(); + let mut node = self.stacks_node.take().unwrap(); + { + let ic = sortdb.index_conn(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); + node.chainstate + .preprocess_stacks_epoch(&ic, &tip, block, microblocks) + .unwrap(); + } + self.coord.handle_new_stacks_block().unwrap(); + + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); + sortdb_reader.get_pox_id().unwrap() + }; + test_debug!( + "\n\nafter stacks block {:?}, tip PoX ID is {pox_id:?}\n\n", + block.block_hash() + ); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + } + + pub fn sortdb(&mut self) -> &mut SortitionDB { + self.sortdb.as_mut().unwrap() + } + + pub fn sortdb_ref(&mut self) -> &SortitionDB { + self.sortdb.as_ref().unwrap() + } + + /// Make a tenure with the given transactions. Creates a coinbase tx with the given nonce, and then increments + /// the provided reference. 
+ pub fn tenure_with_txs( + &mut self, + txs: &[StacksTransaction], + coinbase_nonce: &mut usize, + ) -> StacksBlockId { + let microblock_privkey = self.miner.next_microblock_privkey(); + let microblock_pubkeyhash = + Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); + let tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.as_ref().unwrap().conn()) + .unwrap(); + let burnchain = self.config.burnchain.clone(); + + let (burn_ops, stacks_block, microblocks) = self.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = get_parent_tip(parent_opt, chainstate, sortdb); + let coinbase_tx = make_coinbase(miner, *coinbase_nonce); + + let mut block_txs = vec![coinbase_tx]; + block_txs.extend_from_slice(txs); + + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &burnchain, + &parent_tip, + vrf_proof, + tip.total_burn, + µblock_pubkeyhash, + ) + .unwrap(); + let (anchored_block, _size, _cost) = + StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_handle(&tip.sortition_id), + block_txs, + ) + .unwrap(); + (anchored_block, vec![]) + }, + ); + + let (_, _, consensus_hash) = self.next_burnchain_block(burn_ops); + self.process_stacks_epoch_at_tip(&stacks_block, µblocks); + + *coinbase_nonce += 1; + + StacksBlockId::new(&consensus_hash, &stacks_block.block_hash()) + } + + /// Make a tenure, using `tenure_builder` to generate a Stacks block and a list of + /// microblocks. 
+ pub fn make_tenure( + &mut self, + mut tenure_builder: F, + ) -> ( + Vec, + StacksBlock, + Vec, + ) + where + F: FnMut( + &mut TestMiner, + &mut SortitionDB, + &mut StacksChainState, + &VRFProof, + Option<&StacksBlock>, + Option<&StacksMicroblockHeader>, + ) -> (StacksBlock, Vec), + { + let mut sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let mut burn_block = TestBurnchainBlock::new(&tip, 0); + let mut stacks_node = self.stacks_node.take().unwrap(); + + let parent_block_opt = stacks_node.get_last_anchored_block(&self.miner); + let parent_sortition_opt = parent_block_opt.as_ref().and_then(|parent_block| { + let ic = sortdb.index_conn(); + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &parent_block.block_hash(), + ) + .unwrap() + }); + + let parent_microblock_header_opt = + get_last_microblock_header(&stacks_node, &self.miner, parent_block_opt.as_ref()); + let last_key = stacks_node.get_last_key(&self.miner); + + let network_id = self.config.network_id; + let chainstate_path = self.chainstate_path.clone(); + let burn_block_height = burn_block.block_height; + + let proof = self + .miner + .make_proof( + &last_key.public_key, + &burn_block.parent_snapshot.sortition_hash, + ) + .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", last_key.public_key)); + + let (stacks_block, microblocks) = tenure_builder( + &mut self.miner, + &mut sortdb, + &mut stacks_node.chainstate, + &proof, + parent_block_opt.as_ref(), + parent_microblock_header_opt.as_ref(), + ); + + let mut block_commit_op = stacks_node.make_tenure_commitment( + &sortdb, + &mut burn_block, + &mut self.miner, + &stacks_block, + microblocks.clone(), + 1000, + &last_key, + parent_sortition_opt.as_ref(), + ); + + // patch up block-commit -- these blocks all mine off of genesis + if stacks_block.header.parent_block == BlockHeaderHash([0u8; 32]) { + block_commit_op.parent_block_ptr = 0; + 
block_commit_op.parent_vtxindex = 0; + } + + let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + + // patch in reward set info + let recipients = get_next_recipients( + &tip, + &mut stacks_node.chainstate, + &mut sortdb, + &self.config.burnchain, + &OnChainRewardSetProvider::new(), + ) + .unwrap_or_else(|e| panic!("Failure fetching recipient set: {e:?}")); + block_commit_op.commit_outs = match recipients { + Some(info) => { + let mut recipients = info + .recipients + .into_iter() + .map(|x| x.0) + .collect::>(); + if recipients.len() == 1 { + recipients.push(PoxAddress::standard_burn_address(false)); + } + recipients + } + None => { + if self + .config + .burnchain + .is_in_prepare_phase(burn_block.block_height) + { + vec![PoxAddress::standard_burn_address(false)] + } else { + vec![ + PoxAddress::standard_burn_address(false), + PoxAddress::standard_burn_address(false), + ] + } + } + }; + test_debug!( + "Block commit at height {} has {} recipients: {:?}", + block_commit_op.block_height, + block_commit_op.commit_outs.len(), + &block_commit_op.commit_outs + ); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + ( + vec![ + BlockstackOperationType::LeaderKeyRegister(leader_key_op), + BlockstackOperationType::LeaderBlockCommit(block_commit_op), + ], + stacks_block, + microblocks, + ) + } + + pub fn get_burn_block_height(&self) -> u64 { + SortitionDB::get_canonical_burn_chain_tip( + self.sortdb.as_ref().expect("Failed to get sortdb").conn(), + ) + .expect("Failed to get canonical burn chain tip") + .block_height + } + + pub fn get_reward_cycle(&self) -> u64 { + let block_height = self.get_burn_block_height(); + self.config + .burnchain + .block_height_to_reward_cycle(block_height) + .unwrap_or_else(|| panic!("Failed to get reward cycle for block height {block_height}")) + } + + /// Start the next Nakamoto tenure. 
+ /// This generates the VRF key and block-commit txs, as well as the TenureChange and + /// leader key this commit references + pub fn begin_nakamoto_tenure( + &mut self, + tenure_change_cause: TenureChangeCause, + ) -> ( + Vec, + TenureChangePayload, + LeaderKeyRegisterOp, + ) { + let mut sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let mut burn_block = TestBurnchainBlock::new(&tip, 0); + let mut stacks_node = self.stacks_node.take().unwrap(); + + let (last_tenure_id, parent_block_opt, parent_tenure_opt) = + if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + ( + nakamoto_parent_tenure.first().as_ref().unwrap().block_id(), + None, + Some(nakamoto_parent_tenure.clone()), + ) + } else { + get_nakamoto_parent(&self.miner, &stacks_node, &sortdb) + }; + + // find the VRF leader key register tx to use. + // it's the one pointed to by the parent tenure + let parent_consensus_hash_and_tenure_start_id_opt = + if let Some(parent_tenure) = parent_tenure_opt.as_ref() { + let tenure_start_block = parent_tenure.first().unwrap(); + Some(( + tenure_start_block.header.consensus_hash.clone(), + tenure_start_block.block_id(), + )) + } else if let Some(parent_block) = parent_block_opt.as_ref() { + let parent_header_info = + StacksChainState::get_stacks_block_header_info_by_index_block_hash( + stacks_node.chainstate.db(), + &last_tenure_id, + ) + .unwrap() + .unwrap(); + Some(( + parent_header_info.consensus_hash.clone(), + parent_header_info.index_block_hash(), + )) + } else { + None + }; + + let (ch, parent_tenure_start_block_id) = parent_consensus_hash_and_tenure_start_id_opt + .clone() + .expect("No leader key"); + // it's possible that the parent was a shadow block. 
+ // if so, find the highest non-shadow ancestor's block-commit, so we can + let mut cursor = ch; + let (tenure_sn, tenure_block_commit) = loop { + let tenure_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cursor) + .unwrap() + .unwrap(); + + let Some(tenure_block_commit) = get_block_commit_by_txid( + sortdb.conn(), + &tenure_sn.sortition_id, + &tenure_sn.winning_block_txid, + ) + .unwrap() else { + // parent must be a shadow block + let header = NakamotoChainState::get_block_header_nakamoto( + stacks_node.chainstate.db(), + &parent_tenure_start_block_id, + ) + .unwrap() + .unwrap() + .anchored_header + .as_stacks_nakamoto() + .cloned() + .unwrap(); + + assert!(header.is_shadow_block(), "Parent tenure start block ID {parent_tenure_start_block_id} has no block-commit and is not a shadow block"); + + cursor = stacks_node + .chainstate + .index_conn() + .get_parent_tenure_consensus_hash(&parent_tenure_start_block_id, &cursor) + .unwrap() + .unwrap(); + + continue; + }; + break (tenure_sn, tenure_block_commit); + }; + + let last_key = SortitionDB::get_leader_key_at( + &sortdb.index_conn(), + tenure_block_commit.key_block_ptr.into(), + tenure_block_commit.key_vtxindex.into(), + &tenure_sn.sortition_id, + ) + .unwrap() + .unwrap(); + + let network_id = self.config.network_id; + let chainstate_path = self.chainstate_path.clone(); + let burn_block_height = burn_block.block_height; + + let (mut block_commit_op, tenure_change_payload) = stacks_node.begin_nakamoto_tenure( + &sortdb, + &mut self.miner, + &mut burn_block, + &last_key, + parent_block_opt.as_ref(), + parent_tenure_opt.as_deref(), + 1000, + tenure_change_cause, + ); + + // patch up block-commit -- these blocks all mine off of genesis + if last_tenure_id == StacksBlockId(BOOT_BLOCK_HASH.0) { + block_commit_op.parent_block_ptr = 0; + block_commit_op.parent_vtxindex = 0; + } + + let mut burn_ops = vec![]; + if self.miner.last_VRF_public_key().is_none() { + let leader_key_op = 
stacks_node.add_key_register(&mut burn_block, &mut self.miner); + burn_ops.push(BlockstackOperationType::LeaderKeyRegister(leader_key_op)); + } + + // patch in reward set info + let recipients = get_nakamoto_next_recipients( + &tip, + &mut sortdb, + &mut stacks_node.chainstate, + &tenure_change_payload.previous_tenure_end, + &self.config.burnchain, + ) + .unwrap_or_else(|e| panic!("Failure fetching recipient set: {e:?}")); + block_commit_op.commit_outs = match recipients { + Some(info) => { + let mut recipients = info + .recipients + .into_iter() + .map(|x| x.0) + .collect::>(); + if recipients.len() == 1 { + recipients.push(PoxAddress::standard_burn_address(false)); + } + recipients + } + None => { + if self + .config + .burnchain + .is_in_prepare_phase(burn_block.block_height) + { + vec![PoxAddress::standard_burn_address(false)] + } else { + vec![ + PoxAddress::standard_burn_address(false), + PoxAddress::standard_burn_address(false), + ] + } + } + }; + test_debug!( + "Block commit at height {} has {} recipients: {:?}", + block_commit_op.block_height, + block_commit_op.commit_outs.len(), + &block_commit_op.commit_outs + ); + + burn_ops.push(BlockstackOperationType::LeaderBlockCommit(block_commit_op)); + + // prepare to mine + let miner_addr = self.miner.origin_address().unwrap(); + let miner_account = get_account(&mut stacks_node.chainstate, &sortdb, &miner_addr); + self.miner.set_nonce(miner_account.nonce); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + (burn_ops, tenure_change_payload, last_key) + } + + /// Make the VRF proof for this tenure. 
+ /// Call after processing the block-commit + pub fn make_nakamoto_vrf_proof(&mut self, miner_key: LeaderKeyRegisterOp) -> VRFProof { + let sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let proof = self + .miner + .make_proof(&miner_key.public_key, &tip.sortition_hash) + .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", miner_key.public_key)); + self.sortdb = Some(sortdb); + debug!( + "VRF proof made from {:?} over {}: {proof:?}", + miner_key.public_key, &tip.sortition_hash + ); + proof + } + + /// Produce and process a Nakamoto tenure, after processing the block-commit from + /// begin_nakamoto_tenure(). You'd process the burnchain ops from begin_nakamoto_tenure(), + /// take the consensus hash, and feed it in here. + /// + /// Returns the blocks, their sizes, and runtime costs + pub fn make_nakamoto_tenure( + &mut self, + tenure_change: StacksTransaction, + coinbase: StacksTransaction, + timestamp: Option, + ) -> Result, ChainstateError> { + let cycle = self.get_reward_cycle(); + let mut signers = self.config.test_signers.clone().unwrap_or_default(); + signers.generate_aggregate_key(cycle); + + let mut sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.stacks_node.take().unwrap(); + let blocks = TestStacksNode::make_nakamoto_tenure_blocks( + &mut stacks_node.chainstate, + &mut sortdb, + &mut self.miner, + &mut signers, + &tenure_change + .try_as_tenure_change() + .unwrap() + .tenure_consensus_hash + .clone(), + Some(tenure_change), + Some(coinbase), + &mut self.coord, + |_| {}, + |_, _, _, _| vec![], + |_| true, + self.mine_malleablized_blocks, + self.nakamoto_parent_tenure_opt.is_none(), + timestamp, + )?; + + let just_blocks = blocks + .clone() + .into_iter() + .map(|(block, _, _, _)| block) + .collect(); + + stacks_node.add_nakamoto_tenure_blocks(just_blocks); + + let mut malleablized_blocks: Vec = blocks + .clone() + .into_iter() + .flat_map(|(_, _, _, 
malleablized)| malleablized) + .collect(); + + self.malleablized_blocks.append(&mut malleablized_blocks); + + let block_data = blocks + .into_iter() + .map(|(blk, sz, cost, _)| (blk, sz, cost)) + .collect(); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(stacks_node); + Ok(block_data) + } +} From 2376c98684e53a4ab0f487a83a88c0c2332e24cd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 23 Sep 2025 12:45:59 -0700 Subject: [PATCH 41/86] Add ConsensusTest boiler plate for appending a block Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 335 ++++++++++++++++++++ stackslib/src/chainstate/tests/mod.rs | 2 + stackslib/src/net/tests/mod.rs | 218 +++++++++++++ 3 files changed, 555 insertions(+) create mode 100644 stackslib/src/chainstate/tests/consensus.rs diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs new file mode 100644 index 0000000000..996078e9a6 --- /dev/null +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -0,0 +1,335 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+use std::collections::HashMap; + +use clarity::codec::StacksMessageCodec; +use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, TrieHash}; +use clarity::types::{Address, StacksEpochId}; +use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; +use clarity::util::secp256k1::MessageSignature; +use clarity::vm::costs::ExecutionCost; +use serde::{Deserialize, Serialize}; +use stacks_common::bitvec::BitVec; + +use crate::burnchains::PoxConstants; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::{ + StacksTransaction, TenureChangeCause, TransactionAuth, TransactionPayload, TransactionVersion, +}; +use crate::chainstate::tests::TestChainstate; +use crate::net::tests::NakamotoBootPlan; + +pub struct ConsensusTest<'a> { + pub chain: TestChainstate<'a>, + pub test_vector: ConsensusTestVector, +} + +impl ConsensusTest<'_> { + pub fn new(test_name: &str, test_vector: ConsensusTestVector) -> Self { + let privk = StacksPrivateKey::from_hex( + "510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101", + ) + .unwrap(); + + let initial_balances = test_vector + .initial_balances + .iter() + .map(|(addr, amount)| (StacksAddress::from_string(addr).unwrap().into(), *amount)) + .collect(); + let epoch_id = StacksEpochId::try_from(test_vector.epoch_id).unwrap(); + let chain = match epoch_id { + StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 + | StacksEpochId::Epoch32 + | StacksEpochId::Epoch33 => { + let mut chain = NakamotoBootPlan::new(test_name) + .with_pox_constants(10, 3) + .with_initial_balances(initial_balances) + .with_private_key(privk) + .boot_nakamoto_chainstate(None); + let (burn_ops, mut tenure_change, miner_key) = + chain.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, header_hash, consensus_hash) = chain.next_burnchain_block(burn_ops); + let vrf_proof = chain.make_nakamoto_vrf_proof(miner_key); + + 
tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + let tenure_change_tx = chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = chain.miner.make_nakamoto_coinbase(None, vrf_proof); + + let blocks_and_sizes = + chain.make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)); + chain + } + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => { + unimplemented!("Not bothering with pre nakamoto tests."); + } + }; + Self { chain, test_vector } + } + + /// Run a single test vector, validating consensus. + pub fn run(mut self) { + debug!("--------- Running test vector ---------"); + let txs: Vec<_> = self + .test_vector + .payloads + .iter() + .map(|payload_str| { + let payload: TransactionPayload = serde_json::from_str(payload_str).unwrap(); + StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&StacksPrivateKey::random()).unwrap(), + payload, + ) + }) + .collect(); + + let expected_state_index_root = + TrieHash::from_hex(&self.test_vector.expected_state_index_root).unwrap(); + + let (block, block_size) = self.construct_nakamoto_block(txs, expected_state_index_root); + let test_vector = self.test_vector.clone(); + + let mut stacks_node = self.chain.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let chain_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let pox_constants = PoxConstants::test_default(); + + let (mut chainstate_tx, clarity_instance) = + stacks_node.chainstate.chainstate_tx_begin().unwrap(); + + let mut burndb_conn = sortdb.index_handle_at_tip(); + + debug!("--------- Appending block {} ---------", block.header.signer_signature_hash(); "block" => ?block); + 
let result = NakamotoChainState::append_block( + &mut chainstate_tx, + clarity_instance, + &mut burndb_conn, + &chain_tip.consensus_hash, + &pox_constants, + &chain_tip, + &chain_tip.burn_header_hash, + chain_tip.burn_header_height, + chain_tip.burn_header_timestamp, + &block, + block_size.try_into().unwrap(), + block.header.burn_spent, + 1500, + &RewardSet::empty(), + false, + ); + + let mut mismatches = Vec::new(); + + match (&result, &test_vector.expected_result) { + (Ok((epoch_receipt, _, _, tx_events)), ExpectedResult::Success(expected_outputs)) => { + debug!("--------- Appended Block ---------"; + "epoch_receipt" => ?epoch_receipt, + "tx_events" => ?tx_events + ); + + let actual_results = ExpectedOutputs { + transaction_return_types: epoch_receipt + .tx_receipts + .iter() + .map(|r| serde_json::to_string(&r.result).unwrap()) + .collect(), + transaction_costs: epoch_receipt + .tx_receipts + .iter() + .map(|r| r.execution_cost.clone()) + .collect(), + total_block_cost: epoch_receipt.anchored_block_cost.clone(), + marf_hash: epoch_receipt.header.index_root.to_hex(), + }; + + if actual_results != *expected_outputs { + if actual_results.transaction_return_types + != expected_outputs.transaction_return_types + { + mismatches.push(format!( + "Tx return types mismatch: actual {:?}, expected {:?}", + actual_results.transaction_return_types, + expected_outputs.transaction_return_types + )); + } + if actual_results.transaction_costs != expected_outputs.transaction_costs { + mismatches.push(format!( + "Tx costs mismatch: actual {:?}, expected {:?}", + actual_results.transaction_costs, expected_outputs.transaction_costs + )); + } + if actual_results.total_block_cost != expected_outputs.total_block_cost { + mismatches.push(format!( + "Total block cost mismatch: actual {:?}, expected {:?}", + actual_results.total_block_cost, expected_outputs.total_block_cost + )); + } + if actual_results.marf_hash != expected_outputs.marf_hash { + mismatches.push(format!( + "MARF hash 
mismatch: actual {}, expected {}", + actual_results.marf_hash, expected_outputs.marf_hash + )); + } + } + } + (Ok(_), ExpectedResult::Failure(_)) => { + mismatches.push("Expected failure but got success".to_string()); + } + (Err(e), ExpectedResult::Failure(expected_err)) => { + debug!("--------- Block Errored: {e} ---------"); + let actual_err = e.to_string(); + if !actual_err.contains(expected_err) { + mismatches.push(format!( + "Error mismatch: actual '{actual_err}', expected contains '{expected_err}'" + )); + } + } + (Err(_), ExpectedResult::Success(_)) => { + mismatches.push("Expected success but got failure".to_string()); + } + } + assert!(mismatches.is_empty(), "Mismatches: {mismatches:?}"); + } + + /// Construct a NakamotoBlock from the test vector. + fn construct_nakamoto_block( + &self, + txs: Vec, + state_index_root: TrieHash, + ) -> (NakamotoBlock, usize) { + let chain_tip = NakamotoChainState::get_canonical_block_header( + self.chain.stacks_node.as_ref().unwrap().chainstate.db(), + self.chain.sortdb.as_ref().unwrap(), + ) + .unwrap() + .unwrap(); + let mut block = NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: chain_tip.stacks_block_height + 1, + burn_spent: 17000, + consensus_hash: chain_tip.consensus_hash.clone(), + parent_block_id: chain_tip.index_block_hash(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[]), + state_index_root, + timestamp: 1, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs, + }; + + let tx_merkle_root = { + let txid_vecs: Vec<_> = block + .txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + MerkleTree::::new(&txid_vecs).root() + }; + block.header.tx_merkle_root = tx_merkle_root; + self.chain.miner.sign_nakamoto_block(&mut block); + let mut signers = self.chain.config.test_signers.clone().unwrap_or_default(); + signers.sign_nakamoto_block(&mut block, self.chain.get_reward_cycle()); + let block_len 
= block.serialize_to_vec().len(); + + (block, block_len) + } +} + +/// Test vector struct for `append_block` consensus testing. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsensusTestVector { + /// A hex stacks address and amount pair for populating initial balances + pub initial_balances: HashMap, + /// Desired epoch of chainstate + pub epoch_id: u32, + /// Transaction payloads to stuff into the block + pub payloads: Vec, + /// Expected state root trie hash + pub expected_state_index_root: String, + /// Expected result: success with outputs or failure with error + pub expected_result: ExpectedResult, +} + +/// Enum representing expected result: success with outputs or failure with error +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ExpectedResult { + Success(ExpectedOutputs), + // TODO: should match maybe on actual Error type? + Failure(String), +} + +/// Expected outputs for a successful block append +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ExpectedOutputs { + pub transaction_return_types: Vec, + pub transaction_costs: Vec, + pub total_block_cost: ExecutionCost, + pub marf_hash: String, +} + +fn default_test_vector() -> ConsensusTestVector { + let outputs = ExpectedOutputs { + transaction_return_types: vec![], + transaction_costs: vec![], + total_block_cost: ExecutionCost::ZERO, + marf_hash: "f86c9ceaf2a17a4d9e502af73b6f00f89c18e5b58be501b3840f707f7b372dea".into(), + }; + ConsensusTestVector { + initial_balances: HashMap::new(), + expected_state_index_root: + "6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into(), + epoch_id: StacksEpochId::Epoch30 as u32, + payloads: vec![], + expected_result: ExpectedResult::Success(outputs), + } +} + +fn failing_test_vector() -> ConsensusTestVector { + ConsensusTestVector { + initial_balances: HashMap::new(), + expected_state_index_root: + "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: 
StacksEpochId::Epoch30 as u32, + payloads: vec![], + expected_result: ExpectedResult::Failure("state root mismatch".to_string()), + } +} + +#[test] +fn test_append_empty_block() { + ConsensusTest::new(function_name!(), default_test_vector()).run() +} + +#[test] +fn test_append_state_index_root_mismatch() { + ConsensusTest::new(function_name!(), failing_test_vector()).run() +} diff --git a/stackslib/src/chainstate/tests/mod.rs b/stackslib/src/chainstate/tests/mod.rs index ce38b60bad..c16c1201cf 100644 --- a/stackslib/src/chainstate/tests/mod.rs +++ b/stackslib/src/chainstate/tests/mod.rs @@ -12,6 +12,8 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +pub mod consensus; + use std::fs; use clarity::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 7762c4b68b..2857fba8f7 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -61,6 +61,7 @@ use crate::chainstate::stacks::{ TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, TransactionVersion, }; +use crate::chainstate::tests::{TestChainstate, TestChainstateConfig}; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::{StacksEpoch, StacksEpochExtension}; use crate::net::relay::Relayer; @@ -348,6 +349,223 @@ impl NakamotoBootPlan { } } + /// Make a chsintate and transition it into the Nakamoto epoch. + /// The node needs to be stacking; otherwise, Nakamoto won't activate. 
+ pub fn boot_nakamoto_chainstate( + mut self, + observer: Option<&TestEventObserver>, + ) -> TestChainstate<'_> { + let mut chainstate_config = TestChainstateConfig::new(&self.test_name); + chainstate_config.txindex = self.txindex; + chainstate_config.network_id = self.network_id; + + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&self.private_key)], + ) + .unwrap(); + + // reward cycles are 5 blocks long + // first 25 blocks are boot-up + // reward cycle 6 instantiates pox-3 + // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation + chainstate_config.epochs = Some(StacksEpoch::unit_test_3_0_only( + (self.pox_constants.pox_4_activation_height + + self.pox_constants.reward_cycle_length + + 1) + .into(), + )); + chainstate_config.initial_balances = vec![]; + if self.add_default_balance { + chainstate_config + .initial_balances + .push((addr.to_account_principal(), 1_000_000_000_000_000_000)); + } + chainstate_config + .initial_balances + .append(&mut self.initial_balances.clone()); + + // Create some balances for test Stackers + // They need their stacking amount + enough to pay fees + let fee_payment_balance = 10_000; + let stacker_balances = self.test_stackers.iter().map(|test_stacker| { + ( + PrincipalData::from(key_to_stacks_addr(&test_stacker.stacker_private_key)), + u64::try_from(test_stacker.amount).expect("Stacking amount too large"), + ) + }); + let signer_balances = self.test_stackers.iter().map(|test_stacker| { + ( + PrincipalData::from(key_to_stacks_addr(&test_stacker.signer_private_key)), + fee_payment_balance, + ) + }); + + chainstate_config.initial_balances.extend(stacker_balances); + chainstate_config.initial_balances.extend(signer_balances); + chainstate_config.test_signers = Some(self.test_signers.clone()); + chainstate_config.test_stackers = Some(self.test_stackers.clone()); + 
chainstate_config.burnchain.pox_constants = self.pox_constants.clone(); + let mut chain = TestChainstate::new_with_observer(chainstate_config.clone(), observer); + + chain.mine_malleablized_blocks = self.malleablized_blocks; + + self.advance_to_nakamoto_chainstate(&mut chain); + chain + } + + /// Bring a TestPeer into the Nakamoto Epoch + fn advance_to_nakamoto_chainstate(&mut self, chain: &mut TestChainstate) { + let mut chain_nonce = 0; + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&self.private_key)); + let default_pox_addr = + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes().clone()); + + let mut sortition_height = chain.get_burn_block_height(); + debug!("\n\n======================"); + debug!( + "PoxConstants = {:#?}", + &chain.config.burnchain.pox_constants + ); + debug!("tip = {sortition_height}"); + debug!("========================\n\n"); + + let epoch_25_height = chain + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch25) + .unwrap() + .start_height; + + let epoch_30_height = chain + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch30) + .unwrap() + .start_height; + + // advance to just past pox-4 instantiation + let mut blocks_produced = false; + while sortition_height <= epoch_25_height { + chain.tenure_with_txs(&[], &mut chain_nonce); + sortition_height = chain.get_burn_block_height(); + blocks_produced = true; + } + + // need to produce at least 1 block before making pox-4 lockups: + // the way `burn-block-height` constant works in Epoch 2.5 is such + // that if its the first block produced, this will be 0 which will + // prevent the lockups from being valid. 
+ if !blocks_produced { + chain.tenure_with_txs(&[], &mut chain_nonce); + sortition_height = chain.get_burn_block_height(); + } + + debug!("\n\n======================"); + debug!("Make PoX-4 lockups"); + debug!("========================\n\n"); + + let reward_cycle = chain + .config + .burnchain + .block_height_to_reward_cycle(sortition_height) + .unwrap(); + + // Make all the test Stackers stack + let stack_txs: Vec<_> = chain + .config + .test_stackers + .clone() + .unwrap_or_default() + .iter() + .map(|test_stacker| { + let pox_addr = test_stacker + .pox_addr + .clone() + .unwrap_or(default_pox_addr.clone()); + let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &test_stacker.signer_private_key, + reward_cycle.into(), + &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, + chain.config.network_id, + 12, + max_amount, + 1, + ) + .unwrap() + .to_rsv(); + make_pox_4_lockup_chain_id( + &test_stacker.stacker_private_key, + 0, + test_stacker.amount, + &pox_addr, + 12, + &StacksPublicKey::from_private(&test_stacker.signer_private_key), + sortition_height + 1, + Some(signature), + max_amount, + 1, + chain.config.network_id, + ) + }) + .collect(); + + let mut stacks_block = chain.tenure_with_txs(&stack_txs, &mut chain_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(stacks_block, stacks_tip); + + debug!("\n\n======================"); + debug!("Advance to the Prepare Phase"); + debug!("========================\n\n"); + while !chain.config.burnchain.is_in_prepare_phase(sortition_height) { + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); + let old_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + stacks_block = 
chain.tenure_with_txs(&[], &mut chain_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_ne!(old_tip, stacks_tip); + sortition_height = chain.get_burn_block_height(); + } + + debug!("\n\n======================"); + debug!("Advance to Epoch 3.0"); + debug!("========================\n\n"); + + // advance to the start of epoch 3.0 + while sortition_height < epoch_30_height - 1 { + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); + let old_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + chain.tenure_with_txs(&[], &mut chain_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_ne!(old_tip, stacks_tip); + sortition_height = chain.get_burn_block_height(); + } + + debug!("\n\n======================"); + debug!("Welcome to Nakamoto!"); + debug!("========================\n\n"); + } + /// Make a peer and transition it into the Nakamoto epoch. /// The node needs to be stacking; otherwise, Nakamoto won't activate. 
fn boot_nakamoto_peers( From 9fd672848cd6c3464677589ea49cbb856dadfcf7 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Wed, 24 Sep 2025 16:46:43 +0200 Subject: [PATCH 42/86] bugfix: enforce bitcoin rpc client to be created only for miner node --- .../burnchains/bitcoin_regtest_controller.rs | 76 ++++++++++++++----- 1 file changed, 56 insertions(+), 20 deletions(-) diff --git a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 5a54d6121c..b1dfcfaabc 100644 --- a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -93,7 +93,7 @@ pub struct BitcoinRegtestController { burnchain_config: Option, ongoing_block_commit: Option, should_keep_running: Option>, - rpc_client: BitcoinRpcClient, + rpc_client: Option, } #[derive(Clone)] @@ -371,8 +371,7 @@ impl BitcoinRegtestController { should_keep_running: should_keep_running.clone(), }; - let rpc_client = BitcoinRpcClient::from_stx_config(&config) - .expect("unable to instantiate the RPC client!"); + let rpc_client = Self::try_create_rpc_client(&config); Self { use_coordinator: coordinator_channel, @@ -421,8 +420,7 @@ impl BitcoinRegtestController { should_keep_running: None, }; - let rpc_client = BitcoinRpcClient::from_stx_config(&config) - .expect("unable to instantiate the RPC client!"); + let rpc_client = Self::try_create_rpc_client(&config); Self { use_coordinator: None, @@ -477,6 +475,36 @@ impl BitcoinRegtestController { } } + /// Attempt to create a new [`BitcoinRpcClient`] from the given [`Config`]. + /// + /// If the provided config indicates that the node is a **miner**, + /// tries to instantiate it or **panics** otherwise. + /// If the node is **not** a miner, returns None (e.g. follower node). 
+ fn try_create_rpc_client(config: &Config) -> Option { + if config.node.miner { + Some( + BitcoinRpcClient::from_stx_config(&config) + .expect("unable to instantiate the RPC client!"), + ) + } else { + None + } + } + + /// Attempt to get a reference to the underlying [`BitcoinRpcClient`]. + /// + /// This function will panic if the RPC client has not been configured + /// (i.e. [`Self::try_create_rpc_client`] returned `None` during initialization), + /// but an attempt is made to use it anyway. + /// + /// In practice, this means the node is expected to act as a miner, + /// yet no [`BitcoinRpcClient`] was created or properly configured. + fn try_get_rpc_client(&self) -> &BitcoinRpcClient { + self.rpc_client + .as_ref() + .expect("BUG: BitcoinRpcClient is required, but it has not been configured properly!") + } + /// Helium (devnet) blocks receiver. Returns the new burnchain tip. fn receive_blocks_helium(&mut self) -> BurnchainTip { let mut burnchain = self.get_burnchain(); @@ -686,7 +714,7 @@ impl BitcoinRegtestController { /// Retrieve all loaded wallets. pub fn list_wallets(&self) -> BitcoinRegtestControllerResult> { - Ok(self.rpc_client.list_wallets()?) + Ok(self.try_get_rpc_client().list_wallets()?) } /// Checks if the config-supplied wallet exists. @@ -695,7 +723,8 @@ impl BitcoinRegtestController { let wallets = self.list_wallets()?; let wallet = self.get_wallet_name(); if !wallets.contains(wallet) { - self.rpc_client.create_wallet(wallet, Some(true))? + self.try_get_rpc_client() + .create_wallet(wallet, Some(true))? 
} Ok(()) } @@ -1861,7 +1890,7 @@ impl BitcoinRegtestController { const UNCAPPED_FEE: f64 = 0.0; const MAX_BURN_AMOUNT: u64 = 1_000_000; - self.rpc_client + self.try_get_rpc_client() .send_raw_transaction(tx, Some(UNCAPPED_FEE), Some(MAX_BURN_AMOUNT)) .map(|txid| { debug!("Transaction {txid} sent successfully"); @@ -1933,7 +1962,9 @@ impl BitcoinRegtestController { .expect("FATAL: invalid public key bytes"); let address = self.get_miner_address(StacksEpochId::Epoch21, &public_key); - let result = self.rpc_client.generate_to_address(num_blocks, &address); + let result = self + .try_get_rpc_client() + .generate_to_address(num_blocks, &address); /* Temporary: not using `BitcoinRpcClientResultExt::ok_or_log_panic` (test code related), because we need this logic available outside `#[cfg(test)]` due to Helium network. @@ -1966,7 +1997,7 @@ impl BitcoinRegtestController { .expect("FATAL: invalid public key bytes"); let address = self.get_miner_address(StacksEpochId::Epoch21, &public_key); - self.rpc_client + self.try_get_rpc_client() .generate_block(&address, &[]) .ok_or_log_panic("generating block") } @@ -1975,7 +2006,7 @@ impl BitcoinRegtestController { #[cfg(test)] pub fn invalidate_block(&self, block: &BurnchainHeaderHash) { info!("Invalidating block {block}"); - self.rpc_client + self.try_get_rpc_client() .invalidate_block(block) .ok_or_log_panic("invalidate block") } @@ -1983,7 +2014,7 @@ impl BitcoinRegtestController { /// Retrieve the hash (as a [`BurnchainHeaderHash`]) of the block at the given height. 
#[cfg(test)] pub fn get_block_hash(&self, height: u64) -> BurnchainHeaderHash { - self.rpc_client + self.try_get_rpc_client() .get_block_hash(height) .unwrap_or_log_panic("retrieve block") } @@ -2041,7 +2072,7 @@ impl BitcoinRegtestController { /// Retrieves a raw [`Transaction`] by its [`Txid`] #[cfg(test)] pub fn get_raw_transaction(&self, txid: &Txid) -> Transaction { - self.rpc_client + self.try_get_rpc_client() .get_raw_transaction(txid) .unwrap_or_log_panic("retrieve raw tx") } @@ -2069,7 +2100,7 @@ impl BitcoinRegtestController { "Generate to address '{address}' for public key '{}'", &pks[0].to_hex() ); - self.rpc_client + self.try_get_rpc_client() .generate_to_address(num_blocks, &address) .ok_or_log_panic("generating block"); return; @@ -2087,7 +2118,7 @@ impl BitcoinRegtestController { &pk.to_hex(), ); } - self.rpc_client + self.try_get_rpc_client() .generate_to_address(1, &address) .ok_or_log_panic("generating block"); } @@ -2105,7 +2136,7 @@ impl BitcoinRegtestController { /// * `false` if the transaction is unconfirmed or could not be found. 
pub fn is_transaction_confirmed(&self, txid: &Txid) -> bool { match self - .rpc_client + .try_get_rpc_client() .get_transaction(self.get_wallet_name(), txid) { Ok(info) => info.confirmations > 0, @@ -2158,7 +2189,7 @@ impl BitcoinRegtestController { ); let descriptor = format!("addr({address})"); - let info = self.rpc_client.get_descriptor_info(&descriptor)?; + let info = self.try_get_rpc_client().get_descriptor_info(&descriptor)?; let descr_req = ImportDescriptorsRequest { descriptor: format!("addr({address})#{}", info.checksum), @@ -2166,7 +2197,7 @@ impl BitcoinRegtestController { internal: Some(true), }; - self.rpc_client + self.try_get_rpc_client() .import_descriptors(self.get_wallet_name(), &[&descr_req])?; } Ok(()) @@ -2227,11 +2258,11 @@ impl BitcoinRegtestController { utxos_to_exclude: &Option, block_height: u64, ) -> BitcoinRpcClientResult { - let bhh = self.rpc_client.get_block_hash(block_height)?; + let bhh = self.try_get_rpc_client().get_block_hash(block_height)?; const MIN_CONFIRMATIONS: u64 = 0; const MAX_CONFIRMATIONS: u64 = 9_999_999; - let unspents = self.rpc_client.list_unspent( + let unspents = self.try_get_rpc_client().list_unspent( &self.get_wallet_name(), Some(MIN_CONFIRMATIONS), Some(MAX_CONFIRMATIONS), @@ -2446,6 +2477,7 @@ mod tests { pub fn create_config() -> Config { let mut config = Config::default(); + config.node.miner = true; config.burnchain.magic_bytes = "T3".as_bytes().into(); config.burnchain.username = Some(String::from("user")); config.burnchain.password = Some(String::from("12345")); @@ -2964,6 +2996,10 @@ mod tests { #[test] #[ignore] fn test_create_wallet_from_custom_name() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + let mut config = utils::create_config(); config.burnchain.wallet_name = String::from("mywallet"); From b7492a222a543530049a2444e9511f98e1aeb9d5 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 25 Sep 2025 08:04:07 +0200 Subject: [PATCH 43/86] test: add unit-tests to nail btc 
rpc client creation behaviour based on node type --- .../burnchains/bitcoin_regtest_controller.rs | 168 ++++++++++++++---- 1 file changed, 133 insertions(+), 35 deletions(-) diff --git a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index b1dfcfaabc..47ea1b00c0 100644 --- a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -93,6 +93,9 @@ pub struct BitcoinRegtestController { burnchain_config: Option, ongoing_block_commit: Option, should_keep_running: Option>, + /// Optional Bitcoin RPC client used to interact with a `bitcoind` node. + /// - For **miner** node this field must be always `Some`. + /// - For **other** node (e.g. follower node), this field is `None`. rpc_client: Option, } @@ -2451,6 +2454,7 @@ mod tests { use std::env::{self, temp_dir}; use std::fs::File; use std::io::Write; + use std::panic::{self, AssertUnwindSafe}; use stacks::burnchains::BurnchainSigner; use stacks::config::DEFAULT_SATS_PER_VB; @@ -2461,7 +2465,9 @@ mod tests { use super::*; use crate::burnchains::bitcoin::core_controller::BitcoinCoreController; - use crate::burnchains::bitcoin_regtest_controller::tests::utils::to_address_legacy; + use crate::burnchains::bitcoin_regtest_controller::tests::utils::{ + create_follower_config, create_miner_config, to_address_legacy, + }; use crate::Keychain; mod utils { @@ -2475,7 +2481,7 @@ mod tests { use crate::burnchains::bitcoin::core_controller::BURNCHAIN_CONFIG_PEER_PORT_DISABLED; use crate::util::get_epoch_time_nanos; - pub fn create_config() -> Config { + pub fn create_miner_config() -> Config { let mut config = Config::default(); config.node.miner = true; config.burnchain.magic_bytes = "T3".as_bytes().into(); @@ -2715,6 +2721,18 @@ mod tests { burn_header_hash: BurnchainHeaderHash([0u8; 32]), } } + + pub fn create_follower_config() -> Config { + let mut config = Config::default(); + config.node.miner = 
false; + config.burnchain.magic_bytes = "T3".as_bytes().into(); + config.burnchain.username = None; + config.burnchain.password = None; + config.burnchain.peer_host = String::from("127.0.0.1"); + config.burnchain.peer_port = 8333; + config.node.working_dir = format!("/tmp/follower"); + config + } } #[test] @@ -2770,7 +2788,7 @@ mod tests { ]; // test serialize_tx() - let config = utils::create_config(); + let config = utils::create_miner_config(); let mut btc_controller = BitcoinRegtestController::new(config, None); let mut utxo_set = UTXOSet { @@ -2889,7 +2907,7 @@ mod tests { #[test] fn test_to_epoch_aware_pubkey() { - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); let pubkey = utils::create_miner1_pubkey(); config.miner.segwit = false; @@ -2927,7 +2945,7 @@ mod tests { #[test] fn test_get_miner_address() { - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); let pub_key = utils::create_miner1_pubkey(); config.miner.segwit = false; @@ -2965,6 +2983,86 @@ mod tests { ); } + #[test] + fn test_instantiate_with_burnchain_on_follower_node_ok() { + let config = create_follower_config(); + + let btc_controller = BitcoinRegtestController::with_burnchain(config, None, None, None); + + let result = panic::catch_unwind(AssertUnwindSafe(|| { + _ = btc_controller.try_get_rpc_client(); + })); + assert!( + result.is_err(), + "Invoking any Bitcoin RPC related method should panic." 
+ ); + } + + #[test] + fn test_instantiate_with_burnchain_on_miner_node_ok() { + let config = create_miner_config(); + + let btc_controller = BitcoinRegtestController::with_burnchain(config, None, None, None); + + let _ = btc_controller.try_get_rpc_client(); + assert!(true, "Invoking any Bitcoin RPC related method should work."); + } + + #[test] + fn test_instantiate_with_burnchain_on_miner_node_failure() { + let mut config = create_miner_config(); + config.burnchain.username = None; + config.burnchain.password = None; + + let result = panic::catch_unwind(|| { + _ = BitcoinRegtestController::with_burnchain(config, None, None, None); + }); + assert!( + result.is_err(), + "Bitcoin RPC credentials are mandatory for miner node." + ); + } + + #[test] + fn test_instantiate_new_dummy_on_follower_node_ok() { + let config = create_follower_config(); + + let btc_controller = BitcoinRegtestController::new_dummy(config); + + let result = panic::catch_unwind(AssertUnwindSafe(|| { + _ = btc_controller.try_get_rpc_client(); + })); + assert!( + result.is_err(), + "Invoking any Bitcoin RPC related method should panic." + ); + } + + #[test] + fn test_instantiate_new_dummy_on_miner_node_ok() { + let config = create_miner_config(); + + let btc_controller = BitcoinRegtestController::new_dummy(config); + + let _ = btc_controller.try_get_rpc_client(); + assert!(true, "Invoking any Bitcoin RPC related method should work."); + } + + #[test] + fn test_instantiate_new_dummy_on_miner_node_failure() { + let mut config = create_miner_config(); + config.burnchain.username = None; + config.burnchain.password = None; + + let result = panic::catch_unwind(|| { + _ = BitcoinRegtestController::new_dummy(config); + }); + assert!( + result.is_err(), + "Bitcoin RPC credentials are mandatory for miner node." 
+ ); + } + #[test] #[ignore] fn test_create_wallet_from_default_empty_name() { @@ -2972,7 +3070,7 @@ mod tests { return; } - let config = utils::create_config(); + let config = utils::create_miner_config(); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); btcd_controller @@ -3000,7 +3098,7 @@ mod tests { return; } - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.wallet_name = String::from("mywallet"); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3028,7 +3126,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3056,7 +3154,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3093,7 +3191,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); config.burnchain.max_unspent_utxos = Some(10); @@ -3121,7 +3219,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3162,7 +3260,7 @@ mod tests { let miner1_pubkey = utils::create_miner1_pubkey(); let miner2_pubkey = utils::create_miner2_pubkey(); - let mut config = 
utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner1_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3200,7 +3298,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3248,7 +3346,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3279,7 +3377,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3307,7 +3405,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3343,7 +3441,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3373,7 +3471,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let config = utils::create_config(); + let config = 
utils::create_miner_config(); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); btcd_controller @@ -3402,7 +3500,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let config = utils::create_config(); + let config = utils::create_miner_config(); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); btcd_controller @@ -3436,7 +3534,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.miner.segwit = true; let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3472,7 +3570,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3531,7 +3629,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3580,7 +3678,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3631,7 +3729,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); 
config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3714,7 +3812,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3787,7 +3885,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3832,7 +3930,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3882,7 +3980,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3932,7 +4030,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3968,7 
+4066,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -4008,7 +4106,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -4053,7 +4151,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -4100,7 +4198,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -4133,7 +4231,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -4174,7 +4272,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + 
let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); From 07e44fbc1bd861d10c2ffc4858810f61d4650983 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 25 Sep 2025 08:06:57 +0200 Subject: [PATCH 44/86] crc: rename try_get_rpc_client to get_rpc_client --- .../burnchains/bitcoin_regtest_controller.rs | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 47ea1b00c0..b19a785066 100644 --- a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -502,7 +502,7 @@ impl BitcoinRegtestController { /// /// In practice, this means the node is expected to act as a miner, /// yet no [`BitcoinRpcClient`] was created or properly configured. - fn try_get_rpc_client(&self) -> &BitcoinRpcClient { + fn get_rpc_client(&self) -> &BitcoinRpcClient { self.rpc_client .as_ref() .expect("BUG: BitcoinRpcClient is required, but it has not been configured properly!") @@ -717,7 +717,7 @@ impl BitcoinRegtestController { /// Retrieve all loaded wallets. pub fn list_wallets(&self) -> BitcoinRegtestControllerResult> { - Ok(self.try_get_rpc_client().list_wallets()?) + Ok(self.get_rpc_client().list_wallets()?) } /// Checks if the config-supplied wallet exists. @@ -726,7 +726,7 @@ impl BitcoinRegtestController { let wallets = self.list_wallets()?; let wallet = self.get_wallet_name(); if !wallets.contains(wallet) { - self.try_get_rpc_client() + self.get_rpc_client() .create_wallet(wallet, Some(true))? 
} Ok(()) @@ -1893,7 +1893,7 @@ impl BitcoinRegtestController { const UNCAPPED_FEE: f64 = 0.0; const MAX_BURN_AMOUNT: u64 = 1_000_000; - self.try_get_rpc_client() + self.get_rpc_client() .send_raw_transaction(tx, Some(UNCAPPED_FEE), Some(MAX_BURN_AMOUNT)) .map(|txid| { debug!("Transaction {txid} sent successfully"); @@ -1966,7 +1966,7 @@ impl BitcoinRegtestController { let address = self.get_miner_address(StacksEpochId::Epoch21, &public_key); let result = self - .try_get_rpc_client() + .get_rpc_client() .generate_to_address(num_blocks, &address); /* Temporary: not using `BitcoinRpcClientResultExt::ok_or_log_panic` (test code related), @@ -2000,7 +2000,7 @@ impl BitcoinRegtestController { .expect("FATAL: invalid public key bytes"); let address = self.get_miner_address(StacksEpochId::Epoch21, &public_key); - self.try_get_rpc_client() + self.get_rpc_client() .generate_block(&address, &[]) .ok_or_log_panic("generating block") } @@ -2009,7 +2009,7 @@ impl BitcoinRegtestController { #[cfg(test)] pub fn invalidate_block(&self, block: &BurnchainHeaderHash) { info!("Invalidating block {block}"); - self.try_get_rpc_client() + self.get_rpc_client() .invalidate_block(block) .ok_or_log_panic("invalidate block") } @@ -2017,7 +2017,7 @@ impl BitcoinRegtestController { /// Retrieve the hash (as a [`BurnchainHeaderHash`]) of the block at the given height. 
#[cfg(test)] pub fn get_block_hash(&self, height: u64) -> BurnchainHeaderHash { - self.try_get_rpc_client() + self.get_rpc_client() .get_block_hash(height) .unwrap_or_log_panic("retrieve block") } @@ -2075,7 +2075,7 @@ impl BitcoinRegtestController { /// Retrieves a raw [`Transaction`] by its [`Txid`] #[cfg(test)] pub fn get_raw_transaction(&self, txid: &Txid) -> Transaction { - self.try_get_rpc_client() + self.get_rpc_client() .get_raw_transaction(txid) .unwrap_or_log_panic("retrieve raw tx") } @@ -2103,7 +2103,7 @@ impl BitcoinRegtestController { "Generate to address '{address}' for public key '{}'", &pks[0].to_hex() ); - self.try_get_rpc_client() + self.get_rpc_client() .generate_to_address(num_blocks, &address) .ok_or_log_panic("generating block"); return; @@ -2121,7 +2121,7 @@ impl BitcoinRegtestController { &pk.to_hex(), ); } - self.try_get_rpc_client() + self.get_rpc_client() .generate_to_address(1, &address) .ok_or_log_panic("generating block"); } @@ -2139,7 +2139,7 @@ impl BitcoinRegtestController { /// * `false` if the transaction is unconfirmed or could not be found. 
pub fn is_transaction_confirmed(&self, txid: &Txid) -> bool { match self - .try_get_rpc_client() + .get_rpc_client() .get_transaction(self.get_wallet_name(), txid) { Ok(info) => info.confirmations > 0, @@ -2192,7 +2192,7 @@ impl BitcoinRegtestController { ); let descriptor = format!("addr({address})"); - let info = self.try_get_rpc_client().get_descriptor_info(&descriptor)?; + let info = self.get_rpc_client().get_descriptor_info(&descriptor)?; let descr_req = ImportDescriptorsRequest { descriptor: format!("addr({address})#{}", info.checksum), @@ -2200,7 +2200,7 @@ impl BitcoinRegtestController { internal: Some(true), }; - self.try_get_rpc_client() + self.get_rpc_client() .import_descriptors(self.get_wallet_name(), &[&descr_req])?; } Ok(()) @@ -2261,11 +2261,11 @@ impl BitcoinRegtestController { utxos_to_exclude: &Option, block_height: u64, ) -> BitcoinRpcClientResult { - let bhh = self.try_get_rpc_client().get_block_hash(block_height)?; + let bhh = self.get_rpc_client().get_block_hash(block_height)?; const MIN_CONFIRMATIONS: u64 = 0; const MAX_CONFIRMATIONS: u64 = 9_999_999; - let unspents = self.try_get_rpc_client().list_unspent( + let unspents = self.get_rpc_client().list_unspent( &self.get_wallet_name(), Some(MIN_CONFIRMATIONS), Some(MAX_CONFIRMATIONS), @@ -2990,7 +2990,7 @@ mod tests { let btc_controller = BitcoinRegtestController::with_burnchain(config, None, None, None); let result = panic::catch_unwind(AssertUnwindSafe(|| { - _ = btc_controller.try_get_rpc_client(); + _ = btc_controller.get_rpc_client(); })); assert!( result.is_err(), @@ -3004,7 +3004,7 @@ mod tests { let btc_controller = BitcoinRegtestController::with_burnchain(config, None, None, None); - let _ = btc_controller.try_get_rpc_client(); + let _ = btc_controller.get_rpc_client(); assert!(true, "Invoking any Bitcoin RPC related method should work."); } @@ -3030,7 +3030,7 @@ mod tests { let btc_controller = BitcoinRegtestController::new_dummy(config); let result = 
panic::catch_unwind(AssertUnwindSafe(|| { - _ = btc_controller.try_get_rpc_client(); + _ = btc_controller.get_rpc_client(); })); assert!( result.is_err(), @@ -3044,7 +3044,7 @@ mod tests { let btc_controller = BitcoinRegtestController::new_dummy(config); - let _ = btc_controller.try_get_rpc_client(); + let _ = btc_controller.get_rpc_client(); assert!(true, "Invoking any Bitcoin RPC related method should work."); } From bb536022e224c2fb77d2a43baa40183868b56776 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 25 Sep 2025 08:10:36 +0200 Subject: [PATCH 45/86] crc: rename try_create_rpc_client to create_rpc_client_unchecked --- stacks-node/src/burnchains/bitcoin_regtest_controller.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index b19a785066..9a73f6c7e6 100644 --- a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -374,7 +374,7 @@ impl BitcoinRegtestController { should_keep_running: should_keep_running.clone(), }; - let rpc_client = Self::try_create_rpc_client(&config); + let rpc_client = Self::create_rpc_client_unchecked(&config); Self { use_coordinator: coordinator_channel, @@ -423,7 +423,7 @@ impl BitcoinRegtestController { should_keep_running: None, }; - let rpc_client = Self::try_create_rpc_client(&config); + let rpc_client = Self::create_rpc_client_unchecked(&config); Self { use_coordinator: None, @@ -483,7 +483,7 @@ impl BitcoinRegtestController { /// If the provided config indicates that the node is a **miner**, /// tries to instantiate it or **panics** otherwise. /// If the node is **not** a miner, returns None (e.g. follower node). 
- fn try_create_rpc_client(config: &Config) -> Option { + fn create_rpc_client_unchecked(config: &Config) -> Option { if config.node.miner { Some( BitcoinRpcClient::from_stx_config(&config) @@ -497,7 +497,7 @@ impl BitcoinRegtestController { /// Attempt to get a reference to the underlying [`BitcoinRpcClient`]. /// /// This function will panic if the RPC client has not been configured - /// (i.e. [`Self::try_create_rpc_client`] returned `None` during initialization), + /// (i.e. [`Self::create_rpc_client_unchecked`] returned `None` during initialization), /// but an attempt is made to use it anyway. /// /// In practice, this means the node is expected to act as a miner, From a4310f148b7972015af124fe6144d4bb9d5518c6 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 25 Sep 2025 08:42:37 +0200 Subject: [PATCH 46/86] crc: makes create_rpc_client_unchecked more idiomatic --- .../src/burnchains/bitcoin_regtest_controller.rs | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 9a73f6c7e6..01131b752c 100644 --- a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -484,14 +484,10 @@ impl BitcoinRegtestController { /// tries to instantiate it or **panics** otherwise. /// If the node is **not** a miner, returns None (e.g. follower node). fn create_rpc_client_unchecked(config: &Config) -> Option { - if config.node.miner { - Some( - BitcoinRpcClient::from_stx_config(&config) - .expect("unable to instantiate the RPC client!"), - ) - } else { - None - } + config.node.miner.then_some( + BitcoinRpcClient::from_stx_config(&config) + .expect("unable to instantiate the RPC client for miner node!"), + ) } /// Attempt to get a reference to the underlying [`BitcoinRpcClient`]. 
@@ -726,8 +722,7 @@ impl BitcoinRegtestController { let wallets = self.list_wallets()?; let wallet = self.get_wallet_name(); if !wallets.contains(wallet) { - self.get_rpc_client() - .create_wallet(wallet, Some(true))? + self.get_rpc_client().create_wallet(wallet, Some(true))? } Ok(()) } From ce3c88a1044c045194fccb4ba8b264819f5fc921 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 25 Sep 2025 08:49:09 +0200 Subject: [PATCH 47/86] crc: improve BitcoinRpcClient::from_stx_config documentation --- stacks-node/src/burnchains/rpc/bitcoin_rpc_client/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stacks-node/src/burnchains/rpc/bitcoin_rpc_client/mod.rs b/stacks-node/src/burnchains/rpc/bitcoin_rpc_client/mod.rs index 58eed7e3d0..bef521be80 100644 --- a/stacks-node/src/burnchains/rpc/bitcoin_rpc_client/mod.rs +++ b/stacks-node/src/burnchains/rpc/bitcoin_rpc_client/mod.rs @@ -363,6 +363,10 @@ pub type BitcoinRpcClientResult = Result; impl BitcoinRpcClient { /// Create a [`BitcoinRpcClient`] from Stacks Configuration, mainly using [`stacks::config::BurnchainConfig`] + /// + /// # Notes + /// `username` and `password` configuration are mandatory (`bitcoind` requires authentication for rpc calls), + /// so a [`BitcoinRpcClientError::MissingCredentials`] is returned otherwise, pub fn from_stx_config(config: &Config) -> BitcoinRpcClientResult { let host = config.burnchain.peer_host.clone(); let port = config.burnchain.rpc_port; From 51ee5510baf9e093731a72350b96193fab5e9f5d Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 25 Sep 2025 08:54:05 +0200 Subject: [PATCH 48/86] test: fix test antientropy_integration_test that was using a follower node to try to generate bitcoin blocks --- stacks-node/src/tests/neon_integrations.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/stacks-node/src/tests/neon_integrations.rs b/stacks-node/src/tests/neon_integrations.rs index c2760a595f..b99b546fd2 100644 --- 
a/stacks-node/src/tests/neon_integrations.rs +++ b/stacks-node/src/tests/neon_integrations.rs @@ -6294,9 +6294,10 @@ fn antientropy_integration_test() { let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); let target_height = 3 + (3 * burnchain_config.pox_constants.reward_cycle_length); + let conf_bootstrap_node_threaded = conf_bootstrap_node.clone(); let bootstrap_node_thread = thread::spawn(move || { let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf_bootstrap_node.clone(), + conf_bootstrap_node_threaded.clone(), None, Some(burnchain_config.clone()), None, @@ -6306,7 +6307,7 @@ fn antientropy_integration_test() { eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); + let mut run_loop = neon::RunLoop::new(conf_bootstrap_node_threaded.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); let channel = run_loop.get_coordinator_channel().unwrap(); @@ -6397,7 +6398,7 @@ fn antientropy_integration_test() { ); let btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf_follower_node.clone(), + conf_bootstrap_node.clone(), None, Some(burnchain_config), None, From bafbfdef080687d2eecff2f3380ded6bb4284189 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 25 Sep 2025 09:27:51 +0200 Subject: [PATCH 49/86] crc: makes create_rpc_client_unchecked idiomatic --- stacks-node/src/burnchains/bitcoin_regtest_controller.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 01131b752c..506a75d2d6 100644 --- a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -484,10 +484,10 @@ impl BitcoinRegtestController { /// tries to instantiate it or **panics** otherwise. /// If the node is **not** a miner, returns None (e.g. 
follower node). fn create_rpc_client_unchecked(config: &Config) -> Option { - config.node.miner.then_some( + config.node.miner.then(|| { BitcoinRpcClient::from_stx_config(&config) - .expect("unable to instantiate the RPC client for miner node!"), - ) + .expect("unable to instantiate the RPC client for miner node!") + }) } /// Attempt to get a reference to the underlying [`BitcoinRpcClient`]. From 246510cf1b39bc396a883a7d3c2f9f88d1b4a81d Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 25 Sep 2025 10:21:55 +0200 Subject: [PATCH 50/86] crc: makes try_from code reuse architecture agnostic, #6467 --- clarity-types/src/types/signatures.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs index c30e5b046b..8ab6f96fc6 100644 --- a/clarity-types/src/types/signatures.rs +++ b/clarity-types/src/types/signatures.rs @@ -163,7 +163,7 @@ impl From for u32 { impl TryFrom for BufferLength { type Error = CheckErrors; fn try_from(data: u32) -> Result { - Self::try_from(data as usize) + Self::try_from(data as i128) } } @@ -244,7 +244,7 @@ impl From for u32 { impl TryFrom for StringUTF8Length { type Error = CheckErrors; fn try_from(data: u32) -> Result { - Self::try_from(data as usize) + Self::try_from(data as i128) } } From 8b455aada10659a6038d10924f2a6f6248d46cd2 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 26 Sep 2025 12:08:41 -0700 Subject: [PATCH 51/86] Add a test mismatch structures Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 350 +++++++++++++------- 1 file changed, 228 insertions(+), 122 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 996078e9a6..8d115cf8c9 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -20,24 +20,226 @@ use clarity::types::{Address, StacksEpochId}; use 
clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; use clarity::vm::costs::ExecutionCost; +use clarity::vm::events::StacksTransactionEvent; +use clarity::vm::Value as ClarityValue; use serde::{Deserialize, Serialize}; use stacks_common::bitvec::BitVec; use crate::burnchains::PoxConstants; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; -use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::boot::{RewardSet, RewardSetData}; +use crate::chainstate::stacks::db::StacksEpochReceipt; use crate::chainstate::stacks::{ - StacksTransaction, TenureChangeCause, TransactionAuth, TransactionPayload, TransactionVersion, + Error as ChainstateError, StacksTransaction, TenureChangeCause, TransactionAuth, + TransactionPayload, TransactionVersion, }; use crate::chainstate::tests::TestChainstate; +use crate::clarity_vm::clarity::PreCommitClarityBlock; use crate::net::tests::NakamotoBootPlan; +/// Represents the expected output of a transaction in a test. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ExpectedTransactionOutput { + /// The expected return value of the transaction. + pub return_type: ClarityValue, + /// The expected execution cost of the transaction. + pub cost: ExecutionCost, +} + +/// Represents the expected outputs for a block's transactions. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ExpectedOutputs { + /// The expected outputs for each transaction, in input order. + pub transactions: Vec, + /// The total execution cost of the block. + pub total_block_cost: ExecutionCost, +} + +/// Represents the expected result of a consensus test. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ExpectedResult { + /// The test should succeed with the specified outputs. + Success(ExpectedOutputs), + /// The test should fail with an error containing the specified string. 
+ Failure(String), +} + +/// Defines a test vector for a consensus test, including chainstate setup and expected outcomes. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsensusTestVector { + /// Initial balances for Stacks addresses during chainstate instantiation. + pub initial_balances: HashMap, + /// Hex representation of the MARF hash for block construction. + pub marf_hash: String, + /// The epoch ID for the test environment. + pub epoch_id: u32, + /// Transaction payloads to include in the block, as serialized strings. + pub payloads: Vec, + /// The expected result after appending the constructed block. + pub expected_result: ExpectedResult, +} + +/// Tracks mismatches between actual and expected transaction results. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct TransactionMismatch { + /// The index of the transaction with mismatches. + pub index: u32, + /// Mismatch between actual and expected return types, if any. + pub return_type: Option<(ClarityValue, ClarityValue)>, + /// Mismatch between actual and expected execution costs, if any. + pub cost: Option<(ExecutionCost, ExecutionCost)>, +} + +impl TransactionMismatch { + /// Creates a new `TransactionMismatch` for the given transaction index. + fn new(index: u32) -> Self { + Self { + index, + return_type: None, + cost: None, + } + } + + /// Adds a return type mismatch to the transaction. + fn with_return_type_mismatch(mut self, actual: ClarityValue, expected: ClarityValue) -> Self { + self.return_type = Some((actual, expected)); + self + } + + /// Adds an execution cost mismatch to the transaction. + fn with_cost_mismatch(mut self, actual: ExecutionCost, expected: ExecutionCost) -> Self { + self.cost = Some((actual, expected)); + self + } + + /// Returns true if no mismatches are recorded. + fn is_empty(&self) -> bool { + self.return_type.is_none() && self.cost.is_none() + } +} + +/// Aggregates all mismatches between actual and expected test results. 
+#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)] +pub struct ConsensusMismatch { + /// Mismatches for individual transactions. + pub transactions: Vec, + /// Mismatch between actual and expected total block costs, if any. + pub total_block_cost: Option<(ExecutionCost, ExecutionCost)>, + /// Mismatch between actual and expected error messages, if any. + pub error: Option<(String, String)>, +} + +impl ConsensusMismatch { + /// Creates a `ConsensusMismatch` from test results, if mismatches exist. + pub fn from_test_result<'a>( + append_result: Result< + ( + StacksEpochReceipt, + PreCommitClarityBlock<'a>, + Option, + Vec, + ), + ChainstateError, + >, + expected_result: ExpectedResult, + ) -> Option { + let mut mismatches = ConsensusMismatch::default(); + match (append_result, expected_result) { + (Ok((epoch_receipt, _, _, _)), ExpectedResult::Success(expected)) => { + // Convert transaction receipts to `ExpectedTransactionOutput` for comparison. + let actual_transactions: Vec<_> = epoch_receipt + .tx_receipts + .iter() + .map(|r| { + ( + r.tx_index, + ExpectedTransactionOutput { + return_type: r.result.clone(), + cost: r.execution_cost.clone(), + }, + ) + }) + .collect(); + + // Compare each transaction's actual vs expected outputs. + for ((tx_index, actual_tx), expected_tx) in + actual_transactions.iter().zip(expected.transactions.iter()) + { + let mut tx_mismatch = TransactionMismatch::new(*tx_index); + let mut has_mismatch = false; + + if actual_tx.return_type != expected_tx.return_type { + tx_mismatch = tx_mismatch.with_return_type_mismatch( + actual_tx.return_type.clone(), + expected_tx.return_type.clone(), + ); + has_mismatch = true; + } + + if actual_tx.cost != expected_tx.cost { + tx_mismatch = tx_mismatch + .with_cost_mismatch(actual_tx.cost.clone(), expected_tx.cost.clone()); + has_mismatch = true; + } + + if has_mismatch { + mismatches.add_transaction_mismatch(tx_mismatch); + } + } + + // Compare total block execution cost. 
+ if epoch_receipt.anchored_block_cost != expected.total_block_cost { + mismatches.add_total_block_cost_mismatch( + &epoch_receipt.anchored_block_cost, + &expected.total_block_cost, + ); + } + // TODO: add any additional mismatches we might care about? + } + (Ok(_), ExpectedResult::Failure(expected_err)) => { + mismatches.error = Some(("Ok".to_string(), expected_err)); + } + (Err(actual_err), ExpectedResult::Failure(expected_err)) => { + if !actual_err.to_string().contains(&expected_err) { + mismatches.error = Some((actual_err.to_string(), expected_err)); + } + } + (Err(actual_err), ExpectedResult::Success(_)) => { + mismatches.error = Some((actual_err.to_string(), "Success".into())); + } + } + + if mismatches.is_empty() { + None + } else { + Some(mismatches) + } + } + + /// Adds a transaction mismatch to the collection. + fn add_transaction_mismatch(&mut self, mismatch: TransactionMismatch) { + self.transactions.push(mismatch); + } + + /// Records a total block cost mismatch. + fn add_total_block_cost_mismatch(&mut self, actual: &ExecutionCost, expected: &ExecutionCost) { + self.total_block_cost = Some((actual.clone(), expected.clone())); + } + + /// Returns true if no mismatches are recorded. + pub fn is_empty(&self) -> bool { + self.transactions.is_empty() && self.total_block_cost.is_none() && self.error.is_none() + } +} + +/// Represents a consensus test with chainstate and test vector. pub struct ConsensusTest<'a> { pub chain: TestChainstate<'a>, pub test_vector: ConsensusTestVector, } impl ConsensusTest<'_> { + /// Creates a new `ConsensusTest` with the given test name and vector. 
pub fn new(test_name: &str, test_vector: ConsensusTestVector) -> Self { let privk = StacksPrivateKey::from_hex( "510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101", @@ -82,13 +284,13 @@ impl ConsensusTest<'_> { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => { - unimplemented!("Not bothering with pre nakamoto tests."); + unimplemented!("Pre-Nakamoto epochs are not supported."); } }; Self { chain, test_vector } } - /// Run a single test vector, validating consensus. + /// Runs the consensus test, validating the results against the expected outcome. pub fn run(mut self) { debug!("--------- Running test vector ---------"); let txs: Vec<_> = self @@ -105,11 +307,9 @@ impl ConsensusTest<'_> { }) .collect(); - let expected_state_index_root = - TrieHash::from_hex(&self.test_vector.expected_state_index_root).unwrap(); + let marf_hash = TrieHash::from_hex(&self.test_vector.marf_hash).unwrap(); - let (block, block_size) = self.construct_nakamoto_block(txs, expected_state_index_root); - let test_vector = self.test_vector.clone(); + let (block, block_size) = self.construct_nakamoto_block(txs, marf_hash); let mut stacks_node = self.chain.stacks_node.take().unwrap(); let sortdb = self.chain.sortdb.take().unwrap(); @@ -143,80 +343,18 @@ impl ConsensusTest<'_> { false, ); - let mut mismatches = Vec::new(); - - match (&result, &test_vector.expected_result) { - (Ok((epoch_receipt, _, _, tx_events)), ExpectedResult::Success(expected_outputs)) => { - debug!("--------- Appended Block ---------"; - "epoch_receipt" => ?epoch_receipt, - "tx_events" => ?tx_events - ); - - let actual_results = ExpectedOutputs { - transaction_return_types: epoch_receipt - .tx_receipts - .iter() - .map(|r| serde_json::to_string(&r.result).unwrap()) - .collect(), - transaction_costs: epoch_receipt - .tx_receipts - .iter() - .map(|r| r.execution_cost.clone()) - .collect(), - total_block_cost: epoch_receipt.anchored_block_cost.clone(), - marf_hash: 
epoch_receipt.header.index_root.to_hex(), - }; - - if actual_results != *expected_outputs { - if actual_results.transaction_return_types - != expected_outputs.transaction_return_types - { - mismatches.push(format!( - "Tx return types mismatch: actual {:?}, expected {:?}", - actual_results.transaction_return_types, - expected_outputs.transaction_return_types - )); - } - if actual_results.transaction_costs != expected_outputs.transaction_costs { - mismatches.push(format!( - "Tx costs mismatch: actual {:?}, expected {:?}", - actual_results.transaction_costs, expected_outputs.transaction_costs - )); - } - if actual_results.total_block_cost != expected_outputs.total_block_cost { - mismatches.push(format!( - "Total block cost mismatch: actual {:?}, expected {:?}", - actual_results.total_block_cost, expected_outputs.total_block_cost - )); - } - if actual_results.marf_hash != expected_outputs.marf_hash { - mismatches.push(format!( - "MARF hash mismatch: actual {}, expected {}", - actual_results.marf_hash, expected_outputs.marf_hash - )); - } - } - } - (Ok(_), ExpectedResult::Failure(_)) => { - mismatches.push("Expected failure but got success".to_string()); - } - (Err(e), ExpectedResult::Failure(expected_err)) => { - debug!("--------- Block Errored: {e} ---------"); - let actual_err = e.to_string(); - if !actual_err.contains(expected_err) { - mismatches.push(format!( - "Error mismatch: actual '{actual_err}', expected contains '{expected_err}'" - )); - } - } - (Err(_), ExpectedResult::Success(_)) => { - mismatches.push("Expected success but got failure".to_string()); - } - } - assert!(mismatches.is_empty(), "Mismatches: {mismatches:?}"); + debug!("--------- Appended block: {} ---------", result.is_ok()); + // Compare actual vs expected results. 
+ let mismatches = + ConsensusMismatch::from_test_result(result, self.test_vector.expected_result); + let mismatch_str = mismatches + .as_ref() + .map(|m| serde_json::to_string_pretty(m).unwrap()) + .unwrap_or("".into()); + assert!(mismatches.is_none(), "Mismatches found: {mismatch_str}"); } - /// Construct a NakamotoBlock from the test vector. + /// Constructs a Nakamoto block with the given transactions and state index root. fn construct_nakamoto_block( &self, txs: Vec, @@ -228,6 +366,9 @@ impl ConsensusTest<'_> { ) .unwrap() .unwrap(); + + let cycle = self.chain.get_reward_cycle(); + let mut block = NakamotoBlock { header: NakamotoBlockHeader { version: 1, @@ -251,73 +392,38 @@ impl ConsensusTest<'_> { .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); - MerkleTree::::new(&txid_vecs).root() }; + block.header.tx_merkle_root = tx_merkle_root; self.chain.miner.sign_nakamoto_block(&mut block); let mut signers = self.chain.config.test_signers.clone().unwrap_or_default(); - signers.sign_nakamoto_block(&mut block, self.chain.get_reward_cycle()); + signers.sign_nakamoto_block(&mut block, cycle); let block_len = block.serialize_to_vec().len(); - (block, block_len) } } -/// Test vector struct for `append_block` consensus testing. 
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct ConsensusTestVector { - /// A hex stacks address and amount pair for populating initial balances - pub initial_balances: HashMap, - /// Desired epoch of chainstate - pub epoch_id: u32, - /// Transaction payloads to stuff into the block - pub payloads: Vec, - /// Expected state root trie hash - pub expected_state_index_root: String, - /// Expected result: success with outputs or failure with error - pub expected_result: ExpectedResult, -} - -/// Enum representing expected result: success with outputs or failure with error -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub enum ExpectedResult { - Success(ExpectedOutputs), - // TODO: should match maybe on actual Error type? - Failure(String), -} - -/// Expected outputs for a successful block append -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct ExpectedOutputs { - pub transaction_return_types: Vec, - pub transaction_costs: Vec, - pub total_block_cost: ExecutionCost, - pub marf_hash: String, -} - +/// Creates a default test vector with empty transactions and zero cost. fn default_test_vector() -> ConsensusTestVector { let outputs = ExpectedOutputs { - transaction_return_types: vec![], - transaction_costs: vec![], + transactions: vec![], total_block_cost: ExecutionCost::ZERO, - marf_hash: "f86c9ceaf2a17a4d9e502af73b6f00f89c18e5b58be501b3840f707f7b372dea".into(), }; ConsensusTestVector { initial_balances: HashMap::new(), - expected_state_index_root: - "6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into(), + marf_hash: "6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into(), epoch_id: StacksEpochId::Epoch30 as u32, payloads: vec![], expected_result: ExpectedResult::Success(outputs), } } +/// Creates a test vector expecting a failure due to a state root mismatch. 
fn failing_test_vector() -> ConsensusTestVector { ConsensusTestVector { initial_balances: HashMap::new(), - expected_state_index_root: - "0000000000000000000000000000000000000000000000000000000000000000".into(), + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), epoch_id: StacksEpochId::Epoch30 as u32, payloads: vec![], expected_result: ExpectedResult::Failure("state root mismatch".to_string()), From 39a1b4ac169af40e4a4d42488a3de7c7bf96af78 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 26 Sep 2025 13:11:47 -0700 Subject: [PATCH 52/86] CRC: directly compare Error as string, comment cleanup, and rename some struct Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 19 +++++++++++-------- stackslib/src/net/tests/mod.rs | 4 ++-- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 8d115cf8c9..6aa6f8412c 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -46,9 +46,9 @@ pub struct ExpectedTransactionOutput { pub cost: ExecutionCost, } -/// Represents the expected outputs for a block's transactions. +/// Represents the expected outputs for a block's execution. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct ExpectedOutputs { +pub struct ExpectedBlockOutput { /// The expected outputs for each transaction, in input order. pub transactions: Vec, /// The total execution cost of the block. @@ -59,8 +59,10 @@ pub struct ExpectedOutputs { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum ExpectedResult { /// The test should succeed with the specified outputs. - Success(ExpectedOutputs), - /// The test should fail with an error containing the specified string. 
+ Success(ExpectedBlockOutput), + /// The test should fail with an error matching the specified string + /// Cannot match on the exact Error directly as they do not implement + /// Serialize/Deserialize or PartialEq Failure(String), } @@ -200,8 +202,9 @@ impl ConsensusMismatch { mismatches.error = Some(("Ok".to_string(), expected_err)); } (Err(actual_err), ExpectedResult::Failure(expected_err)) => { - if !actual_err.to_string().contains(&expected_err) { - mismatches.error = Some((actual_err.to_string(), expected_err)); + let actual_err_str = actual_err.to_string(); + if actual_err_str != expected_err { + mismatches.error = Some((actual_err_str, expected_err)); } } (Err(actual_err), ExpectedResult::Success(_)) => { @@ -406,7 +409,7 @@ impl ConsensusTest<'_> { /// Creates a default test vector with empty transactions and zero cost. fn default_test_vector() -> ConsensusTestVector { - let outputs = ExpectedOutputs { + let outputs = ExpectedBlockOutput { transactions: vec![], total_block_cost: ExecutionCost::ZERO, }; @@ -426,7 +429,7 @@ fn failing_test_vector() -> ConsensusTestVector { marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), epoch_id: StacksEpochId::Epoch30 as u32, payloads: vec![], - expected_result: ExpectedResult::Failure("state root mismatch".to_string()), + expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block c8eeff18a0b03dec385bfe8268bc87ccf93fc00ff73af600c4e1aaef6e0dfaf5 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into()).to_string()), } } diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 2857fba8f7..9bbb455f35 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -349,7 +349,7 @@ impl NakamotoBootPlan { } } - /// Make a chsintate and transition it into the Nakamoto epoch. 
+ /// Make a chainstate and transition it into the Nakamoto epoch. /// The node needs to be stacking; otherwise, Nakamoto won't activate. pub fn boot_nakamoto_chainstate( mut self, @@ -416,7 +416,7 @@ impl NakamotoBootPlan { chain } - /// Bring a TestPeer into the Nakamoto Epoch + /// Bring a TestChainstate into the Nakamoto Epoch fn advance_to_nakamoto_chainstate(&mut self, chain: &mut TestChainstate) { let mut chain_nonce = 0; let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&self.private_key)); From 8f1f31a5219f1c1e24eaacd1f5111920f492a267 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Sat, 27 Sep 2025 17:57:01 -0700 Subject: [PATCH 53/86] Renaming block-replay script to block-validation - renamed stacks-inspect functions - replay_block -> validate_block - replay_block_nakamoto -> validate_block_nakamoto --- contrib/stacks-inspect/src/lib.rs | 4 +- contrib/stacks-inspect/src/main.rs | 10 +- .../{block-replay.sh => block-validation.sh} | 129 +++++++++--------- 3 files changed, 69 insertions(+), 74 deletions(-) rename contrib/tools/{block-replay.sh => block-validation.sh} (80%) diff --git a/contrib/stacks-inspect/src/lib.rs b/contrib/stacks-inspect/src/lib.rs index 444a016dbd..4557efbba1 100644 --- a/contrib/stacks-inspect/src/lib.rs +++ b/contrib/stacks-inspect/src/lib.rs @@ -120,7 +120,7 @@ pub fn drain_common_opts(argv: &mut Vec, start_at: usize) -> CommonOpts /// /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` -pub fn command_replay_block(argv: &[String], conf: Option<&Config>) { +pub fn command_validate_block(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! 
{ let n = &argv[0]; eprintln!("Usage:"); @@ -202,7 +202,7 @@ pub fn command_replay_block(argv: &[String], conf: Option<&Config>) { /// /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` -pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&Config>) { +pub fn command_validate_block_nakamoto(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); diff --git a/contrib/stacks-inspect/src/main.rs b/contrib/stacks-inspect/src/main.rs index 5743a8fc58..fd8aa98b60 100644 --- a/contrib/stacks-inspect/src/main.rs +++ b/contrib/stacks-inspect/src/main.rs @@ -21,7 +21,7 @@ use clarity::types::StacksEpochId; use clarity::types::chainstate::StacksPrivateKey; use clarity_cli::DEFAULT_CLI_EPOCH; use stacks_inspect::{ - command_contract_hash, command_replay_block, command_replay_block_nakamoto, + command_contract_hash, command_validate_block, command_validate_block_nakamoto, command_replay_mock_mining, command_try_mine, drain_common_opts, }; use stackslib::chainstate::stacks::miner::BlockBuilderSettings; @@ -1586,13 +1586,13 @@ check if the associated microblocks can be downloaded return; } - if argv[1] == "replay-block" { - command_replay_block(&argv[1..], common_opts.config.as_ref()); + if argv[1] == "validate-block" { + command_validate_block(&argv[1..], common_opts.config.as_ref()); process::exit(0); } - if argv[1] == "replay-naka-block" { - command_replay_block_nakamoto(&argv[1..], common_opts.config.as_ref()); + if argv[1] == "validate-naka-block" { + command_validate_block_nakamoto(&argv[1..], common_opts.config.as_ref()); process::exit(0); } diff --git a/contrib/tools/block-replay.sh b/contrib/tools/block-validation.sh similarity index 80% rename from contrib/tools/block-replay.sh rename to contrib/tools/block-validation.sh index 15f2d46843..ccf410c4c7 100755 --- a/contrib/tools/block-replay.sh +++ b/contrib/tools/block-validation.sh @@ -2,33 +2,28 @@ set -o pipefail -## Using 10 cpu 
cores, a full replay will take between 12-14 hours (assuming there are no other cpu/io bound processes running at the same time) +## Using 10 cpu cores, a full validation will take between 12-14 hours (assuming there are no other cpu/io bound processes running at the same time) ## ## ** Recommend to run this script in screen or tmux ** ## -## We'll need ~73GB per slice, plus an extra ~400GB for the chainstate archive and marf DB -## as of 02/2025: -## for 10 slices, this is about 1.1TB -## - 149GB for compressed chainstate -## - 232GB decompressed marf db -## - 73GB per slice dir (1 dir per cpu) -## for 15 slices, this is about 1.46TB -## for 20 slices, this is about 1.8TB +## We'll need ~217GB per slice, plus an extra ~4500GB for the chainstate archive and marf DB +## as of 09/2025: +## for 10 slices, this is about 2.5TB -NETWORK="mainnet" ## network to replay -REPO_DIR="$HOME/stacks-core" ## where to build the source -REMOTE_REPO="stacks-network/stacks-core" ## remote git repo to build stacks-inspect from -SCRATCH_DIR="$HOME/scratch" ## root folder for the replay slices -TIMESTAMP=$(date +%Y-%m-%d-%s) ## use a simple date format year-month-day-epoch -LOG_DIR="$HOME/replay_${TIMESTAMP}" ## location of logfiles for the replay -SLICE_DIR="${SCRATCH_DIR}/slice" ## location of slice dirs -TMUX_SESSION="replay" ## tmux session name to run the replay -TERM_OUT=false ## terminal friendly output -TESTING=false ## only run a replay on a few thousand blocks -BRANCH="develop" ## default branch to build stacks-inspect from -CORES=$(grep -c processor /proc/cpuinfo) ## retrieve total number of CORES on the system -RESERVED=8 ## reserve this many CORES for other processes as default -LOCAL_CHAINSTATE= ## path to local chainstate to use instead of snapshot download +NETWORK="mainnet" ## network to validate +REPO_DIR="$HOME/stacks-core" ## where to build the source +REMOTE_REPO="stacks-network/stacks-core" ## remote git repo to build stacks-inspect from 
+SCRATCH_DIR="$HOME/scratch" ## root folder for the validation slices +TIMESTAMP=$(date +%Y-%m-%d-%s) ## use a simple date format year-month-day-epoch +LOG_DIR="$HOME/block-validation_${TIMESTAMP}" ## location of logfiles for the validation +SLICE_DIR="${SCRATCH_DIR}/slice" ## location of slice dirs +TMUX_SESSION="validation" ## tmux session name to run the validation +TERM_OUT=false ## terminal friendly output +TESTING=false ## only run a validation on a few thousand blocks +BRANCH="develop" ## default branch to build stacks-inspect from +CORES=$(grep -c processor /proc/cpuinfo) ## retrieve total number of CORES on the system +RESERVED=8 ## reserve this many CORES for other processes as default +LOCAL_CHAINSTATE= ## path to local chainstate to use instead of snapshot download ## ansi color codes for terminal output COLRED=$'\033[31m' ## Red @@ -66,15 +61,15 @@ build_stacks_inspect() { } else echo "Cloning stacks-core ${BRANCH}" - (git clone "https://github.com/${REMOTE_REPO}" --branch "${BRANCH}" "${REPO_DIR}" && cd "${REPO_DIR}") || { + (git clone "https://github.com/${REMOTE_REPO}" --branch "${BRANCH}" "${REPO_DIR}" && cd "${REPO_DIR}") || { echo "${COLRED}Error${COLRESET} cloning https://github.com/${REMOTE_REPO} into ${REPO_DIR}" exit 1 } fi git pull - ## build stacks-inspect to: $HOME/stacks-inspect/target/release/stacks-inspect + ## build stacks-inspect to: ${REPO_DIR}/target/release/stacks-inspect echo "Building stacks-inspect binary" - cargo build --bin=stacks-inspect --release || { + cd contrib/stacks-inspect && cargo build --bin=stacks-inspect --release || { echo "${COLRED}Error${COLRESET} building stacks-inspect binary" exit 1 } @@ -82,7 +77,7 @@ build_stacks_inspect() { } ## create the slice dirs from an chainstate archive (symlinking marf.sqlite.blobs), 1 dir per CPU -configure_replay_slices() { +configure_validation_slices() { if [ -d "$HOME/scratch" ]; then echo "Deleting existing scratch dir: ${COLYELLOW}$HOME/scratch${COLRESET}" rm -rf 
"${HOME}/scratch" || { @@ -134,9 +129,9 @@ configure_replay_slices() { } ## setup the tmux sessions and create the logdir for storing output -setup_replay() { +setup_validation() { ## if there is an existing folder, rm it - if [ -d "${LOG_DIR}" ];then + if [ -d "${LOG_DIR}" ];then echo "Removing logdir ${LOG_DIR}" rm -rf "${LOG_DIR}" fi @@ -145,7 +140,7 @@ setup_replay() { echo "Creating logdir ${LOG_DIR}" mkdir -p "${LOG_DIR}" fi - ## if tmux session "replay" exists, kill it and start anew + ## if tmux session "${TMUX_SESSION}" exists, kill it and start anew if eval "tmux list-windows -t ${TMUX_SESSION} &> /dev/null"; then echo "Killing existing tmux session: ${TMUX_SESSION}" eval "tmux kill-session -t ${TMUX_SESSION} &> /dev/null" @@ -165,9 +160,9 @@ setup_replay() { return 0 } -## run the block replay -start_replay() { - local mode=$1 +## run the block validation +start_validation() { + local mode=$1 local total_blocks=0 local starting_block=0 local inspect_command @@ -177,11 +172,11 @@ start_replay() { ## nakamoto blocks echo "Mode: ${COLYELLOW}${mode}${COLRESET}" local log_append="_${mode}" - inspect_command="replay-naka-block" + inspect_command="validate-naka-block" ## get the total number of nakamoto blocks in db total_blocks=$(echo "select count(*) from nakamoto_block_headers" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite) starting_block=0 # for the block counter, start at this block - ## use these values if `--testing` arg is provided (only replay 1_000 blocks) + ## use these values if `--testing` arg is provided (only validate 1_000 blocks) ${TESTING} && total_blocks=301883 ${TESTING} && starting_block=300883 ;; @@ -189,18 +184,18 @@ start_replay() { ## pre-nakamoto blocks echo "Mode: ${COLYELLOW}pre-nakamoto${COLRESET}" local log_append="" - inspect_command="replay-block" + inspect_command="validate-block" ## get the total number of blocks (with orphans) in db total_blocks=$(echo "select count(*) from staging_blocks where orphaned = 0" | sqlite3 
"${SLICE_DIR}"0/chainstate/vm/index.sqlite) starting_block=0 # for the block counter, start at this block - ## use these values if `--testing` arg is provided (only replay 1_000 blocks) Note: 2.5 epoch is at 153106 + ## use these values if `--testing` arg is provided (only validate 1_000 blocks) Note: 2.5 epoch is at 153106 ${TESTING} && total_blocks=153000 ${TESTING} && starting_block=152000 ;; esac - local block_diff=$((total_blocks - starting_block)) ## how many blocks are being replayed - local slices=$((CORES - RESERVED)) ## how many replay slices to use - local slice_blocks=$((block_diff / slices)) ## how many blocks to replay per slice + local block_diff=$((total_blocks - starting_block)) ## how many blocks are being validated + local slices=$((CORES - RESERVED)) ## how many validation slices to use + local slice_blocks=$((block_diff / slices)) ## how many blocks to validate per slice ${TESTING} && echo "${COLRED}Testing: ${TESTING}${COLRESET}" echo "Total blocks: ${COLYELLOW}${total_blocks}${COLRESET}" echo "Staring Block: ${COLYELLOW}$starting_block${COLRESET}" @@ -215,9 +210,9 @@ start_replay() { if [[ "${end_block_count}" -gt "${total_blocks}" ]] || [[ "${slice_counter}" -eq $((slices - 1)) ]]; then end_block_count="${total_blocks}" fi - if [ "${mode}" != "nakamoto" ]; then ## don't create the tmux windows if we're replaying nakamoto blocks (they should already exist). TODO: check if it does exist in case the function call order changes + if [ "${mode}" != "nakamoto" ]; then ## don't create the tmux windows if we're validating nakamoto blocks (they should already exist). 
TODO: check if it does exist in case the function call order changes if [ "${slice_counter}" -gt 0 ];then - tmux new-window -t replay -d -n "slice${slice_counter}" || { + tmux new-window -t "${TMUX_SESSION}" -d -n "slice${slice_counter}" || { echo "${COLRED}Error${COLRESET} creating tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" exit 1 } @@ -226,12 +221,12 @@ start_replay() { local log_file="${LOG_DIR}/slice${slice_counter}${log_append}.log" local log=" | tee -a ${log_file}" local cmd="${REPO_DIR}/target/release/stacks-inspect --config ${REPO_DIR}/stackslib/conf/${NETWORK}-follower-conf.toml ${inspect_command} ${SLICE_DIR}${slice_counter} index-range $start_block_count $end_block_count 2>/dev/null" - echo " Creating tmux window: ${COLGREEN}replay:slice${slice_counter}${COLRESET} :: Blocks: ${COLYELLOW}${start_block_count}-${end_block_count}${COLRESET} || Logging to: ${log_file}" + echo " Creating tmux window: ${COLGREEN}${TMUX_SESSION}:slice${slice_counter}${COLRESET} :: Blocks: ${COLYELLOW}${start_block_count}-${end_block_count}${COLRESET} || Logging to: ${log_file}" echo "Command: ${cmd}" > "${log_file}" ## log the command being run for the slice - echo "Replaying indexed blocks: ${start_block_count}-${end_block_count} (out of ${total_blocks})" >> "${log_file}" - ## send `cmd` to the tmux window where the replay will run + echo "Validating indexed blocks: ${start_block_count}-${end_block_count} (out of ${total_blocks})" >> "${log_file}" + ## send `cmd` to the tmux window where the validation will run tmux send-keys -t "${TMUX_SESSION}:slice${slice_counter}" "${cmd}${log}" Enter || { - echo "${COLRED}Error${COLRESET} sending replay command to tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" + echo "${COLRED}Error${COLRESET} sending stacks-inspect command to tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" exit 1 } ## log the return code as the last line @@ -258,12 +253,12 @@ check_progress() { sleep 1 done echo 
"************************************************************************" - echo "Checking Block Replay status" + echo "Checking Block Validation status" echo -e ' ' while true; do count=$(pgrep -c "stacks-inspect") if [ "${count}" -gt 0 ]; then - ${TERM_OUT} && printf "Block replay processes are currently active [ %s%s%s%s ] ... \b${sp:progress++%${#sp}:1} \033[0K\r" "${COLYELLOW}" "${COLBOLD}" "${count}" "${COLRESET}" + ${TERM_OUT} && printf "Block validation processes are currently active [ %s%s%s%s ] ... \b${sp:progress++%${#sp}:1} \033[0K\r" "${COLYELLOW}" "${COLBOLD}" "${count}" "${COLRESET}" else ${TERM_OUT} && printf "\r\n" break @@ -302,10 +297,10 @@ store_results() { return_code=$(tail -1 "${file}") case ${return_code} in 0) - # block replay ran successfully + # block validation ran successfully ;; 1) - # block replay had some block failures + # block validation had some block failures failed=1 ;; *) @@ -355,10 +350,10 @@ store_results() {
_EOF_ - ## use the $failed var here in case there is a panic, then $failure_count may show zero, but the replay was not successful + ## use the $failed var here in case there is a panic, then $failure_count may show zero, but the validation was not successful if [ ${failed} == "1" ];then output=$(grep -r -h "Failed processing block" slice*.log) - IFS=$'\n' + IFS=$'\n' for line in ${output}; do echo "
${line}
" >> "${results_html}" || { echo "${COLRED}Error${COLRESET} writing failure to: ${results_html}" @@ -382,12 +377,12 @@ usage() { echo " ${COLBOLD}${0}${COLRESET}" echo " ${COLYELLOW}--testing${COLRESET}: only check a small number of blocks" echo " ${COLYELLOW}-t|--terminal${COLRESET}: more terminal friendly output" - echo " ${COLYELLOW}-n|--network${COLRESET}: run block replay against specific network (default: mainnet)" + echo " ${COLYELLOW}-n|--network${COLRESET}: run block validation against specific network (default: mainnet)" echo " ${COLYELLOW}-b|--branch${COLRESET}: branch of stacks-core to build stacks-inspect from (default: develop)" echo " ${COLYELLOW}-c|--chainstate${COLRESET}: local chainstate copy to use instead of downloading a chainstaet snapshot" echo " ${COLYELLOW}-l|--logdir${COLRESET}: use existing log directory" echo " ${COLYELLOW}-r|--reserved${COLRESET}: how many cpu cores to reserve for system tasks" - echo + echo echo " ex: ${COLCYAN}${0} -t -u ${COLRESET}" echo exit 0 @@ -447,11 +442,11 @@ done while [ ${#} -gt 0 ]; do case ${1} in --testing) - # only replay 1_000 blocks + # only validate 1_000 blocks TESTING=true ;; -t|--terminal) - # update terminal with progress (it's just printf to show in real-time that the replays are running) + # update terminal with progress (it's just printf to show in real-time that the validations are running) TERM_OUT=true ;; -n|--network) @@ -490,16 +485,16 @@ while [ ${#} -gt 0 ]; do LOG_DIR="${2}" shift ;; - -r|--RESERVED) + -r|--RESERVED) # reserve this many cpus for the system (default is 10) - if [ "${2}" == "" ]; then + if [ "${2}" == "" ]; then echo "Missing required value for ${1}" fi if ! [[ "$2" =~ ^[0-9]+$ ]]; then echo "ERROR: arg ($2) is not a number." 
>&2 exit 1 fi - RESERVED=${2} + RESERVED=${2} shift ;; -h|--help|--usage) @@ -513,11 +508,11 @@ done ## clear display before starting tput reset -echo "Replay Started: ${COLYELLOW}$(date)${COLRESET}" -build_stacks_inspect ## comment if using an existing chainstate/slice dir (ex: replay was performed already, and a second run is desired) -configure_replay_slices ## comment if using an existing chainstate/slice dir (ex: replay was performed already, and a second run is desired) -setup_replay ## configure logdir and tmux sessions -start_replay ## replay pre-nakamoto blocks (2.x) -start_replay nakamoto ## replay nakamoto blocks -store_results ## store aggregated results of replay -echo "Replay finished: $(date)" +echo "Validation Started: ${COLYELLOW}$(date)${COLRESET}" +build_stacks_inspect ## comment if using an existing chainstate/slice dir (ex: validation was performed already, and a second run is desired) +configure_validation_slices ## comment if using an existing chainstate/slice dir (ex: validation was performed already, and a second run is desired) +setup_validation ## configure logdir and tmux sessions +start_validation ## validate pre-nakamoto blocks (2.x) +start_validation nakamoto ## validate nakamoto blocks +store_results ## store aggregated results of validation +echo "Validation finished: $(date)" From 7683a291da927ac5bf804cee003e071952676b7d Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Sat, 27 Sep 2025 18:00:56 -0700 Subject: [PATCH 54/86] cargo fmt --- contrib/stacks-inspect/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/stacks-inspect/src/main.rs b/contrib/stacks-inspect/src/main.rs index fd8aa98b60..e225149bb9 100644 --- a/contrib/stacks-inspect/src/main.rs +++ b/contrib/stacks-inspect/src/main.rs @@ -21,8 +21,8 @@ use clarity::types::StacksEpochId; use clarity::types::chainstate::StacksPrivateKey; use clarity_cli::DEFAULT_CLI_EPOCH; use stacks_inspect::{ - 
command_contract_hash, command_validate_block, command_validate_block_nakamoto, - command_replay_mock_mining, command_try_mine, drain_common_opts, + command_contract_hash, command_replay_mock_mining, command_try_mine, command_validate_block, + command_validate_block_nakamoto, drain_common_opts, }; use stackslib::chainstate::stacks::miner::BlockBuilderSettings; use stackslib::chainstate::stacks::{ From a8e30cc484455da55930da4bf1632308ffdb6189 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 29 Sep 2025 08:08:19 -0700 Subject: [PATCH 55/86] Update link to block-validation script in release process doc (script renamed) --- docs/release-process.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/release-process.md b/docs/release-process.md index 00928e48a0..0e87ce6013 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -58,7 +58,7 @@ The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/to - A label should be applied to each such issue/PR as `X.Y.Z.A.n-blocker`. -3. Perform a [block-replay](../contrib/tools/block-replay.sh) using an existing chainstate, or sync from genesis +3. Perform a [block-validation](../contrib/tools/block-validation.sh) using an existing chainstate, or sync from genesis 4. Since development is continuing in the `develop` branch, it may be necessary to cherry-pick some commits into the release branch or open a PR against the release branch. 
From bcf459155aa13ed986b6151a9e212ff57dbf2e66 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 29 Sep 2025 08:30:27 -0700 Subject: [PATCH 56/86] Fix spelling error in stdout --- contrib/tools/block-validation.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/tools/block-validation.sh b/contrib/tools/block-validation.sh index ccf410c4c7..397814d0d7 100755 --- a/contrib/tools/block-validation.sh +++ b/contrib/tools/block-validation.sh @@ -198,7 +198,7 @@ start_validation() { local slice_blocks=$((block_diff / slices)) ## how many blocks to validate per slice ${TESTING} && echo "${COLRED}Testing: ${TESTING}${COLRESET}" echo "Total blocks: ${COLYELLOW}${total_blocks}${COLRESET}" - echo "Staring Block: ${COLYELLOW}$starting_block${COLRESET}" + echo "Starting Block: ${COLYELLOW}$starting_block${COLRESET}" echo "Block diff: ${COLYELLOW}$block_diff${COLRESET}" echo "******************************************************" echo "Total slices: ${COLYELLOW}${slices}${COLRESET}" From 28701faadefa58c16f527ec511dd352dd53a1129 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 29 Sep 2025 11:51:38 -0700 Subject: [PATCH 57/86] CRC: add a helper function to pretty print ConsensusMismatch as a JSON string Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 6aa6f8412c..1b5a60ef10 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -233,6 +233,15 @@ impl ConsensusMismatch { pub fn is_empty(&self) -> bool { self.transactions.is_empty() && self.total_block_cost.is_none() && self.error.is_none() } + + /// Serializes the given `ConsensusMismatch` as pretty-printed JSON, + /// or returns an empty string if `None`. 
+ pub fn to_json_string_pretty(mismatch: &Option) -> String { + mismatch + .as_ref() + .map(|m| serde_json::to_string_pretty(m).unwrap()) + .unwrap_or("".into()) + } } /// Represents a consensus test with chainstate and test vector. @@ -350,11 +359,11 @@ impl ConsensusTest<'_> { // Compare actual vs expected results. let mismatches = ConsensusMismatch::from_test_result(result, self.test_vector.expected_result); - let mismatch_str = mismatches - .as_ref() - .map(|m| serde_json::to_string_pretty(m).unwrap()) - .unwrap_or("".into()); - assert!(mismatches.is_none(), "Mismatches found: {mismatch_str}"); + assert!( + mismatches.is_none(), + "Mismatches found: {}", + ConsensusMismatch::to_json_string_pretty(&mismatches) + ); } /// Constructs a Nakamoto block with the given transactions and state index root. From c245ce486838e0c79bb65f92fba1fbf9068e12b3 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 29 Sep 2025 13:11:38 -0700 Subject: [PATCH 58/86] Add a stx transfer test Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 158 ++++++++++++-------- 1 file changed, 97 insertions(+), 61 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 1b5a60ef10..fde17f8a66 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -12,15 +12,16 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::collections::HashMap; - +use clarity::boot_util::boot_code_addr; use clarity::codec::StacksMessageCodec; -use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, TrieHash}; -use clarity::types::{Address, StacksEpochId}; +use clarity::consts::CHAIN_ID_TESTNET; +use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash}; +use clarity::types::StacksEpochId; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::StacksTransactionEvent; +use clarity::vm::types::{PrincipalData, ResponseData}; use clarity::vm::Value as ClarityValue; use serde::{Deserialize, Serialize}; use stacks_common::bitvec::BitVec; @@ -29,14 +30,16 @@ use crate::burnchains::PoxConstants; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::boot::{RewardSet, RewardSetData}; use crate::chainstate::stacks::db::StacksEpochReceipt; -use crate::chainstate::stacks::{ - Error as ChainstateError, StacksTransaction, TenureChangeCause, TransactionAuth, - TransactionPayload, TransactionVersion, -}; +use crate::chainstate::stacks::{Error as ChainstateError, StacksTransaction, TenureChangeCause}; use crate::chainstate::tests::TestChainstate; use crate::clarity_vm::clarity::PreCommitClarityBlock; +use crate::core::test_util::make_stacks_transfer_tx; use crate::net::tests::NakamotoBootPlan; +pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; +pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; +pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; + /// Represents the expected output of a transaction in a test. 
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ExpectedTransactionOutput { @@ -69,14 +72,14 @@ pub enum ExpectedResult { /// Defines a test vector for a consensus test, including chainstate setup and expected outcomes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ConsensusTestVector { - /// Initial balances for Stacks addresses during chainstate instantiation. - pub initial_balances: HashMap, + /// Initial balances for the provided PrincipalData during chainstate instantiation. + pub initial_balances: Vec<(PrincipalData, u64)>, /// Hex representation of the MARF hash for block construction. pub marf_hash: String, /// The epoch ID for the test environment. pub epoch_id: u32, - /// Transaction payloads to include in the block, as serialized strings. - pub payloads: Vec, + /// Transactions to include in the block + pub transactions: Vec, /// The expected result after appending the constructed block. pub expected_result: ExpectedResult, } @@ -253,16 +256,18 @@ pub struct ConsensusTest<'a> { impl ConsensusTest<'_> { /// Creates a new `ConsensusTest` with the given test name and vector. pub fn new(test_name: &str, test_vector: ConsensusTestVector) -> Self { + if let ExpectedResult::Success(output) = &test_vector.expected_result { + assert_eq!( + output.transactions.len(), + test_vector.transactions.len(), + "Test vector is invalid. 
Must specify an expected output per input transaction" + ); + } let privk = StacksPrivateKey::from_hex( "510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101", ) .unwrap(); - let initial_balances = test_vector - .initial_balances - .iter() - .map(|(addr, amount)| (StacksAddress::from_string(addr).unwrap().into(), *amount)) - .collect(); let epoch_id = StacksEpochId::try_from(test_vector.epoch_id).unwrap(); let chain = match epoch_id { StacksEpochId::Epoch30 @@ -271,7 +276,7 @@ impl ConsensusTest<'_> { | StacksEpochId::Epoch33 => { let mut chain = NakamotoBootPlan::new(test_name) .with_pox_constants(10, 3) - .with_initial_balances(initial_balances) + .with_initial_balances(test_vector.initial_balances.clone()) .with_private_key(privk) .boot_nakamoto_chainstate(None); let (burn_ops, mut tenure_change, miner_key) = @@ -305,24 +310,7 @@ impl ConsensusTest<'_> { /// Runs the consensus test, validating the results against the expected outcome. pub fn run(mut self) { debug!("--------- Running test vector ---------"); - let txs: Vec<_> = self - .test_vector - .payloads - .iter() - .map(|payload_str| { - let payload: TransactionPayload = serde_json::from_str(payload_str).unwrap(); - StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&StacksPrivateKey::random()).unwrap(), - payload, - ) - }) - .collect(); - - let marf_hash = TrieHash::from_hex(&self.test_vector.marf_hash).unwrap(); - - let (block, block_size) = self.construct_nakamoto_block(txs, marf_hash); - + let (block, block_size) = self.construct_nakamoto_block(); let mut stacks_node = self.chain.stacks_node.take().unwrap(); let sortdb = self.chain.sortdb.take().unwrap(); let chain_tip = @@ -367,11 +355,8 @@ impl ConsensusTest<'_> { } /// Constructs a Nakamoto block with the given transactions and state index root. 
- fn construct_nakamoto_block( - &self, - txs: Vec, - state_index_root: TrieHash, - ) -> (NakamotoBlock, usize) { + fn construct_nakamoto_block(&self) -> (NakamotoBlock, usize) { + let state_index_root = TrieHash::from_hex(&self.test_vector.marf_hash).unwrap(); let chain_tip = NakamotoChainState::get_canonical_block_header( self.chain.stacks_node.as_ref().unwrap().chainstate.db(), self.chain.sortdb.as_ref().unwrap(), @@ -395,7 +380,7 @@ impl ConsensusTest<'_> { signer_signature: vec![], pox_treatment: BitVec::ones(1).unwrap(), }, - txs, + txs: self.test_vector.transactions.clone(), }; let tx_merkle_root = { @@ -416,38 +401,89 @@ impl ConsensusTest<'_> { } } -/// Creates a default test vector with empty transactions and zero cost. -fn default_test_vector() -> ConsensusTestVector { +#[test] +fn test_append_empty_block() { let outputs = ExpectedBlockOutput { transactions: vec![], total_block_cost: ExecutionCost::ZERO, }; - ConsensusTestVector { - initial_balances: HashMap::new(), + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), marf_hash: "6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into(), epoch_id: StacksEpochId::Epoch30 as u32, - payloads: vec![], + transactions: vec![], expected_result: ExpectedResult::Success(outputs), - } + }; + ConsensusTest::new(function_name!(), test_vector).run() } -/// Creates a test vector expecting a failure due to a state root mismatch. -fn failing_test_vector() -> ConsensusTestVector { - ConsensusTestVector { - initial_balances: HashMap::new(), +#[test] +fn test_append_state_index_root_mismatch() { + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + // An invalid MARF. 
Will result in state root mismatch marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), epoch_id: StacksEpochId::Epoch30 as u32, - payloads: vec![], + transactions: vec![], expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block c8eeff18a0b03dec385bfe8268bc87ccf93fc00ff73af600c4e1aaef6e0dfaf5 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into()).to_string()), - } -} - -#[test] -fn test_append_empty_block() { - ConsensusTest::new(function_name!(), default_test_vector()).run() + }; + ConsensusTest::new(function_name!(), test_vector).run() } #[test] -fn test_append_state_index_root_mismatch() { - ConsensusTest::new(function_name!(), failing_test_vector()).run() +fn test_append_stx_transfers() { + let sender_privks = [ + StacksPrivateKey::from_hex(SK_1).unwrap(), + StacksPrivateKey::from_hex(SK_2).unwrap(), + StacksPrivateKey::from_hex(SK_3).unwrap(), + ]; + let send_amount = 1_000; + let tx_fee = 180; + let mut initial_balances = Vec::new(); + let transactions = sender_privks + .iter() + .map(|sender_privk| { + initial_balances.push(( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender_privk)).into(), + send_amount + tx_fee, + )); + make_stacks_transfer_tx( + sender_privk, + 0, + tx_fee, + CHAIN_ID_TESTNET, + &boot_code_addr(false).into(), + send_amount, + ) + }) + .collect(); + let transfer_result = ExpectedTransactionOutput { + return_type: ClarityValue::Response(ResponseData { + committed: true, + data: Box::new(ClarityValue::Bool(true)), + }), + cost: ExecutionCost { + write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 0, + }, + }; + let outputs = ExpectedBlockOutput { + transactions: vec![ + transfer_result.clone(), + transfer_result.clone(), + transfer_result, + ], + total_block_cost: ExecutionCost::ZERO, + }; + let test_vector = 
ConsensusTestVector { + initial_balances, + marf_hash: "3838b1ae67f108b10ec7a7afb6c2b18e6468be2423d7183ffa2f7824b619b8be".into(), + epoch_id: StacksEpochId::Epoch30 as u32, + transactions, + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() } From b6fc3ff4d7bcf0dd83b7607191d34bfb5f4a382d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 29 Sep 2025 16:03:19 -0700 Subject: [PATCH 59/86] Add a ExpressionStackDepthTooDeep test Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 67 +++++++++++++++++++-- 1 file changed, 63 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index fde17f8a66..f0828e903e 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -19,10 +19,12 @@ use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKe use clarity::types::StacksEpochId; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; +use clarity::vm::ast::errors::{ParseError, ParseErrors}; +use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::StacksTransactionEvent; use clarity::vm::types::{PrincipalData, ResponseData}; -use clarity::vm::Value as ClarityValue; +use clarity::vm::{Value as ClarityValue, MAX_CALL_STACK_DEPTH}; use serde::{Deserialize, Serialize}; use stacks_common::bitvec::BitVec; @@ -32,10 +34,9 @@ use crate::chainstate::stacks::boot::{RewardSet, RewardSetData}; use crate::chainstate::stacks::db::StacksEpochReceipt; use crate::chainstate::stacks::{Error as ChainstateError, StacksTransaction, TenureChangeCause}; use crate::chainstate::tests::TestChainstate; -use crate::clarity_vm::clarity::PreCommitClarityBlock; -use crate::core::test_util::make_stacks_transfer_tx; +use 
crate::clarity_vm::clarity::{Error as ClarityError, PreCommitClarityBlock}; +use crate::core::test_util::{make_contract_publish, make_stacks_transfer_tx}; use crate::net::tests::NakamotoBootPlan; - pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; @@ -487,3 +488,61 @@ fn test_append_stx_transfers() { }; ConsensusTest::new(function_name!(), test_vector).run() } + +#[test] +fn test_append_chainstate_error_expression_stack_depth_too_deep() { + // something just over the limit of the expression depth + let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); + let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); + + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let tx_fee = (tx_exceeds_body.len() * 100) as u64; + let initial_balances = vec![( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), + tx_fee, + )]; + let tx_bytes = make_contract_publish( + &sender_privk, + 0, + tx_fee, + CHAIN_ID_TESTNET, + "test-exceeds", + &tx_exceeds_body, + ); + let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + let transfer_result = ExpectedTransactionOutput { + return_type: ClarityValue::Response(ResponseData { + committed: true, + data: Box::new(ClarityValue::Bool(true)), + }), + cost: ExecutionCost { + write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 0, + }, + }; + let outputs = ExpectedBlockOutput { + transactions: vec![transfer_result], + total_block_cost: ExecutionCost::ZERO, + }; + // TODO: should look into append_block. 
It does weird wrapping of ChainstateError variants inside ChainstateError::StacksInvalidBlock. + let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( + ParseErrors::ExpressionStackDepthTooDeep, + ))); + let msg = format!("Invalid Stacks block 518dfea674b5c4874e025a31e01a522c8269005c0685d12658f0359757de6692: {e:?}"); + let test_vector = ConsensusTestVector { + initial_balances, + // Marf hash doesn't matter. It will fail with ExpressionStackDepthTooDeep + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch30 as u32, + transactions: vec![tx], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(msg).to_string(), + ), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} From 1da0f4a104a6e3bd907467fbd550dfbbbeed0fe9 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 29 Sep 2025 19:40:17 -0700 Subject: [PATCH 60/86] feat: add MultipleMinerTest variant for new miner behavior --- stacks-node/src/tests/signer/v0.rs | 148 +++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) diff --git a/stacks-node/src/tests/signer/v0.rs b/stacks-node/src/tests/signer/v0.rs index 60658306c9..6e35e14822 100644 --- a/stacks-node/src/tests/signer/v0.rs +++ b/stacks-node/src/tests/signer/v0.rs @@ -18855,6 +18855,154 @@ fn signers_treat_signatures_as_precommits() { signer_test.shutdown(); } +#[test] +#[ignore] +/// Scenario: 2 miners, and one winning miner commits to a stale tip. +/// We're verifying that, in this scenario, the tenure is extended, +/// instead of a new one being created (and forking the tip). 
+/// +/// - Miner A wins tenure A +/// - Miner B wins tenure B, with 2 blocks +/// - Miner A wins tenure C, but with a block commit to tip A +/// - We verify that Miner B extends Tenure B +fn tenure_extend_after_stale_commit_different_miner() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let num_txs = 5; + + let mut miners = MultipleMinerTest::new_with_config_modifications( + num_signers, + num_txs, + |signer_config| { + signer_config.block_proposal_timeout = Duration::from_secs(60); + signer_config.first_proposal_burn_block_timing = Duration::from_secs(0); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + ); + + let (conf_1, _) = miners.get_node_configs(); + let (miner_pk_1, _) = miners.get_miner_public_keys(); + let (miner_pkh_1, miner_pkh_2) = miners.get_miner_public_key_hashes(); + + miners.pause_commits_miner_2(); + miners.boot_to_epoch_3(); + + miners.pause_commits_miner_1(); + + let sortdb = conf_1.get_burnchain().open_sortition_db(true).unwrap(); + + miners + .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60) + .unwrap(); + + miners.submit_commit_miner_1(&sortdb); + + info!("------------------------- Miner 1 Wins Tenure A -------------------------"); + miners + .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60) + .unwrap(); + verify_sortition_winner(&sortdb, &miner_pkh_1); + miners.send_and_mine_transfer_tx(60).unwrap(); + let tip_a_height = miners.get_peer_stacks_tip_height(); + let prev_tip = get_chain_info(&conf_1); + + info!("------------------------- Miner 2 Wins Tenure B -------------------------"); + miners.submit_commit_miner_2(&sortdb); + miners 
+ .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60) + .unwrap(); + verify_sortition_winner(&sortdb, &miner_pkh_2); + miners.send_and_mine_transfer_tx(60).unwrap(); + let tip_b_height = miners.get_peer_stacks_tip_height(); + + info!("------------------------- Miner 1 Wins Tenure C with stale commit -------------------------"); + + // We can't use `submit_commit_miner_1` here because we are using the stale view + { + TEST_MINER_COMMIT_TIP.set(Some((prev_tip.pox_consensus, prev_tip.stacks_tip))); + let rl1_commits_before = miners + .signer_test + .running_nodes + .counters + .naka_submitted_commits + .load(Ordering::SeqCst); + + miners + .signer_test + .running_nodes + .counters + .naka_skip_commit_op + .set(false); + + wait_for(30, || { + let commits_after = miners + .signer_test + .running_nodes + .counters + .naka_submitted_commits + .load(Ordering::SeqCst); + let last_commit_tip = miners + .signer_test + .running_nodes + .counters + .naka_submitted_commit_last_stacks_tip + .load(Ordering::SeqCst); + + Ok(commits_after > rl1_commits_before && last_commit_tip == prev_tip.stacks_tip_height) + }) + .expect("Timed out waiting for miner 1 to submit a commit op"); + + miners + .signer_test + .running_nodes + .counters + .naka_skip_commit_op + .set(true); + TEST_MINER_COMMIT_TIP.set(None); + } + + miners + .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 60) + .unwrap(); + verify_sortition_winner(&sortdb, &miner_pkh_1); + + info!( + "------------------------- Miner 1's proposal for C is rejected -------------------------" + ); + let proposed_block = wait_for_block_proposal(60, tip_a_height + 1, &miner_pk_1).unwrap(); + wait_for_block_global_rejection( + 60, + &proposed_block.header.signer_signature_hash(), + num_signers, + ) + .unwrap(); + + let stacks_height_after_rejection = miners.get_peer_stacks_tip_height(); + assert_eq!(stacks_height_after_rejection, tip_b_height); + + info!("------------------------- Miner 2 Extends Tenure B 
-------------------------"); + wait_for_tenure_change_tx(60, TenureChangeCause::Extended, tip_b_height + 1).unwrap(); + + let final_height = miners.get_peer_stacks_tip_height(); + assert_eq!(final_height, tip_b_height + 1); + + miners.shutdown(); +} + #[test] #[ignore] /// Scenario: same miner extends tenure when the block-commit for the next tenure still confirms N-1 From f5cad2138166a1bfd53f9ddad0b5eecf58383f81 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 29 Sep 2025 19:48:22 -0700 Subject: [PATCH 61/86] crc: rename variables, cleaner comment --- stacks-node/src/nakamoto_node/relayer.rs | 21 ++++++++----------- .../src/tests/nakamoto_integrations.rs | 14 ++++++------- 2 files changed, 16 insertions(+), 19 deletions(-) diff --git a/stacks-node/src/nakamoto_node/relayer.rs b/stacks-node/src/nakamoto_node/relayer.rs index b1ca555651..59691d39fa 100644 --- a/stacks-node/src/nakamoto_node/relayer.rs +++ b/stacks-node/src/nakamoto_node/relayer.rs @@ -653,22 +653,19 @@ impl RelayerThread { let canonical_stacks_tip = StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); - let commits_to_tip_tenure = match Self::sortition_commits_to_stacks_tip_tenure( + let commits_to_tip_tenure = Self::sortition_commits_to_stacks_tip_tenure( &mut self.chainstate, &canonical_stacks_tip, &canonical_stacks_snapshot, &sn, - ) { - Ok(b) => b, - Err(e) => { - warn!( - "Relayer: Failed to determine if winning sortition commits to current tenure: {e:?}"; - "sortition_ch" => %sn.consensus_hash, - "stacks_tip_ch" => %canonical_stacks_tip_ch - ); - false - } - }; + ).unwrap_or_else(|e| { + warn!( + "Relayer: Failed to determine if winning sortition commits to current tenure: {e:?}"; + "sortition_ch" => %sn.consensus_hash, + "stacks_tip_ch" => %canonical_stacks_tip_ch + ); + false + }); if !commits_to_tip_tenure { let won_ongoing_tenure_sortition = diff --git a/stacks-node/src/tests/nakamoto_integrations.rs b/stacks-node/src/tests/nakamoto_integrations.rs index 
5e80e8a358..f85149fe5a 100644 --- a/stacks-node/src/tests/nakamoto_integrations.rs +++ b/stacks-node/src/tests/nakamoto_integrations.rs @@ -5495,11 +5495,11 @@ fn forked_tenure_is_ignored() { .get_stacks_blocks_processed(); // We don't expect a block in this tenure, because the miner should instead be building off // of a previous tenure - let block_in_tenure = get_last_block_in_current_tenure(&sortdb, &chainstate).is_none(); + let no_block_in_tenure = get_last_block_in_current_tenure(&sortdb, &chainstate).is_none(); Ok(commits_count > commits_before && blocks_count > blocks_before && blocks_processed > blocks_processed_before - && block_in_tenure) + && no_block_in_tenure) }) .unwrap_or_else(|_| { let commits_count = commits_submitted.load(Ordering::SeqCst); @@ -5508,20 +5508,20 @@ fn forked_tenure_is_ignored() { .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); - let block_in_tenure = get_last_block_in_current_tenure(&sortdb, &chainstate).is_none(); - error!("Tenure C failed to produce a block"; + let no_block_in_tenure = get_last_block_in_current_tenure(&sortdb, &chainstate).is_none(); + error!("Tenure C shouldn't have produced a block"; "commits_count" => commits_count, "commits_before" => commits_before, "blocks_count" => blocks_count, "blocks_before" => blocks_before, "blocks_processed" => blocks_processed, "blocks_processed_before" => blocks_processed_before, - "block_in_tenure" => block_in_tenure, + "no_block_in_tenure" => no_block_in_tenure, ); - panic!("Tenure C failed to produce a block"); + panic!("Tenure C shouldn't have produced a block"); }); - info!("Tenure C produced a block!"); + info!("Tenure C did not produce a block"); let block_tenure_c = get_last_block_in_current_tenure(&sortdb, &chainstate); assert!(block_tenure_c.is_none()); From 8ff1c568b3613df067b8c2e3e4897fd4e2bd34fa Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Tue, 30 Sep 2025 17:45:58 +0100 Subject: [PATCH 62/86] refactor ConsensusTest for insta --- 
.github/workflows/stacks-core-tests.yml | 1 + Cargo.lock | 50 +++ Cargo.toml | 5 + stackslib/Cargo.toml | 1 + stackslib/src/chainstate/tests/consensus.rs | 345 +++++------------- ...pend_block_with_contract_upload_insta.snap | 28 ++ ...nd_block_with_contract_upload_success.snap | 28 ++ ...error_expression_stack_depth_too_deep.snap | 5 + ...sus__append_state_index_root_mismatch.snap | 5 + ...ests__consensus__append_stx_transfers.snap | 54 +++ 10 files changed, 271 insertions(+), 251 deletions(-) create mode 100644 stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_upload_insta.snap create mode 100644 stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_upload_success.snap create mode 100644 stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_chainstate_error_expression_stack_depth_too_deep.snap create mode 100644 stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_state_index_root_mismatch.snap create mode 100644 stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers.snap diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index 32b2e0ed61..56adefa11c 100644 --- a/.github/workflows/stacks-core-tests.yml +++ b/.github/workflows/stacks-core-tests.yml @@ -11,6 +11,7 @@ env: RUST_BACKTRACE: full SEGMENT_DOWNLOAD_TIMEOUT_MINS: 3 TEST_TIMEOUT: 30 + CI: true # Required by insta snapshot tests to run in CI concurrency: group: stacks-core-tests-${{ github.head_ref || github.ref || github.run_id }} diff --git a/Cargo.lock b/Cargo.lock index 1ef9cc74d5..0d33663687 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -665,6 +665,18 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "console" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +dependencies = [ + "encode_unicode", + "lazy_static", + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -868,6 +880,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + [[package]] name = "encoding_rs" version = "0.8.33" @@ -1542,6 +1560,20 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" +[[package]] +name = "insta" +version = "1.41.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e9ffc4d4892617c50a928c52b2961cb5174b6fc6ebf252b2fac9d21955c48b8" +dependencies = [ + "console", + "lazy_static", + "linked-hash-map", + "ron", + "serde", + "similar", +] + [[package]] name = "instant" version = "0.1.12" @@ -1797,6 +1829,12 @@ dependencies = [ "stacks-common 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", ] +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + [[package]] name = "linux-raw-sys" version = "0.3.8" @@ -2580,6 +2618,17 @@ dependencies = [ "libc", ] +[[package]] +name = "ron" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" +dependencies = [ + "base64 0.13.1", + "bitflags 1.3.2", + "serde", +] + [[package]] name = "rstest" version = "0.17.0" @@ -3311,6 +3360,7 @@ dependencies = [ "chrono", "clarity 0.0.1", "ed25519-dalek", + "insta", "lazy_static", "libstackerdb 0.0.1", "mio 0.6.23", diff 
--git a/Cargo.toml b/Cargo.toml index edbaa79db6..26b166c483 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,3 +55,8 @@ lto = "fat" [profile.release-lite] inherits = "release" lto = "thin" + +# faster tests for `insta` https://docs.rs/insta/1.43.2/insta/#optional-faster-runs +[profile.dev.package] +insta.opt-level = 3 +similar.opt-level = 3 diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index 7f8209b582..6d2f85dfd0 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -79,6 +79,7 @@ rlimit = "0.10.2" chrono = "0.4.19" tempfile = "3.3" proptest = { version = "1.6.0", default-features = false, features = ["std"] } +insta = { version = "1.37.0", features = ["ron"] } [features] default = [] diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index f0828e903e..971ef308f2 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -12,6 +12,8 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::cell::LazyCell; + use clarity::boot_util::boot_code_addr; use clarity::codec::StacksMessageCodec; use clarity::consts::CHAIN_ID_TESTNET; @@ -22,25 +24,29 @@ use clarity::util::secp256k1::MessageSignature; use clarity::vm::ast::errors::{ParseError, ParseErrors}; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::costs::ExecutionCost; -use clarity::vm::events::StacksTransactionEvent; -use clarity::vm::types::{PrincipalData, ResponseData}; +use clarity::vm::types::PrincipalData; use clarity::vm::{Value as ClarityValue, MAX_CALL_STACK_DEPTH}; use serde::{Deserialize, Serialize}; use stacks_common::bitvec::BitVec; use crate::burnchains::PoxConstants; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; -use crate::chainstate::stacks::boot::{RewardSet, RewardSetData}; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksEpochReceipt; use crate::chainstate::stacks::{Error as ChainstateError, StacksTransaction, TenureChangeCause}; use crate::chainstate::tests::TestChainstate; -use crate::clarity_vm::clarity::{Error as ClarityError, PreCommitClarityBlock}; +use crate::clarity_vm::clarity::Error as ClarityError; use crate::core::test_util::{make_contract_publish, make_stacks_transfer_tx}; use crate::net::tests::NakamotoBootPlan; pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; +const FAUCET_PRIV_KEY: LazyCell = LazyCell::new(|| { + StacksPrivateKey::from_hex("510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101") + .expect("Failed to parse private key") +}); + /// Represents the expected output of a transaction in a test. 
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ExpectedTransactionOutput { @@ -70,6 +76,29 @@ pub enum ExpectedResult { Failure(String), } +impl From> for ExpectedResult { + fn from(result: Result) -> Self { + match result { + Ok(epoch_receipt) => { + let transactions: Vec = epoch_receipt + .tx_receipts + .iter() + .map(|r| ExpectedTransactionOutput { + return_type: r.result.clone(), + cost: r.execution_cost.clone(), + }) + .collect(); + let total_block_cost = epoch_receipt.anchored_block_cost.clone(); + ExpectedResult::Success(ExpectedBlockOutput { + transactions, + total_block_cost, + }) + } + Err(e) => ExpectedResult::Failure(e.to_string()), + } + } +} + /// Defines a test vector for a consensus test, including chainstate setup and expected outcomes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ConsensusTestVector { @@ -81,171 +110,6 @@ pub struct ConsensusTestVector { pub epoch_id: u32, /// Transactions to include in the block pub transactions: Vec, - /// The expected result after appending the constructed block. - pub expected_result: ExpectedResult, -} - -/// Tracks mismatches between actual and expected transaction results. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct TransactionMismatch { - /// The index of the transaction with mismatches. - pub index: u32, - /// Mismatch between actual and expected return types, if any. - pub return_type: Option<(ClarityValue, ClarityValue)>, - /// Mismatch between actual and expected execution costs, if any. - pub cost: Option<(ExecutionCost, ExecutionCost)>, -} - -impl TransactionMismatch { - /// Creates a new `TransactionMismatch` for the given transaction index. - fn new(index: u32) -> Self { - Self { - index, - return_type: None, - cost: None, - } - } - - /// Adds a return type mismatch to the transaction. 
- fn with_return_type_mismatch(mut self, actual: ClarityValue, expected: ClarityValue) -> Self { - self.return_type = Some((actual, expected)); - self - } - - /// Adds an execution cost mismatch to the transaction. - fn with_cost_mismatch(mut self, actual: ExecutionCost, expected: ExecutionCost) -> Self { - self.cost = Some((actual, expected)); - self - } - - /// Returns true if no mismatches are recorded. - fn is_empty(&self) -> bool { - self.return_type.is_none() && self.cost.is_none() - } -} - -/// Aggregates all mismatches between actual and expected test results. -#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)] -pub struct ConsensusMismatch { - /// Mismatches for individual transactions. - pub transactions: Vec, - /// Mismatch between actual and expected total block costs, if any. - pub total_block_cost: Option<(ExecutionCost, ExecutionCost)>, - /// Mismatch between actual and expected error messages, if any. - pub error: Option<(String, String)>, -} - -impl ConsensusMismatch { - /// Creates a `ConsensusMismatch` from test results, if mismatches exist. - pub fn from_test_result<'a>( - append_result: Result< - ( - StacksEpochReceipt, - PreCommitClarityBlock<'a>, - Option, - Vec, - ), - ChainstateError, - >, - expected_result: ExpectedResult, - ) -> Option { - let mut mismatches = ConsensusMismatch::default(); - match (append_result, expected_result) { - (Ok((epoch_receipt, _, _, _)), ExpectedResult::Success(expected)) => { - // Convert transaction receipts to `ExpectedTransactionOutput` for comparison. - let actual_transactions: Vec<_> = epoch_receipt - .tx_receipts - .iter() - .map(|r| { - ( - r.tx_index, - ExpectedTransactionOutput { - return_type: r.result.clone(), - cost: r.execution_cost.clone(), - }, - ) - }) - .collect(); - - // Compare each transaction's actual vs expected outputs. 
- for ((tx_index, actual_tx), expected_tx) in - actual_transactions.iter().zip(expected.transactions.iter()) - { - let mut tx_mismatch = TransactionMismatch::new(*tx_index); - let mut has_mismatch = false; - - if actual_tx.return_type != expected_tx.return_type { - tx_mismatch = tx_mismatch.with_return_type_mismatch( - actual_tx.return_type.clone(), - expected_tx.return_type.clone(), - ); - has_mismatch = true; - } - - if actual_tx.cost != expected_tx.cost { - tx_mismatch = tx_mismatch - .with_cost_mismatch(actual_tx.cost.clone(), expected_tx.cost.clone()); - has_mismatch = true; - } - - if has_mismatch { - mismatches.add_transaction_mismatch(tx_mismatch); - } - } - - // Compare total block execution cost. - if epoch_receipt.anchored_block_cost != expected.total_block_cost { - mismatches.add_total_block_cost_mismatch( - &epoch_receipt.anchored_block_cost, - &expected.total_block_cost, - ); - } - // TODO: add any additional mismatches we might care about? - } - (Ok(_), ExpectedResult::Failure(expected_err)) => { - mismatches.error = Some(("Ok".to_string(), expected_err)); - } - (Err(actual_err), ExpectedResult::Failure(expected_err)) => { - let actual_err_str = actual_err.to_string(); - if actual_err_str != expected_err { - mismatches.error = Some((actual_err_str, expected_err)); - } - } - (Err(actual_err), ExpectedResult::Success(_)) => { - mismatches.error = Some((actual_err.to_string(), "Success".into())); - } - } - - if mismatches.is_empty() { - None - } else { - Some(mismatches) - } - } - - /// Adds a transaction mismatch to the collection. - fn add_transaction_mismatch(&mut self, mismatch: TransactionMismatch) { - self.transactions.push(mismatch); - } - - /// Records a total block cost mismatch. - fn add_total_block_cost_mismatch(&mut self, actual: &ExecutionCost, expected: &ExecutionCost) { - self.total_block_cost = Some((actual.clone(), expected.clone())); - } - - /// Returns true if no mismatches are recorded. 
- pub fn is_empty(&self) -> bool { - self.transactions.is_empty() && self.total_block_cost.is_none() && self.error.is_none() - } - - /// Serializes the given `ConsensusMismatch` as pretty-printed JSON, - /// or returns an empty string if `None`. - pub fn to_json_string_pretty(mismatch: &Option) -> String { - mismatch - .as_ref() - .map(|m| serde_json::to_string_pretty(m).unwrap()) - .unwrap_or("".into()) - } } /// Represents a consensus test with chainstate and test vector. @@ -257,18 +121,6 @@ pub struct ConsensusTest<'a> { impl ConsensusTest<'_> { /// Creates a new `ConsensusTest` with the given test name and vector. pub fn new(test_name: &str, test_vector: ConsensusTestVector) -> Self { - if let ExpectedResult::Success(output) = &test_vector.expected_result { - assert_eq!( - output.transactions.len(), - test_vector.transactions.len(), - "Test vector is invalid. Must specify an expected output per input transaction" - ); - } - let privk = StacksPrivateKey::from_hex( - "510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101", - ) - .unwrap(); - let epoch_id = StacksEpochId::try_from(test_vector.epoch_id).unwrap(); let chain = match epoch_id { StacksEpochId::Epoch30 @@ -278,7 +130,7 @@ impl ConsensusTest<'_> { let mut chain = NakamotoBootPlan::new(test_name) .with_pox_constants(10, 3) .with_initial_balances(test_vector.initial_balances.clone()) - .with_private_key(privk) + .with_private_key(FAUCET_PRIV_KEY.clone()) .boot_nakamoto_chainstate(None); let (burn_ops, mut tenure_change, miner_key) = chain.begin_nakamoto_tenure(TenureChangeCause::BlockFound); @@ -308,8 +160,11 @@ impl ConsensusTest<'_> { Self { chain, test_vector } } - /// Runs the consensus test, validating the results against the expected outcome. - pub fn run(mut self) { + /// Runs the consensus test. + /// + /// This method constructs a block from the test vector, appends it to the + /// chain, and returns the result of the block processing. 
+ pub fn run(mut self) -> ExpectedResult { debug!("--------- Running test vector ---------"); let (block, block_size) = self.construct_nakamoto_block(); let mut stacks_node = self.chain.stacks_node.take().unwrap(); @@ -345,14 +200,8 @@ impl ConsensusTest<'_> { ); debug!("--------- Appended block: {} ---------", result.is_ok()); - // Compare actual vs expected results. - let mismatches = - ConsensusMismatch::from_test_result(result, self.test_vector.expected_result); - assert!( - mismatches.is_none(), - "Mismatches found: {}", - ConsensusMismatch::to_json_string_pretty(&mismatches) - ); + + result.map(|(receipt, _, _, _)| receipt).into() } /// Constructs a Nakamoto block with the given transactions and state index root. @@ -404,18 +253,26 @@ impl ConsensusTest<'_> { #[test] fn test_append_empty_block() { - let outputs = ExpectedBlockOutput { - transactions: vec![], - total_block_cost: ExecutionCost::ZERO, - }; let test_vector = ConsensusTestVector { initial_balances: Vec::new(), marf_hash: "6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into(), epoch_id: StacksEpochId::Epoch30 as u32, transactions: vec![], - expected_result: ExpectedResult::Success(outputs), }; - ConsensusTest::new(function_name!(), test_vector).run() + let result = ConsensusTest::new(function_name!(), test_vector).run(); + // Example with inline expected result + insta::assert_ron_snapshot!(result, @r" + Success(ExpectedBlockOutput( + transactions: [], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 0, + ), + )) + "); } #[test] @@ -426,9 +283,10 @@ fn test_append_state_index_root_mismatch() { marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), epoch_id: StacksEpochId::Epoch30 as u32, transactions: vec![], - expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block c8eeff18a0b03dec385bfe8268bc87ccf93fc00ff73af600c4e1aaef6e0dfaf5 state root mismatch: 
expected 0000000000000000000000000000000000000000000000000000000000000000, got 6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into()).to_string()), }; - ConsensusTest::new(function_name!(), test_vector).run() + + let result = ConsensusTest::new(function_name!(), test_vector).run(); + insta::assert_ron_snapshot!(result); } #[test] @@ -458,35 +316,15 @@ fn test_append_stx_transfers() { ) }) .collect(); - let transfer_result = ExpectedTransactionOutput { - return_type: ClarityValue::Response(ResponseData { - committed: true, - data: Box::new(ClarityValue::Bool(true)), - }), - cost: ExecutionCost { - write_length: 0, - write_count: 0, - read_length: 0, - read_count: 0, - runtime: 0, - }, - }; - let outputs = ExpectedBlockOutput { - transactions: vec![ - transfer_result.clone(), - transfer_result.clone(), - transfer_result, - ], - total_block_cost: ExecutionCost::ZERO, - }; + let test_vector = ConsensusTestVector { initial_balances, marf_hash: "3838b1ae67f108b10ec7a7afb6c2b18e6468be2423d7183ffa2f7824b619b8be".into(), epoch_id: StacksEpochId::Epoch30 as u32, transactions, - expected_result: ExpectedResult::Success(outputs), }; - ConsensusTest::new(function_name!(), test_vector).run() + let result = ConsensusTest::new(function_name!(), test_vector).run(); + insta::assert_ron_snapshot!(result); } #[test] @@ -497,14 +335,9 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); let tx_fee = (tx_exceeds_body.len() * 100) as u64; - let initial_balances = vec![( - StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), - tx_fee, - )]; let tx_bytes = make_contract_publish( - &sender_privk, + &FAUCET_PRIV_KEY, 0, tx_fee, CHAIN_ID_TESTNET, @@ -512,37 +345,47 @@ fn 
test_append_chainstate_error_expression_stack_depth_too_deep() { &tx_exceeds_body, ); let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - let transfer_result = ExpectedTransactionOutput { - return_type: ClarityValue::Response(ResponseData { - committed: true, - data: Box::new(ClarityValue::Bool(true)), - }), - cost: ExecutionCost { - write_length: 0, - write_count: 0, - read_length: 0, - read_count: 0, - runtime: 0, - }, - }; - let outputs = ExpectedBlockOutput { - transactions: vec![transfer_result], - total_block_cost: ExecutionCost::ZERO, - }; + // TODO: should look into append_block. It does weird wrapping of ChainstateError variants inside ChainstateError::StacksInvalidBlock. let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( ParseErrors::ExpressionStackDepthTooDeep, ))); let msg = format!("Invalid Stacks block 518dfea674b5c4874e025a31e01a522c8269005c0685d12658f0359757de6692: {e:?}"); let test_vector = ConsensusTestVector { - initial_balances, + initial_balances: vec![], // Marf hash doesn't matter. 
It will fail with ExpressionStackDepthTooDeep marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), epoch_id: StacksEpochId::Epoch30 as u32, transactions: vec![tx], - expected_result: ExpectedResult::Failure( - ChainstateError::InvalidStacksBlock(msg).to_string(), - ), }; - ConsensusTest::new(function_name!(), test_vector).run() + let result = ConsensusTest::new(function_name!(), test_vector).run(); + insta::assert_ron_snapshot!(result); +} + +#[test] +fn test_append_block_with_contract_upload_success() { + let contract_name = "test-contract"; + let contract_content = "(/ 1 1)"; + let tx_fee = (contract_content.len() * 100) as u64; + + let tx_bytes = make_contract_publish( + &FAUCET_PRIV_KEY, + 0, + tx_fee, + CHAIN_ID_TESTNET, + contract_name, + &contract_content, + ); + let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + + let test_vector = ConsensusTestVector { + initial_balances: vec![], + marf_hash: "908f7e3a8c905d5ceabd3bcaced378038aec57e137034e35e29ddaaf738045b5".into(), + epoch_id: StacksEpochId::Epoch32 as u32, + transactions: vec![tx], + }; + + let result = ConsensusTest::new(function_name!(), test_vector).run(); + + insta::assert_ron_snapshot!(result); } diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_upload_insta.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_upload_insta.snap new file mode 100644 index 0000000000..cb0cd4b165 --- /dev/null +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_upload_insta.snap @@ -0,0 +1,28 @@ +--- +source: stackslib/src/chainstate/tests/consensus.rs +expression: result +--- +Success(ExpectedBlockOutput( + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: 
ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), +)) diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_upload_success.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_upload_success.snap new file mode 100644 index 0000000000..cb0cd4b165 --- /dev/null +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_upload_success.snap @@ -0,0 +1,28 @@ +--- +source: stackslib/src/chainstate/tests/consensus.rs +expression: result +--- +Success(ExpectedBlockOutput( + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), +)) diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_chainstate_error_expression_stack_depth_too_deep.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_chainstate_error_expression_stack_depth_too_deep.snap new file mode 100644 index 0000000000..e48636252e --- /dev/null +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_chainstate_error_expression_stack_depth_too_deep.snap @@ -0,0 +1,5 @@ +--- +source: stackslib/src/chainstate/tests/consensus.rs +expression: result +--- +Failure("Invalid Stacks block 5822414b927f7f2da902f1009a894706bf0a51a56d239b3da3a501e8978ab6fb: 
ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))") diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_state_index_root_mismatch.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_state_index_root_mismatch.snap new file mode 100644 index 0000000000..c478fb0387 --- /dev/null +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_state_index_root_mismatch.snap @@ -0,0 +1,5 @@ +--- +source: stackslib/src/chainstate/tests/consensus.rs +expression: result +--- +Failure("Block c8eeff18a0b03dec385bfe8268bc87ccf93fc00ff73af600c4e1aaef6e0dfaf5 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b") diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers.snap new file mode 100644 index 0000000000..01c6d8fff8 --- /dev/null +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers.snap @@ -0,0 +1,54 @@ +--- +source: stackslib/src/chainstate/tests/consensus.rs +expression: result +--- +Success(ExpectedBlockOutput( + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 0, + ), + ), + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + 
write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 0, + ), + ), + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 0, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 0, + ), +)) From 03ed436839e39ef2e1a415565f609e309a3b85ed Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 30 Sep 2025 13:06:26 -0700 Subject: [PATCH 63/86] Add support for epoch specific tests Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 463 ++++++++++++++++---- stackslib/src/chainstate/tests/mod.rs | 2 +- stackslib/src/net/tests/mod.rs | 19 +- 3 files changed, 401 insertions(+), 83 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index f0828e903e..eda5810c46 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -14,9 +14,14 @@ // along with this program. If not, see . 
use clarity::boot_util::boot_code_addr; use clarity::codec::StacksMessageCodec; -use clarity::consts::CHAIN_ID_TESTNET; +use clarity::consts::{ + CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, + PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, PEER_VERSION_EPOCH_3_1, PEER_VERSION_EPOCH_3_2, + PEER_VERSION_EPOCH_3_3, STACKS_EPOCH_MAX, +}; use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash}; -use clarity::types::StacksEpochId; +use clarity::types::{StacksEpoch, StacksEpochId}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; use clarity::vm::ast::errors::{ParseError, ParseErrors}; @@ -29,6 +34,7 @@ use serde::{Deserialize, Serialize}; use stacks_common::bitvec::BitVec; use crate::burnchains::PoxConstants; +use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::boot::{RewardSet, RewardSetData}; use crate::chainstate::stacks::db::StacksEpochReceipt; @@ -36,11 +42,103 @@ use crate::chainstate::stacks::{Error as ChainstateError, StacksTransaction, Ten use crate::chainstate::tests::TestChainstate; use crate::clarity_vm::clarity::{Error as ClarityError, PreCommitClarityBlock}; use crate::core::test_util::{make_contract_publish, make_stacks_transfer_tx}; +use crate::core::{EpochList, BLOCK_LIMIT_MAINNET_21}; use crate::net::tests::NakamotoBootPlan; pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; +fn epoch_3_0_onwards(first_burnchain_height: u64) -> EpochList { + info!("StacksEpoch unit_test 
first_burn_height = {first_burnchain_height}"); + + EpochList::new(&[ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: 0, + end_height: first_burnchain_height, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 1, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: first_burnchain_height + 1, + end_height: first_burnchain_height + 2, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1, + }, + StacksEpoch { + epoch_id: 
StacksEpochId::Epoch32, + start_height: first_burnchain_height + 2, + end_height: first_burnchain_height + 3, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch33, + start_height: first_burnchain_height + 3, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_3, + }, + ]) +} + /// Represents the expected output of a transaction in a test. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ExpectedTransactionOutput { @@ -78,7 +176,7 @@ pub struct ConsensusTestVector { /// Hex representation of the MARF hash for block construction. pub marf_hash: String, /// The epoch ID for the test environment. - pub epoch_id: u32, + pub epoch_id: StacksEpochId, /// Transactions to include in the block pub transactions: Vec, /// The expected result after appending the constructed block. @@ -264,47 +362,61 @@ impl ConsensusTest<'_> { "Test vector is invalid. 
Must specify an expected output per input transaction" ); } + assert!( + !matches!( + test_vector.epoch_id, + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + ), + "Pre-Nakamoto Tenures are not Supported" + ); let privk = StacksPrivateKey::from_hex( "510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101", ) .unwrap(); - let epoch_id = StacksEpochId::try_from(test_vector.epoch_id).unwrap(); - let chain = match epoch_id { - StacksEpochId::Epoch30 - | StacksEpochId::Epoch31 - | StacksEpochId::Epoch32 - | StacksEpochId::Epoch33 => { - let mut chain = NakamotoBootPlan::new(test_name) - .with_pox_constants(10, 3) - .with_initial_balances(test_vector.initial_balances.clone()) - .with_private_key(privk) - .boot_nakamoto_chainstate(None); - let (burn_ops, mut tenure_change, miner_key) = - chain.begin_nakamoto_tenure(TenureChangeCause::BlockFound); - let (_, header_hash, consensus_hash) = chain.next_burnchain_block(burn_ops); - let vrf_proof = chain.make_nakamoto_vrf_proof(miner_key); - - tenure_change.tenure_consensus_hash = consensus_hash.clone(); - tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = chain.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = chain.miner.make_nakamoto_coinbase(None, vrf_proof); - - let blocks_and_sizes = - chain.make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)); - chain - } - StacksEpochId::Epoch10 - | StacksEpochId::Epoch20 - | StacksEpochId::Epoch2_05 - | StacksEpochId::Epoch21 - | StacksEpochId::Epoch22 - | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 - | StacksEpochId::Epoch25 => { - unimplemented!("Pre-Nakamoto epochs are not supported."); - } - }; + // We don't really ever want the reward cycle to force a new signer set...so for now + // Just set the cycle length to a high value + let mut 
boot_plan = NakamotoBootPlan::new(test_name) + .with_pox_constants(100, 3) + .with_initial_balances(test_vector.initial_balances.clone()) + .with_private_key(privk); + let epochs = epoch_3_0_onwards( + (boot_plan.pox_constants.pox_4_activation_height + + boot_plan.pox_constants.reward_cycle_length + + 1) as u64, + ); + boot_plan = boot_plan.with_epochs(epochs); + let mut chain = boot_plan.boot_nakamoto_chainstate(None); + let mut burn_block_height = chain.get_burn_block_height(); + let mut i = 0; + while SortitionDB::get_stacks_epoch(chain.sortdb().conn(), burn_block_height) + .unwrap() + .unwrap() + .epoch_id + < test_vector.epoch_id + { + let (burn_ops, mut tenure_change, miner_key) = + chain.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, header_hash, consensus_hash) = chain.next_burnchain_block(burn_ops); + let vrf_proof = chain.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + let tenure_change_tx = chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = chain.miner.make_nakamoto_coinbase(None, vrf_proof); + + let _blocks_and_sizes = + chain.make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)); + i += 1; + burn_block_height = chain.get_burn_block_height(); + } Self { chain, test_vector } } @@ -364,14 +476,18 @@ impl ConsensusTest<'_> { ) .unwrap() .unwrap(); - let cycle = self.chain.get_reward_cycle(); - + let burn_spent = SortitionDB::get_block_snapshot_consensus( + self.chain.sortdb_ref().conn(), + &chain_tip.consensus_hash, + ) + .unwrap() + .map(|sn| sn.total_burn).unwrap(); let mut block = NakamotoBlock { header: NakamotoBlockHeader { version: 1, chain_length: chain_tip.stacks_block_height + 1, - burn_spent: 17000, + burn_spent, consensus_hash: chain_tip.consensus_hash.clone(), parent_block_id: chain_tip.index_block_hash(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[]), @@ -403,15 +519,63 
@@ impl ConsensusTest<'_> { } #[test] -fn test_append_empty_block() { +fn test_append_empty_block_epoch_30() { + let outputs = ExpectedBlockOutput { + transactions: vec![], + total_block_cost: ExecutionCost::ZERO, + }; + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + epoch_id: StacksEpochId::Epoch30, + transactions: vec![], + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_empty_block_epoch_31() { + let outputs = ExpectedBlockOutput { + transactions: vec![], + total_block_cost: ExecutionCost::ZERO, + }; + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + marf_hash: "a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), + epoch_id: StacksEpochId::Epoch31, + transactions: vec![], + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_empty_block_epoch_32() { + let outputs = ExpectedBlockOutput { + transactions: vec![], + total_block_cost: ExecutionCost::ZERO, + }; + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + epoch_id: StacksEpochId::Epoch30, + transactions: vec![], + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_empty_block_epoch_33() { let outputs = ExpectedBlockOutput { transactions: vec![], total_block_cost: ExecutionCost::ZERO, }; let test_vector = ConsensusTestVector { initial_balances: Vec::new(), - marf_hash: "6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into(), - epoch_id: StacksEpochId::Epoch30 as u32, + marf_hash: 
"f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + epoch_id: StacksEpochId::Epoch30, transactions: vec![], expected_result: ExpectedResult::Success(outputs), }; @@ -419,20 +583,62 @@ fn test_append_empty_block() { } #[test] -fn test_append_state_index_root_mismatch() { +fn test_append_state_index_root_mismatch_epoch_30() { + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + // An invalid MARF. Will result in state root mismatch + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch30, + transactions: vec![], + expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block ef45bfa44231d9e7aff094b53cfd48df0456067312f169a499354c4273a66fe3 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into()).to_string()), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_state_index_root_mismatch_epoch_31() { + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + // An invalid MARF. Will result in state root mismatch + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch31, + transactions: vec![], + expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block a14d0b5c8d3c49554aeb462a8fe019718195789fa1dcd642059b75e41f0ce9cc state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into()).to_string()), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_state_index_root_mismatch_epoch_32() { let test_vector = ConsensusTestVector { initial_balances: Vec::new(), // An invalid MARF. 
Will result in state root mismatch marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch30 as u32, + epoch_id: StacksEpochId::Epoch32, transactions: vec![], - expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block c8eeff18a0b03dec385bfe8268bc87ccf93fc00ff73af600c4e1aaef6e0dfaf5 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into()).to_string()), + expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block f8120b4a632ee1d49fbbde3e01289588389cd205cab459a4493a7d58d2dc18ed state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into()).to_string()), }; ConsensusTest::new(function_name!(), test_vector).run() } #[test] -fn test_append_stx_transfers() { +fn test_append_state_index_root_mismatch_epoch_33() { + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + // An invalid MARF. 
Will result in state root mismatch + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch33, + transactions: vec![], + expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block 4dcb48b684d105ff0e0ab8becddd4a2d5623cc8b168aacf9c455e20b3e610e63 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into()).to_string()), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +fn create_stx_transfers_tx_and_outputs() -> ( + Vec<(PrincipalData, u64)>, + Vec, + ExpectedBlockOutput, +) { let sender_privks = [ StacksPrivateKey::from_hex(SK_1).unwrap(), StacksPrivateKey::from_hex(SK_2).unwrap(), @@ -479,10 +685,42 @@ fn test_append_stx_transfers() { ], total_block_cost: ExecutionCost::ZERO, }; + (initial_balances, transactions, outputs) +} + +#[test] +fn test_append_stx_transfers_epoch_30() { + let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); + let test_vector = ConsensusTestVector { + initial_balances, + marf_hash: "63ea49669d2216ebc7e4f8b5e1cd2c99b8aff9806794adf87dcf709c0a244798".into(), + epoch_id: StacksEpochId::Epoch30, + transactions, + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_stx_transfers_epoch_31() { + let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); + let test_vector = ConsensusTestVector { + initial_balances, + marf_hash: "7fc538e605a4a353871c4a655ae850fe9a70c3875b65f2bb42ea3bef5effed2c".into(), + epoch_id: StacksEpochId::Epoch31, + transactions, + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_stx_transfers_epoch_32() { + let (initial_balances, transactions, outputs) = 
create_stx_transfers_tx_and_outputs(); let test_vector = ConsensusTestVector { initial_balances, - marf_hash: "3838b1ae67f108b10ec7a7afb6c2b18e6468be2423d7183ffa2f7824b619b8be".into(), - epoch_id: StacksEpochId::Epoch30 as u32, + marf_hash: "4d5c9a6d07806ac5006137de22b083de66fff7119143dd5cd92e4a457d66e028".into(), + epoch_id: StacksEpochId::Epoch32, transactions, expected_result: ExpectedResult::Success(outputs), }; @@ -490,8 +728,19 @@ fn test_append_stx_transfers() { } #[test] -fn test_append_chainstate_error_expression_stack_depth_too_deep() { - // something just over the limit of the expression depth +fn test_append_stx_transfers_epoch_33() { + let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); + let test_vector = ConsensusTestVector { + initial_balances, + marf_hash: "66eed8c0ab31db111a5adcc83d38a7004c6e464e3b9fb9f52ec589bc6d5f2d32".into(), + epoch_id: StacksEpochId::Epoch33, + transactions, + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +fn create_exceeds_stacks_depth_contract_tx(sender_privk: &StacksPrivateKey) -> StacksTransaction { let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); @@ -499,10 +748,6 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); let tx_fee = (tx_exceeds_body.len() * 100) as u64; - let initial_balances = vec![( - StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), - tx_fee, - )]; let tx_bytes = make_contract_publish( &sender_privk, 0, @@ -511,34 +756,100 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { "test-exceeds", &tx_exceeds_body, ); - let tx = StacksTransaction::consensus_deserialize(&mut 
&tx_bytes[..]).unwrap(); - let transfer_result = ExpectedTransactionOutput { - return_type: ClarityValue::Response(ResponseData { - committed: true, - data: Box::new(ClarityValue::Bool(true)), - }), - cost: ExecutionCost { - write_length: 0, - write_count: 0, - read_length: 0, - read_count: 0, - runtime: 0, - }, + + StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap() +} + +#[test] +fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_30() { + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); + let initial_balances = vec![( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), + tx.get_tx_fee(), + )]; + // TODO: should look into append_block. It does weird wrapping of ChainstateError variants inside ChainstateError::StacksInvalidBlock. + let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( + ParseErrors::ExpressionStackDepthTooDeep, + ))); + let test_vector = ConsensusTestVector { + initial_balances, + // Marf hash doesn't matter. 
It will fail with ExpressionStackDepthTooDeep + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch30, + transactions: vec![tx], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!("Invalid Stacks block ff0796f9934d45aad71871f317061acb99dd5ef1237a8747a78624a2824f7d32: {e:?}")).to_string(), + ), }; - let outputs = ExpectedBlockOutput { - transactions: vec![transfer_result], - total_block_cost: ExecutionCost::ZERO, + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_31() { + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); + let initial_balances = vec![( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), + tx.get_tx_fee(), + )]; + let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( + ParseErrors::ExpressionStackDepthTooDeep, + ))); + let test_vector = ConsensusTestVector { + initial_balances, + // Marf hash doesn't matter. It will fail with ExpressionStackDepthTooDeep + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch31, + transactions: vec![tx], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!("Invalid Stacks block 9da03cdc774989cea30445f1453073b070430867edcecb180d1cc9a6e9738b46: {e:?}")).to_string(), + ), }; - // TODO: should look into append_block. It does weird wrapping of ChainstateError variants inside ChainstateError::StacksInvalidBlock. 
+ ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_32() { + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); + let initial_balances = vec![( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), + tx.get_tx_fee(), + )]; + let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( + ParseErrors::ExpressionStackDepthTooDeep, + ))); + let test_vector = ConsensusTestVector { + initial_balances, + // Marf hash doesn't matter. It will fail with ExpressionStackDepthTooDeep + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch32, + transactions: vec![tx], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!("Invalid Stacks block 76a6d95b3ec273a13f10080b3b18e225cc838044c5e3a3000b7ccdd8b50a5ae1: {e:?}")).to_string(), + ), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_33() { + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); + let initial_balances = vec![( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), + tx.get_tx_fee(), + )]; let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( ParseErrors::ExpressionStackDepthTooDeep, ))); - let msg = format!("Invalid Stacks block 518dfea674b5c4874e025a31e01a522c8269005c0685d12658f0359757de6692: {e:?}"); + let msg = format!("Invalid Stacks block de3c507ab60e717275f97f267ec2608c96aaab42a7e32fc2d8129585dff9e74a: {e:?}"); let test_vector = ConsensusTestVector { initial_balances, // Marf hash doesn't matter. 
It will fail with ExpressionStackDepthTooDeep marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch30 as u32, + epoch_id: StacksEpochId::Epoch33, transactions: vec![tx], expected_result: ExpectedResult::Failure( ChainstateError::InvalidStacksBlock(msg).to_string(), diff --git a/stackslib/src/chainstate/tests/mod.rs b/stackslib/src/chainstate/tests/mod.rs index c16c1201cf..9d5e14abb5 100644 --- a/stackslib/src/chainstate/tests/mod.rs +++ b/stackslib/src/chainstate/tests/mod.rs @@ -565,7 +565,7 @@ impl<'a> TestChainstate<'a> { self.sortdb.as_mut().unwrap() } - pub fn sortdb_ref(&mut self) -> &SortitionDB { + pub fn sortdb_ref(&self) -> &SortitionDB { self.sortdb.as_ref().unwrap() } diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 9bbb455f35..57859fed31 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -25,6 +25,8 @@ pub mod relay; use std::collections::{HashMap, HashSet}; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use clarity::types::EpochList; +use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use libstackerdb::StackerDBChunkData; use rand::Rng; @@ -101,11 +103,13 @@ pub struct NakamotoBootPlan { pub malleablized_blocks: bool, pub network_id: u32, pub txindex: bool, + pub epochs: Option>, } impl NakamotoBootPlan { pub fn new(test_name: &str) -> Self { let (test_signers, test_stackers) = TestStacker::common_signing_set(); + let pox_constants = TestPeerConfig::default().burnchain.pox_constants; Self { test_name: test_name.to_string(), pox_constants: TestPeerConfig::default().burnchain.pox_constants, @@ -119,6 +123,7 @@ impl NakamotoBootPlan { malleablized_blocks: true, network_id: TestPeerConfig::default().network_id, txindex: false, + epochs: None, } } @@ -154,6 +159,11 @@ impl NakamotoBootPlan { self } + pub fn with_epochs(mut self, epochs: EpochList) -> Self { + 
self.epochs = Some(epochs); + self + } + pub fn with_initial_balances(mut self, initial_balances: Vec<(PrincipalData, u64)>) -> Self { self.initial_balances = initial_balances; self @@ -367,16 +377,13 @@ impl NakamotoBootPlan { ) .unwrap(); - // reward cycles are 5 blocks long - // first 25 blocks are boot-up - // reward cycle 6 instantiates pox-3 - // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation - chainstate_config.epochs = Some(StacksEpoch::unit_test_3_0_only( + let default_epoch = StacksEpoch::unit_test_3_0_only( (self.pox_constants.pox_4_activation_height + self.pox_constants.reward_cycle_length + 1) .into(), - )); + ); + chainstate_config.epochs = Some(self.epochs.clone().unwrap_or(default_epoch)); chainstate_config.initial_balances = vec![]; if self.add_default_balance { chainstate_config From 857c0f9efb16255b96a07f0006a456887633dbfd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 30 Sep 2025 14:46:47 -0700 Subject: [PATCH 64/86] Add support for multiple blocks per epoch and chainstate reuse Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 675 ++++++++++---------- stackslib/src/chainstate/tests/mod.rs | 8 + 2 files changed, 350 insertions(+), 333 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index eda5810c46..2c5731e898 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -12,6 +12,8 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::HashMap; + use clarity::boot_util::boot_code_addr; use clarity::codec::StacksMessageCodec; use clarity::consts::{ @@ -168,21 +170,26 @@ pub enum ExpectedResult { Failure(String), } -/// Defines a test vector for a consensus test, including chainstate setup and expected outcomes. 
+/// Represents a block to be appended in a test and its expected result. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct ConsensusTestVector { - /// Initial balances for the provided PrincipalData during chainstate instantiation. - pub initial_balances: Vec<(PrincipalData, u64)>, +pub struct TestBlock { /// Hex representation of the MARF hash for block construction. pub marf_hash: String, - /// The epoch ID for the test environment. - pub epoch_id: StacksEpochId, /// Transactions to include in the block pub transactions: Vec, /// The expected result after appending the constructed block. pub expected_result: ExpectedResult, } +/// Defines a test vector for a consensus test, including chainstate setup and expected outcomes. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsensusTestVector { + /// Initial balances for the provided PrincipalData during chainstate instantiation. + pub initial_balances: Vec<(PrincipalData, u64)>, + /// A mapping of epoch to Blocks that should be applied in that epoch + pub epoch_blocks: HashMap>, +} + /// Tracks mismatches between actual and expected transaction results. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TransactionMismatch { @@ -249,7 +256,7 @@ impl ConsensusMismatch { ) -> Option { let mut mismatches = ConsensusMismatch::default(); match (append_result, expected_result) { - (Ok((epoch_receipt, _, _, _)), ExpectedResult::Success(expected)) => { + (Ok((epoch_receipt, clarity_commit, _, _)), ExpectedResult::Success(expected)) => { // Convert transaction receipts to `ExpectedTransactionOutput` for comparison. let actual_transactions: Vec<_> = epoch_receipt .tx_receipts @@ -299,6 +306,7 @@ impl ConsensusMismatch { ); } // TODO: add any additional mismatches we might care about? 
+ clarity_commit.commit(); } (Ok(_), ExpectedResult::Failure(expected_err)) => { mismatches.error = Some(("Ok".to_string(), expected_err)); @@ -355,34 +363,41 @@ pub struct ConsensusTest<'a> { impl ConsensusTest<'_> { /// Creates a new `ConsensusTest` with the given test name and vector. pub fn new(test_name: &str, test_vector: ConsensusTestVector) -> Self { - if let ExpectedResult::Success(output) = &test_vector.expected_result { - assert_eq!( - output.transactions.len(), - test_vector.transactions.len(), - "Test vector is invalid. Must specify an expected output per input transaction" + // Validate blocks + for (epoch_id, blocks) in &test_vector.epoch_blocks { + assert!( + !matches!( + *epoch_id, + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + ), + "Pre-Nakamoto Tenures are not Supported" ); + for block in blocks { + if let ExpectedResult::Success(output) = &block.expected_result { + assert_eq!( + output.transactions.len(), + block.transactions.len(), + "Test block is invalid. Must specify an expected output per input transaction" + ); + } + } } - assert!( - !matches!( - test_vector.epoch_id, - StacksEpochId::Epoch10 - | StacksEpochId::Epoch20 - | StacksEpochId::Epoch2_05 - | StacksEpochId::Epoch21 - | StacksEpochId::Epoch22 - | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 - | StacksEpochId::Epoch25 - ), - "Pre-Nakamoto Tenures are not Supported" - ); + let privk = StacksPrivateKey::from_hex( "510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101", ) .unwrap(); - // We don't really ever want the reward cycle to force a new signer set...so for now - // Just set the cycle length to a high value + // Set up chainstate to start at Epoch 3.0 + // We don't really ever want the reward cycle to force a new signer set... 
+ // so for now just set the cycle length to a high value (100) let mut boot_plan = NakamotoBootPlan::new(test_name) .with_pox_constants(100, 3) .with_initial_balances(test_vector.initial_balances.clone()) @@ -393,83 +408,129 @@ impl ConsensusTest<'_> { + 1) as u64, ); boot_plan = boot_plan.with_epochs(epochs); - let mut chain = boot_plan.boot_nakamoto_chainstate(None); - let mut burn_block_height = chain.get_burn_block_height(); - let mut i = 0; - while SortitionDB::get_stacks_epoch(chain.sortdb().conn(), burn_block_height) - .unwrap() - .unwrap() - .epoch_id - < test_vector.epoch_id - { - let (burn_ops, mut tenure_change, miner_key) = - chain.begin_nakamoto_tenure(TenureChangeCause::BlockFound); - let (_, header_hash, consensus_hash) = chain.next_burnchain_block(burn_ops); - let vrf_proof = chain.make_nakamoto_vrf_proof(miner_key); + let chain = boot_plan.boot_nakamoto_chainstate(None); + + Self { chain, test_vector } + } + + /// Advances the chainstate to the specified epoch. Creating a tenure change block per burn block height + fn advance_to_epoch(&mut self, target_epoch: StacksEpochId) { + let burn_block_height = self.chain.get_burn_block_height(); + let mut current_epoch = + SortitionDB::get_stacks_epoch(self.chain.sortdb().conn(), burn_block_height) + .unwrap() + .unwrap() + .epoch_id; + assert!(current_epoch <= target_epoch, "Chainstate is already at a higher epoch than the target. Current epoch: {current_epoch}. 
Target epoch: {target_epoch}"); + while current_epoch < target_epoch { + let (burn_ops, mut tenure_change, miner_key) = self + .chain + .begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, header_hash, consensus_hash) = self.chain.next_burnchain_block(burn_ops); + let vrf_proof = self.chain.make_nakamoto_vrf_proof(miner_key); tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = chain.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = chain.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = self.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = self.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let _blocks_and_sizes = - chain.make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)); - i += 1; - burn_block_height = chain.get_burn_block_height(); + self.chain + .make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)); + let burn_block_height = self.chain.get_burn_block_height(); + current_epoch = + SortitionDB::get_stacks_epoch(self.chain.sortdb().conn(), burn_block_height) + .unwrap() + .unwrap() + .epoch_id; } - Self { chain, test_vector } } - /// Runs the consensus test, validating the results against the expected outcome. + /// Runs the consensus test for the test vector, advancing epochs as needed. 
pub fn run(mut self) { - debug!("--------- Running test vector ---------"); - let (block, block_size) = self.construct_nakamoto_block(); - let mut stacks_node = self.chain.stacks_node.take().unwrap(); - let sortdb = self.chain.sortdb.take().unwrap(); - let chain_tip = - NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + // Get sorted epochs + let mut epochs: Vec = + self.test_vector.epoch_blocks.keys().cloned().collect(); + epochs.sort(); + + for epoch in epochs { + debug!( + "--------- Processing epoch {epoch:?} with {} blocks ---------", + self.test_vector.epoch_blocks[&epoch].len() + ); + self.advance_to_epoch(epoch); + for (i, block) in self.test_vector.epoch_blocks[&epoch].iter().enumerate() { + debug!("--------- Running block {i} for epoch {epoch:?} ---------"); + let (nakamoto_block, block_size) = + self.construct_nakamoto_block(&block.marf_hash, &block.transactions); + let sortdb = self.chain.sortdb.take().unwrap(); + let chain_tip = NakamotoChainState::get_canonical_block_header( + self.chain.stacks_node().chainstate.db(), + &sortdb, + ) .unwrap() .unwrap(); - let pox_constants = PoxConstants::test_default(); + let pox_constants = PoxConstants::test_default(); - let (mut chainstate_tx, clarity_instance) = - stacks_node.chainstate.chainstate_tx_begin().unwrap(); + debug!( + "--------- Appending block {} ---------", + nakamoto_block.header.signer_signature_hash(); + "block" => ?nakamoto_block + ); + { + let (mut chainstate_tx, clarity_instance) = self + .chain + .stacks_node() + .chainstate + .chainstate_tx_begin() + .unwrap(); + + let mut burndb_conn = sortdb.index_handle_at_tip(); + + let result = NakamotoChainState::append_block( + &mut chainstate_tx, + clarity_instance, + &mut burndb_conn, + &chain_tip.consensus_hash, + &pox_constants, + &chain_tip, + &chain_tip.burn_header_hash, + chain_tip.burn_header_height, + chain_tip.burn_header_timestamp, + &nakamoto_block, + block_size.try_into().unwrap(), + 
nakamoto_block.header.burn_spent, + 1500, + &RewardSet::empty(), + false, + ); - let mut burndb_conn = sortdb.index_handle_at_tip(); + debug!("--------- Appended block: {} ---------", result.is_ok()); - debug!("--------- Appending block {} ---------", block.header.signer_signature_hash(); "block" => ?block); - let result = NakamotoChainState::append_block( - &mut chainstate_tx, - clarity_instance, - &mut burndb_conn, - &chain_tip.consensus_hash, - &pox_constants, - &chain_tip, - &chain_tip.burn_header_hash, - chain_tip.burn_header_height, - chain_tip.burn_header_timestamp, - &block, - block_size.try_into().unwrap(), - block.header.burn_spent, - 1500, - &RewardSet::empty(), - false, - ); + // Compare actual vs expected results. + let mismatches = + ConsensusMismatch::from_test_result(result, block.expected_result.clone()); + assert!( + mismatches.is_none(), + "Mismatches found in block {i} for epoch {epoch:?}: {}", + ConsensusMismatch::to_json_string_pretty(&mismatches) + ); + chainstate_tx.commit().unwrap(); + } - debug!("--------- Appended block: {} ---------", result.is_ok()); - // Compare actual vs expected results. - let mismatches = - ConsensusMismatch::from_test_result(result, self.test_vector.expected_result); - assert!( - mismatches.is_none(), - "Mismatches found: {}", - ConsensusMismatch::to_json_string_pretty(&mismatches) - ); + // Restore chainstate for the next block + self.chain.sortdb = Some(sortdb); + } + } } /// Constructs a Nakamoto block with the given transactions and state index root. 
- fn construct_nakamoto_block(&self) -> (NakamotoBlock, usize) { - let state_index_root = TrieHash::from_hex(&self.test_vector.marf_hash).unwrap(); + fn construct_nakamoto_block( + &self, + marf_hash: &str, + transactions: &[StacksTransaction], + ) -> (NakamotoBlock, usize) { + let state_index_root = TrieHash::from_hex(marf_hash).unwrap(); let chain_tip = NakamotoChainState::get_canonical_block_header( self.chain.stacks_node.as_ref().unwrap().chainstate.db(), self.chain.sortdb.as_ref().unwrap(), @@ -482,7 +543,8 @@ impl ConsensusTest<'_> { &chain_tip.consensus_hash, ) .unwrap() - .map(|sn| sn.total_burn).unwrap(); + .map(|sn| sn.total_burn) + .unwrap(); let mut block = NakamotoBlock { header: NakamotoBlockHeader { version: 1, @@ -497,7 +559,7 @@ impl ConsensusTest<'_> { signer_signature: vec![], pox_treatment: BitVec::ones(1).unwrap(), }, - txs: self.test_vector.transactions.clone(), + txs: transactions.to_vec(), }; let tx_merkle_root = { @@ -519,126 +581,117 @@ impl ConsensusTest<'_> { } #[test] -fn test_append_empty_block_epoch_30() { - let outputs = ExpectedBlockOutput { - transactions: vec![], - total_block_cost: ExecutionCost::ZERO, - }; - let test_vector = ConsensusTestVector { - initial_balances: Vec::new(), - marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), - epoch_id: StacksEpochId::Epoch30, - transactions: vec![], - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -#[test] -fn test_append_empty_block_epoch_31() { - let outputs = ExpectedBlockOutput { +fn test_append_empty_blocks() { + let mut epoch_blocks = HashMap::new(); + let expected_result = ExpectedResult::Success(ExpectedBlockOutput { transactions: vec![], total_block_cost: ExecutionCost::ZERO, - }; - let test_vector = ConsensusTestVector { - initial_balances: Vec::new(), - marf_hash: "a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), - epoch_id: StacksEpochId::Epoch31, 
- transactions: vec![], - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -#[test] -fn test_append_empty_block_epoch_32() { - let outputs = ExpectedBlockOutput { - transactions: vec![], - total_block_cost: ExecutionCost::ZERO, - }; - let test_vector = ConsensusTestVector { - initial_balances: Vec::new(), - marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), - epoch_id: StacksEpochId::Epoch30, - transactions: vec![], - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -#[test] -fn test_append_empty_block_epoch_33() { - let outputs = ExpectedBlockOutput { - transactions: vec![], - total_block_cost: ExecutionCost::ZERO, - }; - let test_vector = ConsensusTestVector { - initial_balances: Vec::new(), - marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), - epoch_id: StacksEpochId::Epoch30, - transactions: vec![], - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} + }); + epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + transactions: vec![], + expected_result: expected_result.clone(), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), + transactions: vec![], + expected_result: expected_result.clone(), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into(), + transactions: vec![], + expected_result: expected_result.clone(), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: 
"23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into(), + transactions: vec![], + expected_result: expected_result.clone(), + }], + ); -#[test] -fn test_append_state_index_root_mismatch_epoch_30() { let test_vector = ConsensusTestVector { initial_balances: Vec::new(), - // An invalid MARF. Will result in state root mismatch - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch30, - transactions: vec![], - expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block ef45bfa44231d9e7aff094b53cfd48df0456067312f169a499354c4273a66fe3 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into()).to_string()), + epoch_blocks, }; - ConsensusTest::new(function_name!(), test_vector).run() + ConsensusTest::new(function_name!(), test_vector).run(); } #[test] -fn test_append_state_index_root_mismatch_epoch_31() { - let test_vector = ConsensusTestVector { - initial_balances: Vec::new(), - // An invalid MARF. 
Will result in state root mismatch - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch31, - transactions: vec![], - expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block a14d0b5c8d3c49554aeb462a8fe019718195789fa1dcd642059b75e41f0ce9cc state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into()).to_string()), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} +fn test_append_state_index_root_mismatches() { + let mut epoch_blocks = HashMap::new(); + epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock( + "Block ef45bfa44231d9e7aff094b53cfd48df0456067312f169a499354c4273a66fe3 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + ) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock( + "Block a14d0b5c8d3c49554aeb462a8fe019718195789fa1dcd642059b75e41f0ce9cc state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), + ) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![], + expected_result: ExpectedResult::Failure( + 
ChainstateError::InvalidStacksBlock( + "Block f8120b4a632ee1d49fbbde3e01289588389cd205cab459a4493a7d58d2dc18ed state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into(), + ) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock( + "Block 4dcb48b684d105ff0e0ab8becddd4a2d5623cc8b168aacf9c455e20b3e610e63 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into(), + ) + .to_string(), + ), + }], + ); -#[test] -fn test_append_state_index_root_mismatch_epoch_32() { let test_vector = ConsensusTestVector { initial_balances: Vec::new(), - // An invalid MARF. Will result in state root mismatch - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch32, - transactions: vec![], - expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block f8120b4a632ee1d49fbbde3e01289588389cd205cab459a4493a7d58d2dc18ed state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into()).to_string()), + epoch_blocks, }; - ConsensusTest::new(function_name!(), test_vector).run() + ConsensusTest::new(function_name!(), test_vector).run(); } #[test] -fn test_append_state_index_root_mismatch_epoch_33() { - let test_vector = ConsensusTestVector { - initial_balances: Vec::new(), - // An invalid MARF. 
Will result in state root mismatch - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch33, - transactions: vec![], - expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block 4dcb48b684d105ff0e0ab8becddd4a2d5623cc8b168aacf9c455e20b3e610e63 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into()).to_string()), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -fn create_stx_transfers_tx_and_outputs() -> ( - Vec<(PrincipalData, u64)>, - Vec, - ExpectedBlockOutput, -) { +fn test_append_stx_transfers_success() { let sender_privks = [ StacksPrivateKey::from_hex(SK_1).unwrap(), StacksPrivateKey::from_hex(SK_2).unwrap(), @@ -647,13 +700,14 @@ fn create_stx_transfers_tx_and_outputs() -> ( let send_amount = 1_000; let tx_fee = 180; let mut initial_balances = Vec::new(); - let transactions = sender_privks + let transactions: Vec<_> = sender_privks .iter() .map(|sender_privk| { initial_balances.push(( StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender_privk)).into(), send_amount + tx_fee, )); + // Interestingly, it doesn't seem to care about nonce... 
make_stacks_transfer_tx( sender_privk, 0, @@ -685,68 +739,55 @@ fn create_stx_transfers_tx_and_outputs() -> ( ], total_block_cost: ExecutionCost::ZERO, }; - (initial_balances, transactions, outputs) -} - -#[test] -fn test_append_stx_transfers_epoch_30() { - let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); - let test_vector = ConsensusTestVector { - initial_balances, - marf_hash: "63ea49669d2216ebc7e4f8b5e1cd2c99b8aff9806794adf87dcf709c0a244798".into(), - epoch_id: StacksEpochId::Epoch30, - transactions, - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -#[test] -fn test_append_stx_transfers_epoch_31() { - let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); - let test_vector = ConsensusTestVector { - initial_balances, - marf_hash: "7fc538e605a4a353871c4a655ae850fe9a70c3875b65f2bb42ea3bef5effed2c".into(), - epoch_id: StacksEpochId::Epoch31, - transactions, - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} + let mut epoch_blocks = HashMap::new(); + epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "63ea49669d2216ebc7e4f8b5e1cd2c99b8aff9806794adf87dcf709c0a244798".into(), + transactions: transactions.clone(), + expected_result: ExpectedResult::Success(outputs.clone()), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "7fc538e605a4a353871c4a655ae850fe9a70c3875b65f2bb42ea3bef5effed2c".into(), + transactions: transactions.clone(), + expected_result: ExpectedResult::Success(outputs.clone()), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "4d5c9a6d07806ac5006137de22b083de66fff7119143dd5cd92e4a457d66e028".into(), + transactions: transactions.clone(), + expected_result: ExpectedResult::Success(outputs.clone()), + }], + ); + epoch_blocks.insert( + 
StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: "66eed8c0ab31db111a5adcc83d38a7004c6e464e3b9fb9f52ec589bc6d5f2d32".into(), + transactions: transactions.clone(), + expected_result: ExpectedResult::Success(outputs.clone()), + }], + ); -#[test] -fn test_append_stx_transfers_epoch_32() { - let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); let test_vector = ConsensusTestVector { initial_balances, - marf_hash: "4d5c9a6d07806ac5006137de22b083de66fff7119143dd5cd92e4a457d66e028".into(), - epoch_id: StacksEpochId::Epoch32, - transactions, - expected_result: ExpectedResult::Success(outputs), + epoch_blocks, }; - ConsensusTest::new(function_name!(), test_vector).run() + ConsensusTest::new(function_name!(), test_vector).run(); } #[test] -fn test_append_stx_transfers_epoch_33() { - let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); - let test_vector = ConsensusTestVector { - initial_balances, - marf_hash: "66eed8c0ab31db111a5adcc83d38a7004c6e464e3b9fb9f52ec589bc6d5f2d32".into(), - epoch_id: StacksEpochId::Epoch33, - transactions, - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -fn create_exceeds_stacks_depth_contract_tx(sender_privk: &StacksPrivateKey) -> StacksTransaction { +fn test_append_chainstate_error_expression_stack_depth_too_deep() { + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); let tx_fee = (tx_exceeds_body.len() * 100) as u64; let tx_bytes = make_contract_publish( &sender_privk, @@ -757,103 +798,71 @@ fn 
create_exceeds_stacks_depth_contract_tx(sender_privk: &StacksPrivateKey) -> S &tx_exceeds_body, ); - StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap() -} - -#[test] -fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_30() { - let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); + let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); let initial_balances = vec![( StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), - tx.get_tx_fee(), - )]; - // TODO: should look into append_block. It does weird wrapping of ChainstateError variants inside ChainstateError::StacksInvalidBlock. - let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( - ParseErrors::ExpressionStackDepthTooDeep, - ))); - let test_vector = ConsensusTestVector { - initial_balances, - // Marf hash doesn't matter. It will fail with ExpressionStackDepthTooDeep - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch30, - transactions: vec![tx], - expected_result: ExpectedResult::Failure( - ChainstateError::InvalidStacksBlock(format!("Invalid Stacks block ff0796f9934d45aad71871f317061acb99dd5ef1237a8747a78624a2824f7d32: {e:?}")).to_string(), - ), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -#[test] -fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_31() { - let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); - let initial_balances = vec![( - StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), - tx.get_tx_fee(), - )]; - let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( - ParseErrors::ExpressionStackDepthTooDeep, - ))); - let test_vector = ConsensusTestVector { - initial_balances, - // 
Marf hash doesn't matter. It will fail with ExpressionStackDepthTooDeep - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch31, - transactions: vec![tx], - expected_result: ExpectedResult::Failure( - ChainstateError::InvalidStacksBlock(format!("Invalid Stacks block 9da03cdc774989cea30445f1453073b070430867edcecb180d1cc9a6e9738b46: {e:?}")).to_string(), - ), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -#[test] -fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_32() { - let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); - let initial_balances = vec![( - StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), - tx.get_tx_fee(), + tx_fee, )]; let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( ParseErrors::ExpressionStackDepthTooDeep, ))); - let test_vector = ConsensusTestVector { - initial_balances, - // Marf hash doesn't matter. 
It will fail with ExpressionStackDepthTooDeep - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch32, - transactions: vec![tx], - expected_result: ExpectedResult::Failure( - ChainstateError::InvalidStacksBlock(format!("Invalid Stacks block 76a6d95b3ec273a13f10080b3b18e225cc838044c5e3a3000b7ccdd8b50a5ae1: {e:?}")).to_string(), - ), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} + let mut epoch_blocks = HashMap::new(); + epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![tx.clone()], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!( + "Invalid Stacks block ff0796f9934d45aad71871f317061acb99dd5ef1237a8747a78624a2824f7d32: {e:?}" + )) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![tx.clone()], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!( + "Invalid Stacks block 9da03cdc774989cea30445f1453073b070430867edcecb180d1cc9a6e9738b46: {e:?}" + )) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![tx.clone()], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!( + "Invalid Stacks block 76a6d95b3ec273a13f10080b3b18e225cc838044c5e3a3000b7ccdd8b50a5ae1: {e:?}" + )) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![tx.clone()], + expected_result: ExpectedResult::Failure( + 
ChainstateError::InvalidStacksBlock(format!( + "Invalid Stacks block de3c507ab60e717275f97f267ec2608c96aaab42a7e32fc2d8129585dff9e74a: {e:?}" + )) + .to_string(), + ), + }], + ); -#[test] -fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_33() { - let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); - let initial_balances = vec![( - StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), - tx.get_tx_fee(), - )]; - let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( - ParseErrors::ExpressionStackDepthTooDeep, - ))); - let msg = format!("Invalid Stacks block de3c507ab60e717275f97f267ec2608c96aaab42a7e32fc2d8129585dff9e74a: {e:?}"); let test_vector = ConsensusTestVector { initial_balances, - // Marf hash doesn't matter. It will fail with ExpressionStackDepthTooDeep - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch33, - transactions: vec![tx], - expected_result: ExpectedResult::Failure( - ChainstateError::InvalidStacksBlock(msg).to_string(), - ), + epoch_blocks, }; - ConsensusTest::new(function_name!(), test_vector).run() + ConsensusTest::new(function_name!(), test_vector).run(); } diff --git a/stackslib/src/chainstate/tests/mod.rs b/stackslib/src/chainstate/tests/mod.rs index 9d5e14abb5..3d4037aeaf 100644 --- a/stackslib/src/chainstate/tests/mod.rs +++ b/stackslib/src/chainstate/tests/mod.rs @@ -569,6 +569,14 @@ impl<'a> TestChainstate<'a> { self.sortdb.as_ref().unwrap() } + pub fn stacks_node(&mut self) -> &mut TestStacksNode { + self.stacks_node.as_mut().unwrap() + } + + pub fn stacks_node_ref(&self) -> &TestStacksNode { + self.stacks_node.as_ref().unwrap() + } + /// Make a tenure with the given transactions. Creates a coinbase tx with the given nonce, and then increments /// the provided reference. 
pub fn tenure_with_txs( From f396663614f84146b12322f4a6d82e1308a4f6ef Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Wed, 1 Oct 2025 12:13:07 +0100 Subject: [PATCH 65/86] make FAUCET_PRIV_KEY pub --- stackslib/src/chainstate/tests/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 971ef308f2..eb05ac4dbe 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -42,7 +42,7 @@ pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; -const FAUCET_PRIV_KEY: LazyCell = LazyCell::new(|| { +pub const FAUCET_PRIV_KEY: LazyCell = LazyCell::new(|| { StacksPrivateKey::from_hex("510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101") .expect("Failed to parse private key") }); From 74e9576d2f9949a9a893be1b9fcedea72c2230c7 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Thu, 2 Oct 2025 10:33:03 +0100 Subject: [PATCH 66/86] remove unused code --- stackslib/src/chainstate/tests/consensus.rs | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 8cb7c5e98f..858eef2715 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -27,7 +27,6 @@ use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKe use clarity::types::{StacksEpoch, StacksEpochId}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; -use clarity::vm::ast::errors::{ParseError, ParseErrors}; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use 
clarity::vm::costs::ExecutionCost; use clarity::vm::types::PrincipalData; @@ -42,7 +41,6 @@ use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksEpochReceipt; use crate::chainstate::stacks::{Error as ChainstateError, StacksTransaction, TenureChangeCause}; use crate::chainstate::tests::TestChainstate; -use crate::clarity_vm::clarity::Error as ClarityError; use crate::core::test_util::{make_contract_publish, make_stacks_transfer_tx}; use crate::core::{EpochList, BLOCK_LIMIT_MAINNET_21}; use crate::net::tests::NakamotoBootPlan; @@ -574,7 +572,6 @@ fn test_append_stx_transfers_success() { #[test] fn test_append_chainstate_error_expression_stack_depth_too_deep() { - let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); @@ -591,13 +588,7 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { ); let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - let initial_balances = vec![( - StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), - tx_fee, - )]; - let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( - ParseErrors::ExpressionStackDepthTooDeep, - ))); + let mut epoch_blocks = HashMap::new(); epoch_blocks.insert( StacksEpochId::Epoch30, @@ -629,7 +620,7 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { ); let test_vector = ConsensusTestVector { epoch_blocks }; - let result = ConsensusTest::new(function_name!(), initial_balances).run(test_vector); + let result = ConsensusTest::new(function_name!(), vec![]).run(test_vector); insta::assert_ron_snapshot!(result); } From 7365051f405db39f0b25d37d6338243507b22ec5 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Thu, 2 Oct 2025 
10:33:40 +0100 Subject: [PATCH 67/86] fix test_append_state_index_root_mismatches --- stackslib/src/chainstate/tests/consensus.rs | 8 ++-- ...s__append_state_index_root_mismatches.snap | 44 ++----------------- 2 files changed, 8 insertions(+), 44 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 858eef2715..9375d426f8 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -474,28 +474,28 @@ fn test_append_state_index_root_mismatches() { epoch_blocks.insert( StacksEpochId::Epoch30, vec![TestBlock { - marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![], }], ); epoch_blocks.insert( StacksEpochId::Epoch31, vec![TestBlock { - marf_hash: "a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![], }], ); epoch_blocks.insert( StacksEpochId::Epoch32, vec![TestBlock { - marf_hash: "c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into(), + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![], }], ); epoch_blocks.insert( StacksEpochId::Epoch33, vec![TestBlock { - marf_hash: "23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into(), + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![], }], ); diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_state_index_root_mismatches.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_state_index_root_mismatches.snap index 017c5a91da..1d73839536 100644 --- 
a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_state_index_root_mismatches.snap +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_state_index_root_mismatches.snap @@ -3,44 +3,8 @@ source: stackslib/src/chainstate/tests/consensus.rs expression: result --- [ - Success(ExpectedBlockOutput( - transactions: [], - total_block_cost: ExecutionCost( - write_length: 0, - write_count: 0, - read_length: 0, - read_count: 0, - runtime: 0, - ), - )), - Success(ExpectedBlockOutput( - transactions: [], - total_block_cost: ExecutionCost( - write_length: 0, - write_count: 0, - read_length: 0, - read_count: 0, - runtime: 0, - ), - )), - Success(ExpectedBlockOutput( - transactions: [], - total_block_cost: ExecutionCost( - write_length: 0, - write_count: 0, - read_length: 0, - read_count: 0, - runtime: 0, - ), - )), - Success(ExpectedBlockOutput( - transactions: [], - total_block_cost: ExecutionCost( - write_length: 0, - write_count: 0, - read_length: 0, - read_count: 0, - runtime: 0, - ), - )), + Failure("Block ef45bfa44231d9e7aff094b53cfd48df0456067312f169a499354c4273a66fe3 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5"), + Failure("Block a14d0b5c8d3c49554aeb462a8fe019718195789fa1dcd642059b75e41f0ce9cc state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf"), + Failure("Block f8120b4a632ee1d49fbbde3e01289588389cd205cab459a4493a7d58d2dc18ed state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f"), + Failure("Block 4dcb48b684d105ff0e0ab8becddd4a2d5623cc8b168aacf9c455e20b3e610e63 state root mismatch: expected 
0000000000000000000000000000000000000000000000000000000000000000, got 23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78"), ] From ce05f1d3a096a0524017a05cf6a25bd2683ae06e Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Thu, 2 Oct 2025 12:27:50 +0100 Subject: [PATCH 68/86] revert to previous ConsensusTest signature. update tests --- stackslib/src/chainstate/tests/consensus.rs | 133 ++++++++++++------ ...nd_block_with_contract_upload_success.snap | 102 -------------- ...error_expression_stack_depth_too_deep.snap | 8 +- 3 files changed, 95 insertions(+), 148 deletions(-) delete mode 100644 stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_upload_success.snap diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 9375d426f8..9412dd76dd 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -210,6 +210,8 @@ pub struct TestBlock { /// Defines a test vector for a consensus test, including chainstate setup and expected outcomes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ConsensusTestVector { + /// Initial balances for the provided PrincipalData during chainstate instantiation. + pub initial_balances: Vec<(PrincipalData, u64)>, /// A mapping of epoch to Blocks that should be applied in that epoch pub epoch_blocks: HashMap>, } @@ -217,17 +219,40 @@ pub struct ConsensusTestVector { /// Represents a consensus test with chainstate and test vector. pub struct ConsensusTest<'a> { pub chain: TestChainstate<'a>, + pub test_vector: ConsensusTestVector, } impl ConsensusTest<'_> { /// Creates a new `ConsensusTest` with the given test name and vector. 
- pub fn new(test_name: &str, initial_balances: Vec<(PrincipalData, u64)>) -> Self { + pub fn new(test_name: &str, test_vector: ConsensusTestVector) -> Self { + // Validate blocks + for (epoch_id, blocks) in &test_vector.epoch_blocks { + assert!( + !matches!( + *epoch_id, + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + ), + "Pre-Nakamoto Tenures are not Supported" + ); + assert!( + !blocks.is_empty(), + "Each epoch must have at least one block" + ); + } + // Set up chainstate to start at Epoch 3.0 // We don't really ever want the reward cycle to force a new signer set... // so for now just set the cycle length to a high value (100) let mut boot_plan = NakamotoBootPlan::new(test_name) .with_pox_constants(100, 3) - .with_initial_balances(initial_balances.clone()) + .with_initial_balances(test_vector.initial_balances.clone()) .with_private_key(FAUCET_PRIV_KEY.clone()); let epochs = epoch_3_0_onwards( (boot_plan.pox_constants.pox_4_activation_height @@ -237,7 +262,7 @@ impl ConsensusTest<'_> { boot_plan = boot_plan.with_epochs(epochs); let chain = boot_plan.boot_nakamoto_chainstate(None); - Self { chain } + Self { chain, test_vector } } /// Advances the chainstate to the specified epoch. Creating a tenure change block per burn block height @@ -277,41 +302,20 @@ impl ConsensusTest<'_> { /// /// This method constructs a block from the test vector, appends it to the /// chain, and returns the result of the block processing. 
- pub fn run(mut self, test_vector: ConsensusTestVector) -> Vec { - // Validate blocks - for (epoch_id, blocks) in &test_vector.epoch_blocks { - assert!( - !matches!( - *epoch_id, - StacksEpochId::Epoch10 - | StacksEpochId::Epoch20 - | StacksEpochId::Epoch2_05 - | StacksEpochId::Epoch21 - | StacksEpochId::Epoch22 - | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 - | StacksEpochId::Epoch25 - ), - "Pre-Nakamoto Tenures are not Supported" - ); - assert!( - !blocks.is_empty(), - "Each epoch must have at least one block" - ); - } - + pub fn run(mut self) -> Vec { // Get sorted epochs - let mut epochs: Vec = test_vector.epoch_blocks.keys().cloned().collect(); + let mut epochs: Vec = + self.test_vector.epoch_blocks.keys().cloned().collect(); epochs.sort(); let mut results = vec![]; for epoch in epochs { debug!( "--------- Processing epoch {epoch:?} with {} blocks ---------", - test_vector.epoch_blocks[&epoch].len() + self.test_vector.epoch_blocks[&epoch].len() ); self.advance_to_epoch(epoch); - for (i, block) in test_vector.epoch_blocks[&epoch].iter().enumerate() { + for (i, block) in self.test_vector.epoch_blocks[&epoch].iter().enumerate() { debug!("--------- Running block {i} for epoch {epoch:?} ---------"); let (nakamoto_block, block_size) = self.construct_nakamoto_block(&block.marf_hash, &block.transactions); @@ -463,8 +467,11 @@ fn test_append_empty_blocks() { }], ); - let test_vector = ConsensusTestVector { epoch_blocks }; - let result = ConsensusTest::new(function_name!(), vec![]).run(test_vector); + let test_vector = ConsensusTestVector { + initial_balances: vec![], + epoch_blocks, + }; + let result = ConsensusTest::new(function_name!(), test_vector).run(); insta::assert_ron_snapshot!(result); } @@ -500,8 +507,11 @@ fn test_append_state_index_root_mismatches() { }], ); - let test_vector = ConsensusTestVector { epoch_blocks }; - let result = ConsensusTest::new(function_name!(), vec![]).run(test_vector); + let test_vector = ConsensusTestVector { + 
initial_balances: vec![], + epoch_blocks, + }; + let result = ConsensusTest::new(function_name!(), test_vector).run(); insta::assert_ron_snapshot!(result); } @@ -564,9 +574,12 @@ fn test_append_stx_transfers_success() { }], ); - let test_vector = ConsensusTestVector { epoch_blocks }; + let test_vector = ConsensusTestVector { + initial_balances, + epoch_blocks, + }; - let result = ConsensusTest::new(function_name!(), initial_balances).run(test_vector); + let result = ConsensusTest::new(function_name!(), test_vector).run(); insta::assert_ron_snapshot!(result); } @@ -619,8 +632,11 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { }], ); - let test_vector = ConsensusTestVector { epoch_blocks }; - let result = ConsensusTest::new(function_name!(), vec![]).run(test_vector); + let test_vector = ConsensusTestVector { + initial_balances: vec![], + epoch_blocks, + }; + let result = ConsensusTest::new(function_name!(), test_vector).run(); insta::assert_ron_snapshot!(result); } @@ -669,9 +685,42 @@ fn test_append_block_with_contract_upload_success() { transactions: vec![tx.clone()], }], ); - let test_vector = ConsensusTestVector { epoch_blocks }; - - let result = ConsensusTest::new(function_name!(), vec![]).run(test_vector); - - insta::assert_ron_snapshot!(result); + let test_vector = ConsensusTestVector { + initial_balances: vec![], + epoch_blocks, + }; + + let result = ConsensusTest::new(function_name!(), test_vector).run(); + // Example of expecting the same result across all blocks + insta::allow_duplicates! 
{ + for res in result { + // Example of inline snapshot + insta::assert_ron_snapshot!(res, @r" + Success(ExpectedBlockOutput( + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), + )) + "); + } + } } diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_upload_success.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_upload_success.snap deleted file mode 100644 index e328915727..0000000000 --- a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_upload_success.snap +++ /dev/null @@ -1,102 +0,0 @@ ---- -source: stackslib/src/chainstate/tests/consensus.rs -expression: result ---- -[ - Success(ExpectedBlockOutput( - transactions: [ - ExpectedTransactionOutput( - return_type: Response(ResponseData( - committed: true, - data: Bool(true), - )), - cost: ExecutionCost( - write_length: 13, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 8114, - ), - ), - ], - total_block_cost: ExecutionCost( - write_length: 13, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 8114, - ), - )), - Success(ExpectedBlockOutput( - transactions: [ - ExpectedTransactionOutput( - return_type: Response(ResponseData( - committed: true, - data: Bool(true), - )), - cost: ExecutionCost( - write_length: 13, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 8114, - ), - ), - ], - total_block_cost: ExecutionCost( - write_length: 13, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 8114, - ), - )), - 
Success(ExpectedBlockOutput( - transactions: [ - ExpectedTransactionOutput( - return_type: Response(ResponseData( - committed: true, - data: Bool(true), - )), - cost: ExecutionCost( - write_length: 13, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 8114, - ), - ), - ], - total_block_cost: ExecutionCost( - write_length: 13, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 8114, - ), - )), - Success(ExpectedBlockOutput( - transactions: [ - ExpectedTransactionOutput( - return_type: Response(ResponseData( - committed: true, - data: Bool(true), - )), - cost: ExecutionCost( - write_length: 13, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 8114, - ), - ), - ], - total_block_cost: ExecutionCost( - write_length: 13, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 8114, - ), - )), -] diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_chainstate_error_expression_stack_depth_too_deep.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_chainstate_error_expression_stack_depth_too_deep.snap index ebc055d698..6089465bfa 100644 --- a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_chainstate_error_expression_stack_depth_too_deep.snap +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_chainstate_error_expression_stack_depth_too_deep.snap @@ -3,8 +3,8 @@ source: stackslib/src/chainstate/tests/consensus.rs expression: result --- [ - Failure("Invalid Stacks block a441b00c9a2d5093524dc87161b644c15cde93948c5b28e54425326e8d59961d: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. 
The maximum stack depth is 64\", spans: [], suggestion: None } }))"), - Failure("Invalid Stacks block 62a2f769bc70d73f47bcbf8cb268fe03d9e361788fe77afa195768c90a91f488: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), - Failure("Invalid Stacks block a489609c924c0eaea59b91a0b34595d10f03ca5f29cbd3fcecfe7fc9a30e210f: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), - Failure("Invalid Stacks block 3b20a227a540976766393d40929a86f384601f27f652dc97328879d6708ee400: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), + Failure("Invalid Stacks block a60c62267d58f1ea29c64b2f86d62cf210ff5ab14796abfa947ca6d95007d440: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), + Failure("Invalid Stacks block 238f2ce280580228f19c8122a9bdd0c61299efabe59d8c22c315ee40a865cc7b: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. 
The maximum stack depth is 64\", spans: [], suggestion: None } }))"), + Failure("Invalid Stacks block b5dd8cdc0f48b30d355a950077f7c9b20bf01062e9c96262c28f17fff55a2b0f: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), + Failure("Invalid Stacks block cfbddc874c465753158a065eff61340e933d33671633843dde0fbd2bfaaac7a4: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), ] From 210335bf7fe0e0bad8c48c5bb671a0f2b94ca90e Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 2 Oct 2025 06:29:19 -0700 Subject: [PATCH 69/86] fix: update test description --- stacks-node/src/tests/nakamoto_integrations.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-node/src/tests/nakamoto_integrations.rs b/stacks-node/src/tests/nakamoto_integrations.rs index f85149fe5a..3c12e095a1 100644 --- a/stacks-node/src/tests/nakamoto_integrations.rs +++ b/stacks-node/src/tests/nakamoto_integrations.rs @@ -5300,8 +5300,8 @@ fn burn_ops_integration_test() { /// Miner B starts its tenure, Miner B produces a Stacks block b_0, but miner C submits its block commit before b_0 is broadcasted. /// Bitcoin block C, containing Miner C's block commit, is mined BEFORE miner C has a chance to update their block commit with b_0's information. /// This test asserts: -/// * tenure C ignores b_0, and correctly builds off of block a_x. -fn forked_tenure_is_ignored() { +/// * tenure C correctly extends b_0, building off of block B despite the commit being submitted before b_0 was broadcasted. 
+fn bad_commit_does_not_trigger_fork() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } From 5b2dc39ba226e3a51fe0b25dd171401c35ed32b6 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Thu, 2 Oct 2025 15:19:52 +0100 Subject: [PATCH 70/86] add contract_call example --- stackslib/src/chainstate/tests/consensus.rs | 81 ++++++++++++++++++++- 1 file changed, 80 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 9412dd76dd..585b3f6a07 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -41,7 +41,9 @@ use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksEpochReceipt; use crate::chainstate::stacks::{Error as ChainstateError, StacksTransaction, TenureChangeCause}; use crate::chainstate::tests::TestChainstate; -use crate::core::test_util::{make_contract_publish, make_stacks_transfer_tx}; +use crate::core::test_util::{ + make_contract_call, make_contract_publish, make_stacks_transfer_tx, to_addr, +}; use crate::core::{EpochList, BLOCK_LIMIT_MAINNET_21}; use crate::net::tests::NakamotoBootPlan; @@ -55,6 +57,9 @@ pub const FAUCET_PRIV_KEY: LazyCell = LazyCell::new(|| { .expect("Failed to parse private key") }); +const FOO_CONTRACT: &str = "(define-public (foo) (ok 1)) + (define-public (bar (x uint)) (ok x))"; + fn epoch_3_0_onwards(first_burnchain_height: u64) -> EpochList { info!("StacksEpoch unit_test first_burn_height = {first_burnchain_height}"); @@ -724,3 +729,77 @@ fn test_append_block_with_contract_upload_success() { } } } + +#[test] +fn test_append_block_with_contract_call_success() { + let tx_fee = (FOO_CONTRACT.len() * 100) as u64; + + let tx_bytes = make_contract_publish( + &FAUCET_PRIV_KEY, + 0, + tx_fee, + CHAIN_ID_TESTNET, + "foo_contract", + FOO_CONTRACT, + ); + let tx_contract_deploy = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + + 
let tx_bytes = make_contract_call( + &FAUCET_PRIV_KEY, + 1, + 200, + CHAIN_ID_TESTNET, + &to_addr(&FAUCET_PRIV_KEY), + "foo_contract", + "bar", + &[ClarityValue::UInt(1)], + ); + let tx_contract_call = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + + let mut epoch_blocks = HashMap::new(); + epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "186c8e49bcfc59bb67ed22f031f009a44681f296392e0f92bed520918ba463ae".into(), + transactions: vec![tx_contract_deploy.clone(), tx_contract_call.clone()], + }], + ); + + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "ad23713f072473cad6a32125ed5fa822bb62bbfae8ed2302209c12d2f1958128".into(), + transactions: vec![tx_contract_deploy.clone(), tx_contract_call.clone()], + }], + ); + + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "021bd30b09b5ac6ff34abd11f05244a966af937b584b1752f272cd717bb25f1d".into(), + transactions: vec![tx_contract_deploy.clone(), tx_contract_call.clone()], + }], + ); + + epoch_blocks.insert( + StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: "416e728daeec4de695c89d15eede8ddb7b85fb4af82daffb1e0d8166a3e93451".into(), + transactions: vec![tx_contract_deploy, tx_contract_call], + }], + ); + + let test_vector = ConsensusTestVector { + initial_balances: vec![], + epoch_blocks, + }; + + let result = ConsensusTest::new(function_name!(), test_vector).run(); + insta::allow_duplicates! 
{ + for res in result { + insta::assert_ron_snapshot!(res); + } + } +} From 1db629c14093d7d1c74227cadb0e6a8bf1afcda9 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 2 Oct 2025 16:27:22 -0700 Subject: [PATCH 71/86] fix: exit 0 from stacks-signer if no args --- stacks-signer/src/main.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 483c121950..c4931c3ab5 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -30,7 +30,7 @@ use std::io::{self, Write}; use std::time::Duration; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; -use clap::Parser; +use clap::{CommandFactory, Parser}; use clarity::types::chainstate::StacksPublicKey; use clarity::util::sleep_ms; use libsigner::{SignerSession, VERSION_STRING}; @@ -215,6 +215,15 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } fn main() { + // If no args were passed, exit 0. + // This differs from the default behavior, which exits with code 2. 
+ if std::env::args_os().len() == 1 { + let mut cmd = Cli::command(); + cmd.print_help().ok(); + println!(); + std::process::exit(0); + } + let cli = Cli::parse(); tracing_subscriber::registry() From 01ffedd4bf5f2380156312cd40a2a879a2933dfa Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 3 Oct 2025 12:11:24 +0200 Subject: [PATCH 72/86] feat: add marf input computation for test-harness, #6523 --- stackslib/src/chainstate/tests/consensus.rs | 107 +++++++++++++++++--- 1 file changed, 92 insertions(+), 15 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 2c5731e898..0193b87d3d 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -39,8 +39,11 @@ use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::boot::{RewardSet, RewardSetData}; -use crate::chainstate::stacks::db::StacksEpochReceipt; -use crate::chainstate::stacks::{Error as ChainstateError, StacksTransaction, TenureChangeCause}; +use crate::chainstate::stacks::db::{StacksChainState, StacksEpochReceipt}; +use crate::chainstate::stacks::{ + Error as ChainstateError, StacksTransaction, TenureChangeCause, MINER_BLOCK_CONSENSUS_HASH, + MINER_BLOCK_HEADER_HASH, +}; use crate::chainstate::tests::TestChainstate; use crate::clarity_vm::clarity::{Error as ClarityError, PreCommitClarityBlock}; use crate::core::test_util::{make_contract_publish, make_stacks_transfer_tx}; @@ -170,6 +173,16 @@ pub enum ExpectedResult { Failure(String), } +impl ExpectedResult { + pub fn is_success(&self) -> bool { + matches!(&self, Self::Success(_)) + } + + pub fn is_failure(&self) -> bool { + matches!(&self, Self::Failure(_)) + } +} + /// Represents a block to be appended in a test and its expected result. 
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct TestBlock { @@ -181,6 +194,16 @@ pub struct TestBlock { pub expected_result: ExpectedResult, } +impl TestBlock { + pub fn is_success(&self) -> bool { + self.expected_result.is_success() + } + + pub fn is_failure(&self) -> bool { + self.expected_result.is_failure() + } +} + /// Defines a test vector for a consensus test, including chainstate setup and expected outcomes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ConsensusTestVector { @@ -459,10 +482,10 @@ impl ConsensusTest<'_> { self.test_vector.epoch_blocks[&epoch].len() ); self.advance_to_epoch(epoch); - for (i, block) in self.test_vector.epoch_blocks[&epoch].iter().enumerate() { + let epoch_blocks = self.test_vector.epoch_blocks[&epoch].clone(); + for (i, block) in epoch_blocks.iter().enumerate() { debug!("--------- Running block {i} for epoch {epoch:?} ---------"); - let (nakamoto_block, block_size) = - self.construct_nakamoto_block(&block.marf_hash, &block.transactions); + let (nakamoto_block, block_size) = self.construct_nakamoto_block(&block); let sortdb = self.chain.sortdb.take().unwrap(); let chain_tip = NakamotoChainState::get_canonical_block_header( self.chain.stacks_node().chainstate.db(), @@ -524,13 +547,8 @@ impl ConsensusTest<'_> { } } - /// Constructs a Nakamoto block with the given transactions and state index root. - fn construct_nakamoto_block( - &self, - marf_hash: &str, - transactions: &[StacksTransaction], - ) -> (NakamotoBlock, usize) { - let state_index_root = TrieHash::from_hex(marf_hash).unwrap(); + /// Constructs a Nakamoto block with the given [`TestBlock`] configuration. 
+ fn construct_nakamoto_block(&mut self, test_block: &TestBlock) -> (NakamotoBlock, usize) { let chain_tip = NakamotoChainState::get_canonical_block_header( self.chain.stacks_node.as_ref().unwrap().chainstate.db(), self.chain.sortdb.as_ref().unwrap(), @@ -553,13 +571,13 @@ impl ConsensusTest<'_> { consensus_hash: chain_tip.consensus_hash.clone(), parent_block_id: chain_tip.index_block_hash(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[]), - state_index_root, + state_index_root: TrieHash::from_empty_data(), timestamp: 1, miner_signature: MessageSignature::empty(), signer_signature: vec![], pox_treatment: BitVec::ones(1).unwrap(), }, - txs: transactions.to_vec(), + txs: test_block.transactions.to_vec(), }; let tx_merkle_root = { @@ -570,14 +588,73 @@ impl ConsensusTest<'_> { .collect(); MerkleTree::::new(&txid_vecs).root() }; - block.header.tx_merkle_root = tx_merkle_root; + + block.header.state_index_root = if test_block.is_success() { + self.compute_block_marf_index(block.header.timestamp, &block.txs) + } else { + //64 hex zeroes + TrieHash::from_bytes(&[0; 32]).unwrap() + }; + self.chain.miner.sign_nakamoto_block(&mut block); let mut signers = self.chain.config.test_signers.clone().unwrap_or_default(); signers.sign_nakamoto_block(&mut block, cycle); let block_len = block.serialize_to_vec().len(); (block, block_len) } + + fn compute_block_marf_index( + &mut self, + block_time: u64, + block_txs: &Vec, + ) -> TrieHash { + let node = self.chain.stacks_node.as_mut().unwrap(); + let sortdb = self.chain.sortdb.as_ref().unwrap(); + let burndb_conn = sortdb.index_handle_at_tip(); + let chainstate = &mut node.chainstate; + + let chain_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + let (chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); + let burndb_conn = sortdb.index_handle_at_tip(); + + let mut clarity_tx = StacksChainState::chainstate_block_begin( + &chainstate_tx, + 
clarity_instance, + &burndb_conn, + &chain_tip.consensus_hash, + &chain_tip.anchored_header.block_hash(), + &MINER_BLOCK_CONSENSUS_HASH, + &MINER_BLOCK_HEADER_HASH, + ); + + clarity_tx + .connection() + .as_free_transaction(|clarity_tx_conn| { + clarity_tx_conn.with_clarity_db(|db| { + db.setup_block_metadata(Some(block_time))?; + Ok(()) + }) + }) + .unwrap(); + + StacksChainState::process_block_transactions(&mut clarity_tx, block_txs, 0).unwrap(); + + NakamotoChainState::finish_block( + &mut clarity_tx, + None, + false, + chain_tip.burn_header_height, + ) + .unwrap(); + + let trie_hash = clarity_tx.seal(); + clarity_tx.rollback_block(); + return trie_hash; + } } #[test] From da5ab83727b569f8ed559a391e19d83f4e9fe5fa Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 3 Oct 2025 12:41:40 +0200 Subject: [PATCH 73/86] chore: add documentation for marf input computation, #6523 --- stackslib/src/chainstate/tests/consensus.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 0193b87d3d..b06d875f08 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -174,10 +174,11 @@ pub enum ExpectedResult { } impl ExpectedResult { + /// Returns `true` if this result represents a successful outcome. pub fn is_success(&self) -> bool { matches!(&self, Self::Success(_)) } - + /// Returns `true` if this result represents a failed outcome. pub fn is_failure(&self) -> bool { matches!(&self, Self::Failure(_)) } @@ -195,10 +196,12 @@ pub struct TestBlock { } impl TestBlock { + /// Returns `true` if the [`ExpectedResult`] variant represents a successful outcome. pub fn is_success(&self) -> bool { self.expected_result.is_success() } + /// Returns `true` if the [`ExpectedResult`] variant represents a failed outcome. 
pub fn is_failure(&self) -> bool { self.expected_result.is_failure() } @@ -590,10 +593,11 @@ impl ConsensusTest<'_> { }; block.header.tx_merkle_root = tx_merkle_root; + // Set the MARF root hash: compute it for success cases, + // or use an all-zero hash for failure cases. block.header.state_index_root = if test_block.is_success() { - self.compute_block_marf_index(block.header.timestamp, &block.txs) + self.compute_block_marf_root_hash(block.header.timestamp, &block.txs) } else { - //64 hex zeroes TrieHash::from_bytes(&[0; 32]).unwrap() }; @@ -604,7 +608,15 @@ impl ConsensusTest<'_> { (block, block_len) } - fn compute_block_marf_index( + /// Computes the MARF root hash for a block. + /// + /// This function is intended for use in success test cases only, where all + /// transactions are valid. In other scenarios, the computation may fail. + /// + /// The implementation is deliberately minimal: it does not cover every + /// possible situation (such as new tenure handling), but it should be + /// sufficient for the scope of our test cases. 
+ fn compute_block_marf_root_hash( &mut self, block_time: u64, block_txs: &Vec, From b3bdc1de692a6f4a83db53a4a347a6f35db7e688 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 3 Oct 2025 12:45:00 +0200 Subject: [PATCH 74/86] chore: improve marf computation failure message, #6523 --- stackslib/src/chainstate/tests/consensus.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index b06d875f08..c227073a20 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -651,9 +651,10 @@ impl ConsensusTest<'_> { Ok(()) }) }) - .unwrap(); + .expect("MARF: Failure on block metadata setup!"); - StacksChainState::process_block_transactions(&mut clarity_tx, block_txs, 0).unwrap(); + StacksChainState::process_block_transactions(&mut clarity_tx, block_txs, 0) + .expect("MARF: Failure on processing block transactions!"); NakamotoChainState::finish_block( &mut clarity_tx, @@ -661,7 +662,7 @@ impl ConsensusTest<'_> { false, chain_tip.burn_header_height, ) - .unwrap(); + .expect("MARF: Failure on finishing block!"); let trie_hash = clarity_tx.seal(); clarity_tx.rollback_block(); From 32f353338014121d409e4a6bd04ca0ab50328862 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 3 Oct 2025 13:17:03 +0200 Subject: [PATCH 75/86] chore: remove unused marf_hash field, #6523 --- stackslib/src/chainstate/tests/consensus.rs | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index c227073a20..2d1e1be4e7 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -187,8 +187,6 @@ impl ExpectedResult { /// Represents a block to be appended in a test and its expected result. 
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct TestBlock { - /// Hex representation of the MARF hash for block construction. - pub marf_hash: String, /// Transactions to include in the block pub transactions: Vec, /// The expected result after appending the constructed block. @@ -680,7 +678,6 @@ fn test_append_empty_blocks() { epoch_blocks.insert( StacksEpochId::Epoch30, vec![TestBlock { - marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), transactions: vec![], expected_result: expected_result.clone(), }], @@ -688,7 +685,6 @@ fn test_append_empty_blocks() { epoch_blocks.insert( StacksEpochId::Epoch31, vec![TestBlock { - marf_hash: "a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), transactions: vec![], expected_result: expected_result.clone(), }], @@ -696,7 +692,6 @@ fn test_append_empty_blocks() { epoch_blocks.insert( StacksEpochId::Epoch32, vec![TestBlock { - marf_hash: "c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into(), transactions: vec![], expected_result: expected_result.clone(), }], @@ -704,7 +699,6 @@ fn test_append_empty_blocks() { epoch_blocks.insert( StacksEpochId::Epoch33, vec![TestBlock { - marf_hash: "23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into(), transactions: vec![], expected_result: expected_result.clone(), }], @@ -723,7 +717,6 @@ fn test_append_state_index_root_mismatches() { epoch_blocks.insert( StacksEpochId::Epoch30, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![], expected_result: ExpectedResult::Failure( ChainstateError::InvalidStacksBlock( @@ -736,7 +729,6 @@ fn test_append_state_index_root_mismatches() { epoch_blocks.insert( StacksEpochId::Epoch31, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![], expected_result: ExpectedResult::Failure( 
ChainstateError::InvalidStacksBlock( @@ -749,7 +741,6 @@ fn test_append_state_index_root_mismatches() { epoch_blocks.insert( StacksEpochId::Epoch32, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![], expected_result: ExpectedResult::Failure( ChainstateError::InvalidStacksBlock( @@ -762,7 +753,6 @@ fn test_append_state_index_root_mismatches() { epoch_blocks.insert( StacksEpochId::Epoch33, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![], expected_result: ExpectedResult::Failure( ChainstateError::InvalidStacksBlock( @@ -833,7 +823,6 @@ fn test_append_stx_transfers_success() { epoch_blocks.insert( StacksEpochId::Epoch30, vec![TestBlock { - marf_hash: "63ea49669d2216ebc7e4f8b5e1cd2c99b8aff9806794adf87dcf709c0a244798".into(), transactions: transactions.clone(), expected_result: ExpectedResult::Success(outputs.clone()), }], @@ -841,7 +830,6 @@ fn test_append_stx_transfers_success() { epoch_blocks.insert( StacksEpochId::Epoch31, vec![TestBlock { - marf_hash: "7fc538e605a4a353871c4a655ae850fe9a70c3875b65f2bb42ea3bef5effed2c".into(), transactions: transactions.clone(), expected_result: ExpectedResult::Success(outputs.clone()), }], @@ -849,7 +837,6 @@ fn test_append_stx_transfers_success() { epoch_blocks.insert( StacksEpochId::Epoch32, vec![TestBlock { - marf_hash: "4d5c9a6d07806ac5006137de22b083de66fff7119143dd5cd92e4a457d66e028".into(), transactions: transactions.clone(), expected_result: ExpectedResult::Success(outputs.clone()), }], @@ -857,7 +844,6 @@ fn test_append_stx_transfers_success() { epoch_blocks.insert( StacksEpochId::Epoch33, vec![TestBlock { - marf_hash: "66eed8c0ab31db111a5adcc83d38a7004c6e464e3b9fb9f52ec589bc6d5f2d32".into(), transactions: transactions.clone(), expected_result: ExpectedResult::Success(outputs.clone()), }], @@ -900,7 +886,6 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() 
{ epoch_blocks.insert( StacksEpochId::Epoch30, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![tx.clone()], expected_result: ExpectedResult::Failure( ChainstateError::InvalidStacksBlock(format!( @@ -913,7 +898,6 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { epoch_blocks.insert( StacksEpochId::Epoch31, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![tx.clone()], expected_result: ExpectedResult::Failure( ChainstateError::InvalidStacksBlock(format!( @@ -926,7 +910,6 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { epoch_blocks.insert( StacksEpochId::Epoch32, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![tx.clone()], expected_result: ExpectedResult::Failure( ChainstateError::InvalidStacksBlock(format!( @@ -939,7 +922,6 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { epoch_blocks.insert( StacksEpochId::Epoch33, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![tx.clone()], expected_result: ExpectedResult::Failure( ChainstateError::InvalidStacksBlock(format!( From f34fde55ea356faa5d81398d3056ff8e992ad421 Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Fri, 3 Oct 2025 17:03:36 +0100 Subject: [PATCH 76/86] add missing snapshot --- ...pend_block_with_contract_call_success.snap | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_call_success.snap diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_call_success.snap 
b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_call_success.snap new file mode 100644 index 0000000000..f50380ee57 --- /dev/null +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_call_success.snap @@ -0,0 +1,41 @@ +--- +source: stackslib/src/chainstate/tests/consensus.rs +expression: res +--- +Success(ExpectedBlockOutput( + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 104, + read_count: 4, + runtime: 12467, + ), +)) From 45fdd1d2e4e6d2d705f43827b4a89e7de52e8b35 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 6 Oct 2025 16:03:30 -0700 Subject: [PATCH 77/86] fix: update test descriptions --- stacks-node/src/tests/signer/v0.rs | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/stacks-node/src/tests/signer/v0.rs b/stacks-node/src/tests/signer/v0.rs index 6e35e14822..e329b5b3cf 100644 --- a/stacks-node/src/tests/signer/v0.rs +++ b/stacks-node/src/tests/signer/v0.rs @@ -11415,6 +11415,11 @@ fn tenure_extend_after_failed_miner() { #[test] #[ignore] /// Test that a miner will extend its tenure after the succeding miner commits to the wrong block. 
+/// +/// This test is quite similar to `tenure_extend_after_stale_commit_different_miner`, +/// with the difference that signers will reject a reorg attempt due to the reorg attempt +/// being more than `first_proposal_burn_block_timing` seconds. +/// /// - Miner 1 wins a tenure and mines normally /// - Miner 1 wins another tenure and mines normally, but miner 2 does not see any blocks from this tenure /// - Miner 2 wins a tenure and is unable to mine a block @@ -18861,10 +18866,14 @@ fn signers_treat_signatures_as_precommits() { /// We're verifying that, in this scenario, the tenure is extended, /// instead of a new one being created (and forking the tip). /// -/// - Miner A wins tenure A -/// - Miner B wins tenure B, with 2 blocks -/// - Miner A wins tenure C, but with a block commit to tip A -/// - We verify that Miner B extends Tenure B +/// This test is quite similar to `tenure_extend_after_bad_commit`, but +/// with the difference that there are 2 blocks mined in tenure B, +/// which means signers will always reject a reorg attempt (regardless of timing).
+/// +/// - Miner 1 wins tenure A +/// - Miner 2 wins tenure B, with 2 blocks +/// - Miner 1 wins tenure C, but with a block commit to tip A +/// - We verify that Miner 1 extends Tenure B fn tenure_extend_after_stale_commit_different_miner() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; From 3612582d5616020a1ea056e383518e50b6ca164d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 7 Oct 2025 10:11:19 -0400 Subject: [PATCH 78/86] Make TestPeer use TestChainstate internally Signed-off-by: Jacinta Ferrant --- CHANGELOG.md | 2 +- stacks-signer/CHANGELOG.md | 2 +- .../chainstate/nakamoto/coordinator/tests.rs | 322 ++++---- stackslib/src/chainstate/nakamoto/mod.rs | 8 +- .../src/chainstate/nakamoto/tests/mod.rs | 10 +- .../src/chainstate/nakamoto/tests/node.rs | 277 +------ stackslib/src/chainstate/stacks/boot/mod.rs | 122 +-- .../src/chainstate/stacks/boot/pox_2_tests.rs | 147 ++-- .../src/chainstate/stacks/boot/pox_3_tests.rs | 187 +++-- .../src/chainstate/stacks/boot/pox_4_tests.rs | 577 ++++++++++---- .../chainstate/stacks/boot/signers_tests.rs | 8 +- stackslib/src/chainstate/stacks/db/blocks.rs | 183 +++-- .../src/chainstate/stacks/db/unconfirmed.rs | 101 +-- .../src/chainstate/stacks/tests/accounting.rs | 104 +-- .../stacks/tests/block_construction.rs | 354 +++++---- stackslib/src/chainstate/tests/mod.rs | 525 ++++++++++++- stackslib/src/clarity_vm/tests/ephemeral.rs | 8 +- stackslib/src/net/api/tests/blockreplay.rs | 2 +- stackslib/src/net/api/tests/getblock_v3.rs | 4 +- stackslib/src/net/api/tests/gettenure.rs | 4 +- stackslib/src/net/api/tests/gettransaction.rs | 2 +- stackslib/src/net/api/tests/mod.rs | 118 +-- .../src/net/api/tests/postblock_proposal.rs | 42 +- stackslib/src/net/mod.rs | 718 +++++++----------- stackslib/src/net/server.rs | 4 +- stackslib/src/net/tests/convergence.rs | 2 +- stackslib/src/net/tests/download/epoch2x.rs | 108 ++- stackslib/src/net/tests/download/nakamoto.rs | 128 ++-- stackslib/src/net/tests/inv/epoch2x.rs | 
122 +-- stackslib/src/net/tests/inv/nakamoto.rs | 75 +- stackslib/src/net/tests/mempool/mod.rs | 88 ++- stackslib/src/net/tests/mod.rs | 659 ++++------------ stackslib/src/net/tests/neighbors.rs | 10 +- stackslib/src/net/tests/relay/epoch2x.rs | 368 +++++---- stackslib/src/net/tests/relay/nakamoto.rs | 127 ++-- versions.toml | 4 +- 36 files changed, 2888 insertions(+), 2634 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b7e14fb57c..72de8c0556 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). -## Unreleased +## [3.2.0.0.2] ### Added diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index c332710cc2..fc7ce56508 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -6,7 +6,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
-## Unreleased +## [3.2.0.0.2.0] ### Added diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 5d5108441f..6eb9ad2b60 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -60,6 +60,7 @@ use crate::chainstate::stacks::{ TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionSmartContract, TransactionVersion, }; +use crate::chainstate::tests::TestChainstateConfig; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::StacksEpochExtension; use crate::net::relay::{BlockAcceptResponse, Relayer}; @@ -176,8 +177,9 @@ pub fn boot_nakamoto<'a>( peer_config .stacker_dbs .push(boot_code_id(MINERS_NAME, false)); - peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(37)); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config.chain_config.epochs = Some(StacksEpoch::unit_test_3_0_only(37)); + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; // Create some balances for test Stackers let mut stacker_balances = test_stackers @@ -201,15 +203,40 @@ pub fn boot_nakamoto<'a>( }) .collect(); - peer_config.initial_balances.append(&mut stacker_balances); - peer_config.initial_balances.append(&mut signer_balances); - peer_config.initial_balances.append(&mut initial_balances); - peer_config.burnchain.pox_constants.v2_unlock_height = 21; - peer_config.burnchain.pox_constants.pox_3_activation_height = 26; - peer_config.burnchain.pox_constants.v3_unlock_height = 27; - peer_config.burnchain.pox_constants.pox_4_activation_height = 31; - peer_config.test_stackers = Some(test_stackers.to_vec()); - peer_config.test_signers = Some(test_signers.clone()); + peer_config + .chain_config + .initial_balances + .append(&mut stacker_balances); + peer_config + .chain_config + 
.initial_balances + .append(&mut signer_balances); + peer_config + .chain_config + .initial_balances + .append(&mut initial_balances); + peer_config + .chain_config + .burnchain + .pox_constants + .v2_unlock_height = 21; + peer_config + .chain_config + .burnchain + .pox_constants + .pox_3_activation_height = 26; + peer_config + .chain_config + .burnchain + .pox_constants + .v3_unlock_height = 27; + peer_config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height = 31; + peer_config.chain_config.test_stackers = Some(test_stackers.to_vec()); + peer_config.chain_config.test_signers = Some(test_signers.clone()); let mut peer = TestPeer::new_with_observer(peer_config, observer); advance_to_nakamoto(&mut peer, test_signers, test_stackers); @@ -220,13 +247,18 @@ pub fn boot_nakamoto<'a>( /// Make a replay peer, used for replaying the blockchain pub fn make_replay_peer<'a>(peer: &mut TestPeer<'a>) -> TestPeer<'a> { let mut replay_config = peer.config.clone(); - replay_config.test_name = format!("{}.replay", &peer.config.test_name); + replay_config.chain_config.test_name = + format!("{}.replay", &peer.config.chain_config.test_name); replay_config.server_port = 0; replay_config.http_port = 0; - replay_config.test_stackers = peer.config.test_stackers.clone(); - - let test_stackers = replay_config.test_stackers.clone().unwrap_or_default(); - let mut test_signers = replay_config.test_signers.clone().unwrap(); + replay_config.chain_config.test_stackers = peer.config.chain_config.test_stackers.clone(); + + let test_stackers = replay_config + .chain_config + .test_stackers + .clone() + .unwrap_or_default(); + let mut test_signers = replay_config.chain_config.test_signers.clone().unwrap(); let mut replay_peer = TestPeer::new(replay_config); let observer = TestEventObserver::new(); advance_to_nakamoto( @@ -237,12 +269,12 @@ pub fn make_replay_peer<'a>(peer: &mut TestPeer<'a>) -> TestPeer<'a> { // sanity check let replay_tip = { - let sort_db = 
replay_peer.sortdb.as_ref().unwrap(); + let sort_db = replay_peer.sortdb_ref(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); tip }; let tip = { - let sort_db = peer.sortdb.as_ref().unwrap(); + let sort_db = peer.sortdb_ref(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let sort_ic = sort_db.index_conn(); let ancestor_tip = SortitionDB::get_ancestor_snapshot( @@ -330,7 +362,12 @@ fn replay_reward_cycle( stacks_blocks: &[NakamotoBlock], ) { eprintln!("\n\n=============================================\nBegin replay\n==============================================\n"); - let reward_cycle_length = peer.config.burnchain.pox_constants.reward_cycle_length as usize; + let reward_cycle_length = peer + .config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length as usize; let reward_cycle_indices: Vec = (0..stacks_blocks.len()) .step_by(reward_cycle_length) .collect(); @@ -339,8 +376,8 @@ fn replay_reward_cycle( let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); } - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); let mut sort_handle = sortdb.index_handle(&sort_tip); @@ -353,7 +390,7 @@ fn replay_reward_cycle( info!("Process Nakamoto block {} ({:?}", &block_id, &block.header); let accepted = Relayer::process_new_nakamoto_block( - &peer.config.burnchain, + &peer.config.chain_config.burnchain, &sortdb, &mut sort_handle, &mut node.chainstate, @@ -367,7 +404,7 @@ fn replay_reward_cycle( )); if accepted.is_accepted() { test_debug!("Accepted Nakamoto block {block_id}"); - peer.coord.handle_new_nakamoto_stacks_block().unwrap(); + peer.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); } else { test_debug!("Did NOT accept Nakamoto block 
{block_id}"); blocks_to_process.push(block); @@ -375,8 +412,8 @@ fn replay_reward_cycle( } } - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); peer.check_nakamoto_migration(); } @@ -400,8 +437,8 @@ fn test_simple_nakamoto_coordinator_bootup() { tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let blocks_and_sizes = peer.make_nakamoto_tenure( tenure_change_tx, @@ -414,8 +451,8 @@ fn test_simple_nakamoto_coordinator_bootup() { .map(|(block, _, _)| block) .collect(); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap(); @@ -463,8 +500,8 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); // do a stx transfer in each block to a given recipient let recipient_addr = @@ -502,8 +539,8 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { .collect(); let 
tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -527,8 +564,8 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { replay_reward_cycle(&mut replay_peer, &[burn_ops], &blocks); let tip = { - let chainstate = &mut replay_peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = replay_peer.sortdb.as_mut().unwrap(); + let chainstate = &mut replay_peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = replay_peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -563,8 +600,8 @@ impl TestPeer<'_> { G: FnMut(&mut NakamotoBlock) -> bool, { let nakamoto_tip = { - let chainstate = &mut self.stacks_node.as_mut().unwrap().chainstate; - let sort_db = self.sortdb.as_mut().unwrap(); + let chainstate = &mut self.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = self.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -593,7 +630,7 @@ impl TestPeer<'_> { G: FnMut(&mut NakamotoBlock) -> bool, { let sender_addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender_key)); - let mut test_signers = self.config.test_signers.clone().unwrap(); + let mut test_signers = self.config.chain_config.test_signers.clone().unwrap(); let recipient_addr = StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); @@ -643,7 +680,7 @@ impl TestPeer<'_> { let (burn_height, _, consensus_hash) = self.next_burnchain_block(burn_ops); let pox_constants = self.sortdb().pox_constants.clone(); let first_burn_height = self.sortdb().first_block_height; - let mut test_signers = 
self.config.test_signers.clone().unwrap(); + let mut test_signers = self.config.chain_config.test_signers.clone().unwrap(); info!( "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", @@ -656,12 +693,12 @@ impl TestPeer<'_> { tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let nakamoto_tip = - if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + if let Some(nakamoto_parent_tenure) = self.chain.nakamoto_parent_tenure_opt.as_ref() { nakamoto_parent_tenure.last().as_ref().unwrap().block_id() } else { let tip = { - let chainstate = &mut self.stacks_node.as_mut().unwrap().chainstate; - let sort_db = self.sortdb.as_mut().unwrap(); + let chainstate = &mut self.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = self.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -669,16 +706,19 @@ impl TestPeer<'_> { tip.index_block_hash() }; - let miner_addr = self.miner.origin_address().unwrap(); + let miner_addr = self.chain.miner.origin_address().unwrap(); let miner_acct = self.get_account(&nakamoto_tip, &miner_addr.to_account_principal()); let tenure_change_tx = self + .chain .miner .make_nakamoto_tenure_change_with_nonce(tenure_change, miner_acct.nonce); - let coinbase_tx = - self.miner - .make_nakamoto_coinbase_with_nonce(None, vrf_proof, miner_acct.nonce + 1); + let coinbase_tx = self.chain.miner.make_nakamoto_coinbase_with_nonce( + None, + vrf_proof, + miner_acct.nonce + 1, + ); self.make_nakamoto_tenure_and( tenure_change_tx, @@ -746,12 +786,12 @@ impl TestPeer<'_> { tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let nakamoto_tip = - if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + if let Some(nakamoto_parent_tenure) = self.chain.nakamoto_parent_tenure_opt.as_ref() { nakamoto_parent_tenure.last().as_ref().unwrap().block_id() } else { let tip = { - let 
chainstate = &mut self.stacks_node.as_mut().unwrap().chainstate; - let sort_db = self.sortdb.as_mut().unwrap(); + let chainstate = &mut self.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = self.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -759,16 +799,19 @@ impl TestPeer<'_> { tip.index_block_hash() }; - let miner_addr = self.miner.origin_address().unwrap(); + let miner_addr = self.chain.miner.origin_address().unwrap(); let miner_acct = self.get_account(&nakamoto_tip, &miner_addr.to_account_principal()); let tenure_change_tx = self + .chain .miner .make_nakamoto_tenure_change_with_nonce(tenure_change, miner_acct.nonce); - let coinbase_tx = - self.miner - .make_nakamoto_coinbase_with_nonce(None, vrf_proof, miner_acct.nonce + 1); + let coinbase_tx = self.chain.miner.make_nakamoto_coinbase_with_nonce( + None, + vrf_proof, + miner_acct.nonce + 1, + ); let block = self.mine_single_block_tenure_at_tip( &nakamoto_tip, @@ -813,7 +856,7 @@ fn block_descendant() { }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -902,7 +945,7 @@ fn block_info_tests(use_primary_testnet: bool) { }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -1337,7 +1380,7 @@ fn pox_treatment() { }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); - let mut pox_constants = 
TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -1550,8 +1593,8 @@ fn pox_treatment() { blocks.push(block); let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -1590,7 +1633,7 @@ fn transactions_indexing() { }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -1614,7 +1657,7 @@ fn transactions_indexing() { let tracked_block_id = tracked_block.block_id(); - let chainstate = &peer.stacks_node.unwrap().chainstate; + let chainstate = &peer.chain.stacks_node.unwrap().chainstate; // compare transactions to what has been tracked for tx in tracked_block.txs { @@ -1655,7 +1698,7 @@ fn transactions_not_indexing() { }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -1679,7 +1722,7 @@ fn transactions_not_indexing() { let untracked_block_id = untracked_block.block_id(); - let chainstate = &peer.stacks_node.unwrap().chainstate; + let chainstate = 
&peer.chain.stacks_node.unwrap().chainstate; // ensure untracked transactions are not recorded for tx in untracked_block.txs { @@ -1721,13 +1764,13 @@ fn test_nakamoto_chainstate_getters() { ); let sort_tip = { - let sort_db = peer.sortdb.as_ref().unwrap(); + let sort_db = peer.chain.sortdb.as_ref().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; { // scope this to drop the chainstate ref and db tx - let chainstate = &peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let sort_handle = sort_db.index_handle(&sort_tip.sortition_id); // no tenures yet @@ -1753,8 +1796,8 @@ fn test_nakamoto_chainstate_getters() { tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); // do a stx transfer in each block to a given recipient let recipient_addr = @@ -1792,8 +1835,8 @@ fn test_nakamoto_chainstate_getters() { .collect(); let tip = { - let chainstate = &peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -1811,14 +1854,10 @@ fn test_nakamoto_chainstate_getters() { &blocks.last().unwrap().header ); - let sort_tip = { - let sort_db = peer.sortdb.as_ref().unwrap(); - SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() - }; + 
let sort_tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb_ref().conn()).unwrap(); { // scope this to drop the chainstate ref and db tx - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_ref().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; for coinbase_height in 0..=((tip .anchored_header @@ -1856,8 +1895,8 @@ fn test_nakamoto_chainstate_getters() { debug!("\n======================================\nBegin tests\n===========================================\n"); { // scope this to drop the chainstate ref and db tx - let chainstate = &peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let mut sort_tx = sort_db.tx_handle_begin(&sort_tip.sortition_id).unwrap(); // we now have a tenure, and it confirms the last epoch2 block @@ -1999,8 +2038,14 @@ fn test_nakamoto_chainstate_getters() { next_tenure_change.tenure_consensus_hash = next_consensus_hash.clone(); next_tenure_change.burn_view_consensus_hash = next_consensus_hash.clone(); - let next_tenure_change_tx = peer.miner.make_nakamoto_tenure_change(next_tenure_change); - let next_coinbase_tx = peer.miner.make_nakamoto_coinbase(None, next_vrf_proof); + let next_tenure_change_tx = peer + .chain + .miner + .make_nakamoto_tenure_change(next_tenure_change); + let next_coinbase_tx = peer + .chain + .miner + .make_nakamoto_coinbase(None, next_vrf_proof); // make the second tenure's blocks let blocks_and_sizes = peer.make_nakamoto_tenure( @@ -2035,13 +2080,13 @@ fn test_nakamoto_chainstate_getters() { .collect(); let sort_tip = { - let sort_db = peer.sortdb.as_ref().unwrap(); + let sort_db = peer.sortdb_ref(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; { // scope this to drop the chainstate ref and db tx - let chainstate = 
&peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let mut sort_tx = sort_db.tx_handle_begin(&sort_tip.sortition_id).unwrap(); @@ -2215,7 +2260,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a let mut consensus_hashes = vec![]; let mut fee_counts = vec![]; let mut total_blocks = 0; - let stx_miner_key = peer.miner.nakamoto_miner_key(); + let stx_miner_key = peer.chain.miner.nakamoto_miner_key(); let stx_miner_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -2235,9 +2280,10 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); debug!("Next burnchain block: {}", &consensus_hash); @@ -2296,11 +2342,12 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a // if we're starting a new reward cycle, then save the current one let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); + let sort_db = peer.chain.sortdb.as_mut().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; if peer .config + .chain_config .burnchain .is_naka_signing_cycle_start(tip.block_height) { @@ -2324,8 +2371,8 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a // in nakamoto, tx fees are rewarded by the next tenure, so the // scheduled rewards come 1 tenure after the coinbase reward matures let miner = p2pkh_from(&stx_miner_key); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - 
let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); // this is sortition height 12, and this miner has earned all 12 of the coinbases // plus the initial per-block mining bonus of 2600 STX, but minus the last three rewards (since @@ -2389,8 +2436,8 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a } let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -2411,8 +2458,8 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a // verify that matured miner records were in place let mut matured_rewards = vec![]; { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let (mut chainstate_tx, _) = chainstate.chainstate_tx_begin().unwrap(); for i in 0..24 { let matured_reward_opt = NakamotoChainState::get_matured_miner_reward_schedules( @@ -2502,8 +2549,8 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a } let tip = { - let chainstate = &mut replay_peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = replay_peer.sortdb.as_mut().unwrap(); + let chainstate = &mut replay_peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = replay_peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -2565,9 +2612,10 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> 
TestPeer<'a> tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); rc_burn_ops.push(burn_ops); @@ -2609,8 +2657,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> all_blocks.append(&mut blocks.clone()); let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -2630,8 +2678,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> // highest tenure is our tenure-change let (highest_tenure, sort_tip) = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -2668,7 +2716,10 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> blocks.last().cloned().unwrap().header.block_id(), blocks.len() as u32, ); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change_extend); + let tenure_change_tx = peer + .chain + .miner + .make_nakamoto_tenure_change(tenure_change_extend); let blocks_and_sizes = peer.make_nakamoto_tenure_extension( tenure_change_tx, @@ -2703,8 +2754,8 @@ pub fn 
simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> all_blocks.append(&mut blocks.clone()); let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -2725,8 +2776,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> // highest tenure is our tenure-extend let (highest_tenure, sort_tip) = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -2759,8 +2810,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); rc_burn_ops.push(burn_ops); @@ -2802,8 +2853,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> all_blocks.append(&mut blocks.clone()); let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = 
peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -2823,8 +2874,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> // highest tenure is our new tenure-change let (highest_tenure, sort_tip) = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -2854,8 +2905,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> replay_reward_cycle(&mut replay_peer, &rc_burn_ops, &all_blocks); let tip = { - let chainstate = &mut replay_peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = replay_peer.sortdb.as_mut().unwrap(); + let chainstate = &mut replay_peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = replay_peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -2912,7 +2963,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe let mut rc_burn_ops = vec![]; let mut consensus_hashes = vec![]; let mut fee_counts = vec![]; - let stx_miner_key = peer.miner.nakamoto_miner_key(); + let stx_miner_key = peer.chain.miner.nakamoto_miner_key(); for i in 0..10 { let (burn_ops, mut tenure_change, miner_key) = @@ -2924,9 +2975,10 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let 
coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); debug!("Next burnchain block: {}", &consensus_hash); @@ -3002,8 +3054,8 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe // check that our tenure-extends have been getting applied let (highest_tenure, sort_tip) = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -3037,11 +3089,12 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe // if we're starting a new reward cycle, then save the current one let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); + let sort_db = peer.chain.sortdb.as_mut().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; if peer .config + .chain_config .burnchain .is_naka_signing_cycle_start(tip.block_height) { @@ -3062,8 +3115,8 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe // in nakamoto, tx fees are rewarded by the next tenure, so the // scheduled rewards come 1 tenure after the coinbase reward matures let miner = p2pkh_from(&stx_miner_key); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); // this is sortition height 12, and this miner has earned all 12 of the coinbases // plus the initial per-block mining bonus of 2600 STX, but minus the last three rewards (since @@ -3111,13 +3164,9 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe } eprintln!( - 
"Checking block #{} ({},{}): {} =?= {} + {}", - i, - &ch, + "Checking block #{i} ({ch},{}): {} =?= {expected_total_coinbase} + {expected_total_tx_fees}", &sn.block_height, - stx_balance.amount_unlocked(), - expected_total_coinbase, - expected_total_tx_fees + stx_balance.amount_unlocked() ); assert_eq!( stx_balance.amount_unlocked(), @@ -3126,8 +3175,8 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe } let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -3153,8 +3202,8 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe } let tip = { - let chainstate = &mut replay_peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = replay_peer.sortdb.as_mut().unwrap(); + let chainstate = &mut replay_peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = replay_peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -3208,7 +3257,7 @@ fn process_next_nakamoto_block_deadlock() { }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -3226,6 +3275,7 @@ fn process_next_nakamoto_block_deadlock() { let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); let mut sortition_db = peer.sortdb().reopen().unwrap(); let (chainstate, _) = &mut peer + .chain .stacks_node .as_mut() .unwrap() @@ -3314,13 +3364,14 @@ fn 
test_stacks_on_burnchain_ops() { ); let mut all_blocks: Vec = vec![]; - let stx_miner_key = peer.miner.nakamoto_miner_key(); + let stx_miner_key = peer.chain.miner.nakamoto_miner_key(); let mut extra_burn_ops = vec![]; let mut bitpatterns = HashMap::new(); // map consensus hash to txid bit pattern let cur_reward_cycle = peer .config + .chain_config .burnchain .block_height_to_reward_cycle(peer.get_burn_block_height()) .unwrap(); @@ -3417,9 +3468,10 @@ fn test_stacks_on_burnchain_ops() { tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); debug!("Next burnchain block: {}", &consensus_hash); @@ -3517,8 +3569,8 @@ fn test_stacks_on_burnchain_ops() { // check that our tenure-extends have been getting applied let (highest_tenure, sort_tip) = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -3608,8 +3660,8 @@ fn test_stacks_on_burnchain_ops() { } let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 52bc37d78a..9d3bded117 100644 --- 
a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -843,7 +843,7 @@ impl NakamotoBlockHeader { .map_err(|_| ChainstateError::NoRegisteredSigners(0))?; // HashMap of - let signers_by_pk: HashMap<_, _> = signers + let mut signers_by_pk: HashMap<_, _> = signers .iter() .enumerate() .map(|(i, signer)| (&signer.signing_key, (signer, i))) @@ -861,7 +861,7 @@ impl NakamotoBlockHeader { let mut public_key_bytes = [0u8; 33]; public_key_bytes.copy_from_slice(&public_key.to_bytes_compressed()[..]); - let (signer, signer_index) = signers_by_pk.get(&public_key_bytes).ok_or_else(|| { + let (signer, signer_index) = signers_by_pk.remove(&public_key_bytes).ok_or_else(|| { warn!( "Found an invalid public key. Reward set has {} signers. Chain length {}. Signatures length {}", signers.len(), @@ -876,13 +876,13 @@ impl NakamotoBlockHeader { // Enforce order of signatures if let Some(index) = last_index.as_ref() { - if *index >= *signer_index { + if *index >= signer_index { return Err(ChainstateError::InvalidStacksBlock( "Signatures are out of order".to_string(), )); } } else { - last_index = Some(*signer_index); + last_index = Some(signer_index); } total_weight_signed = total_weight_signed diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 4c285874d1..d734623333 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -1991,7 +1991,7 @@ fn test_make_miners_stackerdb_config() { None, ); - let naka_miner_hash160 = peer.miner.nakamoto_miner_hash160(); + let naka_miner_hash160 = peer.chain.miner.nakamoto_miner_hash160(); let miner_keys: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); let miner_hash160s: Vec<_> = miner_keys .iter() @@ -2009,8 +2009,8 @@ fn test_make_miners_stackerdb_config() { debug!("miners = {:#?}", &miner_hash160s); // extract chainstate, sortdb, and stackerdbs -- we don't need the peer 
anymore - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let mut last_snapshot = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let stackerdbs = peer.network.stackerdbs; let miners_contract_id = boot_code_id(MINERS_NAME, false); @@ -3261,9 +3261,7 @@ pub mod nakamoto_block_signatures { match header.verify_signer_signatures(&reward_set) { Ok(_) => panic!("Expected duplicate signature to fail"), - Err(ChainstateError::InvalidStacksBlock(msg)) => { - assert!(msg.contains("Signatures are out of order")); - } + Err(ChainstateError::InvalidStacksBlock(_)) => {} _ => panic!("Expected InvalidStacksBlock error"), } } diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 1c9a64b959..e7a6135d18 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -34,15 +34,12 @@ use crate::chainstate::burn::operations::{ use crate::chainstate::burn::*; use crate::chainstate::coordinator::tests::NullEventDispatcher; use crate::chainstate::coordinator::{ChainsCoordinator, OnChainRewardSetProvider}; -use crate::chainstate::nakamoto::coordinator::{ - get_nakamoto_next_recipients, load_nakamoto_reward_set, -}; +use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::staging_blocks::{ NakamotoBlockObtainMethod, NakamotoStagingBlocksConnRef, }; use crate::chainstate::nakamoto::test_signers::TestSigners; -use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, StacksDBIndexed, }; @@ -1152,218 +1149,23 @@ impl TestPeer<'_> { TenureChangePayload, 
LeaderKeyRegisterOp, ) { - let mut sortdb = self.sortdb.take().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - let mut burn_block = TestBurnchainBlock::new(&tip, 0); - let mut stacks_node = self.stacks_node.take().unwrap(); - - let (last_tenure_id, parent_block_opt, parent_tenure_opt) = - if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { - ( - nakamoto_parent_tenure.first().as_ref().unwrap().block_id(), - None, - Some(nakamoto_parent_tenure.clone()), - ) - } else { - get_nakamoto_parent(&self.miner, &stacks_node, &sortdb) - }; - - // find the VRF leader key register tx to use. - // it's the one pointed to by the parent tenure - let parent_consensus_hash_and_tenure_start_id_opt = - if let Some(parent_tenure) = parent_tenure_opt.as_ref() { - let tenure_start_block = parent_tenure.first().unwrap(); - Some(( - tenure_start_block.header.consensus_hash.clone(), - tenure_start_block.block_id(), - )) - } else if let Some(parent_block) = parent_block_opt.as_ref() { - let parent_header_info = - StacksChainState::get_stacks_block_header_info_by_index_block_hash( - stacks_node.chainstate.db(), - &last_tenure_id, - ) - .unwrap() - .unwrap(); - Some(( - parent_header_info.consensus_hash.clone(), - parent_header_info.index_block_hash(), - )) - } else { - None - }; - - let last_key = if let Some((ch, parent_tenure_start_block_id)) = - parent_consensus_hash_and_tenure_start_id_opt.clone() - { - // it's possible that the parent was a shadow block. 
- // if so, find the highest non-shadow ancestor's block-commit, so we can - let mut cursor = ch; - let (tenure_sn, tenure_block_commit) = loop { - let tenure_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cursor) - .unwrap() - .unwrap(); - - let Some(tenure_block_commit) = get_block_commit_by_txid( - sortdb.conn(), - &tenure_sn.sortition_id, - &tenure_sn.winning_block_txid, - ) - .unwrap() else { - // parent must be a shadow block - let header = NakamotoChainState::get_block_header_nakamoto( - stacks_node.chainstate.db(), - &parent_tenure_start_block_id, - ) - .unwrap() - .unwrap() - .anchored_header - .as_stacks_nakamoto() - .cloned() - .unwrap(); - - if !header.is_shadow_block() { - panic!("Parent tenure start block ID {} has no block-commit and is not a shadow block", &parent_tenure_start_block_id); - } - - cursor = stacks_node - .chainstate - .index_conn() - .get_parent_tenure_consensus_hash(&parent_tenure_start_block_id, &cursor) - .unwrap() - .unwrap(); - - continue; - }; - break (tenure_sn, tenure_block_commit); - }; - - let tenure_leader_key = SortitionDB::get_leader_key_at( - &sortdb.index_conn(), - tenure_block_commit.key_block_ptr.into(), - tenure_block_commit.key_vtxindex.into(), - &tenure_sn.sortition_id, - ) - .unwrap() - .unwrap(); - tenure_leader_key - } else { - panic!("No leader key"); - }; - - let network_id = self.config.network_id; - let chainstate_path = self.chainstate_path.clone(); - let burn_block_height = burn_block.block_height; - - let (mut block_commit_op, tenure_change_payload) = stacks_node.begin_nakamoto_tenure( - &sortdb, - &mut self.miner, - &mut burn_block, - &last_key, - parent_block_opt.as_ref(), - parent_tenure_opt.as_ref().map(|blocks| blocks.as_slice()), - 1000, - tenure_change_cause, - ); - - // patch up block-commit -- these blocks all mine off of genesis - if last_tenure_id == StacksBlockId(BOOT_BLOCK_HASH.0) { - block_commit_op.parent_block_ptr = 0; - block_commit_op.parent_vtxindex = 0; - } - - let mut 
burn_ops = vec![]; - if self.miner.last_VRF_public_key().is_none() { - let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); - burn_ops.push(BlockstackOperationType::LeaderKeyRegister(leader_key_op)); - } - - // patch in reward set info - match get_nakamoto_next_recipients( - &tip, - &mut sortdb, - &mut stacks_node.chainstate, - &tenure_change_payload.previous_tenure_end, - &self.config.burnchain, - ) { - Ok(recipients) => { - block_commit_op.commit_outs = match recipients { - Some(info) => { - let mut recipients = info - .recipients - .into_iter() - .map(|x| x.0) - .collect::>(); - if recipients.len() == 1 { - recipients.push(PoxAddress::standard_burn_address(false)); - } - recipients - } - None => { - if self - .config - .burnchain - .is_in_prepare_phase(burn_block.block_height) - { - vec![PoxAddress::standard_burn_address(false)] - } else { - vec![ - PoxAddress::standard_burn_address(false), - PoxAddress::standard_burn_address(false), - ] - } - } - }; - test_debug!( - "Block commit at height {} has {} recipients: {:?}", - block_commit_op.block_height, - block_commit_op.commit_outs.len(), - &block_commit_op.commit_outs - ); - } - Err(e) => { - panic!("Failure fetching recipient set: {e:?}"); - } - }; - - burn_ops.push(BlockstackOperationType::LeaderBlockCommit(block_commit_op)); - - // prepare to mine - let miner_addr = self.miner.origin_address().unwrap(); - let miner_account = get_account(&mut stacks_node.chainstate, &sortdb, &miner_addr); - self.miner.set_nonce(miner_account.nonce); - - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); - (burn_ops, tenure_change_payload, last_key) + self.chain.begin_nakamoto_tenure(tenure_change_cause) } /// Make the VRF proof for this tenure. 
/// Call after processing the block-commit pub fn make_nakamoto_vrf_proof(&mut self, miner_key: LeaderKeyRegisterOp) -> VRFProof { - let sortdb = self.sortdb.take().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let proof = self - .miner - .make_proof(&miner_key.public_key, &tip.sortition_hash) - .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", miner_key.public_key)); - self.sortdb = Some(sortdb); - debug!( - "VRF proof made from {:?} over {}: {proof:?}", - miner_key.public_key, &tip.sortition_hash - ); - proof + self.chain.make_nakamoto_vrf_proof(miner_key) } pub fn try_process_block(&mut self, block: &NakamotoBlock) -> Result { - let mut sort_handle = self.sortdb.as_ref().unwrap().index_handle_at_tip(); + let mut sort_handle = self.chain.sortdb.as_ref().unwrap().index_handle_at_tip(); let stacks_tip = sort_handle.get_nakamoto_tip_block_id().unwrap().unwrap(); let accepted = Relayer::process_new_nakamoto_block( - &self.config.burnchain, - self.sortdb.as_ref().unwrap(), + &self.config.chain_config.burnchain, + self.chain.sortdb.as_ref().unwrap(), &mut sort_handle, - &mut self.stacks_node.as_mut().unwrap().chainstate, + &mut self.chain.stacks_node.as_mut().unwrap().chainstate, &stacks_tip, block, None, @@ -1375,11 +1177,11 @@ impl TestPeer<'_> { let sort_tip = SortitionDB::get_canonical_sortition_tip(self.sortdb().conn()).unwrap(); let Some(block_receipt) = NakamotoChainState::process_next_nakamoto_block::( - &mut self.stacks_node.as_mut().unwrap().chainstate, - self.sortdb.as_mut().unwrap(), + &mut self.chain.stacks_node.as_mut().unwrap().chainstate, + self.chain.sortdb.as_mut().unwrap(), &sort_tip, None, - self.config.txindex, + self.config.chain_config.txindex, )? 
else { return Ok(false); @@ -1454,7 +1256,7 @@ impl TestPeer<'_> { let blocks = TestStacksNode::make_nakamoto_tenure_blocks( &mut stacks_node.chainstate, sortdb, - &mut peer.miner, + &mut peer.chain.miner, signers, &tenure_change .try_as_tenure_change() @@ -1463,12 +1265,12 @@ impl TestPeer<'_> { .clone(), Some(tenure_change), Some(coinbase), - &mut peer.coord, + &mut peer.chain.coord, miner_setup, block_builder, after_block, - peer.mine_malleablized_blocks, - peer.nakamoto_parent_tenure_opt.is_none(), + peer.chain.mine_malleablized_blocks, + peer.chain.nakamoto_parent_tenure_opt.is_none(), None, )?; @@ -1486,7 +1288,9 @@ impl TestPeer<'_> { .flat_map(|(_, _, _, malleablized)| malleablized) .collect(); - peer.malleablized_blocks.append(&mut malleablized_blocks); + peer.chain + .malleablized_blocks + .append(&mut malleablized_blocks); let block_data = blocks .into_iter() @@ -1516,8 +1320,8 @@ impl TestPeer<'_> { &[(NakamotoBlock, u64, ExecutionCost)], ) -> Vec, { - let mut stacks_node = self.stacks_node.take().unwrap(); - let mut sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); let tenure_extend_payload = if let TransactionPayload::TenureChange(ref tc) = &tenure_extend_tx.payload { @@ -1543,7 +1347,7 @@ impl TestPeer<'_> { let blocks = TestStacksNode::make_nakamoto_tenure_blocks( &mut stacks_node.chainstate, &mut sortdb, - &mut self.miner, + &mut self.chain.miner, signers, &tenure_extend_tx .try_as_tenure_change() @@ -1552,12 +1356,12 @@ impl TestPeer<'_> { .clone(), Some(tenure_extend_tx), None, - &mut self.coord, + &mut self.chain.coord, |_| {}, block_builder, |_| true, - self.mine_malleablized_blocks, - self.nakamoto_parent_tenure_opt.is_none(), + self.chain.mine_malleablized_blocks, + self.chain.nakamoto_parent_tenure_opt.is_none(), None, ) .unwrap(); @@ -1576,15 +1380,17 @@ impl TestPeer<'_> { .flat_map(|(_, _, _, malleablized)| malleablized) .collect(); - 
self.malleablized_blocks.append(&mut malleablized_blocks); + self.chain + .malleablized_blocks + .append(&mut malleablized_blocks); let block_data = blocks .into_iter() .map(|(blk, sz, cost, _)| (blk, sz, cost)) .collect(); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); block_data } @@ -1593,8 +1399,8 @@ impl TestPeer<'_> { pub fn process_nakamoto_tenure(&mut self, blocks: Vec) { debug!("Peer will process {} Nakamoto blocks", blocks.len()); - let mut sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut node = self.chain.stacks_node.take().unwrap(); let tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); @@ -1616,7 +1422,7 @@ impl TestPeer<'_> { .unwrap(); if accepted.is_accepted() { test_debug!("Accepted Nakamoto block {}", &block_id); - self.coord.handle_new_nakamoto_stacks_block().unwrap(); + self.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); debug!("Begin check Nakamoto block {}", &block.block_id()); TestPeer::check_processed_nakamoto_block(&mut sortdb, &mut node.chainstate, block); @@ -1626,8 +1432,8 @@ impl TestPeer<'_> { } } - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(node); } /// Get the tenure-start block of the parent tenure of `tenure_id_consensus_hash` @@ -1764,7 +1570,7 @@ impl TestPeer<'_> { let Ok(Some(parent_block_header)) = NakamotoChainState::get_block_header(chainstate.db(), &block.header.parent_block_id) else { - panic!("No parent block for {:?}", &block); + panic!("No parent block for {block:?}"); }; // get_coinbase_height @@ -2479,8 +2285,8 @@ impl TestPeer<'_> { &naka_tip_id ); - let mut stacks_node = self.stacks_node.take().unwrap(); - let sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = 
self.chain.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); let shadow_block = NakamotoBlockBuilder::make_shadow_tenure( &mut stacks_node.chainstate, @@ -2494,12 +2300,13 @@ impl TestPeer<'_> { // Get the reward set let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let reward_set = load_nakamoto_reward_set( - self.miner + self.chain + .miner .burnchain .block_height_to_reward_cycle(sort_tip_sn.block_height) .expect("FATAL: no reward cycle for sortition"), &sort_tip_sn.sortition_id, - &self.miner.burnchain, + &self.chain.miner.burnchain, &mut stacks_node.chainstate, &shadow_block.header.parent_block_id, &sortdb, @@ -2561,11 +2368,11 @@ impl TestPeer<'_> { drop(rollback_tx); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); // process it - self.coord.handle_new_nakamoto_stacks_block().unwrap(); + self.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); // verify that it processed self.refresh_burnchain_view(); diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 381dfa06c0..b60fe5cd3f 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -1655,8 +1655,8 @@ pub mod test { observer: Option<&'a TestEventObserver>, ) -> (TestPeer<'a>, Vec) { let mut peer_config = TestPeerConfig::new(test_name, 0, 0); - peer_config.burnchain = burnchain.clone(); - peer_config.epochs = epochs; + peer_config.chain_config.burnchain = burnchain.clone(); + peer_config.chain_config.epochs = epochs; peer_config.setup_code = format!( "(contract-call? 
.pox set-burnchain-parameters u{} u{} u{} u{})", burnchain.first_block_height, @@ -1693,14 +1693,14 @@ pub mod test { .map(|addr| (addr.into(), (1024 * POX_THRESHOLD_STEPS_USTX) as u64)) .collect(); - peer_config.initial_balances = balances; + peer_config.chain_config.initial_balances = balances; let peer = TestPeer::new_with_observer(peer_config, observer); (peer, keys.to_vec()) } pub fn eval_at_tip(peer: &mut TestPeer, boot_contract: &str, expr: &str) -> Value { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); @@ -1711,7 +1711,7 @@ pub mod test { &boot_code_id(boot_contract, false), expr, ); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); value } @@ -1728,7 +1728,7 @@ pub mod test { name: &str, expr: &str, ) -> Value { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); @@ -1739,7 +1739,7 @@ pub mod test { &contract_id(addr, name), expr, ); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); value } @@ -1815,11 +1815,7 @@ pub mod test { addr: &PrincipalData, ) -> Option<(u128, PoxAddress, u128, u128)> { let value_opt = eval_at_tip(peer, "pox", &format!("(get-stacker-info '{addr})")); - let data = if let Some(d) = value_opt.expect_optional().unwrap() { - d - } else { - return None; - }; + let data = value_opt.expect_optional().unwrap()?; let data = data.expect_tuple().unwrap(); @@ -1855,9 +1851,9 @@ pub mod test { where F: FnOnce(&mut StacksChainState, &SortitionDB) -> R, { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let r = 
todo(peer.chainstate(), &sortdb); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); r } @@ -2814,8 +2810,9 @@ pub mod test { } pub fn get_current_reward_cycle(peer: &TestPeer, burnchain: &Burnchain) -> u128 { - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() as u128 @@ -2841,9 +2838,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -2908,7 +2906,7 @@ pub mod test { let mut peer_config = TestPeerConfig::new(function_name!(), 2000, 2001); let alice = StacksAddress::from_string("STVK1K405H6SK9NKJAP32GHYHDJ98MMNP8Y6Z9N0").unwrap(); let bob = StacksAddress::from_string("ST76D2FMXZ7D2719PNE4N71KPSX84XCCNCMYC940").unwrap(); - peer_config.initial_lockups = vec![ + peer_config.chain_config.initial_lockups = vec![ ChainstateAccountLockup::new(alice.clone(), 1000, 1), ChainstateAccountLockup::new(bob.clone(), 1000, 1), ChainstateAccountLockup::new(alice.clone(), 1000, 2), @@ -2968,9 +2966,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let 
(burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -3035,9 +3034,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, ref parent_microblock_header_opt| { let parent_tip = get_parent_tip(parent_opt, chainstate, sortdb); @@ -3152,9 +3152,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -3263,9 +3264,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -3474,9 +3476,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - 
SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) @@ -3735,9 +3738,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -4002,9 +4006,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -4218,9 +4223,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, ref parent_microblock_header_opt| { let parent_tip = get_parent_tip(parent_opt, chainstate, sortdb); @@ -4431,9 +4437,10 @@ pub mod test { let 
microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -4680,9 +4687,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -5202,9 +5210,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -5648,9 +5657,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, 
vrf_proof, ref parent_opt, ref parent_microblock_header_opt| { let parent_tip = get_parent_tip(parent_opt, chainstate, sortdb); diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 41ddeeec98..2749bbbf1f 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -60,7 +60,7 @@ pub fn get_reward_set_entries_at( tip: &StacksBlockId, at_burn_ht: u64, ) -> Vec { - let burnchain = peer.config.burnchain.clone(); + let burnchain = peer.config.chain_config.burnchain.clone(); with_sortdb(peer, |ref mut c, sortdb| { get_reward_set_entries_at_block(c, &burnchain, sortdb, tip, at_burn_ht).unwrap() }) @@ -73,7 +73,7 @@ pub fn get_reward_set_entries_index_order_at( tip: &StacksBlockId, at_burn_ht: u64, ) -> Vec { - let burnchain = peer.config.burnchain.clone(); + let burnchain = peer.config.chain_config.burnchain.clone(); with_sortdb(peer, |ref mut c, sortdb| { c.get_reward_addresses(&burnchain, sortdb, at_burn_ht, tip) .unwrap() @@ -149,9 +149,15 @@ pub fn check_all_stacker_link_invariants( // For cycles where PoX-3 is active, check if Epoch24 has activated first. 
let active_pox_contract = peer .config + .chain_config .burnchain .pox_constants - .active_pox_contract(peer.config.burnchain.reward_cycle_to_block_height(cycle)); + .active_pox_contract( + peer.config + .chain_config + .burnchain + .reward_cycle_to_block_height(cycle), + ); if active_pox_contract == POX_3_NAME && epoch < StacksEpochId::Epoch24 { info!( "Skipping check on a PoX-3 reward cycle because Epoch24 has not started yet"; @@ -337,6 +343,7 @@ pub fn check_stacking_state_invariants( let stacking_state_unlock_ht = peer .config + .chain_config .burnchain .reward_cycle_to_block_height((first_cycle + lock_period) as u64); @@ -430,11 +437,13 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c .burn_header_height; let tip_cycle = peer .config + .chain_config .burnchain .block_height_to_reward_cycle(current_burn_height.into()) .unwrap(); let cycle_start = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(cycle_number); @@ -446,11 +455,17 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c .unwrap() .unwrap(); - let active_pox_contract = peer.config.burnchain.pox_constants.active_pox_contract( - peer.config - .burnchain - .reward_cycle_to_block_height(cycle_number), - ); + let active_pox_contract = peer + .config + .chain_config + .burnchain + .pox_constants + .active_pox_contract( + peer.config + .chain_config + .burnchain + .reward_cycle_to_block_height(cycle_number), + ); if cycle_start_epoch.epoch_id == StacksEpochId::Epoch22 || cycle_start_epoch.epoch_id == StacksEpochId::Epoch23 @@ -467,8 +482,8 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c "Skipping validation of reward set that started in Epoch24, but its cycle starts before pox-3 activation"; "cycle" => cycle_number, "cycle_start" => cycle_start, - "pox_3_activation" => peer.config.burnchain.pox_constants.pox_3_activation_height, - "pox_4_activation" => 
peer.config.burnchain.pox_constants.pox_4_activation_height, + "pox_3_activation" => peer.config.chain_config.burnchain.pox_constants.pox_3_activation_height, + "pox_4_activation" => peer.config.chain_config.burnchain.pox_constants.pox_4_activation_height, "epoch_2_4_start" => cycle_start_epoch.start_height, ); return; @@ -510,7 +525,12 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c if tip_epoch.epoch_id >= StacksEpochId::Epoch24 && current_burn_height - <= peer.config.burnchain.pox_constants.pox_3_activation_height + <= peer + .config + .chain_config + .burnchain + .pox_constants + .pox_3_activation_height { // if the tip is epoch-2.4, and pox-3 isn't the active pox contract yet, // the invariant checks will not make sense for the same reasons as above @@ -519,7 +539,12 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c if tip_epoch.epoch_id >= StacksEpochId::Epoch25 && current_burn_height - <= peer.config.burnchain.pox_constants.pox_4_activation_height + <= peer + .config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height { // if the tip is epoch-2.5, and pox-5 isn't the active pox contract yet, // the invariant checks will not make sense for the same reasons as above @@ -550,11 +575,17 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c /// Get the `cycle_number`'s total stacked amount at the given chaintip pub fn get_reward_cycle_total(peer: &mut TestPeer, tip: &StacksBlockId, cycle_number: u64) -> u128 { - let active_pox_contract = peer.config.burnchain.pox_constants.active_pox_contract( - peer.config - .burnchain - .reward_cycle_to_block_height(cycle_number), - ); + let active_pox_contract = peer + .config + .chain_config + .burnchain + .pox_constants + .active_pox_contract( + peer.config + .chain_config + .burnchain + .reward_cycle_to_block_height(cycle_number), + ); with_clarity_db_ro(peer, tip, |db| { let total_stacked_key = 
TupleData::from_data(vec![( @@ -776,7 +807,7 @@ fn test_simple_pox_lockup_transition_pox_2() { }; // our "tenure counter" is now at 0 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); // first tenure is empty @@ -794,7 +825,7 @@ fn test_simple_pox_lockup_transition_pox_2() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_lockup( &alice, 0, @@ -854,7 +885,7 @@ fn test_simple_pox_lockup_transition_pox_2() { // should be accepted (checked via the tx receipt). Also, importantly, // the cost tracker should assign costs to Charlie's transaction. // This is also checked by the transaction receipt. - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // our "tenure counter" is now at 9 assert_eq!(tip.block_height, 9 + EMPTY_SORTITIONS as u64); @@ -880,7 +911,7 @@ fn test_simple_pox_lockup_transition_pox_2() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10, and 11 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_2_lockup( &bob, @@ -904,7 +935,7 @@ fn test_simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 0); // Now, Bob tries to lock in PoX v1 too, but it shouldn't work! 
- let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_lockup( &bob, @@ -921,7 +952,7 @@ fn test_simple_pox_lockup_transition_pox_2() { let block_id = peer.tenure_with_txs(&[bob_lockup], &mut coinbase_nonce); // our "tenure counter" is now at 12 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 12 + EMPTY_SORTITIONS as u64); // One more empty tenure to reach the unlock height let block_id = peer.tenure_with_txs(&[], &mut coinbase_nonce); @@ -932,7 +963,7 @@ fn test_simple_pox_lockup_transition_pox_2() { // At this point, the auto unlock height for v1 accounts should be reached. // let Alice stack in PoX v2 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // our "tenure counter" is now at 13 assert_eq!(tip.block_height, 13 + EMPTY_SORTITIONS as u64); @@ -963,7 +994,7 @@ fn test_simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // our "tenure counter" is now at 31 assert_eq!(tip.block_height, 31 + EMPTY_SORTITIONS as u64); @@ -1174,7 +1205,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10, and 11 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_2_lockup( &alice, @@ -1245,7 +1276,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { .unwrap(); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1470,7 +1501,7 @@ fn delegate_stack_increase() { } // in 
the next tenure, PoX 2 should now exist. - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit delegation tx let success_alice_delegation = alice_nonce; @@ -1528,7 +1559,7 @@ fn delegate_stack_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 3) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1822,7 +1853,7 @@ fn stack_increase() { } // in the next tenure, PoX 2 should now exist. - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit an increase: this should fail, because Alice is not yet locked let fail_no_lock_tx = alice_nonce; @@ -1876,7 +1907,7 @@ fn stack_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 3) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -2031,7 +2062,7 @@ fn test_lock_period_invariant_extend_transition() { .unwrap() + 1; - eprintln!("First v2 cycle = {}", first_v2_cycle); + eprintln!("First v2 cycle = {first_v2_cycle}"); assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); let epochs = StacksEpoch::all(0, 0, EMPTY_SORTITIONS as u64 + 10); @@ -2059,7 +2090,7 @@ fn test_lock_period_invariant_extend_transition() { let ALICE_LOCKUP = 1024 * POX_THRESHOLD_STEPS_USTX; // our "tenure counter" is now at 0 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); // first tenure is empty @@ -2074,7 +2105,7 @@ fn 
test_lock_period_invariant_extend_transition() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_lockup( &alice, 0, @@ -2138,7 +2169,7 @@ fn test_lock_period_invariant_extend_transition() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // Alice _will_ auto-unlock: she can stack-extend in PoX v2 let alice_lockup = make_pox_2_extend( @@ -2311,7 +2342,7 @@ fn test_pox_extend_transition_pox_2() { }; // our "tenure counter" is now at 0 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); // first tenure is empty @@ -2326,7 +2357,7 @@ fn test_pox_extend_transition_pox_2() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_lockup( &alice, 0, @@ -2392,7 +2423,7 @@ fn test_pox_extend_transition_pox_2() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_2_lockup( &bob, @@ -2451,7 +2482,7 @@ fn test_pox_extend_transition_pox_2() { alice_rewards_to_v2_start_checks(tip_index_block, &mut peer); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // our "tenure counter" is now at 15 assert_eq!(tip.block_height, 15 + EMPTY_SORTITIONS as u64); @@ -2468,7 +2499,7 @@ fn test_pox_extend_transition_pox_2() { } // our "tenure counter" is now at 32 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = 
get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 32 + EMPTY_SORTITIONS as u64); // Alice would have unlocked under v1 rules, so try to stack again via PoX 1 and expect a runtime error @@ -2736,7 +2767,7 @@ fn test_delegate_extend_transition_pox_2() { }; // our "tenure counter" is now at 0 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); // first tenure is empty @@ -2751,7 +2782,7 @@ fn test_delegate_extend_transition_pox_2() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let delegate_tx = make_pox_contract_call( &alice, 0, @@ -2883,7 +2914,7 @@ fn test_delegate_extend_transition_pox_2() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_delegate_tx = make_pox_2_contract_call( &bob, @@ -3090,7 +3121,7 @@ fn test_delegate_extend_transition_pox_2() { alice_rewards_to_v2_start_checks(tip_index_block, &mut peer); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // our "tenure counter" is now at 15 assert_eq!(tip.block_height, 15 + EMPTY_SORTITIONS as u64); @@ -3156,7 +3187,7 @@ fn test_delegate_extend_transition_pox_2() { } // our "tenure counter" is now at 32 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 32 + EMPTY_SORTITIONS as u64); // Alice would have unlocked under v1 rules, so try to stack again via PoX 1 and expect a runtime error @@ -3185,7 +3216,7 @@ fn test_delegate_extend_transition_pox_2() { for r in b.receipts.into_iter() { if let TransactionOrigin::Stacks(ref t) = r.transaction { let addr = t.auth.origin().address_testnet(); - 
eprintln!("TX addr: {}", addr); + eprintln!("TX addr: {addr}"); if addr == alice_address { alice_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == bob_address { @@ -3375,7 +3406,7 @@ fn test_pox_2_getters() { peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -3645,8 +3676,9 @@ fn test_get_pox_addrs() { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) @@ -3923,8 +3955,9 @@ fn test_stack_with_segwit() { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) @@ -4257,7 +4290,7 @@ fn test_pox_2_delegate_stx_addr_validation() { peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -4463,7 +4496,7 @@ fn stack_aggregation_increase() { } // in the next tenure, PoX 2 should now exist. 
- let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit delegation tx for alice let alice_delegation_1 = make_pox_2_contract_call( @@ -4529,7 +4562,7 @@ fn stack_aggregation_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 3) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -4552,7 +4585,7 @@ fn stack_aggregation_increase() { assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -4603,7 +4636,7 @@ fn stack_aggregation_increase() { bob_nonce += 1; latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -4900,7 +4933,7 @@ fn stack_in_both_pox1_and_pox2() { } // in the next tenure, PoX 2 should now exist. 
- let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // our "tenure counter" is now at 10 assert_eq!(tip.block_height, 10 + EMPTY_SORTITIONS as u64); diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 0e4c34fa00..a99d4c98c2 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -184,7 +184,7 @@ fn simple_pox_lockup_transition_pox_2() { let mut coinbase_nonce = 0; // our "tenure counter" is now at 0 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); // first tenure is empty @@ -202,7 +202,7 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_lockup( &alice, 0, @@ -249,7 +249,9 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 0); // produce blocks until immediately before the 2.1 epoch switch - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch21].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch21].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); // alice is still locked, balance should be 0 @@ -261,7 +263,7 @@ fn simple_pox_lockup_transition_pox_2() { // should be accepted (checked via the tx receipt). Also, importantly, // the cost tracker should assign costs to Charlie's transaction. // This is also checked by the transaction receipt. 
- let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let test = make_pox_2_contract_call( &charlie, @@ -284,7 +286,7 @@ fn simple_pox_lockup_transition_pox_2() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10, and 11 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_2_lockup( &bob, @@ -301,7 +303,7 @@ fn simple_pox_lockup_transition_pox_2() { let block_id = peer.tenure_with_txs(&[bob_lockup], &mut coinbase_nonce); assert_eq!( - get_tip(peer.sortdb.as_ref()).block_height as u32, + get_tip(peer.chain.sortdb.as_ref()).block_height as u32, pox_constants.v1_unlock_height + 1, "Test should have reached 1 + PoX-v1 unlock height" ); @@ -311,7 +313,7 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); // Now, Bob tries to lock in PoX v1 too, but it shouldn't work! - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_lockup( &bob, @@ -327,7 +329,7 @@ fn simple_pox_lockup_transition_pox_2() { // At this point, the auto unlock height for v1 accounts has been reached. 
// let Alice stack in PoX v2 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_2_lockup( &alice, @@ -347,7 +349,9 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); // now, let's roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch22].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should always include this half lockup let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -364,7 +368,8 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); // now, roll the chain forward to Epoch-2.4 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should always be unlocked @@ -372,7 +377,7 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); } - let tip = get_tip(peer.sortdb.as_ref()).block_height; + let tip = get_tip(peer.chain.sortdb.as_ref()).block_height; let bob_lockup = make_pox_3_lockup( &bob, 2, @@ -583,7 +588,8 @@ fn pox_auto_unlock(alice_first: bool) { let mut coinbase_nonce = 0; // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch21].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch21].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -592,7 +598,7 @@ fn pox_auto_unlock(alice_first: bool) { // Lets have Bob lock up for v2 
// this will lock for cycles 8, 9, 10, and 11 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_2_lockup( &alice, @@ -653,7 +659,7 @@ fn pox_auto_unlock(alice_first: bool) { ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -734,7 +740,8 @@ fn pox_auto_unlock(alice_first: bool) { // now, lets check behavior in Epochs 2.2-2.4, with pox-3 auto unlock tests // produce blocks until epoch 2.2 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch22].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch22].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -747,13 +754,14 @@ fn pox_auto_unlock(alice_first: bool) { assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); // produce blocks until epoch 2.4 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } // repeat the lockups as before, so we can test the pox-3 auto unlock behavior - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_3_lockup( &alice, @@ -815,7 +823,7 @@ fn pox_auto_unlock(alice_first: bool) { ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut 
coinbase_nonce); } @@ -1022,13 +1030,14 @@ fn delegate_stack_increase() { let mut coinbase_nonce = 0; // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch21].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch21].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } // in the next tenure, PoX 2 should now exist. - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit delegation tx let alice_delegation_1 = make_pox_2_contract_call( @@ -1089,7 +1098,7 @@ fn delegate_stack_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 1) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1208,7 +1217,9 @@ fn delegate_stack_increase() { // on pox-3 // roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch22].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should always include this half lockup assert_eq!( @@ -1251,12 +1262,13 @@ fn delegate_stack_increase() { ); // Roll to Epoch-2.4 and re-do the above tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit delegation tx let 
alice_delegation_1 = make_pox_3_contract_call( @@ -1315,7 +1327,7 @@ fn delegate_stack_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1639,13 +1651,14 @@ fn stack_increase() { let increase_amt = total_balance - first_lockup_amt; // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch21].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch21].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } // in the next tenure, PoX 2 should now exist. - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit an increase: this should fail, because Alice is not yet locked let fail_no_lock_tx = alice_nonce; @@ -1691,7 +1704,7 @@ fn stack_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 1) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1773,7 +1786,9 @@ fn stack_increase() { // on pox-3 // roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch22].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should always include this half lockup assert_eq!( @@ -1802,13 +1817,14 @@ fn stack_increase() { ); // Roll 
to Epoch-2.4 and re-do the above stack-increase tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } // in the next tenure, PoX 3 should now exist. - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit an increase: this should fail, because Alice is not yet locked let pox_3_fail_no_lock_tx = alice_nonce; @@ -1858,7 +1874,7 @@ fn stack_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -2165,7 +2181,7 @@ fn pox_extend_transition() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_lockup( &alice, 0, @@ -2212,12 +2228,14 @@ fn pox_extend_transition() { let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); assert_eq!(alice_balance, 0); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch21].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch21].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); alice_rewards_to_v2_start_checks(latest_block.clone(), &mut peer); } @@ -2226,7 
+2244,7 @@ fn pox_extend_transition() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_2_lockup( &bob, @@ -2271,7 +2289,7 @@ fn pox_extend_transition() { // produce blocks until the v2 reward cycles start let height_target = burnchain.reward_cycle_to_block_height(first_v2_cycle) - 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // alice is still locked, balance should be 0 let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -2286,7 +2304,9 @@ fn pox_extend_transition() { // Roll to Epoch-2.4 and re-do the above tests // roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch22].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should be locked, and so should bob's let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -2313,12 +2333,13 @@ fn pox_extend_transition() { assert_eq!(bob_account.amount_unlocked(), INITIAL_BALANCE); // Roll to Epoch-2.4 and re-do the above stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_3_lockup( &alice, 2, @@ -2362,11 +2383,11 @@ fn 
pox_extend_transition() { assert_eq!(alice_balance, 0); // advance to the first v3 reward cycle - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_3_lockup( &bob, 2, @@ -2448,7 +2469,7 @@ fn pox_extend_transition() { for r in b.receipts.into_iter() { if let TransactionOrigin::Stacks(ref t) = r.transaction { let addr = t.auth.origin().address_testnet(); - eprintln!("TX addr: {}", addr); + eprintln!("TX addr: {addr}"); if addr == alice_address { alice_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == bob_address { @@ -2580,21 +2601,22 @@ fn delegate_extend_pox_3() { let LOCKUP_AMT = 1024 * POX_THRESHOLD_STEPS_USTX; // our "tenure counter" is now at 0 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); // first tenure is empty let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // Roll to Epoch-2.4 and perform the delegate-stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } // in the next tenure, PoX 3 should now exist. 
// charlie will lock bob and alice through the delegation interface - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let mut alice_nonce = 0; let mut bob_nonce = 0; @@ -2818,13 +2840,13 @@ fn delegate_extend_pox_3() { } let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); assert_eq!(alice_balance, 0); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // Extend bob's lockup via `delegate-stack-extend` for 1 more cycle // so that we can check the first-reward-cycle is correctly updated @@ -3046,12 +3068,13 @@ fn pox_3_getters() { let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // Roll to Epoch-2.4 and perform the delegate-stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let LOCKUP_AMT = 1024 * POX_THRESHOLD_STEPS_USTX; // alice locks in v2 @@ -3291,7 +3314,7 @@ fn pox_3_getters() { } fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec, u128) { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; let addrs_and_payout = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { @@ -3377,7 +3400,7 @@ fn get_pox_addrs() { let mut coinbase_nonce = 0; let assert_latest_was_burn = |peer: &mut 
TestPeer| { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; @@ -3393,11 +3416,16 @@ fn get_pox_addrs() { assert!(commit.burn_fee > 0); let (addrs, payout) = get_burn_pox_addr_info(peer); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; info!("Checking burn outputs at burn_height = {}", burn_height); - if peer.config.burnchain.is_in_prepare_phase(burn_height) { + if peer + .config + .chain_config + .burnchain + .is_in_prepare_phase(burn_height) + { assert_eq!(addrs.len(), 1); assert_eq!(payout, 1000); assert!(addrs[0].is_burn()); @@ -3410,7 +3438,7 @@ fn get_pox_addrs() { }; let assert_latest_was_pox = |peer: &mut TestPeer| { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; @@ -3438,18 +3466,20 @@ fn get_pox_addrs() { }; // produce blocks until epoch 2.2 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + if get_tip(peer.chain.sortdb.as_ref()).block_height + > epochs[StacksEpochId::Epoch21].start_height { assert_latest_was_burn(&mut peer); } } let mut txs = vec![]; - let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let tip_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let stackers: Vec<_> = keys .iter() .zip([ @@ -3477,7 +3507,7 @@ fn get_pox_addrs() { let target_height = 
burnchain.reward_cycle_to_block_height(first_v3_cycle); // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < target_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height < target_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); assert_latest_was_burn(&mut peer); } @@ -3588,7 +3618,7 @@ fn stack_with_segwit() { let mut coinbase_nonce = 0; let assert_latest_was_burn = |peer: &mut TestPeer| { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; @@ -3604,11 +3634,16 @@ fn stack_with_segwit() { assert!(commit.burn_fee > 0); let (addrs, payout) = get_burn_pox_addr_info(peer); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; info!("Checking burn outputs at burn_height = {}", burn_height); - if peer.config.burnchain.is_in_prepare_phase(burn_height) { + if peer + .config + .chain_config + .burnchain + .is_in_prepare_phase(burn_height) + { assert_eq!(addrs.len(), 1); assert_eq!(payout, 1000); assert!(addrs[0].is_burn()); @@ -3621,7 +3656,7 @@ fn stack_with_segwit() { }; let assert_latest_was_pox = |peer: &mut TestPeer| { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; @@ -3649,18 +3684,20 @@ fn stack_with_segwit() { }; // produce blocks until epoch 2.2 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if 
get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + if get_tip(peer.chain.sortdb.as_ref()).block_height + > epochs[StacksEpochId::Epoch21].start_height { assert_latest_was_burn(&mut peer); } } let mut txs = vec![]; - let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let tip_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let stackers: Vec<_> = keys .iter() .zip([ @@ -3687,7 +3724,7 @@ fn stack_with_segwit() { let target_height = burnchain.reward_cycle_to_block_height(first_v3_cycle); // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < target_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height < target_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); assert_latest_was_burn(&mut peer); } @@ -3830,12 +3867,13 @@ fn stack_aggregation_increase() { let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // Roll to Epoch-2.4 and perform the delegate-stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit delegation tx for alice let alice_delegation_1 = make_pox_3_contract_call( @@ -3898,7 +3936,7 @@ fn stack_aggregation_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -3926,7 +3964,7 @@ fn stack_aggregation_increase() { assert_eq!(partial_stacked, 512 
* POX_THRESHOLD_STEPS_USTX); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -3977,7 +4015,7 @@ fn stack_aggregation_increase() { bob_nonce += 1; latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -4245,12 +4283,13 @@ fn pox_3_delegate_stx_addr_validation() { let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // Roll to Epoch-2.4 and perform the delegate-stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index f26a5651d4..90ac2d21e5 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -46,6 +46,7 @@ use crate::chainstate::stacks::boot::signers_tests::get_signer_index; use crate::chainstate::stacks::boot::{PoxVersions, MINERS_NAME}; use crate::chainstate::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use crate::chainstate::stacks::*; +use crate::chainstate::tests::TestChainstateConfig; use crate::core::*; use crate::net::test::{TestEventObserver, TestEventObserverBlock, TestPeer, TestPeerConfig}; use crate::net::tests::NakamotoBootPlan; @@ -79,9 +80,10 @@ fn make_simple_pox_4_lock( 
let addr = key_to_stacks_addr(key); let pox_addr = PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes().clone()); let signer_pk = StacksPublicKey::from_private(key); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let next_reward_cycle = peer .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -369,7 +371,7 @@ fn pox_extend_transition() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_lockup( &alice, 0, @@ -416,12 +418,14 @@ fn pox_extend_transition() { let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); assert_eq!(alice_balance, 0); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch21].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch21].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); alice_rewards_to_v2_start_checks(latest_block.clone(), &mut peer); } @@ -430,7 +434,7 @@ fn pox_extend_transition() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_2_lockup( &bob, @@ -475,7 +479,7 @@ fn pox_extend_transition() { // produce blocks until the v2 reward cycles start let height_target = burnchain.reward_cycle_to_block_height(first_v2_cycle) - 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while 
get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // alice is still locked, balance should be 0 let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -488,7 +492,9 @@ fn pox_extend_transition() { v2_rewards_checks(latest_block, &mut peer); // roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch22].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should be locked, and so should bob's let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -515,13 +521,13 @@ fn pox_extend_transition() { assert_eq!(bob_account.amount_unlocked(), INITIAL_BALANCE); // Roll to pox4 activation and re-do the above stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(burnchain.pox_constants.pox_4_activation_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_signer_private = Secp256k1PrivateKey::random(); let alice_signer_key = Secp256k1PublicKey::from_private(&alice_signer_private); @@ -569,7 +575,7 @@ fn pox_extend_transition() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); // check that the "raw" reward set will contain entries for alice at the cycle start @@ -598,7 +604,7 @@ fn pox_extend_transition() { assert_eq!(alice_balance, 0); // advance to the first v3 reward cycle - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { 
latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -621,7 +627,7 @@ fn pox_extend_transition() { 2, ); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_4_lockup( &bob, 2, @@ -818,7 +824,7 @@ fn pox_extend_transition() { } fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec, u128) { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; let addrs_and_payout = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { @@ -900,10 +906,11 @@ fn pox_lock_unlock() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + if get_tip(peer.chain.sortdb.as_ref()).block_height + > epochs[StacksEpochId::Epoch21].start_height { assert_latest_was_burn(&mut peer); } @@ -911,11 +918,11 @@ fn pox_lock_unlock() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let mut txs = vec![]; - let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let tip_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let reward_cycle = burnchain.block_height_to_reward_cycle(tip_height).unwrap() as u128; let stackers: Vec<_> = keys .iter() @@ -960,13 +967,13 @@ fn pox_lock_unlock() { // Advance to start of rewards cycle stackers are participating in let target_height = burnchain.pox_constants.pox_4_activation_height + 5; 
- while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); // now we should be in the reward phase, produce the reward blocks @@ -976,7 +983,7 @@ fn pox_lock_unlock() { // Check that STX are locked for 2 reward cycles for _ in 0..lock_period { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1081,10 +1088,11 @@ fn pox_3_defunct() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + if get_tip(peer.chain.sortdb.as_ref()).block_height + > epochs[StacksEpochId::Epoch21].start_height { assert_latest_was_burn(&mut peer); } @@ -1092,11 +1100,11 @@ fn pox_3_defunct() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let mut txs = vec![]; - let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let tip_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let stackers: Vec<_> = keys .iter() .zip([ @@ -1153,13 +1161,13 @@ fn pox_3_defunct() { // Advance to start of rewards cycle stackers are participating in let target_height = burnchain.pox_constants.pox_4_activation_height + 5; - while 
get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); // now we should be in the reward phase, produce the reward blocks @@ -1168,7 +1176,7 @@ fn pox_3_defunct() { // Check next 3 reward cycles for _ in 0..=lock_period { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1211,10 +1219,11 @@ fn pox_3_unlocks() { // Advance to a few blocks before pox 3 unlock let target_height = burnchain.pox_constants.v3_unlock_height - 14; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + if get_tip(peer.chain.sortdb.as_ref()).block_height + > epochs[StacksEpochId::Epoch21].start_height { assert_latest_was_burn(&mut peer); } @@ -1222,11 +1231,11 @@ fn pox_3_unlocks() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let mut txs = vec![]; - let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let tip_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let stackers: Vec<_> = keys .iter() .zip([ @@ -1264,7 +1273,7 @@ fn pox_3_unlocks() { // Check that STX are locked for 2 reward cycles for _ in 0..2 { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); 
let cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1308,18 +1317,18 @@ fn pox_3_unlocks() { // Advance to v3 unlock let target_height = burnchain.pox_constants.v3_unlock_height; - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); // Check that STX are not locked for 3 reward cycles after pox4 starts for _ in 0..3 { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1396,7 +1405,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -1405,11 +1414,11 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let lock_period = 1; - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + let block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()); // stack-stx @@ -1483,7 +1492,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { steph_nonce += 1; // alice delegates STX to bob - let target_height = get_tip(peer.sortdb.as_ref()).block_height + let target_height = 
get_tip(peer.chain.sortdb.as_ref()).block_height + (3 * pox_constants.reward_cycle_length as u64) // 3 cycles (next cycle + 2) + 1; // additional few blocks shouldn't matter to unlock-cycle let alice_delegate = make_pox_4_delegate_stx( @@ -1497,7 +1506,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { let alice_delegate_nonce = alice_nonce; alice_nonce += 1; - let curr_height = get_tip(peer.sortdb.as_ref()).block_height; + let curr_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let bob_delegate_stack_nonce = bob_nonce; let bob_delegate_stack = make_pox_4_delegate_stack_stx( &bob, @@ -1544,7 +1553,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { &mut coinbase_nonce, )); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tipId = StacksBlockId::new(&tip.consensus_hash, &tip.canonical_stacks_tip_hash); assert_eq!(tipId, latest_block.unwrap()); @@ -1784,11 +1793,11 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } // produce blocks until the we're in the prepare phase (first block of prepare-phase was mined, i.e. 
pox-set for next cycle determined) - while !burnchain.is_in_prepare_phase(get_tip(peer.sortdb.as_ref()).block_height) { + while !burnchain.is_in_prepare_phase(get_tip(peer.chain.sortdb.as_ref()).block_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -1797,11 +1806,11 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height, + get_tip(peer.chain.sortdb.as_ref()).block_height, ); let lock_period = 1; - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + let block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()); // stack-stx @@ -1875,7 +1884,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { steph_nonce += 1; // alice delegates STX to bob - let target_height = get_tip(peer.sortdb.as_ref()).block_height + let target_height = get_tip(peer.chain.sortdb.as_ref()).block_height + (3 * pox_constants.reward_cycle_length as u64) // 3 cycles (next cycle + 2) + 1; // additional few blocks shouldn't matter to unlock-cycle let alice_delegate = make_pox_4_delegate_stx( @@ -1889,7 +1898,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { let alice_delegate_nonce = alice_nonce; alice_nonce += 1; - let curr_height = get_tip(peer.sortdb.as_ref()).block_height; + let curr_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let bob_delegate_stack_nonce = bob_nonce; let bob_delegate_stack = make_pox_4_delegate_stack_stx( &bob, @@ -1936,7 +1945,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { &mut coinbase_nonce, )); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tipId = StacksBlockId::new(&tip.consensus_hash, &tip.canonical_stacks_tip_hash); assert_eq!(tipId, latest_block.clone().unwrap()); @@ -2215,11 +2224,11 @@ fn 
pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } // produce blocks until the we're in the prepare phase (first block of prepare-phase was mined, i.e. pox-set for next cycle determined) - while !burnchain.is_in_prepare_phase(get_tip(peer.sortdb.as_ref()).block_height) { + while !burnchain.is_in_prepare_phase(get_tip(peer.chain.sortdb.as_ref()).block_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -2228,15 +2237,15 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let lock_period = 2; - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + let block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()); // alice delegates STX to bob - let target_height = get_tip(peer.sortdb.as_ref()).block_height + let target_height = get_tip(peer.chain.sortdb.as_ref()).block_height + (3 * pox_constants.reward_cycle_length as u64) // 3 cycles (next cycle + 2) + 1; // additional few blocks shouldn't matter to unlock-cycle let alice_delegate = make_pox_4_delegate_stx( @@ -2250,7 +2259,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() let alice_delegate_nonce = alice_nonce; alice_nonce += 1; - let curr_height = get_tip(peer.sortdb.as_ref()).block_height; + let curr_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let bob_delegate_stack_nonce 
= bob_nonce; let bob_delegate_stack = make_pox_4_delegate_stack_stx( &bob, @@ -2291,7 +2300,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() &mut coinbase_nonce, )); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tipId = StacksBlockId::new(&tip.consensus_hash, &tip.canonical_stacks_tip_hash); assert_eq!(tipId, latest_block.unwrap()); @@ -2440,11 +2449,11 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } // produce blocks until the we're 1 before the prepare phase (first block of prepare-phase not yet mined, whatever txs we create now won't be included in the reward set) - while !burnchain.is_in_prepare_phase(get_tip(peer.sortdb.as_ref()).block_height + 1) { + while !burnchain.is_in_prepare_phase(get_tip(peer.chain.sortdb.as_ref()).block_height + 1) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -2452,7 +2461,7 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()) * 120 / 100; // * 1.2 @@ -2477,7 +2486,7 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { &steph_pox_addr, steph_lock_period, &steph_signing_key, - get_tip(peer.sortdb.as_ref()).block_height, + get_tip(peer.chain.sortdb.as_ref()).block_height, Some(signature), u128::MAX, 1, @@ -2560,11 +2569,11 @@ fn 
pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } // produce blocks until the we're in the prepare phase (first block of prepare-phase was mined, i.e. pox-set for next cycle determined) - while !burnchain.is_in_prepare_phase(get_tip(peer.sortdb.as_ref()).block_height) { + while !burnchain.is_in_prepare_phase(get_tip(peer.chain.sortdb.as_ref()).block_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -2572,7 +2581,7 @@ fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()) * 120 / 100; // * 1.2 @@ -2597,7 +2606,7 @@ fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { &steph_pox_addr, steph_lock_period, &steph_signing_key, - get_tip(peer.sortdb.as_ref()).block_height, + get_tip(peer.chain.sortdb.as_ref()).block_height, Some(signature), u128::MAX, 1, @@ -2681,7 +2690,7 @@ fn pox_4_delegate_stack_increase_events() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -2697,7 +2706,7 @@ fn pox_4_delegate_stack_increase_events() { alice_principal.clone(), amount / 2, 
bob_pox_addr.clone(), - get_tip(peer.sortdb.as_ref()).block_height as u128, + get_tip(peer.chain.sortdb.as_ref()).block_height as u128, 2, ); @@ -2791,15 +2800,15 @@ fn pox_4_revoke_delegate_stx_events() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + let block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let current_cycle = get_current_reward_cycle(&peer, &burnchain); let next_cycle = current_cycle + 1; let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()); @@ -2845,7 +2854,7 @@ fn pox_4_revoke_delegate_stx_events() { // check delegate with expiry - let target_height = get_tip(peer.sortdb.as_ref()).block_height + 10; + let target_height = get_tip(peer.chain.sortdb.as_ref()).block_height + 10; let alice_delegate_2 = make_pox_4_delegate_stx( &alice, alice_nonce, @@ -2860,7 +2869,7 @@ fn pox_4_revoke_delegate_stx_events() { peer.tenure_with_txs(&[alice_delegate_2], &mut coinbase_nonce); // produce blocks until delegation expired - while get_tip(peer.sortdb.as_ref()).block_height <= target_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height <= target_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -3015,7 +3024,7 @@ fn verify_signer_key_signatures() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < 
u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -4297,31 +4306,74 @@ fn stack_agg_increase() { ) .unwrap(); - peer_config.aggregate_public_key = Some(aggregate_public_key); + peer_config.chain_config.aggregate_public_key = Some(aggregate_public_key); peer_config .stacker_dbs .push(boot_code_id(MINERS_NAME, false)); - peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(1000)); // Let us not activate nakamoto to make life easier - peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; - peer_config.initial_balances.append(&mut initial_balances); - peer_config.burnchain.pox_constants.v2_unlock_height = 81; - peer_config.burnchain.pox_constants.pox_3_activation_height = 101; - peer_config.burnchain.pox_constants.v3_unlock_height = 102; - peer_config.burnchain.pox_constants.pox_4_activation_height = 105; - peer_config.test_signers = Some(test_signers); - peer_config.burnchain.pox_constants.reward_cycle_length = 20; - peer_config.burnchain.pox_constants.prepare_length = 5; - let epochs = peer_config.epochs.clone().unwrap(); + peer_config.chain_config.epochs = Some(StacksEpoch::unit_test_3_0_only(1000)); // Let us not activate nakamoto to make life easier + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config + .chain_config + .initial_balances + .append(&mut initial_balances); + peer_config + .chain_config + .burnchain + .pox_constants + .v2_unlock_height = 81; + peer_config + .chain_config + .burnchain + .pox_constants + .pox_3_activation_height = 101; + peer_config + .chain_config + .burnchain + .pox_constants + .v3_unlock_height = 102; + peer_config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height = 105; + peer_config.chain_config.test_signers = Some(test_signers); + peer_config + .chain_config + 
.burnchain + .pox_constants + .reward_cycle_length = 20; + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length = 5; + let epochs = peer_config.chain_config.epochs.clone().unwrap(); let epoch_3 = &epochs[StacksEpochId::Epoch30]; let mut peer = TestPeer::new_with_observer(peer_config, Some(&observer)); let mut peer_nonce = 0; // Set constants - let reward_cycle_len = peer.config.burnchain.pox_constants.reward_cycle_length; - let prepare_phase_len = peer.config.burnchain.pox_constants.prepare_length; + let reward_cycle_len = peer + .config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length; + let prepare_phase_len = peer + .config + .chain_config + .burnchain + .pox_constants + .prepare_length; // Advance into pox4 - let mut target_height = peer.config.burnchain.pox_constants.pox_4_activation_height; + let mut target_height = peer + .config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height; let mut latest_block = None; // Produce blocks until the first reward phase that everyone should be in while peer.get_burn_block_height() < u64::from(target_height) { @@ -4329,7 +4381,7 @@ fn stack_agg_increase() { } let latest_block = latest_block.expect("Failed to get tip"); // Current reward cycle: 5 (starts at burn block 101) - let reward_cycle = get_current_reward_cycle(&peer, &peer.config.burnchain); + let reward_cycle = get_current_reward_cycle(&peer, &peer.config.chain_config.burnchain); let next_reward_cycle = reward_cycle.wrapping_add(1); // Current burn block height: 105 let burn_block_height = peer.get_burn_block_height(); @@ -5089,7 +5141,7 @@ fn stack_increase_different_signer_keys(use_nakamoto: bool) { } pub fn assert_latest_was_burn(peer: &mut TestPeer) { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; @@ -5105,11 +5157,16 @@ pub fn assert_latest_was_burn(peer: 
&mut TestPeer) { assert!(commit.burn_fee > 0); let (addrs, payout) = get_burn_pox_addr_info(peer); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; info!("Checking burn outputs at burn_height = {burn_height}"); - if peer.config.burnchain.is_in_prepare_phase(burn_height) { + if peer + .config + .chain_config + .burnchain + .is_in_prepare_phase(burn_height) + { assert_eq!(addrs.len(), 1); assert_eq!(payout, 1000); assert!(addrs[0].is_burn()); @@ -5122,7 +5179,7 @@ pub fn assert_latest_was_burn(peer: &mut TestPeer) { } fn assert_latest_was_pox(peer: &mut TestPeer) -> Vec { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; @@ -6648,31 +6705,72 @@ pub fn pox_4_scenario_test_setup<'a>( ) .unwrap(); - peer_config.aggregate_public_key = Some(aggregate_public_key); + peer_config.chain_config.aggregate_public_key = Some(aggregate_public_key); peer_config .stacker_dbs .push(boot_code_id(MINERS_NAME, false)); - peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(1000)); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config.chain_config.epochs = Some(StacksEpoch::unit_test_3_0_only(1000)); + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; peer_config + .chain_config .initial_balances .extend_from_slice(&initial_balances); - peer_config.burnchain.pox_constants.v2_unlock_height = 81; - peer_config.burnchain.pox_constants.pox_3_activation_height = 101; - peer_config.burnchain.pox_constants.v3_unlock_height = 102; - peer_config.burnchain.pox_constants.pox_4_activation_height = 105; - peer_config.test_signers = Some(test_signers); - 
peer_config.burnchain.pox_constants.reward_cycle_length = 20; - peer_config.burnchain.pox_constants.prepare_length = 5; + peer_config + .chain_config + .burnchain + .pox_constants + .v2_unlock_height = 81; + peer_config + .chain_config + .burnchain + .pox_constants + .pox_3_activation_height = 101; + peer_config + .chain_config + .burnchain + .pox_constants + .v3_unlock_height = 102; + peer_config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height = 105; + peer_config.chain_config.test_signers = Some(test_signers); + peer_config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length = 20; + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length = 5; let mut peer = TestPeer::new_with_observer(peer_config.clone(), Some(observer)); let mut peer_nonce = 0; - let reward_cycle_len = peer.config.burnchain.pox_constants.reward_cycle_length; - let prepare_phase_len = peer.config.burnchain.pox_constants.prepare_length; + let reward_cycle_len = peer + .config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length; + let prepare_phase_len = peer + .config + .chain_config + .burnchain + .pox_constants + .prepare_length; - let target_height = peer.config.burnchain.pox_constants.pox_4_activation_height; + let target_height = peer + .config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height; let mut latest_block = None; while peer.get_burn_block_height() < u64::from(target_height) { @@ -6681,10 +6779,10 @@ pub fn pox_4_scenario_test_setup<'a>( } let latest_block = latest_block.expect("Failed to get tip"); - let reward_cycle = get_current_reward_cycle(&peer, &peer.config.burnchain); + let reward_cycle = get_current_reward_cycle(&peer, &peer.config.chain_config.burnchain); let next_reward_cycle = reward_cycle.wrapping_add(1); let burn_block_height = peer.get_burn_block_height(); - let current_block_height = peer.config.current_block; + let current_block_height = 
peer.config.chain_config.current_block; let min_ustx = get_stacking_minimum(&mut peer, &latest_block); ( @@ -6745,8 +6843,8 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( max_amount: None, }]; let mut peer_config = TestPeerConfig::default(); - peer_config.aggregate_public_key = Some(aggregate_public_key); - let mut pox_constants = peer_config.clone().burnchain.pox_constants; + peer_config.chain_config.aggregate_public_key = Some(aggregate_public_key); + let mut pox_constants = peer_config.chain_config.burnchain.pox_constants.clone(); pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -6762,12 +6860,12 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( boot_plan.initial_balances = initial_balances; boot_plan.pox_constants = pox_constants.clone(); burnchain.pox_constants = pox_constants; - peer_config.burnchain = burnchain.clone(); - peer_config.test_signers = Some(test_signers.clone()); + peer_config.chain_config.burnchain = burnchain.clone(); + peer_config.chain_config.test_signers = Some(test_signers.clone()); info!("---- Booting into Nakamoto Peer ----"); let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], Some(observer)); - let sort_db = peer.sortdb.as_ref().unwrap(); + let sort_db = peer.chain.sortdb.as_ref().unwrap(); let latest_block = sort_db .index_handle_at_tip() .get_nakamoto_tip_block_id() @@ -6775,7 +6873,7 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( .unwrap(); let coinbase_nonce = 0; - let burn_block_height = get_tip(peer.sortdb.as_ref()).block_height; + let burn_block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let reward_cycle = burnchain .block_height_to_reward_cycle(burn_block_height) .unwrap() as u128; @@ -6954,9 +7052,16 @@ fn test_scenario_one(use_nakamoto: bool) { // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase) let target_height = peer .config + .chain_config .burnchain 
.reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer.config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer.config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, @@ -7043,6 +7148,7 @@ fn test_scenario_one(use_nakamoto: bool) { // 4.3 Check unlock height let unlock_height_expected = Value::UInt( peer.config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64 + lock_period as u64) .wrapping_sub(1) as u128, @@ -7092,6 +7198,7 @@ fn test_scenario_one(use_nakamoto: bool) { // 6.3 Check unlock height (end of cycle 7 - block 140) let unlock_height_expected = Value::UInt( peer.config + .chain_config .burnchain .reward_cycle_to_block_height((next_reward_cycle + lock_period) as u64) .wrapping_sub(1) as u128, @@ -7112,7 +7219,11 @@ fn test_scenario_one(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -7122,7 +7233,11 @@ fn test_scenario_one(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -7139,7 +7254,7 @@ fn test_scenario_one(use_nakamoto: bool) { &tester_key, 1, // only tx is a stack-stx tester_index, - peer_config.aggregate_public_key.unwrap(), + peer_config.chain_config.aggregate_public_key.unwrap(), 1, next_reward_cycle, ); @@ -7150,6 +7265,7 @@ fn test_scenario_one(use_nakamoto: bool) { // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) let mut target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(target_reward_cycle as u64); info!( @@ -7387,9 +7503,16 @@ fn 
test_deser_abort() { // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase) let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer.config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer.config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, @@ -7476,6 +7599,7 @@ fn test_deser_abort() { // 4.3 Check unlock height let unlock_height_expected = Value::UInt( peer.config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64 + lock_period as u64) .wrapping_sub(1) as u128, @@ -7525,6 +7649,7 @@ fn test_deser_abort() { // 6.3 Check unlock height (end of cycle 7 - block 140) let unlock_height_expected = Value::UInt( peer.config + .chain_config .burnchain .reward_cycle_to_block_height((next_reward_cycle + lock_period) as u64) .wrapping_sub(1) as u128, @@ -7712,9 +7837,16 @@ fn test_scenario_two(use_nakamoto: bool) { // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase for reward cycle 6) let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, @@ -7824,7 +7956,11 @@ fn test_scenario_two(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -7834,7 +7970,11 @@ fn test_scenario_two(use_nakamoto: bool) { &alice.private_key, alice.nonce, 
alice_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -7844,7 +7984,11 @@ fn test_scenario_two(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 3, next_reward_cycle, ); @@ -7854,7 +7998,7 @@ fn test_scenario_two(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - peer_config.aggregate_public_key.unwrap(), + peer_config.chain_config.aggregate_public_key.unwrap(), 1, next_reward_cycle, ); @@ -7870,6 +8014,7 @@ fn test_scenario_two(use_nakamoto: bool) { // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(target_reward_cycle as u64); let (latest_block, tx_block, receipts) = advance_to_block_height( @@ -8097,6 +8242,7 @@ fn test_scenario_three(use_nakamoto: bool) { david.principal.clone(), Some( peer.config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) .into(), @@ -8213,9 +8359,16 @@ fn test_scenario_three(use_nakamoto: bool) { // Commit txs in next block & advance to reward set calculation of the next reward cycle let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, @@ -8527,9 +8680,16 @@ fn test_scenario_four(use_nakamoto: bool) { // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase for reward cycle 6) let target_height = peer .config + .chain_config .burnchain 
.reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, @@ -8561,7 +8721,11 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, bob_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -8571,7 +8735,11 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -8581,7 +8749,11 @@ fn test_scenario_four(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -8598,7 +8770,11 @@ fn test_scenario_four(use_nakamoto: bool) { &tester_key, 1, // only tx is a stack-stx tester_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -8608,9 +8784,16 @@ fn test_scenario_four(use_nakamoto: bool) { // Commit vote txs & move to the prepare phase of reward cycle 7 (block 155) let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64 + 1) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64); + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ); let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, &observer, @@ -8654,7 +8837,11 @@ fn 
test_scenario_four(use_nakamoto: bool) { .expect("No approved key found"); assert_eq!( approved_key, - peer_config.aggregate_public_key.clone().unwrap() + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap() ); // Alice stack-extend err tx @@ -8689,7 +8876,7 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - peer_config.aggregate_public_key.unwrap(), + peer_config.chain_config.aggregate_public_key.unwrap(), 1, 7, ); @@ -9089,7 +9276,7 @@ pub fn prepare_pox4_test<'a>( max_amount: None, }) .collect::>(); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -9113,7 +9300,7 @@ pub fn prepare_pox4_test<'a>( info!("---- Booting into Nakamoto Peer ----"); let peer = boot_plan.boot_into_nakamoto_peer(vec![], observer); - let sort_db = peer.sortdb.as_ref().unwrap(); + let sort_db = peer.chain.sortdb.as_ref().unwrap(); let latest_block = sort_db .index_handle_at_tip() .get_nakamoto_tip_block_id() @@ -9121,7 +9308,7 @@ pub fn prepare_pox4_test<'a>( .unwrap(); let coinbase_nonce = 0; - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + let block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; info!("Block height: {}", block_height); @@ -9139,16 +9326,16 @@ pub fn prepare_pox4_test<'a>( let target_height = burnchain.pox_constants.pox_4_activation_height; let mut coinbase_nonce = 0; let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if 
get_tip(peer.sortdb.as_ref()).block_height + if get_tip(peer.chain.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height { assert_latest_was_burn(&mut peer); } } - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + let block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; ( burnchain, peer, @@ -9178,9 +9365,10 @@ pub fn tenure_with_txs_fallible( tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let blocks_and_sizes = peer.make_nakamoto_tenure_and( tenure_change_tx, @@ -9202,8 +9390,8 @@ pub fn tenure_with_txs_fallible( .map(|(block, _, _)| block) .collect(); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let latest_block = sort_db .index_handle_at_tip() .get_nakamoto_tip_block_id() @@ -9230,8 +9418,8 @@ pub fn tenure_with_txs( tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let blocks_and_sizes = peer.make_nakamoto_tenure( tenure_change_tx, @@ -9251,8 +9439,8 @@ pub fn tenure_with_txs( .map(|(block, _, _)| block) .collect(); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut 
peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let latest_block = sort_db .index_handle_at_tip() .get_nakamoto_tip_block_id() @@ -9328,13 +9516,14 @@ fn missed_slots_no_unlock() { + 1; // produce blocks until epoch 2.5 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch25].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch25].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } // perform lockups so we can test that pox-4 does not exhibit unlock-on-miss behavior - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_simple_pox_4_lock(&alice, &mut peer, 1024 * POX_THRESHOLD_STEPS_USTX, 6); @@ -9377,7 +9566,7 @@ fn missed_slots_no_unlock() { ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -9578,12 +9767,13 @@ fn no_lockups_2_5() { + 1; // produce blocks until epoch 2.5 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch25].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch25].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_simple_pox_4_lock(&bob, &mut peer, 1 * POX_THRESHOLD_STEPS_USTX, 6); @@ -9618,7 +9808,7 @@ fn no_lockups_2_5() { ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -9722,36 
+9912,43 @@ fn test_scenario_five(use_nakamoto: bool) { let carl_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(carl_lock_period) as u64) as u128; let frank_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(frank_lock_period) as u64) as u128; let grace_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(grace_lock_period) as u64) as u128; let heidi_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(heidi_lock_period) as u64) as u128; let ivan_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(ivan_lock_period) as u64) as u128; let jude_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(jude_lock_period) as u64) as u128; let mallory_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(mallory_lock_period) as u64) as u128; @@ -9964,15 +10161,22 @@ fn test_scenario_five(use_nakamoto: bool) { // Advance to reward set calculation of the next reward cycle let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); info!( "Scenario five: submitting stacking txs."; "target_height" => target_height, "next_reward_cycle" => next_reward_cycle, - "prepare_length" => peer_config.burnchain.pox_constants.prepare_length, + "prepare_length" => peer_config.chain_config.burnchain.pox_constants.prepare_length, ); let (latest_block, tx_block, _receipts) = 
advance_to_block_height( &mut peer, @@ -10031,7 +10235,11 @@ fn test_scenario_five(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -10039,7 +10247,11 @@ fn test_scenario_five(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -10047,7 +10259,11 @@ fn test_scenario_five(use_nakamoto: bool) { &carl.private_key, carl.nonce, carl_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -10058,6 +10274,7 @@ fn test_scenario_five(use_nakamoto: bool) { // Mine vote txs & advance to the reward set calculation of the next reward cycle let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); info!( @@ -10088,7 +10305,10 @@ fn test_scenario_five(use_nakamoto: bool) { } let approved_key = get_approved_aggregate_key(&mut peer, &latest_block, next_reward_cycle) .expect("No approved key found"); - assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); + assert_eq!( + approved_key, + peer_config.chain_config.aggregate_public_key.unwrap() + ); // Stack for following reward cycle again and then advance to epoch 3.0 activation boundary let reward_cycle = peer.get_reward_cycle() as u128; @@ -10167,9 +10387,16 @@ fn test_scenario_five(use_nakamoto: bool) { let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) 
.wrapping_add(2); info!( "Scenario five: submitting extend and aggregate commit txs. Target height: {}", @@ -10225,8 +10452,9 @@ fn test_scenario_five(use_nakamoto: bool) { let cycle_id = next_reward_cycle; // Generate next cycle aggregate public key - peer_config.aggregate_public_key = Some( + peer_config.chain_config.aggregate_public_key = Some( peer_config + .chain_config .test_signers .unwrap() .generate_aggregate_key(cycle_id as u64), @@ -10239,7 +10467,11 @@ fn test_scenario_five(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -10247,7 +10479,11 @@ fn test_scenario_five(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -10255,7 +10491,11 @@ fn test_scenario_five(use_nakamoto: bool) { &carl.private_key, carl.nonce, carl_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -10266,6 +10506,7 @@ fn test_scenario_five(use_nakamoto: bool) { let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); // Submit vote transactions @@ -10297,7 +10538,10 @@ fn test_scenario_five(use_nakamoto: bool) { } let approved_key = get_approved_aggregate_key(&mut peer, &latest_block, next_reward_cycle) .expect("No approved key found"); - assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); + assert_eq!( + approved_key, + peer_config.chain_config.aggregate_public_key.unwrap() + ); // Let us start stacking for the following reward cycle let current_reward_cycle = peer.get_reward_cycle() as u128; @@ -10378,9 +10622,16 @@ fn test_scenario_five(use_nakamoto: 
bool) { let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); // This assertion just makes testing logic a bit easier let davids_stackers = &[(grace, grace_lock_period), (heidi, heidi_lock_period)]; diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index 2418e75d5c..7cb414c7aa 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -350,8 +350,8 @@ pub fn prepare_signers_test<'a>( tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let blocks_and_sizes = peer.make_nakamoto_tenure( tenure_change_tx, @@ -409,8 +409,8 @@ fn advance_blocks( tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let recipient_addr = boot_code_addr(false); let blocks_and_sizes = peer.make_nakamoto_tenure( tenure_change_tx, diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs 
b/stackslib/src/chainstate/stacks/db/blocks.rs index 2b8373936e..f5d62cd509 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -412,7 +412,7 @@ impl StacksChainState { block_path.push(to_hex(&block_hash_bytes[0..2])); block_path.push(to_hex(&block_hash_bytes[2..4])); - block_path.push(format!("{}", index_block_hash)); + block_path.push(index_block_hash.to_string()); block_path } @@ -9746,7 +9746,7 @@ pub mod test { assert_eq!(mblock_info.len(), mblocks.len()); let this_mblock_info = &mblock_info[i]; - test_debug!("Pass {} (seq {})", &i, &this_mblock_info.sequence); + test_debug!("Pass {i} (seq {})", &this_mblock_info.sequence); assert_eq!(this_mblock_info.consensus_hash, consensus_hash); assert_eq!(this_mblock_info.anchored_block_hash, block.block_hash()); @@ -9799,7 +9799,7 @@ pub mod test { let mut parent_consensus_hashes = vec![]; for i in 0..32 { - test_debug!("Making block {}", i); + test_debug!("Making block {i}"); let privk = StacksPrivateKey::random(); let block = make_empty_coinbase_block(&privk); @@ -9814,7 +9814,7 @@ pub mod test { } for i in 0..blocks.len() { - test_debug!("Making microblock stream {}", i); + test_debug!("Making microblock stream {i}"); // make a sample microblock stream for block i let mut mblocks = make_sample_microblock_stream(&privks[i], &blocks[i].block_hash()); mblocks.truncate(3); @@ -9852,7 +9852,7 @@ pub mod test { .zip(µblocks) .enumerate() { - test_debug!("Store microblock stream {} to staging", i); + test_debug!("Store microblock stream {i} to staging"); for mblock in mblocks.iter() { test_debug!("Store microblock {}", &mblock.block_hash()); store_staging_microblock( @@ -10025,27 +10025,29 @@ pub mod test { .unwrap(); let initial_balance = 1000000000; - peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)]; + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), initial_balance)]; let recv_addr = 
StacksAddress::from_string("ST1H1B54MY50RMBRRKS7GV2ZWG79RZ1RQ1ETW4E01").unwrap(); let mut peer = TestPeer::new(peer_config.clone()); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); // NOTE: first_stacks_block_height is the burnchain height at which the node starts mining. // The burnchain block at this height will have the VRF key register, but no block-commit. // The first burnchain block with a Stacks block is at first_stacks_block_height + 1. let (first_stacks_block_height, canonical_sort_id) = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); (sn.block_height, sn.sortition_id) }; let mut header_hashes = vec![]; for i in 0..(first_stacks_block_height + 1) { - let ic = peer.sortdb.as_ref().unwrap().index_conn(); + let ic = peer.chain.sortdb.as_ref().unwrap().index_conn(); let sn = SortitionDB::get_ancestor_snapshot(&ic, i, &canonical_sort_id) .unwrap() .unwrap(); @@ -10060,16 +10062,22 @@ pub mod test { } let last_stacks_block_height = first_stacks_block_height - + ((peer_config.burnchain.pox_constants.reward_cycle_length as u64) * 5) + + ((peer_config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length as u64) + * 5) + 2; let mut mblock_nonce = 0; // make some blocks, up to and including a fractional reward cycle for tenure_id in 0..(last_stacks_block_height - first_stacks_block_height) { - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!(tip.block_height, first_stacks_block_height + tenure_id); @@ -10122,7 +10130,7 @@ pub mod test { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &peer_config.burnchain, + 
&peer_config.chain_config.burnchain, ) .unwrap(); @@ -10181,6 +10189,7 @@ pub mod test { } let total_reward_cycles = peer_config + .chain_config .burnchain .block_height_to_reward_cycle(last_stacks_block_height) .unwrap(); @@ -10197,14 +10206,20 @@ pub mod test { // everything is stored, so check each reward cycle for i in 0..total_reward_cycles { - let start_range = peer_config.burnchain.reward_cycle_to_block_height(i); + let start_range = peer_config + .chain_config + .burnchain + .reward_cycle_to_block_height(i); let end_range = cmp::min( header_hashes.len() as u64, - peer_config.burnchain.reward_cycle_to_block_height(i + 1), + peer_config + .chain_config + .burnchain + .reward_cycle_to_block_height(i + 1), ); let blocks_inv = chainstate .get_blocks_inventory_for_reward_cycle( - &peer_config.burnchain, + &peer_config.chain_config.burnchain, i, &header_hashes[(start_range as usize)..(end_range as usize)], ) @@ -10248,10 +10263,16 @@ pub mod test { // orphan blocks for i in 0..total_reward_cycles { - let start_range = peer_config.burnchain.reward_cycle_to_block_height(i); + let start_range = peer_config + .chain_config + .burnchain + .reward_cycle_to_block_height(i); let end_range = cmp::min( header_hashes.len() as u64, - peer_config.burnchain.reward_cycle_to_block_height(i + 1), + peer_config + .chain_config + .burnchain + .reward_cycle_to_block_height(i + 1), ); for block_height in start_range..end_range { if let Some(hdr_hash) = &header_hashes[block_height as usize].1 { @@ -10272,14 +10293,20 @@ pub mod test { } for i in 0..total_reward_cycles { - let start_range = peer_config.burnchain.reward_cycle_to_block_height(i); + let start_range = peer_config + .chain_config + .burnchain + .reward_cycle_to_block_height(i); let end_range = cmp::min( header_hashes.len() as u64, - peer_config.burnchain.reward_cycle_to_block_height(i + 1), + peer_config + .chain_config + .burnchain + .reward_cycle_to_block_height(i + 1), ); let blocks_inv = chainstate 
.get_blocks_inventory_for_reward_cycle( - &peer_config.burnchain, + &peer_config.chain_config.burnchain, i, &header_hashes[(start_range as usize)..(end_range as usize)], ) @@ -10302,25 +10329,27 @@ pub mod test { #[test] fn test_get_parent_block_header() { let peer_config = TestPeerConfig::new(function_name!(), 21313, 21314); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; let mut last_block_ch: Option = None; let mut last_parent_opt: Option = None; for tenure_id in 0..num_blocks { - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!( tip.block_height, @@ -10387,7 +10416,7 @@ pub mod test { if tenure_id == 0 { let parent_header_opt = StacksChainState::load_parent_block_header( - &peer.sortdb.as_ref().unwrap().index_conn(), + &peer.chain.sortdb.as_ref().unwrap().index_conn(), &blocks_path, &consensus_hash, &stacks_block.block_hash(), @@ -10395,7 +10424,7 @@ pub mod test { assert!(parent_header_opt.is_err()); } else { let parent_header_opt = StacksChainState::load_parent_block_header( - &peer.sortdb.as_ref().unwrap().index_conn(), + &peer.chain.sortdb.as_ref().unwrap().index_conn(), &blocks_path, &consensus_hash, &stacks_block.block_hash(), @@ -10844,31 +10873,37 @@ pub mod test { .map(|addr| (addr.to_account_principal(), initial_balance)) .collect(); 
init_balances.push((addr.to_account_principal(), initial_balance)); - peer_config.initial_balances = init_balances; + peer_config.chain_config.initial_balances = init_balances; let mut epochs = StacksEpoch::unit_test_2_1(0); let last_epoch = epochs.last_mut().unwrap(); last_epoch.block_limit.runtime = 10_000_000; - peer_config.epochs = Some(epochs); - peer_config.burnchain.pox_constants.v1_unlock_height = 26; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + peer_config + .chain_config + .burnchain + .pox_constants + .v1_unlock_height = 26; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; let mut last_block_id = StacksBlockId([0x00; 32]); for tenure_id in 0..num_blocks { let del_addr = &del_addrs[tenure_id]; - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!( tip.block_height, @@ -11024,11 +11059,12 @@ pub mod test { ); } - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); { let chainstate = peer.chainstate(); let (mut chainstate_tx, clarity_instance) = @@ -11061,12 +11097,12 @@ pub mod test { assert_eq!(transfer_stx_ops, 
expected_transfer_ops); assert_eq!(delegate_stx_ops, expected_del_ops); } - peer.sortdb.replace(sortdb); + peer.chain.sortdb.replace(sortdb); } // all burnchain transactions mined, even if there was no sortition in the burn block in // which they were mined. - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); // definitely missing some blocks -- there are empty sortitions let stacks_tip = @@ -11085,7 +11121,7 @@ pub mod test { StacksChainState::get_account(conn, &addr.to_account_principal()) }) .unwrap(); - peer.sortdb.replace(sortdb); + peer.chain.sortdb.replace(sortdb); assert_eq!( account.stx_balance.get_total_balance().unwrap(), @@ -11166,32 +11202,38 @@ pub mod test { .map(|addr| (addr.to_account_principal(), initial_balance)) .collect(); init_balances.push((addr.to_account_principal(), initial_balance)); - peer_config.initial_balances = init_balances; + peer_config.chain_config.initial_balances = init_balances; let mut epochs = StacksEpoch::unit_test_2_1(0); let last_epoch = epochs.last_mut().unwrap(); last_epoch.block_limit.runtime = 10_000_000; last_epoch.block_limit.read_length = 10_000_000; - peer_config.epochs = Some(epochs); - peer_config.burnchain.pox_constants.v1_unlock_height = 26; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + peer_config + .chain_config + .burnchain + .pox_constants + .v1_unlock_height = 26; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; let mut last_block_id = StacksBlockId([0x00; 32]); for 
tenure_id in 0..num_blocks { let del_addr = &del_addrs[tenure_id]; - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!( tip.block_height, @@ -11704,11 +11746,12 @@ pub mod test { ); } - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); { let chainstate = peer.chainstate(); let (mut chainstate_tx, clarity_instance) = @@ -11741,12 +11784,12 @@ pub mod test { assert_eq!(transfer_stx_ops, expected_transfer_ops); assert_eq!(delegate_stx_ops, expected_delegate_ops); } - peer.sortdb.replace(sortdb); + peer.chain.sortdb.replace(sortdb); } // all burnchain transactions mined, even if there was no sortition in the burn block in // which they were mined. 
- let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); // definitely missing some blocks -- there are empty sortitions let stacks_tip = @@ -11768,7 +11811,7 @@ pub mod test { StacksChainState::get_account(conn, &addr.to_account_principal()) }) .unwrap(); - peer.sortdb.replace(sortdb); + peer.chain.sortdb.replace(sortdb); // skipped tenure 6's TransferSTX assert_eq!( diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 3cef8148d6..08eb119f3f 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -652,18 +652,20 @@ mod test { let initial_balance = 1000000000; let mut peer_config = TestPeerConfig::new(function_name!(), 7000, 7001); - peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), initial_balance)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; @@ -674,9 +676,10 @@ mod test { Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); // send transactions to the mempool - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!( tip.block_height, @@ -754,7 +757,7 @@ mod test { 
// build 1-block microblock stream let microblocks = { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_iconn = sortdb .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); @@ -813,7 +816,7 @@ mod test { microblock }; - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); vec![microblock] }; @@ -829,7 +832,7 @@ mod test { } // process microblock stream to generate unconfirmed state - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let iconn = sortdb .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); @@ -848,14 +851,14 @@ mod test { }) .unwrap() .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); // move 1 stx per round assert_eq!(recv_balance.amount_unlocked(), (tenure_id + 1) as u128); let (canonical_burn, canonical_block) = SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let iconn = sortdb .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); @@ -869,7 +872,7 @@ mod test { }) }) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert_eq!(confirmed_recv_balance.amount_unlocked(), tenure_id as u128); eprintln!("\nrecv_balance: {}\nconfirmed_recv_balance: {}\nblock header {}: {:?}\ntip: {}/{}\n", recv_balance.amount_unlocked(), confirmed_recv_balance.amount_unlocked(), &stacks_block.block_hash(), &stacks_block.header, &canonical_burn, &canonical_block); @@ -889,18 +892,20 @@ mod test { let initial_balance = 1000000000; let mut peer_config = TestPeerConfig::new(function_name!(), 7002, 7003); - peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), initial_balance)]; + let 
burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); tip.block_height }; @@ -911,9 +916,10 @@ mod test { Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); // send transactions to the mempool - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!( tip.block_height, @@ -990,7 +996,7 @@ mod test { StacksAddress::from_string("ST1H1B54MY50RMBRRKS7GV2ZWG79RZ1RQ1ETW4E01").unwrap(); // build microblock stream iteratively, and test balances at each additional microblock - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let microblocks = { let sort_iconn = sortdb .index_handle_at_block(peer.chainstate(), &canonical_tip) @@ -1055,7 +1061,7 @@ mod test { } microblocks }; - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); // store microblock stream for (i, mblock) in microblocks.into_iter().enumerate() { @@ -1068,7 +1074,7 @@ mod test { .unwrap(); // process microblock stream to generate unconfirmed state - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); peer.chainstate() .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); @@ -1087,7 +1093,7 @@ mod test { ) .unwrap() .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); // move 100 ustx per round -- 10 per mblock assert_eq!( @@ 
-1097,7 +1103,7 @@ mod test { let (canonical_burn, canonical_block) = SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let confirmed_recv_balance = peer .chainstate() .with_read_only_clarity_tx( @@ -1112,7 +1118,7 @@ mod test { }, ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert_eq!( confirmed_recv_balance.amount_unlocked(), @@ -1136,25 +1142,27 @@ mod test { let initial_balance = 1000000000; let mut peer_config = TestPeerConfig::new(function_name!(), 7004, 7005); - peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)]; - peer_config.epochs = Some(EpochList::new(&[StacksEpoch { + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), initial_balance)]; + peer_config.chain_config.epochs = Some(EpochList::new(&[StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, end_height: (i64::MAX) as u64, block_limit: BLOCK_LIMIT_MAINNET_20, network_epoch: PEER_VERSION_EPOCH_2_0, }])); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 5; let num_microblocks = 3; let first_stacks_block_height = { - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); tip.block_height }; @@ -1170,9 +1178,10 @@ mod test { Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); // send transactions to the mempool - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = 
SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!( tip.block_height, @@ -1284,8 +1293,8 @@ mod test { &stacks_block.block_hash(), ); - let mut sortdb = peer.sortdb.take().unwrap(); - let mut inner_node = peer.stacks_node.take().unwrap(); + let mut sortdb = peer.chain.sortdb.take().unwrap(); + let mut inner_node = peer.chain.stacks_node.take().unwrap(); for i in 0..num_microblocks { Relayer::refresh_unconfirmed(&mut inner_node.chainstate, &mut sortdb); @@ -1368,8 +1377,8 @@ mod test { .unwrap(); } - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(inner_node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(inner_node); } let (consensus_hash, canonical_block) = @@ -1378,7 +1387,7 @@ mod test { StacksBlockHeader::make_index_block_hash(&consensus_hash, &canonical_block); // process microblock stream to generate unconfirmed state - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let iconn = sortdb .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); @@ -1397,7 +1406,7 @@ mod test { }) .unwrap() .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); // all valid txs were processed assert_eq!(db_recv_balance.amount_unlocked(), recv_balance); diff --git a/stackslib/src/chainstate/stacks/tests/accounting.rs b/stackslib/src/chainstate/stacks/tests/accounting.rs index 73e3072a07..b81e77ccb1 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -61,7 +61,7 @@ fn test_bad_microblock_fees_pre_v210() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2018, 2019); - peer_config.initial_balances = vec![ + peer_config.chain_config.initial_balances = vec![ (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; @@ -95,8 +95,8 @@ fn test_bad_microblock_fees_pre_v210() { 
network_epoch: PEER_VERSION_EPOCH_2_05, }, ]); - peer_config.epochs = Some(epochs); - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + let burnchain = peer_config.chain_config.burnchain.clone(); let num_blocks = 10; let mut anchored_sender_nonce = 0; @@ -109,11 +109,12 @@ fn test_bad_microblock_fees_pre_v210() { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -123,8 +124,9 @@ fn test_bad_microblock_fees_pre_v210() { let mut block_ids = vec![]; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -377,11 +379,11 @@ fn test_bad_microblock_fees_fix_transition() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2020, 2021); - peer_config.initial_balances = vec![ + peer_config.chain_config.initial_balances = vec![ (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let epochs = EpochList::new(&[ StacksEpoch { @@ -419,7 +421,7 @@ fn test_bad_microblock_fees_fix_transition() { network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); - peer_config.epochs = Some(epochs); + peer_config.chain_config.epochs = Some(epochs); let num_blocks = 10; let mut anchored_sender_nonce = 0; @@ 
-432,11 +434,12 @@ fn test_bad_microblock_fees_fix_transition() { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -446,8 +449,9 @@ fn test_bad_microblock_fees_fix_transition() { let mut block_ids = vec![]; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -734,11 +738,11 @@ fn test_get_block_info_v210() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2022, 2023); - peer_config.initial_balances = vec![ + peer_config.chain_config.initial_balances = vec![ (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let epochs = EpochList::new(&[ StacksEpoch { @@ -776,7 +780,7 @@ fn test_get_block_info_v210() { network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); - peer_config.epochs = Some(epochs); + peer_config.chain_config.epochs = Some(epochs); let num_blocks = 10; let mut anchored_sender_nonce = 0; @@ -789,11 +793,12 @@ fn test_get_block_info_v210() { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = 
SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -802,8 +807,9 @@ fn test_get_block_info_v210() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -996,7 +1002,7 @@ fn test_get_block_info_v210() { } for i in 0..num_blocks { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); @@ -1073,7 +1079,7 @@ fn test_get_block_info_v210() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); } } @@ -1105,11 +1111,11 @@ fn test_get_block_info_v210_no_microblocks() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2022, 2023); - peer_config.initial_balances = vec![ + peer_config.chain_config.initial_balances = vec![ (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let epochs = EpochList::new(&[ StacksEpoch { @@ -1147,7 +1153,7 @@ fn test_get_block_info_v210_no_microblocks() { network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); - peer_config.epochs = Some(epochs); + peer_config.chain_config.epochs = Some(epochs); let num_blocks = 10; let mut anchored_sender_nonce = 0; @@ -1160,11 +1166,12 @@ fn test_get_block_info_v210_no_microblocks() { let mut peer = 
TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -1173,8 +1180,9 @@ fn test_get_block_info_v210_no_microblocks() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -1299,7 +1307,7 @@ fn test_get_block_info_v210_no_microblocks() { } for i in 0..num_blocks { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); @@ -1364,7 +1372,7 @@ fn test_get_block_info_v210_no_microblocks() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); } } @@ -1425,7 +1433,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { 2024, 2025, ); - peer_config.initial_balances = vec![ + peer_config.chain_config.initial_balances = vec![ (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; @@ -1466,8 +1474,8 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); - peer_config.epochs = Some(epochs); - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + let burnchain = 
peer_config.chain_config.burnchain.clone(); let num_blocks = 10; let mut anchored_sender_nonce = 0; @@ -1480,11 +1488,12 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -1499,8 +1508,9 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -1764,7 +1774,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { let mut recipient_total_reward = 0; for i in 0..num_blocks { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); @@ -1867,11 +1877,11 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); } // finally, verify that the alt. 
recipient got all the coinbases except the first one - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 6e49fcf876..fb7434d78a 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -58,23 +58,25 @@ use crate::cost_estimates::UnitEstimator; #[test] fn test_build_anchored_blocks_empty() { let peer_config = TestPeerConfig::new(function_name!(), 2000, 2001); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; let mut last_block: Option = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); assert_eq!( tip.block_height, @@ -157,17 +159,18 @@ fn test_build_anchored_blocks_stx_transfers_single() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2002, 2003); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain 
= peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -177,8 +180,9 @@ fn test_build_anchored_blocks_stx_transfers_single() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -290,17 +294,18 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2022, 2023); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -310,8 +315,9 @@ 
fn test_build_anchored_blocks_empty_with_builder_timeout() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -426,16 +432,17 @@ fn test_build_anchored_blocks_stx_transfers_multi() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2004, 2005); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -445,8 +452,9 @@ fn test_build_anchored_blocks_stx_transfers_multi() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -586,8 +594,8 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2016, 2017); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + 
peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let epochs = EpochList::new(&[ StacksEpoch { @@ -618,7 +626,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { network_epoch: PEER_VERSION_EPOCH_2_05, }, ]); - peer_config.epochs = Some(epochs); + peer_config.chain_config.epochs = Some(epochs); let num_blocks = 10; @@ -630,11 +638,12 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -644,8 +653,9 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -822,8 +832,8 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2018, 2019); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let epochs = EpochList::new(&[ StacksEpoch { @@ -854,7 +864,7 @@ fn 
test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { network_epoch: PEER_VERSION_EPOCH_2_05, }, ]); - peer_config.epochs = Some(epochs); + peer_config.chain_config.epochs = Some(epochs); let num_blocks = 10; @@ -866,11 +876,12 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -882,8 +893,9 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -1106,7 +1118,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { } last_block_ch = Some( - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) .unwrap() .consensus_hash, ); @@ -1163,8 +1175,8 @@ fn test_build_anchored_blocks_skip_too_expensive() { initial_balances.push((addr_extra.to_account_principal(), 200000000000)); let mut peer_config = TestPeerConfig::new(function_name!(), 2006, 2007); - peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(EpochList::new(&[StacksEpoch { + peer_config.chain_config.initial_balances = initial_balances; + peer_config.chain_config.epochs = 
Some(EpochList::new(&[StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, end_height: i64::MAX as u64, @@ -1179,15 +1191,16 @@ fn test_build_anchored_blocks_skip_too_expensive() { }, network_epoch: PEER_VERSION_EPOCH_2_0, }])); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -1197,8 +1210,9 @@ fn test_build_anchored_blocks_skip_too_expensive() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -1238,7 +1252,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { if tenure_id > 0 { let mut expensive_part = vec![]; for i in 0..100 { - expensive_part.push(format!("(define-data-var var-{} int 0)", i)); + expensive_part.push(format!("(define-data-var var-{i} int 0)")); } let contract = format!( "{} @@ -1365,18 +1379,18 @@ fn test_build_anchored_blocks_mempool_fee_transaction_too_low() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2032, 2033); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut 
peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -1485,18 +1499,18 @@ fn test_build_anchored_blocks_zero_fee_transaction() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2032, 2033); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -1590,12 +1604,12 @@ fn test_build_anchored_blocks_multiple_chaintips() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2008, 2009); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut 
peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); // make a blank chainstate and mempool so we can mine empty blocks // without punishing the correspondingly "too expensive" transactions @@ -1603,15 +1617,17 @@ fn test_build_anchored_blocks_multiple_chaintips() { let mut blank_mempool = MemPoolDB::open_test(false, 1, &blank_chainstate.root_path).unwrap(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -1734,23 +1750,25 @@ fn test_build_anchored_blocks_empty_chaintips() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2010, 2011); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = 
SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -1874,23 +1892,25 @@ fn test_build_anchored_blocks_too_expensive_transactions() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2013, 2014); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -2026,15 +2046,16 @@ fn test_build_anchored_blocks_too_expensive_transactions() { #[test] fn test_build_anchored_blocks_invalid() { let peer_config = TestPeerConfig::new(function_name!(), 2014, 2015); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let sn = 
SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -2058,7 +2079,7 @@ fn test_build_anchored_blocks_invalid() { for tenure_id in 0..num_blocks { // send transactions to the mempool let mut tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) .unwrap(); if tenure_id == bad_block_ancestor_tenure { @@ -2233,24 +2254,26 @@ fn test_build_anchored_blocks_bad_nonces() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2012, 2013); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; for tenure_id in 0..num_blocks { - eprintln!("Start tenure {:?}", tenure_id); + eprintln!("Start tenure {tenure_id:?}"); // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -2482,16 +2505,17 @@ fn test_build_microblock_stream_forks() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2014, 2015); - 
peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -2500,8 +2524,9 @@ fn test_build_microblock_stream_forks() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -2780,16 +2805,17 @@ fn test_build_microblock_stream_forks_with_descendants() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2014, 2015); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -2808,8 +2834,9 @@ fn test_build_microblock_stream_forks_with_descendants() { for tenure_id in 0..num_blocks { // send transactions to the 
mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (mut burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -3027,7 +3054,7 @@ fn test_build_microblock_stream_forks_with_descendants() { let mblock_pubkey_hash = Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privks[tenure_id])); - test_debug!("Produce tenure {} block off of {}/{}", tenure_id, &parent_consensus_hash, &parent_header_hash); + test_debug!("Produce tenure {tenure_id} block off of {parent_consensus_hash}/{parent_header_hash}"); // force tenures 2 and 3 to mine off of forked siblings deeper than the // detected fork @@ -3249,11 +3276,11 @@ fn test_contract_call_across_clarity_versions() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2024, 2025); - peer_config.initial_balances = vec![ + peer_config.chain_config.initial_balances = vec![ (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let epochs = EpochList::new(&[ StacksEpoch { @@ -3285,16 +3312,17 @@ fn test_contract_call_across_clarity_versions() { network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); - peer_config.epochs = Some(epochs); + peer_config.chain_config.epochs = Some(epochs); let num_blocks = 10; let mut anchored_sender_nonce = 0; let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; 
@@ -3303,8 +3331,9 @@ fn test_contract_call_across_clarity_versions() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -3706,7 +3735,7 @@ fn test_contract_call_across_clarity_versions() { // all contracts deployed and called the right number of times, indicating that // cross-clarity-version contract calls are doable - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); @@ -3819,8 +3848,8 @@ fn test_is_tx_problematic() { initial_balances.push((addr_extra.to_account_principal(), 200000000000)); let mut peer_config = TestPeerConfig::new(function_name!(), 2018, 2019); - peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(EpochList::new(&[ + peer_config.chain_config.initial_balances = initial_balances; + peer_config.chain_config.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -3836,15 +3865,16 @@ fn test_is_tx_problematic() { network_epoch: PEER_VERSION_EPOCH_2_05, }, ])); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + 
SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -3854,8 +3884,9 @@ fn test_is_tx_problematic() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -4292,8 +4323,8 @@ fn mempool_incorporate_pox_unlocks() { let principal = PrincipalData::from(addr.clone()); let mut peer_config = TestPeerConfig::new(function_name!(), 2020, 2021); - peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(EpochList::new(&[ + peer_config.chain_config.initial_balances = initial_balances; + peer_config.chain_config.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -4316,22 +4347,29 @@ fn mempool_incorporate_pox_unlocks() { network_epoch: PEER_VERSION_EPOCH_2_1, }, ])); - peer_config.burnchain.pox_constants.v1_unlock_height = - peer_config.epochs.as_ref().unwrap()[StacksEpochId::Epoch2_05].end_height as u32 + 1; - let pox_constants = peer_config.burnchain.pox_constants.clone(); - let burnchain = peer_config.burnchain.clone(); + peer_config + .chain_config + .burnchain + .pox_constants + .v1_unlock_height = peer_config.chain_config.epochs.as_ref().unwrap() + [StacksEpochId::Epoch2_05] + .end_height as u32 + + 1; + let pox_constants = peer_config.chain_config.burnchain.pox_constants.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = 
SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; - let first_block_height = peer.sortdb.as_ref().unwrap().first_block_height; + let first_block_height = peer.chain.sortdb.as_ref().unwrap().first_block_height; let first_pox_cycle = pox_constants .block_height_to_reward_cycle(first_block_height, first_stacks_block_height) .unwrap(); @@ -4355,8 +4393,9 @@ fn mempool_incorporate_pox_unlocks() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -4527,16 +4566,17 @@ fn test_fee_order_mismatch_nonce_order() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2002, 2003); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -4545,8 +4585,8 @@ fn test_fee_order_mismatch_nonce_order() { let sender_nonce = 0; // send transactions to the mempool - let tip = - 
SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -4714,9 +4754,10 @@ fn paramaterized_mempool_walk_test( ); let mut peer_config = TestPeerConfig::new(&test_name, 2002, 2003); - peer_config.initial_balances = vec![]; + peer_config.chain_config.initial_balances = vec![]; for (privk, addr) in &key_address_pairs { peer_config + .chain_config .initial_balances .push((addr.to_account_principal(), 1000000000)); } @@ -4895,9 +4936,10 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { let test_name = function_name!(); let mut peer_config = TestPeerConfig::new(&test_name, 0, 0); - peer_config.initial_balances = vec![]; + peer_config.chain_config.initial_balances = vec![]; for (privk, addr) in &key_address_pairs { peer_config + .chain_config .initial_balances .push((addr.to_account_principal(), 1000000000)); } @@ -5145,15 +5187,15 @@ fn run_mempool_walk_strategy_nonce_order_test( .collect(); let mut peer_config = TestPeerConfig::new(test_name, 2030, 2031); - peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = initial_balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -5198,7 +5240,7 @@ fn 
run_mempool_walk_strategy_nonce_order_test( &privk, tx_nonce, 200 * (tx_nonce + 1), // Higher nonce = higher fee - &format!("contract-{}", tx_nonce), + &format!("contract-{tx_nonce}"), contract, ) }) diff --git a/stackslib/src/chainstate/tests/mod.rs b/stackslib/src/chainstate/tests/mod.rs index 3d4037aeaf..7b2dcfdb5c 100644 --- a/stackslib/src/chainstate/tests/mod.rs +++ b/stackslib/src/chainstate/tests/mod.rs @@ -16,7 +16,9 @@ pub mod consensus; use std::fs; -use clarity::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; +use clarity::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, +}; use clarity::vm::ast::parser::v1::CONTRACT_MAX_NAME_LENGTH; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::STXBalance; @@ -33,6 +35,7 @@ use stacks_common::util::vrf::*; use self::nakamoto::test_signers::TestSigners; use super::*; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; +use crate::burnchains::bitcoin::spv::BITCOIN_GENESIS_BLOCK_HASH_REGTEST; use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; use crate::burnchains::tests::*; use crate::burnchains::*; @@ -40,19 +43,23 @@ use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; use crate::chainstate::coordinator::tests::*; -use crate::chainstate::coordinator::*; +use crate::chainstate::coordinator::{Error as CoordinatorError, *}; use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::{get_nakamoto_parent, TestStacker}; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, StacksDBIndexed}; use crate::chainstate::stacks::address::PoxAddress; -use crate::chainstate::stacks::boot::test::get_parent_tip; +use crate::chainstate::stacks::boot::test::{get_parent_tip, make_pox_4_lockup_chain_id}; use 
crate::chainstate::stacks::db::{StacksChainState, *}; use crate::chainstate::stacks::tests::*; use crate::chainstate::stacks::{Error as ChainstateError, StacksMicroblockHeader, *}; use crate::core::{EpochList, StacksEpoch, StacksEpochExtension, BOOT_BLOCK_HASH}; -use crate::net::test::{TestEventObserver, TestPeerConfig}; +use crate::net::relay::Relayer; +use crate::net::test::TestEventObserver; use crate::util_lib::boot::{boot_code_test_addr, boot_code_tx_auth}; +use crate::util_lib::signed_structured_data::pox4::{ + make_pox_4_signer_key_signature, Pox4SignatureTopic, +}; use crate::util_lib::strings::*; // describes a chainstate's initial configuration @@ -77,10 +84,38 @@ pub struct TestChainstateConfig { impl Default for TestChainstateConfig { fn default() -> Self { - let chain_config = TestPeerConfig::default(); - Self::from(chain_config) + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_GENESIS_BLOCK_HASH_REGTEST).unwrap(), + ); + + burnchain.pox_constants = PoxConstants::test_20_no_sunset(); + let mut spending_account = TestMinerFactory::new().next_miner( + burnchain.clone(), + 1, + 1, + AddressHashMode::SerializeP2PKH, + ); + spending_account.test_with_tx_fees = false; // manually set transaction fees + + Self { + network_id: 0x80000000, + current_block: (burnchain.consensus_hash_lifetime + 1) as u64, + burnchain, + test_name: "".into(), + initial_balances: vec![], + initial_lockups: vec![], + spending_account, + setup_code: "".into(), + epochs: None, + aggregate_public_key: None, + test_stackers: None, + test_signers: None, + txindex: false, + } } } + impl TestChainstateConfig { pub fn new(test_name: &str) -> Self { Self { @@ -89,12 +124,12 @@ impl TestChainstateConfig { } } } + pub struct TestChainstate<'a> { pub config: TestChainstateConfig, pub sortdb: Option, pub miner: TestMiner, pub stacks_node: Option, - pub chainstate_path: String, pub indexer: Option, pub coord: ChainsCoordinator< 'a, @@ -109,27 
+144,10 @@ pub struct TestChainstate<'a> { /// list of malleablized blocks produced when mining. pub malleablized_blocks: Vec, pub mine_malleablized_blocks: bool, + pub test_path: String, + pub chainstate_path: String, } -impl From for TestChainstateConfig { - fn from(chain_config: TestPeerConfig) -> Self { - Self { - network_id: chain_config.network_id, - current_block: chain_config.current_block, - burnchain: chain_config.burnchain, - test_name: chain_config.test_name, - initial_balances: chain_config.initial_balances, - initial_lockups: chain_config.initial_lockups, - spending_account: chain_config.spending_account, - setup_code: chain_config.setup_code, - epochs: chain_config.epochs, - test_stackers: chain_config.test_stackers, - test_signers: chain_config.test_signers, - aggregate_public_key: chain_config.aggregate_public_key, - txindex: chain_config.txindex, - } - } -} impl<'a> TestChainstate<'a> { pub fn new(config: TestChainstateConfig) -> TestChainstate<'a> { Self::new_with_observer(config, None) @@ -158,7 +176,7 @@ impl<'a> TestChainstate<'a> { mut config: TestChainstateConfig, observer: Option<&'a TestEventObserver>, ) -> TestChainstate<'a> { - let test_path = Self::test_path(&config); + let test_path = Self::make_test_path(&config); let chainstate_path = get_chainstate_path_str(&test_path); let mut miner_factory = TestMinerFactory::new(); miner_factory.chain_id = config.network_id; @@ -335,6 +353,7 @@ impl<'a> TestChainstate<'a> { sortdb: Some(sortdb), miner, stacks_node: Some(stacks_node), + test_path, chainstate_path, coord, indexer: Some(indexer), @@ -344,6 +363,183 @@ impl<'a> TestChainstate<'a> { } } + // Advances a TestChainstate to the Nakamoto epoch + pub fn advance_to_nakamoto_epoch(&mut self, private_key: &StacksPrivateKey, nonce: &mut usize) { + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(private_key)); + let default_pox_addr = + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes().clone()); + + let 
mut sortition_height = self.get_burn_block_height(); + debug!("\n\n======================"); + debug!("PoxConstants = {:#?}", &self.config.burnchain.pox_constants); + debug!("tip = {sortition_height}"); + debug!("========================\n\n"); + + let epoch_25_height = self + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch25) + .unwrap() + .start_height; + + let epoch_30_height = self + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch30) + .unwrap() + .start_height; + + // Advance to just past PoX-4 instantiation + let mut blocks_produced = false; + while sortition_height <= epoch_25_height { + self.tenure_with_txs(&[], nonce); + sortition_height = self.get_burn_block_height(); + blocks_produced = true; + } + + // Ensure at least one block is produced before PoX-4 lockups + if !blocks_produced { + self.tenure_with_txs(&[], nonce); + sortition_height = self.get_burn_block_height(); + } + + debug!("\n\n======================"); + debug!("Make PoX-4 lockups"); + debug!("========================\n\n"); + + let reward_cycle = self + .config + .burnchain + .block_height_to_reward_cycle(sortition_height) + .unwrap(); + + // Create PoX-4 lockup transactions + let stack_txs: Vec<_> = self + .config + .test_stackers + .clone() + .unwrap_or_default() + .iter() + .map(|test_stacker| { + let pox_addr = test_stacker + .pox_addr + .clone() + .unwrap_or(default_pox_addr.clone()); + let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &test_stacker.signer_private_key, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + self.config.network_id, + 12, + max_amount, + 1, + ) + .unwrap() + .to_rsv(); + make_pox_4_lockup_chain_id( + &test_stacker.stacker_private_key, + 0, + test_stacker.amount, + &pox_addr, + 12, + &StacksPublicKey::from_private(&test_stacker.signer_private_key), + sortition_height + 1, + 
Some(signature), + max_amount, + 1, + self.config.network_id, + ) + }) + .collect(); + + let stacks_block = self.tenure_with_txs(&stack_txs, nonce); + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(stacks_block, stacks_tip); + + debug!("\n\n======================"); + debug!("Advance to the Prepare Phase"); + debug!("========================\n\n"); + + // Advance to the prepare phase + while !self.config.burnchain.is_in_prepare_phase(sortition_height) { + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); + let old_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + let stacks_block = self.tenure_with_txs(&[], nonce); + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_ne!(old_tip, stacks_tip); + sortition_height = self.get_burn_block_height(); + } + + debug!("\n\n======================"); + debug!("Advance to Epoch 3.0"); + debug!("========================\n\n"); + + // Advance to Epoch 3.0 + while sortition_height < epoch_30_height - 1 { + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); + let old_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + self.tenure_with_txs(&[], nonce); + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_ne!(old_tip, stacks_tip); + sortition_height = self.get_burn_block_height(); + } + + debug!("\n\n======================"); + debug!("Welcome to Nakamoto!"); + debug!("========================\n\n"); + } + + pub fn 
get_burnchain_db(&self, readwrite: bool) -> BurnchainDB { + BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), readwrite).unwrap() + } + + pub fn get_sortition_at_height(&self, height: u64) -> Option { + let sortdb = self.sortdb.as_ref().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&tip.sortition_id); + sort_handle.get_block_snapshot_by_height(height).unwrap() + } + + pub fn get_burnchain_block_ops( + &self, + burn_block_hash: &BurnchainHeaderHash, + ) -> Vec { + let burnchain_db = + BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), false).unwrap(); + burnchain_db + .get_burnchain_block_ops(burn_block_hash) + .unwrap() + } + + pub fn get_burnchain_block_ops_at_height( + &self, + height: u64, + ) -> Option> { + let sortdb = self.sortdb.as_ref().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&tip.sortition_id); + let Some(sn) = sort_handle.get_block_snapshot_by_height(height).unwrap() else { + return None; + }; + Some(self.get_burnchain_block_ops(&sn.burn_header_hash)) + } + pub fn next_burnchain_block( &mut self, blockstack_ops: Vec, @@ -352,6 +548,54 @@ impl<'a> TestChainstate<'a> { (x.0, x.1, x.2) } + pub fn next_burnchain_block_diverge( + &mut self, + blockstack_ops: Vec, + ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true, true); + (x.0, x.1, x.2) + } + + pub fn next_burnchain_block_and_missing_pox_anchor( + &mut self, + blockstack_ops: Vec, + ) -> ( + u64, + BurnchainHeaderHash, + ConsensusHash, + Option, + ) { + self.inner_next_burnchain_block(blockstack_ops, true, true, true, false) + } + + pub fn next_burnchain_block_raw( + &mut self, + blockstack_ops: Vec, + ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, false, false, true, false); + 
(x.0, x.1, x.2) + } + + pub fn next_burnchain_block_raw_sortition_only( + &mut self, + blockstack_ops: Vec, + ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, false, false, false, false); + (x.0, x.1, x.2) + } + + pub fn next_burnchain_block_raw_and_missing_pox_anchor( + &mut self, + blockstack_ops: Vec, + ) -> ( + u64, + BurnchainHeaderHash, + ConsensusHash, + Option, + ) { + self.inner_next_burnchain_block(blockstack_ops, false, false, true, false) + } + pub fn set_ops_consensus_hash( blockstack_ops: &mut [BlockstackOperationType], ch: &ConsensusHash, @@ -528,6 +772,116 @@ impl<'a> TestChainstate<'a> { ) } + /// Pre-process an epoch 2.x Stacks block. + /// Validate it and store it to staging. + pub fn preprocess_stacks_block(&mut self, block: &StacksBlock) -> Result { + let sortdb = self.sortdb.take().unwrap(); + let mut node = self.stacks_node.take().unwrap(); + let res = { + let sn = { + let ic = sortdb.index_conn(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); + let sn_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap(); + if sn_opt.is_none() { + return Err(format!( + "No such block in canonical burn fork: {}", + &block.block_hash() + )); + } + sn_opt.unwrap() + }; + + let parent_sn = { + let db_handle = sortdb.index_handle(&sn.sortition_id); + let parent_sn = db_handle + .get_block_snapshot(&sn.parent_burn_header_hash) + .unwrap(); + parent_sn.unwrap() + }; + + let ic = sortdb.index_conn(); + node.chainstate + .preprocess_anchored_block( + &ic, + &sn.consensus_hash, + block, + &parent_sn.consensus_hash, + 5, + ) + .map_err(|e| format!("Failed to preprocess anchored block: {e:?}")) + }; + if res.is_ok() { + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sortdb_reader = SortitionHandleConn::open_reader(&ic, 
&tip_sort_id).unwrap(); + sortdb_reader.get_pox_id().unwrap() + }; + test_debug!( + "\n\n{:?}: after stacks block {:?}, tip PoX ID is {pox_id:?}\n\n", + &block.block_hash(), + &pox_id + ); + self.coord.handle_new_stacks_block().unwrap(); + } + + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + res + } + + /// Preprocess epoch 2.x microblocks. + /// Validate them and store them to staging. + pub fn preprocess_stacks_microblocks( + &mut self, + microblocks: &[StacksMicroblock], + ) -> Result { + assert!(!microblocks.is_empty()); + let sortdb = self.sortdb.take().unwrap(); + let mut node = self.stacks_node.take().unwrap(); + let res = { + let anchor_block_hash = microblocks[0].header.prev_block.clone(); + let sn = { + let ic = sortdb.index_conn(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); + let sn_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &anchor_block_hash, + ) + .unwrap(); + if sn_opt.is_none() { + return Err(format!( + "No such block in canonical burn fork: {anchor_block_hash}" + )); + } + sn_opt.unwrap() + }; + + let mut res = Ok(true); + for mblock in microblocks.iter() { + res = node + .chainstate + .preprocess_streamed_microblock(&sn.consensus_hash, &anchor_block_hash, mblock) + .map_err(|e| format!("Failed to preprocess microblock: {e:?}")); + + if res.is_err() { + break; + } + } + res + }; + + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + res + } + /// Store the given epoch 2.x Stacks block and microblock to staging, and then try and /// process them. 
pub fn process_stacks_epoch_at_tip( @@ -554,13 +908,126 @@ impl<'a> TestChainstate<'a> { }; test_debug!( "\n\nafter stacks block {:?}, tip PoX ID is {pox_id:?}\n\n", - block.block_hash() + &block.block_hash() + ); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + } + + /// Store the given epoch 2.x Stacks block and microblock to the given node's staging, + /// using the given sortition DB as well, and then try and process them. + fn inner_process_stacks_epoch_at_tip( + &mut self, + sortdb: &SortitionDB, + node: &mut TestStacksNode, + block: &StacksBlock, + microblocks: &[StacksMicroblock], + ) -> Result<(), CoordinatorError> { + { + let ic = sortdb.index_conn(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&ic)?; + node.chainstate + .preprocess_stacks_epoch(&ic, &tip, block, microblocks)?; + } + self.coord.handle_new_stacks_block()?; + + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; + let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id)?; + sortdb_reader.get_pox_id()? + }; + test_debug!( + "\n\nafter stacks block {:?}, tip PoX ID is {:?}\n\n", + &block.block_hash(), + &pox_id + ); + Ok(()) + } + + /// Store the given epoch 2.x Stacks block and microblock to the given node's staging, + /// and then try and process them. + pub fn process_stacks_epoch_at_tip_checked( + &mut self, + block: &StacksBlock, + microblocks: &[StacksMicroblock], + ) -> Result<(), CoordinatorError> { + let sortdb = self.sortdb.take().unwrap(); + let mut node = self.stacks_node.take().unwrap(); + let res = self.inner_process_stacks_epoch_at_tip(&sortdb, &mut node, block, microblocks); + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + res + } + + /// Accept a new Stacks block and microblocks via the relayer, and then try to process + /// them. 
+ pub fn process_stacks_epoch( + &mut self, + block: &StacksBlock, + consensus_hash: &ConsensusHash, + microblocks: &[StacksMicroblock], + ) { + let sortdb = self.sortdb.take().unwrap(); + let mut node = self.stacks_node.take().unwrap(); + { + let ic = sortdb.index_conn(); + Relayer::process_new_anchored_block( + &ic, + &mut node.chainstate, + consensus_hash, + block, + 0, + ) + .unwrap(); + + let block_hash = block.block_hash(); + for mblock in microblocks.iter() { + node.chainstate + .preprocess_streamed_microblock(consensus_hash, &block_hash, mblock) + .unwrap(); + } + } + self.coord.handle_new_stacks_block().unwrap(); + + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); + sortdb_reader.get_pox_id().unwrap() + }; + + test_debug!( + "\n\nafter stacks block {:?}, tip PoX ID is {:?}\n\n", + &block.block_hash(), + &pox_id ); self.sortdb = Some(sortdb); self.stacks_node = Some(node); } + pub fn add_empty_burnchain_block(&mut self) -> (u64, BurnchainHeaderHash, ConsensusHash) { + self.next_burnchain_block(vec![]) + } + + pub fn mine_empty_tenure(&mut self) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let (burn_ops, ..) 
= self.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let result = self.next_burnchain_block(burn_ops); + // remove the last block commit so that the testpeer doesn't try to build off of this tenure + self.miner.block_commits.pop(); + result + } + + pub fn chainstate(&mut self) -> &mut StacksChainState { + &mut self.stacks_node.as_mut().unwrap().chainstate + } + + pub fn chainstate_ref(&self) -> &StacksChainState { + &self.stacks_node.as_ref().unwrap().chainstate + } + pub fn sortdb(&mut self) -> &mut SortitionDB { self.sortdb.as_mut().unwrap() } @@ -674,7 +1141,7 @@ impl<'a> TestChainstate<'a> { let last_key = stacks_node.get_last_key(&self.miner); let network_id = self.config.network_id; - let chainstate_path = self.chainstate_path.clone(); + let chainstate_path = get_chainstate_path_str(&self.config.test_name); let burn_block_height = burn_block.block_height; let proof = self diff --git a/stackslib/src/clarity_vm/tests/ephemeral.rs b/stackslib/src/clarity_vm/tests/ephemeral.rs index 73b5897212..d3abcf0167 100644 --- a/stackslib/src/clarity_vm/tests/ephemeral.rs +++ b/stackslib/src/clarity_vm/tests/ephemeral.rs @@ -430,8 +430,8 @@ fn test_ephemeral_nakamoto_block_replay_simple() { ); // read out all Nakamoto blocks - let sortdb = peer.sortdb.take().unwrap(); - let mut stacks_node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut stacks_node = peer.chain.stacks_node.take().unwrap(); let naka_tip = NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) .unwrap() @@ -733,8 +733,8 @@ fn test_ephemeral_nakamoto_block_replay_smart_contract() { let (mut peer, _other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(&observer)); // read out all Nakamoto blocks - let sortdb = peer.sortdb.take().unwrap(); - let mut stacks_node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut stacks_node = peer.chain.stacks_node.take().unwrap(); let 
naka_tip = NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) .unwrap() diff --git a/stackslib/src/net/api/tests/blockreplay.rs b/stackslib/src/net/api/tests/blockreplay.rs index 31e727727b..6d5fc61875 100644 --- a/stackslib/src/net/api/tests/blockreplay.rs +++ b/stackslib/src/net/api/tests/blockreplay.rs @@ -71,7 +71,7 @@ fn test_block_reply_errors() { let test_observer = TestEventObserver::new(); let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); - let sort_db = rpc_test.peer_1.sortdb.take().unwrap(); + let sort_db = rpc_test.peer_1.chain.sortdb.take().unwrap(); let chainstate = rpc_test.peer_1.chainstate(); let err = handler.block_replay(&sort_db, chainstate).err().unwrap(); diff --git a/stackslib/src/net/api/tests/getblock_v3.rs b/stackslib/src/net/api/tests/getblock_v3.rs index 272a52421d..6c1a37edb6 100644 --- a/stackslib/src/net/api/tests/getblock_v3.rs +++ b/stackslib/src/net/api/tests/getblock_v3.rs @@ -123,11 +123,11 @@ fn test_stream_nakamoto_blocks() { .is_err()); let nakamoto_tip = { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let ih = sortdb.index_handle(&tip.sortition_id); let nakamoto_tip = ih.get_nakamoto_tip().unwrap().unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); nakamoto_tip }; diff --git a/stackslib/src/net/api/tests/gettenure.rs b/stackslib/src/net/api/tests/gettenure.rs index a4f2652611..96439a4a9f 100644 --- a/stackslib/src/net/api/tests/gettenure.rs +++ b/stackslib/src/net/api/tests/gettenure.rs @@ -127,11 +127,11 @@ fn test_stream_nakamoto_tenure() { .is_err()); let nakamoto_tip = { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let ih = sortdb.index_handle(&tip.sortition_id); let nakamoto_tip = 
ih.get_nakamoto_tip().unwrap().unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); nakamoto_tip }; diff --git a/stackslib/src/net/api/tests/gettransaction.rs b/stackslib/src/net/api/tests/gettransaction.rs index 42ea956c60..1020caef1d 100644 --- a/stackslib/src/net/api/tests/gettransaction.rs +++ b/stackslib/src/net/api/tests/gettransaction.rs @@ -111,7 +111,7 @@ fn test_try_make_response() { dummy_tip.0[0] = dummy_tip.0[0].wrapping_add(1); let peer = &rpc_test.peer_1; - let sortdb = peer.sortdb.as_ref().unwrap(); + let sortdb = peer.chain.sortdb.as_ref().unwrap(); let tenure_blocks = rpc_test .peer_1 .chainstate_ref() diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 97a60a6b43..aacc86a0fc 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -313,8 +313,8 @@ impl<'a> TestRPC<'a> { ) .unwrap(); - let mut peer_1_config = TestPeerConfig::new(&format!("{}-peer1", test_name), 0, 0); - let mut peer_2_config = TestPeerConfig::new(&format!("{}-peer2", test_name), 0, 0); + let mut peer_1_config = TestPeerConfig::new(&format!("{test_name}-peer1"), 0, 0); + let mut peer_2_config = TestPeerConfig::new(&format!("{test_name}-peer2"), 0, 0); peer_1_config.private_key = privk1.clone(); peer_2_config.private_key = privk2.clone(); @@ -349,15 +349,17 @@ impl<'a> TestRPC<'a> { StackerDBConfig::noop(), ); - let peer_1_indexer = BitcoinIndexer::new_unit_test(&peer_1_config.burnchain.working_dir); - let peer_2_indexer = BitcoinIndexer::new_unit_test(&peer_2_config.burnchain.working_dir); + let peer_1_indexer = + BitcoinIndexer::new_unit_test(&peer_1_config.chain_config.burnchain.working_dir); + let peer_2_indexer = + BitcoinIndexer::new_unit_test(&peer_2_config.chain_config.burnchain.working_dir); - peer_1_config.initial_balances = vec![ + peer_1_config.chain_config.initial_balances = vec![ (addr1.to_account_principal(), 1000000000), (addr2.to_account_principal(), 1000000000), ]; - 
peer_2_config.initial_balances = vec![ + peer_2_config.chain_config.initial_balances = vec![ (addr1.to_account_principal(), 1000000000), (addr2.to_account_principal(), 1000000000), ]; @@ -365,7 +367,7 @@ impl<'a> TestRPC<'a> { peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let burnchain = peer_1_config.burnchain.clone(); + let burnchain = peer_1_config.chain_config.burnchain.clone(); with_peer_1_config(&mut peer_1_config); with_peer_2_config(&mut peer_2_config); @@ -482,8 +484,9 @@ impl<'a> TestRPC<'a> { tx.commit().unwrap(); } - let tip = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let mut anchor_cost = ExecutionCost::ZERO; let mut anchor_size = 0; @@ -545,7 +548,7 @@ impl<'a> TestRPC<'a> { // build 1-block microblock stream with the contract-call and the unconfirmed contract let microblock = { - let sortdb = peer_1.sortdb.take().unwrap(); + let sortdb = peer_1.chain.sortdb.take().unwrap(); Relayer::setup_unconfirmed_state(peer_1.chainstate(), &sortdb).unwrap(); let mblock = { let sort_iconn = sortdb.index_handle_at_tip(); @@ -568,7 +571,7 @@ impl<'a> TestRPC<'a> { .unwrap(); microblock }; - peer_1.sortdb = Some(sortdb); + peer_1.chain.sortdb = Some(sortdb); mblock }; @@ -597,8 +600,8 @@ impl<'a> TestRPC<'a> { .unwrap(); // process microblock stream to generate unconfirmed state - let sortdb1 = peer_1.sortdb.take().unwrap(); - let sortdb2 = peer_2.sortdb.take().unwrap(); + let sortdb1 = peer_1.chain.sortdb.take().unwrap(); + let sortdb2 = peer_2.chain.sortdb.take().unwrap(); peer_1 .chainstate() .reload_unconfirmed_state(&sortdb1.index_handle_at_tip(), canonical_tip.clone()) @@ -607,8 +610,8 @@ impl<'a> TestRPC<'a> { .chainstate() .reload_unconfirmed_state(&sortdb2.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); - 
peer_1.sortdb = Some(sortdb1); - peer_2.sortdb = Some(sortdb2); + peer_1.chain.sortdb = Some(sortdb1); + peer_2.chain.sortdb = Some(sortdb2); } let mut mempool_txids = vec![]; @@ -684,23 +687,23 @@ impl<'a> TestRPC<'a> { mempool_tx.commit().unwrap(); peer_2.mempool.replace(mempool); - let peer_1_sortdb = peer_1.sortdb.take().unwrap(); - let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + let peer_1_sortdb = peer_1.chain.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.chain.stacks_node.take().unwrap(); let _ = peer_1 .network .refresh_burnchain_view(&peer_1_sortdb, &mut peer_1_stacks_node.chainstate, false) .unwrap(); - peer_1.sortdb = Some(peer_1_sortdb); - peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.chain.sortdb = Some(peer_1_sortdb); + peer_1.chain.stacks_node = Some(peer_1_stacks_node); - let peer_2_sortdb = peer_2.sortdb.take().unwrap(); - let mut peer_2_stacks_node = peer_2.stacks_node.take().unwrap(); + let peer_2_sortdb = peer_2.chain.sortdb.take().unwrap(); + let mut peer_2_stacks_node = peer_2.chain.stacks_node.take().unwrap(); let _ = peer_2 .network .refresh_burnchain_view(&peer_2_sortdb, &mut peer_2_stacks_node.chainstate, false) .unwrap(); - peer_2.sortdb = Some(peer_2_sortdb); - peer_2.stacks_node = Some(peer_2_stacks_node); + peer_2.chain.sortdb = Some(peer_2_sortdb); + peer_2.chain.stacks_node = Some(peer_2_stacks_node); // insert some fake Atlas attachment data let attachment = Attachment { @@ -742,8 +745,9 @@ impl<'a> TestRPC<'a> { .unwrap(); // next tip, coinbase - let tip = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, @@ -903,9 +907,10 @@ impl<'a> TestRPC<'a> { }); let mut other_peer = other_peers.pop().unwrap(); - let peer_1_indexer = 
BitcoinIndexer::new_unit_test(&peer.config.burnchain.working_dir); + let peer_1_indexer = + BitcoinIndexer::new_unit_test(&peer.config.chain_config.burnchain.working_dir); let peer_2_indexer = - BitcoinIndexer::new_unit_test(&other_peer.config.burnchain.working_dir); + BitcoinIndexer::new_unit_test(&other_peer.config.chain_config.burnchain.working_dir); let convo_1 = ConversationHttp::new( format!("127.0.0.1:{}", peer.config.http_port) @@ -931,12 +936,12 @@ impl<'a> TestRPC<'a> { let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); let nakamoto_tip = { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let tip = NakamotoChainState::get_canonical_block_header(peer.chainstate().db(), &sortdb) .unwrap() .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); tip }; @@ -944,14 +949,14 @@ impl<'a> TestRPC<'a> { let other_tip = SortitionDB::get_canonical_burn_chain_tip(other_peer.sortdb().conn()).unwrap(); let other_nakamoto_tip = { - let sortdb = other_peer.sortdb.take().unwrap(); + let sortdb = other_peer.chain.sortdb.take().unwrap(); let tip = NakamotoChainState::get_canonical_block_header( other_peer.chainstate().db(), &sortdb, ) .unwrap() .unwrap(); - other_peer.sortdb = Some(sortdb); + other_peer.chain.sortdb = Some(sortdb); tip }; @@ -1006,9 +1011,10 @@ impl<'a> TestRPC<'a> { }); let mut other_peer = other_peers.pop().unwrap(); - let peer_1_indexer = BitcoinIndexer::new_unit_test(&peer.config.burnchain.working_dir); + let peer_1_indexer = + BitcoinIndexer::new_unit_test(&peer.config.chain_config.burnchain.working_dir); let peer_2_indexer = - BitcoinIndexer::new_unit_test(&other_peer.config.burnchain.working_dir); + BitcoinIndexer::new_unit_test(&other_peer.config.chain_config.burnchain.working_dir); let convo_1 = ConversationHttp::new( format!("127.0.0.1:{}", peer.config.http_port) @@ -1034,12 +1040,12 @@ impl<'a> TestRPC<'a> { let tip = 
SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); let nakamoto_tip = { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let tip = NakamotoChainState::get_canonical_block_header(peer.chainstate().db(), &sortdb) .unwrap() .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); tip }; @@ -1047,14 +1053,14 @@ impl<'a> TestRPC<'a> { let other_tip = SortitionDB::get_canonical_burn_chain_tip(other_peer.sortdb().conn()).unwrap(); let other_nakamoto_tip = { - let sortdb = other_peer.sortdb.take().unwrap(); + let sortdb = other_peer.chain.sortdb.take().unwrap(); let tip = NakamotoChainState::get_canonical_block_header( other_peer.chainstate().db(), &sortdb, ) .unwrap() .unwrap(); - other_peer.sortdb = Some(sortdb); + other_peer.chain.sortdb = Some(sortdb); tip }; @@ -1127,8 +1133,8 @@ impl<'a> TestRPC<'a> { convo_send_recv(&mut convo_1, &mut convo_2); // hack around the borrow-checker - let peer_1_sortdb = peer_1.sortdb.take().unwrap(); - let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + let peer_1_sortdb = peer_1.chain.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.chain.stacks_node.take().unwrap(); if unconfirmed_state { Relayer::setup_unconfirmed_state( @@ -1152,21 +1158,21 @@ impl<'a> TestRPC<'a> { &mut peer_1_mempool, &rpc_args, false, - peer_1.config.txindex, + peer_1.config.chain_config.txindex, ); convo_1.chat(&mut node_state).unwrap(); } - peer_1.sortdb = Some(peer_1_sortdb); - peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.chain.sortdb = Some(peer_1_sortdb); + peer_1.chain.stacks_node = Some(peer_1_stacks_node); peer_1.mempool = Some(peer_1_mempool); peer_2.mempool = Some(peer_2_mempool); debug!("test_rpc: Peer 2 sends to Peer 1"); // hack around the borrow-checker - let peer_2_sortdb = peer_2.sortdb.take().unwrap(); - let mut peer_2_stacks_node = peer_2.stacks_node.take().unwrap(); + let peer_2_sortdb = 
peer_2.chain.sortdb.take().unwrap(); + let mut peer_2_stacks_node = peer_2.chain.stacks_node.take().unwrap(); let mut peer_2_mempool = peer_2.mempool.take().unwrap(); let _ = peer_2 @@ -1196,13 +1202,13 @@ impl<'a> TestRPC<'a> { &mut peer_2_mempool, &rpc_args, false, - peer_2.config.txindex, + peer_2.config.chain_config.txindex, ); convo_2.chat(&mut node_state).unwrap(); } - peer_2.sortdb = Some(peer_2_sortdb); - peer_2.stacks_node = Some(peer_2_stacks_node); + peer_2.chain.sortdb = Some(peer_2_sortdb); + peer_2.chain.stacks_node = Some(peer_2_stacks_node); peer_2.mempool = Some(peer_2_mempool); convo_send_recv(&mut convo_2, &mut convo_1); @@ -1212,8 +1218,8 @@ impl<'a> TestRPC<'a> { // hack around the borrow-checker convo_send_recv(&mut convo_1, &mut convo_2); - let peer_1_sortdb = peer_1.sortdb.take().unwrap(); - let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + let peer_1_sortdb = peer_1.chain.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.chain.stacks_node.take().unwrap(); let _ = peer_1 .network @@ -1228,15 +1234,15 @@ impl<'a> TestRPC<'a> { .unwrap(); } - peer_1.sortdb = Some(peer_1_sortdb); - peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.chain.sortdb = Some(peer_1_sortdb); + peer_1.chain.stacks_node = Some(peer_1_stacks_node); let resp_opt = loop { debug!("Peer 1 try get response"); convo_send_recv(&mut convo_1, &mut convo_2); { - let peer_1_sortdb = peer_1.sortdb.take().unwrap(); - let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + let peer_1_sortdb = peer_1.chain.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.chain.stacks_node.take().unwrap(); let mut peer_1_mempool = peer_1.mempool.take().unwrap(); let rpc_args = peer_1 @@ -1251,13 +1257,13 @@ impl<'a> TestRPC<'a> { &mut peer_1_mempool, &rpc_args, false, - peer_1.config.txindex, + peer_1.config.chain_config.txindex, ); convo_1.chat(&mut node_state).unwrap(); - peer_1.sortdb = Some(peer_1_sortdb); - peer_1.stacks_node = 
Some(peer_1_stacks_node); + peer_1.chain.sortdb = Some(peer_1_sortdb); + peer_1.chain.stacks_node = Some(peer_1_stacks_node); peer_1.mempool = Some(peer_1_mempool); } diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index efca96863b..381b4b0378 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -234,17 +234,18 @@ fn test_try_make_response() { let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); let mut requests = vec![]; - let tip = - SortitionDB::get_canonical_burn_chain_tip(rpc_test.peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + rpc_test.peer_1.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (stacks_tip_ch, stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash( - rpc_test.peer_1.sortdb.as_ref().unwrap().conn(), + rpc_test.peer_1.chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bhh); - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); let mut good_block = { let chainstate = rpc_test.peer_1.chainstate(); @@ -305,7 +306,11 @@ fn test_try_make_response() { // Increment the timestamp by 1 to ensure it is different from the previous block good_block.header.timestamp += 1; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut good_block); + rpc_test + .peer_1 + .chain + .miner + .sign_nakamoto_block(&mut good_block); // post the valid block proposal let proposal = NakamotoBlockProposal { @@ -329,6 +334,7 @@ fn test_try_make_response() { early_time_block.header.timestamp -= 400; rpc_test .peer_1 + .chain .miner .sign_nakamoto_block(&mut early_time_block); @@ -354,6 +360,7 @@ fn test_try_make_response() { late_time_block.header.timestamp += 20000; rpc_test .peer_1 + .chain .miner 
.sign_nakamoto_block(&mut late_time_block); @@ -377,7 +384,11 @@ fn test_try_make_response() { // Set the timestamp to a value in the past (BEFORE the timeout) let mut stale_block = good_block.clone(); stale_block.header.timestamp -= 10000; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut stale_block); + rpc_test + .peer_1 + .chain + .miner + .sign_nakamoto_block(&mut stale_block); // post the invalid block proposal let proposal = NakamotoBlockProposal { @@ -501,7 +512,7 @@ fn replay_validation_test( let mut requests = vec![]; let (stacks_tip_ch, stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash( - rpc_test.peer_1.sortdb.as_ref().unwrap().conn(), + rpc_test.peer_1.chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bhh); @@ -560,6 +571,7 @@ fn replay_validation_test( proposed_block.header.timestamp += 1; rpc_test .peer_1 + .chain .miner .sign_nakamoto_block(&mut proposed_block); @@ -630,7 +642,7 @@ fn replay_validation_test( /// Tx replay test with mismatching mineable transactions. fn replay_validation_test_transaction_mismatch() { let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); // Transaction expected in the replay set (different amount) let tx_for_replay = make_stacks_transfer_tx( miner_privk, @@ -671,7 +683,7 @@ fn replay_validation_test_transaction_mismatch() { /// The block has the one mineable tx. 
fn replay_validation_test_transaction_unmineable_match() { let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); // Transaction expected in the replay set (different amount) let unmineable_tx = make_stacks_transfer_tx( miner_privk, @@ -712,7 +724,7 @@ fn replay_validation_test_transaction_unmineable_match() { fn replay_validation_test_transaction_unmineable_match_2() { let mut replay_set = vec![]; let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); // Unmineable tx let unmineable_tx = make_stacks_transfer_tx( miner_privk, @@ -767,7 +779,7 @@ fn replay_validation_test_transaction_unmineable_match_2() { /// The block has [mineable, mineable, tx_b, mineable] fn replay_validation_test_transaction_mineable_mismatch_series() { let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); // Mineable tx let mineable_tx_1 = make_stacks_transfer_tx( miner_privk, @@ -845,7 +857,7 @@ fn replay_validation_test_transaction_mineable_mismatch_series() { /// The block has [mineable, tx_a, tx_b] fn replay_validation_test_transaction_mineable_mismatch_series_2() { let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); let recipient_sk = StacksPrivateKey::random(); let recipient_addr = to_addr(&recipient_sk); @@ -906,7 +918,7 @@ fn replay_validation_test_transaction_mineable_mismatch_series_2() { /// have cost too much to include. 
fn replay_validation_test_budget_exceeded() { let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); let miner_addr = to_addr(miner_privk); let contract_code = make_big_read_count_contract(BLOCK_LIMIT_MAINNET_21, 50); @@ -984,7 +996,7 @@ fn replay_validation_test_budget_exceeded() { fn replay_validation_test_budget_exhausted() { let mut replay_set = vec![]; let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); let miner_addr = to_addr(miner_privk); let contract_code = make_big_read_count_contract(BLOCK_LIMIT_MAINNET_21, 50); diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 92d5a5e0f9..519474fb0a 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2235,16 +2235,12 @@ pub mod test { use std::net::*; use std::ops::{Deref, DerefMut}; use std::sync::Mutex; - use std::{fs, io, thread}; + use std::{io, thread}; use clarity::types::sqlite::NO_PARAMS; - use clarity::vm::ast::parser::v1::CONTRACT_MAX_NAME_LENGTH; use clarity::vm::costs::ExecutionCost; - use clarity::vm::database::STXBalance; use clarity::vm::types::*; - use clarity::vm::ContractName; - use rand::{thread_rng, Rng, RngCore}; - use stacks_common::address::*; + use rand::RngCore; use stacks_common::codec::StacksMessageCodec; use stacks_common::deps_common::bitcoin::network::serialize::BitcoinHash; use stacks_common::types::StacksEpochId; @@ -2253,19 +2249,15 @@ pub mod test { use stacks_common::util::vrf::*; use {mio, rand}; - use self::nakamoto::test_signers::TestSigners; use super::*; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; - use crate::burnchains::bitcoin::spv::BITCOIN_GENESIS_BLOCK_HASH_REGTEST; use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; use crate::burnchains::tests::*; 
use crate::burnchains::*; use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; - use crate::chainstate::coordinator::tests::*; use crate::chainstate::coordinator::{Error as coordinator_error, *}; - use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::test::get_parent_tip; use crate::chainstate::stacks::boot::*; @@ -2275,8 +2267,8 @@ pub mod test { use crate::chainstate::stacks::tests::chain_histories::mine_smart_contract_block_contract_call_microblock; use crate::chainstate::stacks::tests::*; use crate::chainstate::stacks::{StacksMicroblockHeader, *}; - use crate::chainstate::*; - use crate::core::{EpochList, StacksEpoch, StacksEpochExtension}; + use crate::chainstate::tests::{TestChainstate, TestChainstateConfig}; + use crate::core::{StacksEpoch, StacksEpochExtension}; use crate::cost_estimates::metrics::UnitMetric; use crate::cost_estimates::tests::fee_rate_fuzzer::ConstantFeeEstimator; use crate::cost_estimates::UnitEstimator; @@ -2288,7 +2280,6 @@ pub mod test { use crate::net::relay::*; use crate::net::stackerdb::{StackerDBSync, StackerDBs}; use crate::net::Error as net_error; - use crate::util_lib::boot::{boot_code_test_addr, boot_code_tx_auth}; use crate::util_lib::strings::*; impl StacksMessageCodec for BlockstackOperationType { @@ -2622,14 +2613,12 @@ pub mod test { // describes a peer's initial configuration #[derive(Debug, Clone)] pub struct TestPeerConfig { - pub network_id: u32, + pub chain_config: TestChainstateConfig, pub peer_version: u32, - pub current_block: u64, pub private_key: Secp256k1PrivateKey, pub private_key_expire: u64, pub initial_neighbors: Vec, pub asn4_entries: Vec, - pub burnchain: Burnchain, pub connection_opts: ConnectionOptions, pub server_port: u16, pub http_port: u16, @@ -2638,12 +2627,7 @@ pub mod test { pub allowed: i64, pub denied: i64, pub data_url: UrlString, - pub 
test_name: String, - pub initial_balances: Vec<(PrincipalData, u64)>, - pub initial_lockups: Vec, - pub spending_account: TestMiner, pub setup_code: String, - pub epochs: Option, /// If some(), TestPeer should check the PoX-2 invariants /// on cycle numbers bounded (inclusive) by the supplied u64s pub check_pox_invariants: Option<(u64, u64)>, @@ -2654,41 +2638,18 @@ pub mod test { pub stacker_db_configs: Vec>, /// What services should this peer support? pub services: u16, - /// aggregate public key to use - /// (NOTE: will be used post-Nakamoto) - pub aggregate_public_key: Option>, - pub test_stackers: Option>, - pub test_signers: Option, - pub txindex: bool, } - impl TestPeerConfig { - pub fn default() -> TestPeerConfig { + impl Default for TestPeerConfig { + fn default() -> Self { let conn_opts = ConnectionOptions::default(); - let start_block = 0; - let mut burnchain = Burnchain::default_unittest( - start_block, - &BurnchainHeaderHash::from_hex(BITCOIN_GENESIS_BLOCK_HASH_REGTEST).unwrap(), - ); - - burnchain.pox_constants = PoxConstants::test_20_no_sunset(); - let mut spending_account = TestMinerFactory::new().next_miner( - burnchain.clone(), - 1, - 1, - AddressHashMode::SerializeP2PKH, - ); - spending_account.test_with_tx_fees = false; // manually set transaction fees - - TestPeerConfig { - network_id: 0x80000000, + Self { + chain_config: TestChainstateConfig::default(), peer_version: 0x01020304, - current_block: start_block + (burnchain.consensus_hash_lifetime + 1) as u64, private_key: Secp256k1PrivateKey::random(), - private_key_expire: start_block + conn_opts.private_key_lifetime, + private_key_expire: conn_opts.private_key_lifetime, initial_neighbors: vec![], asn4_entries: vec![], - burnchain, connection_opts: conn_opts, server_port: 32000, http_port: 32001, @@ -2697,25 +2658,18 @@ pub mod test { allowed: 0, denied: 0, data_url: "".into(), - test_name: "".into(), - initial_balances: vec![], - initial_lockups: vec![], - spending_account, setup_code: 
"".into(), - epochs: None, check_pox_invariants: None, stacker_db_configs: vec![], stacker_dbs: vec![], services: (ServiceFlags::RELAY as u16) | (ServiceFlags::RPC as u16) | (ServiceFlags::STACKERDB as u16), - aggregate_public_key: None, - test_stackers: None, - test_signers: None, - txindex: false, } } + } + impl TestPeerConfig { pub fn from_port(p: u16) -> TestPeerConfig { let mut config = TestPeerConfig { server_port: p, @@ -2730,7 +2684,7 @@ pub mod test { pub fn new(test_name: &str, p2p_port: u16, rpc_port: u16) -> TestPeerConfig { let mut config = TestPeerConfig { - test_name: test_name.into(), + chain_config: TestChainstateConfig::new(test_name), server_port: p2p_port, http_port: rpc_port, ..TestPeerConfig::default() @@ -2749,7 +2703,7 @@ pub mod test { Neighbor { addr: NeighborKey { peer_version: self.peer_version, - network_id: self.network_id, + network_id: self.chain_config.network_id, addrbytes: PeerAddress([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1, ]), @@ -2818,28 +2772,9 @@ pub mod test { pub struct TestPeer<'a> { pub config: TestPeerConfig, pub network: PeerNetwork, - pub sortdb: Option, - pub miner: TestMiner, - pub stacks_node: Option, pub relayer: Relayer, pub mempool: Option, - pub chainstate_path: String, - pub indexer: Option, - pub coord: ChainsCoordinator< - 'a, - TestEventObserver, - (), - OnChainRewardSetProvider<'a, TestEventObserver>, - (), - (), - BitcoinIndexer, - >, - /// list of malleablized blocks produced when mining. - pub malleablized_blocks: Vec, - pub mine_malleablized_blocks: bool, - /// tenure-start block of tenure to mine on. 
- /// gets consumed on the call to begin_nakamoto_tenure - pub nakamoto_parent_tenure_opt: Option>, + pub chain: TestChainstate<'a>, /// RPC handler args to use pub rpc_handler_args: Option, } @@ -2849,37 +2784,13 @@ pub mod test { TestPeer::new_with_observer(config, None) } - pub fn test_path(config: &TestPeerConfig) -> String { - let random = thread_rng().gen::(); - let random_bytes = to_hex(&random.to_be_bytes()); - let cleaned_config_test_name = config.test_name.replace("::", "_"); - format!( - "/tmp/stacks-node-tests/units-test-peer/{}-{}", - &cleaned_config_test_name, random_bytes - ) - } - - pub fn stackerdb_path(config: &TestPeerConfig) -> String { - format!("{}/stacker_db.sqlite", &Self::test_path(config)) - } - - pub fn make_test_path(config: &TestPeerConfig) -> String { - let test_path = TestPeer::test_path(config); - if fs::metadata(&test_path).is_ok() { - fs::remove_dir_all(&test_path).unwrap(); - }; - - fs::create_dir_all(&test_path).unwrap(); - test_path - } - fn init_stackerdb_syncs( root_path: &str, peerdb: &PeerDB, stacker_dbs: &mut HashMap, ) -> HashMap)> { - let stackerdb_path = format!("{}/stacker_db.sqlite", root_path); + let stackerdb_path = format!("{root_path}/stacker_db.sqlite"); let mut stacker_db_syncs = HashMap::new(); let local_peer = PeerDB::get_local_peer(peerdb.conn()).unwrap(); for (i, (contract_id, db_config)) in stacker_dbs.iter_mut().enumerate() { @@ -2916,16 +2827,16 @@ pub mod test { ) -> TestPeer<'a> { let mut config = self.config.clone(); config.private_key = privkey; - config.test_name = format!( + config.chain_config.test_name = format!( "{}.neighbor-{}", - &self.config.test_name, + &self.config.chain_config.test_name, Hash160::from_node_public_key(&StacksPublicKey::from_private( &self.config.private_key )) ); config.server_port = 0; config.http_port = 0; - config.test_stackers = self.config.test_stackers.clone(); + config.chain_config.test_stackers = self.config.chain_config.test_stackers.clone(); config.initial_neighbors 
= vec![self.to_neighbor()]; let peer = TestPeer::new_with_observer(config, observer); @@ -2936,54 +2847,19 @@ pub mod test { mut config: TestPeerConfig, observer: Option<&'a TestEventObserver>, ) -> TestPeer<'a> { - let test_path = TestPeer::make_test_path(&config); - let mut miner_factory = TestMinerFactory::new(); - miner_factory.chain_id = config.network_id; - let mut miner = miner_factory.next_miner( - config.burnchain.clone(), - 1, - 1, - AddressHashMode::SerializeP2PKH, - ); - // manually set fees - miner.test_with_tx_fees = false; - - config.burnchain.working_dir = get_burnchain(&test_path, None).working_dir; - - let epochs = config.epochs.clone().unwrap_or_else(|| { - StacksEpoch::unit_test_pre_2_05(config.burnchain.first_block_height) - }); - - let mut sortdb = SortitionDB::connect( - &config.burnchain.get_db_path(), - config.burnchain.first_block_height, - &config.burnchain.first_block_hash, - 0, - &epochs, - config.burnchain.pox_constants.clone(), - None, - true, - ) - .unwrap(); + let mut chain = + TestChainstate::new_with_observer(config.chain_config.clone(), observer); + // Write back the chain config as TestChainstate::new may have made modifications. 
+ config.chain_config = chain.config.clone(); + let test_path = chain.test_path.clone(); - let first_burnchain_block_height = config.burnchain.first_block_height; - let first_burnchain_block_hash = config.burnchain.first_block_hash.clone(); - - let _burnchain_blocks_db = BurnchainDB::connect( - &config.burnchain.get_burnchaindb_path(), - &config.burnchain, - true, - ) - .unwrap(); - - let chainstate_path = get_chainstate_path_str(&test_path); - let peerdb_path = format!("{}/peers.sqlite", &test_path); + let peerdb_path = format!("{test_path}/peers.sqlite"); let mut peerdb = PeerDB::connect( &peerdb_path, true, - config.network_id, - config.burnchain.network_id, + config.chain_config.network_id, + config.chain_config.burnchain.network_id, None, config.private_key_expire, PeerAddress::from_ipv4(127, 0, 0, 1), @@ -3011,138 +2887,9 @@ pub mod test { tx.commit().unwrap(); } - let atlasdb_path = format!("{}/atlas.sqlite", &test_path); + let atlasdb_path = format!("{test_path}/atlas.sqlite"); let atlasdb = AtlasDB::connect(AtlasConfig::new(false), &atlasdb_path, true).unwrap(); - let agg_pub_key_opt = config.aggregate_public_key.clone(); - - let conf = config.clone(); - let post_flight_callback = move |clarity_tx: &mut ClarityTx| { - let mut receipts = vec![]; - - if let Some(agg_pub_key) = agg_pub_key_opt { - debug!("Setting aggregate public key to {}", &to_hex(&agg_pub_key)); - NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, agg_pub_key); - } else { - debug!("Not setting aggregate public key"); - } - // add test-specific boot code - if !conf.setup_code.is_empty() { - let receipt = clarity_tx.connection().as_transaction(|clarity| { - let boot_code_addr = boot_code_test_addr(); - let boot_code_account = StacksAccount { - principal: boot_code_addr.to_account_principal(), - nonce: 0, - stx_balance: STXBalance::zero(), - }; - - let boot_code_auth = boot_code_tx_auth(boot_code_addr.clone()); - - debug!( - "Instantiate test-specific boot code contract '{}.{}' ({} 
bytes)...", - &boot_code_addr.to_string(), - &conf.test_name, - conf.setup_code.len() - ); - - let smart_contract = TransactionPayload::SmartContract( - TransactionSmartContract { - name: ContractName::try_from( - conf.test_name - .replace("::", "-") - .chars() - .skip( - conf.test_name - .len() - .saturating_sub(CONTRACT_MAX_NAME_LENGTH), - ) - .collect::() - .trim_start_matches(|c: char| !c.is_alphabetic()) - .to_string(), - ) - .expect("FATAL: invalid boot-code contract name"), - code_body: StacksString::from_str(&conf.setup_code) - .expect("FATAL: invalid boot code body"), - }, - None, - ); - - let boot_code_smart_contract = StacksTransaction::new( - TransactionVersion::Testnet, - boot_code_auth, - smart_contract, - ); - StacksChainState::process_transaction_payload( - clarity, - &boot_code_smart_contract, - &boot_code_account, - None, - ) - .unwrap() - }); - receipts.push(receipt); - } - debug!("Bootup receipts: {:?}", &receipts); - }; - - let mut boot_data = ChainStateBootData::new( - &config.burnchain, - config.initial_balances.clone(), - Some(Box::new(post_flight_callback)), - ); - - if !config.initial_lockups.is_empty() { - let lockups = config.initial_lockups.clone(); - boot_data.get_bulk_initial_lockups = - Some(Box::new(move || Box::new(lockups.into_iter()))); - } - - let (chainstate, _) = StacksChainState::open_and_exec( - false, - config.network_id, - &chainstate_path, - Some(&mut boot_data), - None, - ) - .unwrap(); - - let indexer = BitcoinIndexer::new_unit_test(&config.burnchain.working_dir); - let mut coord = ChainsCoordinator::test_new_full( - &config.burnchain, - config.network_id, - &test_path, - OnChainRewardSetProvider(observer), - observer, - indexer, - None, - config.txindex, - ); - coord.handle_new_burnchain_block().unwrap(); - - let mut stacks_node = TestStacksNode::from_chainstate(chainstate); - - { - // pre-populate burnchain, if running on bitcoin - let prev_snapshot = SortitionDB::get_first_block_snapshot(sortdb.conn()).unwrap(); - 
let mut fork = TestBurnchainFork::new( - prev_snapshot.block_height, - &prev_snapshot.burn_header_hash, - &prev_snapshot.index_root, - 0, - ); - for i in prev_snapshot.block_height..config.current_block { - let burn_block = { - let ic = sortdb.index_conn(); - let mut burn_block = fork.next_block(&ic); - stacks_node.add_key_register(&mut burn_block, &mut miner); - burn_block - }; - fork.append_block(burn_block); - - fork.mine_pending_blocks_pox(&mut sortdb, &config.burnchain, &mut coord); - } - } - let local_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), config.server_port); let http_local_addr = @@ -3167,11 +2914,16 @@ pub mod test { let local_peer = PeerDB::get_local_peer(peerdb.conn()).unwrap(); let burnchain_view = { - let chaintip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - SortitionDB::get_burnchain_view(&sortdb.index_conn(), &config.burnchain, &chaintip) - .unwrap() + let chaintip = + SortitionDB::get_canonical_burn_chain_tip(chain.sortdb().conn()).unwrap(); + SortitionDB::get_burnchain_view( + &chain.sortdb().index_conn(), + &config.chain_config.burnchain, + &chaintip, + ) + .unwrap() }; - let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); + let stackerdb_path = format!("{test_path}/stacker_db.sqlite"); let mut stacker_dbs_conn = StackerDBs::connect(&stackerdb_path, true).unwrap(); let relayer_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); let p2p_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); @@ -3187,10 +2939,11 @@ pub mod test { .unwrap_or(StackerDBConfig::noop()), ); } + let mut stacks_node = chain.stacks_node.take().unwrap(); let mut stackerdb_configs = stacker_dbs_conn .create_or_reconfigure_stackerdbs( &mut stacks_node.chainstate, - &sortdb, + chain.sortdb_ref(), old_stackerdb_configs, &config.connection_opts, ) @@ -3201,7 +2954,15 @@ pub mod test { let stackerdb_contracts: Vec<_> = stacker_db_syncs.keys().cloned().collect(); - let burnchain_db = 
config.burnchain.open_burnchain_db(false).unwrap(); + let burnchain_db = config + .chain_config + .burnchain + .open_burnchain_db(false) + .unwrap(); + + let epochs = config.chain_config.epochs.clone().unwrap_or_else(|| { + StacksEpoch::unit_test_pre_2_05(config.chain_config.burnchain.first_block_height) + }); let mut peer_network = PeerNetwork::new( peerdb, @@ -3210,7 +2971,7 @@ pub mod test { burnchain_db, local_peer, config.peer_version, - config.burnchain.clone(), + config.chain_config.burnchain.clone(), burnchain_view, config.connection_opts.clone(), stacker_db_syncs, @@ -3220,26 +2981,30 @@ pub mod test { peer_network.bind(&local_addr, &http_local_addr).unwrap(); let relayer = Relayer::from_p2p(&mut peer_network, relayer_stacker_dbs); - let mempool = MemPoolDB::open_test(false, config.network_id, &chainstate_path).unwrap(); - let indexer = BitcoinIndexer::new_unit_test(&config.burnchain.working_dir); + let mempool = MemPoolDB::open_test( + false, + config.chain_config.network_id, + &chain.chainstate_path, + ) + .unwrap(); // extract bound ports (which may be different from what's in the config file, if e.g. 
// they were 0) let p2p_port = peer_network.bound_neighbor_key().port; let http_port = peer_network.http.as_ref().unwrap().http_server_addr.port(); - debug!("Bound to (p2p={}, http={})", p2p_port, http_port); + debug!("Bound to (p2p={p2p_port}, http={http_port})"); config.server_port = p2p_port; config.http_port = http_port; config.data_url = - UrlString::try_from(format!("http://127.0.0.1:{}", http_port).as_str()).unwrap(); + UrlString::try_from(format!("http://127.0.0.1:{http_port}").as_str()).unwrap(); peer_network .peerdb .update_local_peer( - config.network_id, - config.burnchain.network_id, + config.chain_config.network_id, + config.chain_config.burnchain.network_id, config.data_url.clone(), p2p_port, &stackerdb_contracts, @@ -3248,38 +3013,30 @@ pub mod test { let local_peer = PeerDB::get_local_peer(peer_network.peerdb.conn()).unwrap(); debug!( - "{:?}: initial neighbors: {:?}", - &local_peer, &config.initial_neighbors + "{local_peer:?}: initial neighbors: {:?}", + &config.initial_neighbors ); peer_network.local_peer = local_peer; - + chain.stacks_node = Some(stacks_node); TestPeer { config, + chain, network: peer_network, - sortdb: Some(sortdb), - miner, - stacks_node: Some(stacks_node), relayer, mempool: Some(mempool), - chainstate_path, - coord, - indexer: Some(indexer), - malleablized_blocks: vec![], - mine_malleablized_blocks: true, - nakamoto_parent_tenure_opt: None, rpc_handler_args: None, } } pub fn connect_initial(&mut self) -> Result<(), net_error> { let local_peer = PeerDB::get_local_peer(self.network.peerdb.conn()).unwrap(); - let chain_view = match self.sortdb { + let chain_view = match self.chain.sortdb { Some(ref mut sortdb) => { let chaintip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); SortitionDB::get_burnchain_view( &sortdb.index_conn(), - &self.config.burnchain, + &self.config.chain_config.burnchain, &chaintip, ) .unwrap() @@ -3311,7 +3068,7 @@ pub mod test { if bootstrap { PeerDB::set_initial_peer( &tx, - 
self.config.network_id, + self.config.chain_config.network_id, &n.addr.addrbytes, n.addr.port, ) @@ -3343,8 +3100,8 @@ pub mod test { } pub fn step(&mut self) -> Result { - let sortdb = self.sortdb.take().unwrap(); - let stacks_node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let stacks_node = self.chain.stacks_node.take().unwrap(); let burn_tip_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height; @@ -3356,12 +3113,12 @@ pub mod test { .map(|hdr| hdr.anchored_header.height()) .unwrap_or(0); let ibd = TestPeer::infer_initial_burnchain_block_download( - &self.config.burnchain, + &self.config.chain_config.burnchain, stacks_tip_height, burn_tip_height, ); - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); self.step_with_ibd(ibd) } @@ -3375,10 +3132,10 @@ pub mod test { ibd: bool, dns_client: Option<&mut DNSClient>, ) -> Result { - let sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); - let indexer = self.indexer.take().unwrap(); + let indexer = self.chain.indexer.take().unwrap(); let rpc_handler_args = self .rpc_handler_args .as_ref() @@ -3401,7 +3158,7 @@ pub mod test { ibd, 100, &rpc_handler_args, - self.config.txindex, + self.config.chain_config.txindex, ); if self.network.get_current_epoch().epoch_id >= StacksEpochId::Epoch30 { @@ -3428,10 +3185,10 @@ pub mod test { assert_eq!(self.network.epoch2_state_machine_passes, epoch2_passes + 1); } - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); self.mempool = Some(mempool); - self.indexer = Some(indexer); + self.chain.indexer = 
Some(indexer); ret } @@ -3441,10 +3198,10 @@ pub mod test { dns_client: Option<&mut DNSClient>, ) -> Result<(NetworkResult, ProcessedNetReceipts), net_error> { let net_result = self.step_with_ibd_and_dns(ibd, dns_client)?; - let mut sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); - let indexer = self.indexer.take().unwrap(); + let indexer = self.chain.indexer.take().unwrap(); let receipts_res = self.relayer.process_network_result( self.network.get_local_peer(), @@ -3458,23 +3215,24 @@ pub mod test { None, ); - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); self.mempool = Some(mempool); - self.indexer = Some(indexer); + self.chain.indexer = Some(indexer); - self.coord.handle_new_burnchain_block().unwrap(); - self.coord.handle_new_stacks_block().unwrap(); - self.coord.handle_new_nakamoto_stacks_block().unwrap(); + self.chain.coord.handle_new_burnchain_block().unwrap(); + self.chain.coord.handle_new_stacks_block().unwrap(); + self.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); receipts_res.map(|receipts| (net_result, receipts)) } pub fn step_dns(&mut self, dns_client: &mut DNSClient) -> Result { - let sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); - let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); + let indexer = + BitcoinIndexer::new_unit_test(&self.config.chain_config.burnchain.working_dir); let burn_tip_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() @@ -3487,11 +3245,12 @@ pub 
mod test { .map(|hdr| hdr.anchored_header.height()) .unwrap_or(0); let ibd = TestPeer::infer_initial_burnchain_block_download( - &self.config.burnchain, + &self.config.chain_config.burnchain, stacks_tip_height, burn_tip_height, ); - let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); + let indexer = + BitcoinIndexer::new_unit_test(&self.config.chain_config.burnchain.working_dir); let rpc_handler_args = self .rpc_handler_args .as_ref() @@ -3513,7 +3272,7 @@ pub mod test { ibd, 100, &rpc_handler_args, - self.config.txindex, + self.config.chain_config.txindex, ); if self.network.get_current_epoch().epoch_id >= StacksEpochId::Epoch30 { @@ -3540,29 +3299,30 @@ pub mod test { assert_eq!(self.network.epoch2_state_machine_passes, epoch2_passes + 1); } - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); self.mempool = Some(mempool); ret } pub fn refresh_burnchain_view(&mut self) { - let sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); - let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); + let indexer = + BitcoinIndexer::new_unit_test(&self.config.chain_config.burnchain.working_dir); self.network .refresh_burnchain_view(&sortdb, &mut stacks_node.chainstate, false) .unwrap(); - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); } pub fn refresh_reward_cycles(&mut self) { - let sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let 
tip_block_id = self.network.stacks_tip.block_id(); @@ -3578,8 +3338,8 @@ pub mod test { ) .unwrap(); - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); } pub fn for_each_convo_p2p(&mut self, mut f: F) -> Vec> @@ -3595,14 +3355,15 @@ pub mod test { } pub fn get_burnchain_db(&self, readwrite: bool) -> BurnchainDB { - let burnchain_db = - BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), readwrite) - .unwrap(); - burnchain_db + BurnchainDB::open( + &self.config.chain_config.burnchain.get_burnchaindb_path(), + readwrite, + ) + .unwrap() } pub fn get_sortition_at_height(&self, height: u64) -> Option { - let sortdb = self.sortdb.as_ref().unwrap(); + let sortdb = self.chain.sortdb.as_ref().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let sort_handle = sortdb.index_handle(&tip.sortition_id); sort_handle.get_block_snapshot_by_height(height).unwrap() @@ -3612,8 +3373,11 @@ pub mod test { &self, burn_block_hash: &BurnchainHeaderHash, ) -> Vec { - let burnchain_db = - BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), false).unwrap(); + let burnchain_db = BurnchainDB::open( + &self.config.chain_config.burnchain.get_burnchaindb_path(), + false, + ) + .unwrap(); burnchain_db .get_burnchain_block_ops(burn_block_hash) .unwrap() @@ -3623,7 +3387,7 @@ pub mod test { &self, height: u64, ) -> Option> { - let sortdb = self.sortdb.as_ref().unwrap(); + let sortdb = self.chain.sortdb.as_ref().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let sort_handle = sortdb.index_handle(&tip.sortition_id); let Some(sn) = sort_handle.get_block_snapshot_by_height(height).unwrap() else { @@ -3812,7 +3576,7 @@ pub mod test { ConsensusHash, Option, ) { - let sortdb = self.sortdb.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); let (block_height, block_hash, epoch_id) = { let tip 
= SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), tip.block_height + 1) @@ -3825,7 +3589,7 @@ pub mod test { } let block_header = Self::make_next_burnchain_block( - &self.config.burnchain, + &self.config.chain_config.burnchain, tip.block_height, &tip.burn_header_hash, blockstack_ops.len() as u64, @@ -3841,7 +3605,7 @@ pub mod test { if update_burnchain { Self::add_burnchain_block( - &self.config.burnchain, + &self.config.chain_config.burnchain, &block_header, blockstack_ops.clone(), ); @@ -3850,11 +3614,17 @@ pub mod test { }; let missing_pox_anchor_block_hash_opt = if epoch_id < StacksEpochId::Epoch30 { - self.coord + self.chain + .coord .handle_new_burnchain_block() .unwrap() .into_missing_block_hash() - } else if self.coord.handle_new_nakamoto_burnchain_block().unwrap() { + } else if self + .chain + .coord + .handle_new_nakamoto_burnchain_block() + .unwrap() + { None } else { Some(BlockHeaderHash([0x00; 32])) @@ -3875,7 +3645,7 @@ pub mod test { ); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - self.sortdb = Some(sortdb); + self.chain.sortdb = Some(sortdb); ( block_height, block_hash, @@ -3887,8 +3657,8 @@ pub mod test { /// Pre-process an epoch 2.x Stacks block. /// Validate it and store it to staging. 
pub fn preprocess_stacks_block(&mut self, block: &StacksBlock) -> Result { - let sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut node = self.chain.stacks_node.take().unwrap(); let res = { let sn = { let ic = sortdb.index_conn(); @@ -3942,11 +3712,11 @@ pub mod test { &block.block_hash(), &pox_id ); - self.coord.handle_new_stacks_block().unwrap(); + self.chain.coord.handle_new_stacks_block().unwrap(); } - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(node); res } @@ -3957,8 +3727,8 @@ pub mod test { microblocks: &[StacksMicroblock], ) -> Result { assert!(!microblocks.is_empty()); - let sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut node = self.chain.stacks_node.take().unwrap(); let res = { let anchor_block_hash = microblocks[0].header.prev_block.clone(); let sn = { @@ -3997,8 +3767,8 @@ pub mod test { res }; - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(node); res } @@ -4009,8 +3779,8 @@ pub mod test { block: &StacksBlock, microblocks: &[StacksMicroblock], ) { - let sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut node = self.chain.stacks_node.take().unwrap(); { let ic = sortdb.index_conn(); let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); @@ -4018,7 +3788,7 @@ pub mod test { .preprocess_stacks_epoch(&ic, &tip, block, microblocks) .unwrap(); } - self.coord.handle_new_stacks_block().unwrap(); + self.chain.coord.handle_new_stacks_block().unwrap(); let pox_id = { let ic = sortdb.index_conn(); @@ -4033,8 +3803,8 @@ pub mod test { &pox_id ); - self.sortdb = Some(sortdb); - self.stacks_node = 
Some(node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(node); } /// Store the given epoch 2.x Stacks block and microblock to the given node's staging, @@ -4052,13 +3822,13 @@ pub mod test { node.chainstate .preprocess_stacks_epoch(&ic, &tip, block, microblocks)?; } - self.coord.handle_new_stacks_block()?; + self.chain.coord.handle_new_stacks_block()?; let pox_id = { let ic = sortdb.index_conn(); let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id)?; - sortdb_reader.get_pox_id()?; + sortdb_reader.get_pox_id()? }; test_debug!( "\n\n{:?}: after stacks block {:?}, tip PoX ID is {:?}\n\n", @@ -4076,12 +3846,12 @@ pub mod test { block: &StacksBlock, microblocks: &[StacksMicroblock], ) -> Result<(), coordinator_error> { - let sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut node = self.chain.stacks_node.take().unwrap(); let res = self.inner_process_stacks_epoch_at_tip(&sortdb, &mut node, block, microblocks); - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(node); res } @@ -4093,8 +3863,8 @@ pub mod test { consensus_hash: &ConsensusHash, microblocks: &[StacksMicroblock], ) { - let sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut node = self.chain.stacks_node.take().unwrap(); { let ic = sortdb.index_conn(); Relayer::process_new_anchored_block( @@ -4113,7 +3883,7 @@ pub mod test { .unwrap(); } } - self.coord.handle_new_stacks_block().unwrap(); + self.chain.coord.handle_new_stacks_block().unwrap(); let pox_id = { let ic = sortdb.index_conn(); @@ -4129,8 +3899,8 @@ pub mod test { &pox_id ); - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); + self.chain.sortdb = 
Some(sortdb); + self.chain.stacks_node = Some(node); } pub fn add_empty_burnchain_block(&mut self) -> (u64, BurnchainHeaderHash, ConsensusHash) { @@ -4141,7 +3911,7 @@ pub mod test { let (burn_ops, ..) = self.begin_nakamoto_tenure(TenureChangeCause::BlockFound); let result = self.next_burnchain_block(burn_ops); // remove the last block commit so that the testpeer doesn't try to build off of this tenure - self.miner.block_commits.pop(); + self.chain.miner.block_commits.pop(); result } @@ -4150,33 +3920,33 @@ pub mod test { } pub fn chainstate(&mut self) -> &mut StacksChainState { - &mut self.stacks_node.as_mut().unwrap().chainstate + &mut self.chain.stacks_node.as_mut().unwrap().chainstate } pub fn chainstate_ref(&self) -> &StacksChainState { - &self.stacks_node.as_ref().unwrap().chainstate + &self.chain.stacks_node.as_ref().unwrap().chainstate } pub fn sortdb(&mut self) -> &mut SortitionDB { - self.sortdb.as_mut().unwrap() + self.chain.sortdb.as_mut().unwrap() } pub fn sortdb_ref(&mut self) -> &SortitionDB { - self.sortdb.as_ref().unwrap() + self.chain.sortdb.as_ref().unwrap() } pub fn with_dbs(&mut self, f: F) -> R where F: FnOnce(&mut TestPeer, &mut SortitionDB, &mut TestStacksNode, &mut MemPoolDB) -> R, { - let mut sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); let res = f(self, &mut sortdb, &mut stacks_node, &mut mempool); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); self.mempool = Some(mempool); res } @@ -4190,8 +3960,8 @@ pub mod test { &mut MemPoolDB, ) -> Result, { - let mut sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut 
stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); let res = f( @@ -4201,8 +3971,8 @@ pub mod test { &mut mempool, ); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); self.mempool = Some(mempool); res } @@ -4216,16 +3986,16 @@ pub mod test { &mut TestStacksNode, ) -> Result, { - let mut stacks_node = self.stacks_node.take().unwrap(); - let mut sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); let res = f( &mut sortdb, - &mut self.miner, - &mut self.config.spending_account, + &mut self.chain.miner, + &mut self.config.chain_config.spending_account, &mut stacks_node, ); - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); res } @@ -4239,8 +4009,8 @@ pub mod test { &mut MemPoolDB, ) -> Result, { - let mut sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); let res = f( @@ -4251,8 +4021,8 @@ pub mod test { &mut mempool, ); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); self.mempool = Some(mempool); res } @@ -4266,14 +4036,14 @@ pub mod test { &mut MemPoolDB, ) -> Result, { - let mut sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); let res = f(self, &mut sortdb, &mut stacks_node.chainstate, &mut 
mempool); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); self.mempool = Some(mempool); res } @@ -4285,13 +4055,14 @@ pub mod test { txs: &[StacksTransaction], coinbase_nonce: &mut usize, ) -> StacksBlockId { - let microblock_privkey = self.miner.next_microblock_privkey(); + let microblock_privkey = self.chain.miner.next_microblock_privkey(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(self.sortdb.as_ref().unwrap().conn()) - .unwrap(); - let burnchain = self.config.burnchain.clone(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + self.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let burnchain = self.config.chain_config.burnchain.clone(); let (burn_ops, stacks_block, microblocks) = self.make_tenure( |ref mut miner, @@ -4374,13 +4145,13 @@ pub mod test { Option<&StacksMicroblockHeader>, ) -> (StacksBlock, Vec), { - let mut sortdb = self.sortdb.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let mut burn_block = TestBurnchainBlock::new(&tip, 0); - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); - let parent_block_opt = stacks_node.get_last_anchored_block(&self.miner); + let parent_block_opt = stacks_node.get_last_anchored_block(&self.chain.miner); let parent_sortition_opt = parent_block_opt.as_ref().and_then(|parent_block| { let ic = sortdb.index_conn(); SortitionDB::get_block_snapshot_for_winning_stacks_block( @@ -4391,15 +4162,19 @@ pub mod test { .unwrap() }); - let parent_microblock_header_opt = - get_last_microblock_header(&stacks_node, &self.miner, parent_block_opt.as_ref()); - let last_key = stacks_node.get_last_key(&self.miner); + let 
parent_microblock_header_opt = get_last_microblock_header( + &stacks_node, + &self.chain.miner, + parent_block_opt.as_ref(), + ); + let last_key = stacks_node.get_last_key(&self.chain.miner); - let network_id = self.config.network_id; - let chainstate_path = self.chainstate_path.clone(); + let network_id = self.config.chain_config.network_id; + let chainstate_path = self.chain.chainstate_path.clone(); let burn_block_height = burn_block.block_height; let proof = self + .chain .miner .make_proof( &last_key.public_key, @@ -4408,7 +4183,7 @@ pub mod test { .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", last_key.public_key)); let (stacks_block, microblocks) = tenure_builder( - &mut self.miner, + &mut self.chain.miner, &mut sortdb, &mut stacks_node.chainstate, &proof, @@ -4419,7 +4194,7 @@ pub mod test { let mut block_commit_op = stacks_node.make_tenure_commitment( &sortdb, &mut burn_block, - &mut self.miner, + &mut self.chain.miner, &stacks_block, microblocks.clone(), 1000, @@ -4433,14 +4208,15 @@ pub mod test { block_commit_op.parent_vtxindex = 0; } - let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + let leader_key_op = + stacks_node.add_key_register(&mut burn_block, &mut self.chain.miner); // patch in reward set info match get_next_recipients( &tip, &mut stacks_node.chainstate, &mut sortdb, - &self.config.burnchain, + &self.config.chain_config.burnchain, &OnChainRewardSetProvider::new(), ) { Ok(recipients) => { @@ -4459,6 +4235,7 @@ pub mod test { None => { if self .config + .chain_config .burnchain .is_in_prepare_phase(burn_block.block_height) { @@ -4483,8 +4260,8 @@ pub mod test { } }; - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); ( vec![ BlockstackOperationType::LeaderKeyRegister(leader_key_op), @@ -4503,26 +4280,29 @@ pub mod test { StacksBlock, Vec, ) { - let sortdb = self.sortdb.take().unwrap(); + let sortdb = 
self.chain.sortdb.take().unwrap(); let mut burn_block = { let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); TestBurnchainBlock::new(&sn, 0) }; - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); - let parent_block_opt = stacks_node.get_last_anchored_block(&self.miner); - let parent_microblock_header_opt = - get_last_microblock_header(&stacks_node, &self.miner, parent_block_opt.as_ref()); - let last_key = stacks_node.get_last_key(&self.miner); + let parent_block_opt = stacks_node.get_last_anchored_block(&self.chain.miner); + let parent_microblock_header_opt = get_last_microblock_header( + &stacks_node, + &self.chain.miner, + parent_block_opt.as_ref(), + ); + let last_key = stacks_node.get_last_key(&self.chain.miner); - let network_id = self.config.network_id; - let chainstate_path = self.chainstate_path.clone(); + let network_id = self.config.chain_config.network_id; + let chainstate_path = self.chain.chainstate_path.clone(); let burn_block_height = burn_block.block_height; let (stacks_block, microblocks, block_commit_op) = stacks_node.mine_stacks_block( &sortdb, - &mut self.miner, + &mut self.chain.miner, &mut burn_block, &last_key, parent_block_opt.as_ref(), @@ -4554,10 +4334,11 @@ pub mod test { }, ); - let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + let leader_key_op = + stacks_node.add_key_register(&mut burn_block, &mut self.chain.miner); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); ( vec![ BlockstackOperationType::LeaderKeyRegister(leader_key_op), @@ -4586,17 +4367,17 @@ pub mod test { } pub fn get_burnchain_view(&mut self) -> Result { - let sortdb = self.sortdb.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); let view_res = { let chaintip = 
SortitionDB::get_canonical_burn_chain_tip(&sortdb.index_conn()).unwrap(); SortitionDB::get_burnchain_view( &sortdb.index_conn(), - &self.config.burnchain, + &self.config.chain_config.burnchain, &chaintip, ) }; - self.sortdb = Some(sortdb); + self.chain.sortdb = Some(sortdb); view_res } @@ -4617,9 +4398,9 @@ pub mod test { pub fn make_client_convo(&self) -> ConversationP2P { ConversationP2P::new( - self.config.network_id, + self.config.chain_config.network_id, self.config.peer_version, - &self.config.burnchain, + &self.config.chain_config.burnchain, &SocketAddr::new( IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), self.config.server_port, @@ -4628,6 +4409,7 @@ pub mod test { false, 0, self.config + .chain_config .epochs .clone() .unwrap_or(StacksEpoch::unit_test_3_0(0)), @@ -4636,7 +4418,7 @@ pub mod test { pub fn make_client_local_peer(&self, privk: StacksPrivateKey) -> LocalPeer { LocalPeer::new( - self.config.network_id, + self.config.chain_config.network_id, self.network.local_peer.parent_network_id, PeerAddress::from_socketaddr(&SocketAddr::new( IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), @@ -4653,7 +4435,11 @@ pub mod test { pub fn get_burn_block_height(&self) -> u64 { SortitionDB::get_canonical_burn_chain_tip( - self.sortdb.as_ref().expect("Failed to get sortdb").conn(), + self.chain + .sortdb + .as_ref() + .expect("Failed to get sortdb") + .conn(), ) .expect("Failed to get canonical burn chain tip") .block_height @@ -4662,6 +4448,7 @@ pub mod test { pub fn get_reward_cycle(&self) -> u64 { let block_height = self.get_burn_block_height(); self.config + .chain_config .burnchain .block_height_to_reward_cycle(block_height) .unwrap_or_else(|| { @@ -4671,8 +4458,8 @@ pub mod test { /// Verify that the sortition DB migration into Nakamoto worked correctly. 
pub fn check_nakamoto_migration(&mut self) { - let mut sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut node = self.chain.stacks_node.take().unwrap(); let chainstate = &mut node.chainstate; let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -4776,8 +4563,8 @@ pub mod test { tx.commit().unwrap(); let migrator = SortitionDBMigrator::new( - self.config.burnchain.clone(), - &self.chainstate_path, + self.config.chain_config.burnchain.clone(), + &self.chain.chainstate_path, None, ) .unwrap(); @@ -4826,8 +4613,8 @@ pub mod test { assert_eq!(restored_chain_tips, all_chain_tips); assert_eq!(restored_reward_sets, all_preprocessed_reward_sets); - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(node); } /// Verify that all malleablized blocks are duly processed @@ -4836,7 +4623,7 @@ pub mod test { all_blocks: Vec, expected_siblings: usize, ) { - if !self.mine_malleablized_blocks { + if !self.chain.mine_malleablized_blocks { return; } for block in all_blocks.iter() { @@ -4866,12 +4653,12 @@ pub mod test { /// Set the nakamoto tenure to mine on pub fn mine_nakamoto_on(&mut self, parent_tenure: Vec) { - self.nakamoto_parent_tenure_opt = Some(parent_tenure); + self.chain.nakamoto_parent_tenure_opt = Some(parent_tenure); } /// Clear the tenure to mine on. 
This causes the miner to build on the canonical tip pub fn mine_nakamoto_on_canonical_tip(&mut self) { - self.nakamoto_parent_tenure_opt = None; + self.chain.nakamoto_parent_tenure_opt = None; } /// Get an account off of a tip @@ -4880,8 +4667,13 @@ pub mod test { tip: &StacksBlockId, account: &PrincipalData, ) -> StacksAccount { - let sortdb = self.sortdb.take().expect("FATAL: sortdb not restored"); + let sortdb = self + .chain + .sortdb + .take() + .expect("FATAL: sortdb not restored"); let mut node = self + .chain .stacks_node .take() .expect("FATAL: chainstate not restored"); @@ -4896,8 +4688,8 @@ pub mod test { .unwrap() .unwrap(); - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(node); acct } } diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 83941a33cc..df43b11c44 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -681,8 +681,8 @@ mod test { let view = peer.get_burnchain_view().unwrap(); let (http_sx, http_rx) = sync_channel(1); - let network_id = peer.config.network_id; - let chainstate_path = peer.chainstate_path.clone(); + let network_id = peer.config.chain_config.network_id; + let chainstate_path = peer.chain.chainstate_path.clone(); let (num_events_sx, num_events_rx) = sync_channel(1); let http_thread = thread::spawn(move || { diff --git a/stackslib/src/net/tests/convergence.rs b/stackslib/src/net/tests/convergence.rs index 853de5da3c..8440954eff 100644 --- a/stackslib/src/net/tests/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -87,7 +87,7 @@ fn setup_peer_config( conf.connection_opts.disable_block_download = true; let j = i as u32; - conf.burnchain.peer_version = PEER_VERSION_TESTNET | (j << 16) | (j << 8) | j; // different non-major versions for each peer + conf.chain_config.burnchain.peer_version = PEER_VERSION_TESTNET | (j << 16) | (j << 8) | j; // different non-major versions for each peer // 
even-number peers support stacker DBs. // odd-number peers do not diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index c07f9c73c7..5c929b58c0 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -77,16 +77,21 @@ fn test_get_block_availability() { peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let reward_cycle_length = peer_1_config.burnchain.pox_constants.reward_cycle_length as u64; + let reward_cycle_length = peer_1_config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length as u64; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); let num_blocks = 10; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer_1.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; @@ -105,17 +110,19 @@ fn test_get_block_availability() { if i < 6 { peer_1.next_burnchain_block_raw(burn_ops); } - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_2.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer_2.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); block_data.push((sn.consensus_hash.clone(), stacks_block, microblocks)); } let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height - peer_1.config.burnchain.first_block_height + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer_1.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + sn.block_height - peer_1.config.chain_config.burnchain.first_block_height }; let mut round = 0; @@ -208,7 +215,7 @@ fn test_get_block_availability() { fn get_blocks_inventory(peer: 
&TestPeer, start_height: u64, end_height: u64) -> BlocksInvData { let block_hashes = { let num_headers = end_height - start_height; - let ic = peer.sortdb.as_ref().unwrap().index_conn(); + let ic = peer.chain.sortdb.as_ref().unwrap().index_conn(); let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); let ancestor = SortitionDB::get_ancestor_snapshot(&ic, end_height, &tip.sortition_id) .unwrap() @@ -262,7 +269,7 @@ where port_base + ((2 * i) as u16), port_base + ((2 * i + 1) as u16), ); - peer_config.burnchain.first_block_height = first_sortition_height; + peer_config.chain_config.burnchain.first_block_height = first_sortition_height; peer_configs.push(peer_config); } @@ -273,9 +280,10 @@ where let mut num_blocks = 10; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peers[0].chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; @@ -283,9 +291,10 @@ where num_blocks = block_data.len(); let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peers[0].chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; @@ -339,7 +348,7 @@ where peer.with_peer_state(|peer, sortdb, chainstate, mempool| { for i in 0..(result.blocks.len() + result.confirmed_microblocks.len() + 1) { - peer.coord.handle_new_stacks_block().unwrap(); + peer.chain.coord.handle_new_stacks_block().unwrap(); let pox_id = { let ic = sortdb.index_conn(); @@ -363,9 +372,10 @@ where assert!(check_breakage(peer)); let peer_num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; @@ 
-532,7 +542,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain() { peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -667,18 +677,24 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { // peer[1] has a big initial balance let initial_balances = vec![( - PrincipalData::from(peer_configs[1].spending_account.origin_address().unwrap()), + PrincipalData::from( + peer_configs[1] + .chain_config + .spending_account + .origin_address() + .unwrap(), + ), 1_000_000_000_000_000, )]; - peer_configs[0].initial_balances = initial_balances.clone(); - peer_configs[1].initial_balances = initial_balances; + peer_configs[0].chain_config.initial_balances = initial_balances.clone(); + peer_configs[1].chain_config.initial_balances = initial_balances; }, |num_blocks, ref mut peers| { // build up block data to replicate let mut block_data = vec![]; - let spending_account = &mut peers[1].config.spending_account.clone(); - let burnchain = peers[1].config.burnchain.clone(); + let spending_account = &mut peers[1].config.chain_config.spending_account.clone(); + let burnchain = peers[1].config.chain_config.burnchain.clone(); // function to make a tenure in which a the peer's miner stacks its STX let mut make_stacking_tenure = |miner: &mut TestMiner, @@ -809,7 +825,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -897,7 +913,7 @@ pub fn test_get_blocks_and_microblocks_5_peers_star() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), + 
peers[0].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -968,7 +984,7 @@ pub fn test_get_blocks_and_microblocks_5_peers_line() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1047,7 +1063,7 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_connections() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1123,7 +1139,7 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_sockets() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1208,7 +1224,7 @@ pub fn test_get_blocks_and_microblocks_ban_url() { peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1303,7 +1319,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc let signed_tx = sign_standard_singlesig_tx( next_microblock_payload, - &peers[1].miner.privks[0], + &peers[1].chain.miner.privks[0], last_nonce + 1, 0, ); @@ -1317,7 +1333,15 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc mblock.header.sequence += 1; mblock .header - .sign(peers[1].miner.microblock_privks.last().as_ref().unwrap()) + .sign( + peers[1] + .chain + .miner + .microblock_privks + .last() + .as_ref() + .unwrap(), + ) .unwrap(); microblocks.push(mblock); @@ -1333,7 +1357,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - 
peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); @@ -1348,12 +1372,12 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc } else { test_debug!("Build child block {}", i); let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); - let chainstate_path = peers[1].chainstate_path.clone(); - let burnchain = peers[1].config.burnchain.clone(); + let chainstate_path = peers[1].chain.chainstate_path.clone(); + let burnchain = peers[1].config.chain_config.burnchain.clone(); let (mut burn_ops, stacks_block, _) = peers[1].make_tenure( |ref mut miner, @@ -1423,7 +1447,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index c09020d6d8..dee9c22bd8 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -400,10 +400,11 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { let peer = make_nakamoto_peer_from_invs(function_name!(), &observer, rc_len as u32, 3, bitvecs); let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - peer.mine_malleablized_blocks = false; + peer.chain.mine_malleablized_blocks = false; - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); let tip = 
SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); @@ -606,7 +607,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { reward_cycle: tip_rc, }; - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); utd.try_accept_tenure_info( &sortdb, @@ -617,7 +618,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert!(utd.unconfirmed_tenure_start_block.is_some()); @@ -681,7 +682,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { reward_cycle: tip_rc, }; - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); utd.try_accept_tenure_info( &sortdb, @@ -692,7 +693,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert!(utd.unconfirmed_tenure_start_block.is_some()); @@ -780,7 +781,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { reward_cycle: tip_rc, }; - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); utd.try_accept_tenure_info( &sortdb, @@ -791,7 +792,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert!(utd.unconfirmed_tenure_start_block.is_some()); @@ -878,7 +879,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { reward_cycle: tip_rc, }; - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); utd.try_accept_tenure_info( &sortdb, @@ -889,7 +890,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ) 
.unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert!(utd.unconfirmed_tenure_start_block.is_some()); @@ -955,7 +956,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { reward_cycle: tip_rc, }; - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); utd.try_accept_tenure_info( &sortdb, @@ -966,7 +967,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert!(utd.unconfirmed_tenure_start_block.is_some()); @@ -1018,7 +1019,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { reward_cycle: tip_rc, }; - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); utd.try_accept_tenure_info( &sortdb, @@ -1029,7 +1030,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert!(utd.unconfirmed_tenure_start_block.is_some()); @@ -1325,8 +1326,9 @@ fn test_make_tenure_downloaders() { let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); @@ -2107,8 +2109,9 @@ fn test_nakamoto_download_run_2_peers() { let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - 
NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); @@ -2148,13 +2151,17 @@ fn test_nakamoto_download_run_2_peers() { ); test_debug!("ops = {:?}", &ops); let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, + &boot_peer.config.chain_config.burnchain, sn.block_height, &sn.burn_header_hash, ops.len() as u64, false, ); - TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + TestPeer::add_burnchain_block( + &boot_peer.config.chain_config.burnchain, + &block_header, + ops.clone(), + ); } let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); @@ -2216,8 +2223,9 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); @@ -2255,13 +2263,17 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { ); test_debug!("ops = {:?}", &ops); let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, + &boot_peer.config.chain_config.burnchain, sn.block_height, &sn.burn_header_hash, ops.len() as u64, false, ); - TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + TestPeer::add_burnchain_block( + &boot_peer.config.chain_config.burnchain, 
+ &block_header, + ops.clone(), + ); } let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); @@ -2336,8 +2348,9 @@ fn test_nakamoto_microfork_download_run_2_peers() { }); peer.refresh_burnchain_view(); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); // create a microfork let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); @@ -2435,13 +2448,17 @@ fn test_nakamoto_microfork_download_run_2_peers() { ); test_debug!("ops = {:?}", &ops); let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, + &boot_peer.config.chain_config.burnchain, sn.block_height, &sn.burn_header_hash, ops.len() as u64, false, ); - TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + TestPeer::add_burnchain_block( + &boot_peer.config.chain_config.burnchain, + &block_header, + ops.clone(), + ); } let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); @@ -2513,8 +2530,9 @@ fn test_nakamoto_download_run_2_peers_with_one_shadow_block() { let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); // create a shadow block let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); @@ -2610,21 +2628,25 @@ fn test_nakamoto_download_run_2_peers_with_one_shadow_block() { ); test_debug!("ops = {:?}", &ops); let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, + &boot_peer.config.chain_config.burnchain, sn.block_height, &sn.burn_header_hash, ops.len() as u64, false, ); - 
TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + TestPeer::add_burnchain_block( + &boot_peer.config.chain_config.burnchain, + &block_header, + ops.clone(), + ); } { - let mut node = boot_peer.stacks_node.take().unwrap(); + let mut node = boot_peer.chain.stacks_node.take().unwrap(); let tx = node.chainstate.staging_db_tx_begin().unwrap(); tx.add_shadow_block(&shadow_block).unwrap(); tx.commit().unwrap(); - boot_peer.stacks_node = Some(node); + boot_peer.chain.stacks_node = Some(node); } let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); @@ -2693,8 +2715,9 @@ fn test_nakamoto_download_run_2_peers_shadow_prepare_phase() { let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); // create a shadow block let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); @@ -2812,22 +2835,26 @@ fn test_nakamoto_download_run_2_peers_shadow_prepare_phase() { ); test_debug!("ops = {:?}", &ops); let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, + &boot_peer.config.chain_config.burnchain, sn.block_height, &sn.burn_header_hash, ops.len() as u64, false, ); - TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + TestPeer::add_burnchain_block( + &boot_peer.config.chain_config.burnchain, + &block_header, + ops.clone(), + ); } { - let mut node = boot_peer.stacks_node.take().unwrap(); + let mut node = boot_peer.chain.stacks_node.take().unwrap(); let tx = node.chainstate.staging_db_tx_begin().unwrap(); for shadow_block in shadow_blocks.into_iter() { tx.add_shadow_block(&shadow_block).unwrap(); } tx.commit().unwrap(); - boot_peer.stacks_node = Some(node); + 
boot_peer.chain.stacks_node = Some(node); } let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); @@ -2896,8 +2923,9 @@ fn test_nakamoto_download_run_2_peers_shadow_reward_cycles() { let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); // create a shadow block let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); @@ -3015,24 +3043,28 @@ fn test_nakamoto_download_run_2_peers_shadow_reward_cycles() { sn.block_height, &sn.burn_header_hash ); - test_debug!("ops = {:?}", &ops); + test_debug!("ops = {ops:?}"); let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, + &boot_peer.config.chain_config.burnchain, sn.block_height, &sn.burn_header_hash, ops.len() as u64, false, ); - TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + TestPeer::add_burnchain_block( + &boot_peer.config.chain_config.burnchain, + &block_header, + ops.clone(), + ); } { - let mut node = boot_peer.stacks_node.take().unwrap(); + let mut node = boot_peer.chain.stacks_node.take().unwrap(); let tx = node.chainstate.staging_db_tx_begin().unwrap(); for shadow_block in shadow_blocks.into_iter() { tx.add_shadow_block(&shadow_block).unwrap(); } tx.commit().unwrap(); - boot_peer.stacks_node = Some(node); + boot_peer.chain.stacks_node = Some(node); } let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); diff --git a/stackslib/src/net/tests/inv/epoch2x.rs b/stackslib/src/net/tests/inv/epoch2x.rs index 1d2ff1a1a6..0f7a10e52a 100644 --- a/stackslib/src/net/tests/inv/epoch2x.rs +++ b/stackslib/src/net/tests/inv/epoch2x.rs @@ -23,6 +23,7 @@ use crate::burnchains::db::BurnchainHeaderReader; use 
crate::burnchains::tests::BURNCHAIN_TEST_BLOCK_TIME; use crate::burnchains::{Burnchain, BurnchainBlockHeader, BurnchainView, PoxConstants}; use crate::chainstate::coordinator::tests::get_burnchain; +use crate::chainstate::tests::TestChainstate; use crate::net::chat::ConversationP2P; use crate::net::inv::inv2x::*; use crate::net::test::*; @@ -514,14 +515,14 @@ fn test_sync_inv_set_blocks_microblocks_available() { let mut peer_1 = TestPeer::new(peer_1_config.clone()); let mut peer_2 = TestPeer::new(peer_2_config.clone()); - let peer_1_test_path = TestPeer::make_test_path(&peer_1.config); - let peer_2_test_path = TestPeer::make_test_path(&peer_2.config); + let peer_1_test_path = TestChainstate::make_test_path(&peer_1.config.chain_config); + let peer_2_test_path = TestChainstate::make_test_path(&peer_2.config.chain_config); assert!(peer_1_test_path != peer_2_test_path); for (test_path, burnchain) in [ - (peer_1_test_path, &mut peer_1.config.burnchain), - (peer_2_test_path, &mut peer_2.config.burnchain), + (peer_1_test_path, &mut peer_1.config.chain_config.burnchain), + (peer_2_test_path, &mut peer_2.config.chain_config.burnchain), ] .iter_mut() { @@ -566,22 +567,21 @@ fn test_sync_inv_set_blocks_microblocks_available() { burnchain.first_block_hash = hdr.block_hash; } - peer_1_config.burnchain.first_block_height = 5; - peer_2_config.burnchain.first_block_height = 5; - peer_1.config.burnchain.first_block_height = 5; - peer_2.config.burnchain.first_block_height = 5; + peer_1_config.chain_config.burnchain.first_block_height = 5; + peer_2_config.chain_config.burnchain.first_block_height = 5; + peer_1.config.chain_config.burnchain.first_block_height = 5; + peer_2.config.chain_config.burnchain.first_block_height = 5; assert_eq!( - peer_1_config.burnchain.first_block_hash, - peer_2_config.burnchain.first_block_hash + peer_1_config.chain_config.burnchain.first_block_hash, + peer_2_config.chain_config.burnchain.first_block_hash ); - let burnchain = peer_1_config.burnchain; + let 
burnchain = peer_1_config.chain_config.burnchain; let num_blocks = 5; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height }; @@ -594,15 +594,15 @@ fn test_sync_inv_set_blocks_microblocks_available() { } let (tip, num_burn_blocks) = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - let num_burn_blocks = sn.block_height - peer_1.config.burnchain.first_block_height; + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); + let num_burn_blocks = + sn.block_height - peer_1.config.chain_config.burnchain.first_block_height; (sn, num_burn_blocks) }; let nk = peer_1.to_neighbor().addr; - let sortdb = peer_1.sortdb.take().unwrap(); + let sortdb = peer_1.chain.sortdb.take().unwrap(); peer_1.network.init_inv_sync_epoch2x(&sortdb); match peer_1.network.inv_state { Some(ref mut inv) => { @@ -612,10 +612,10 @@ fn test_sync_inv_set_blocks_microblocks_available() { panic!("No inv state"); } }; - peer_1.sortdb = Some(sortdb); + peer_1.chain.sortdb = Some(sortdb); for i in 0..num_blocks { - let sortdb = peer_1.sortdb.take().unwrap(); + let sortdb = peer_1.chain.sortdb.take().unwrap(); let sn = { let ic = sortdb.index_conn(); let sn = SortitionDB::get_ancestor_snapshot( @@ -625,14 +625,14 @@ fn test_sync_inv_set_blocks_microblocks_available() { ) .unwrap() .unwrap(); - eprintln!("{:?}", &sn); + eprintln!("{sn:?}"); sn }; - peer_1.sortdb = Some(sortdb); + peer_1.chain.sortdb = Some(sortdb); } for i in 0..num_blocks { - let sortdb = peer_1.sortdb.take().unwrap(); + let sortdb = peer_1.chain.sortdb.take().unwrap(); match peer_1.network.inv_state { Some(ref mut inv) => { assert!(!inv @@ -657,7 +657,7 @@ fn test_sync_inv_set_blocks_microblocks_available() { ) .unwrap() .unwrap(); - eprintln!("{:?}", &sn); 
+ eprintln!("{sn:?}"); sn }; @@ -733,7 +733,7 @@ fn test_sync_inv_set_blocks_microblocks_available() { panic!("No inv state"); } } - peer_1.sortdb = Some(sortdb); + peer_1.chain.sortdb = Some(sortdb); } } @@ -741,17 +741,25 @@ fn test_sync_inv_set_blocks_microblocks_available() { fn test_sync_inv_make_inv_messages() { let peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); - let indexer = BitcoinIndexer::new_unit_test(&peer_1_config.burnchain.working_dir); - let reward_cycle_length = peer_1_config.burnchain.pox_constants.reward_cycle_length; - let num_blocks = peer_1_config.burnchain.pox_constants.reward_cycle_length * 2; + let indexer = BitcoinIndexer::new_unit_test(&peer_1_config.chain_config.burnchain.working_dir); + let reward_cycle_length = peer_1_config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length; + let num_blocks = peer_1_config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length + * 2; assert_eq!(reward_cycle_length, 5); let mut peer_1 = TestPeer::new(peer_1_config); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height }; @@ -763,9 +771,9 @@ fn test_sync_inv_make_inv_messages() { } let (tip, num_burn_blocks) = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - let num_burn_blocks = sn.block_height - peer_1.config.burnchain.first_block_height; + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); + let num_burn_blocks = + sn.block_height - peer_1.config.chain_config.burnchain.first_block_height; (sn, num_burn_blocks) }; @@ -1249,42 +1257,42 @@ fn test_inv_sync_start_reward_cycle() { let block_scan_start = peer_1 .network - .get_block_scan_start(peer_1.sortdb.as_ref().unwrap(), 10); + 
.get_block_scan_start(peer_1.chain.sortdb.as_ref().unwrap(), 10); assert_eq!(block_scan_start, 7); peer_1.network.connection_opts.inv_reward_cycles = 1; let block_scan_start = peer_1 .network - .get_block_scan_start(peer_1.sortdb.as_ref().unwrap(), 10); + .get_block_scan_start(peer_1.chain.sortdb.as_ref().unwrap(), 10); assert_eq!(block_scan_start, 7); peer_1.network.connection_opts.inv_reward_cycles = 2; let block_scan_start = peer_1 .network - .get_block_scan_start(peer_1.sortdb.as_ref().unwrap(), 10); + .get_block_scan_start(peer_1.chain.sortdb.as_ref().unwrap(), 10); assert_eq!(block_scan_start, 6); peer_1.network.connection_opts.inv_reward_cycles = 3; let block_scan_start = peer_1 .network - .get_block_scan_start(peer_1.sortdb.as_ref().unwrap(), 10); + .get_block_scan_start(peer_1.chain.sortdb.as_ref().unwrap(), 10); assert_eq!(block_scan_start, 5); peer_1.network.connection_opts.inv_reward_cycles = 300; let block_scan_start = peer_1 .network - .get_block_scan_start(peer_1.sortdb.as_ref().unwrap(), 10); + .get_block_scan_start(peer_1.chain.sortdb.as_ref().unwrap(), 10); assert_eq!(block_scan_start, 0); peer_1.network.connection_opts.inv_reward_cycles = 0; let block_scan_start = peer_1 .network - .get_block_scan_start(peer_1.sortdb.as_ref().unwrap(), 1); + .get_block_scan_start(peer_1.chain.sortdb.as_ref().unwrap(), 1); assert_eq!(block_scan_start, 1); } @@ -1339,9 +1347,7 @@ fn test_sync_inv_2_peers_plain() { let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height + 1 }; @@ -1356,9 +1362,7 @@ fn test_sync_inv_2_peers_plain() { } let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = 
SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height + 1 }; @@ -1510,9 +1514,7 @@ fn test_sync_inv_2_peers_stale() { let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height + 1 }; @@ -1552,7 +1554,8 @@ fn test_sync_inv_2_peers_stale() { if let Some(peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { if peer_2_inv.inv.num_sortitions - == first_stacks_block_height - peer_1.config.burnchain.first_block_height + == first_stacks_block_height + - peer_1.config.chain_config.burnchain.first_block_height { for i in 0..first_stacks_block_height { assert!(!peer_2_inv.inv.has_ith_block(i)); @@ -1571,7 +1574,8 @@ fn test_sync_inv_2_peers_stale() { if let Some(peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { if peer_1_inv.inv.num_sortitions - == first_stacks_block_height - peer_1.config.burnchain.first_block_height + == first_stacks_block_height + - peer_1.config.chain_config.burnchain.first_block_height { peer_1_check = true; } @@ -1600,7 +1604,7 @@ fn test_sync_inv_2_peers_unstable() { peer_1_config.connection_opts.inv_reward_cycles = 10; peer_2_config.connection_opts.inv_reward_cycles = 10; - let stable_confs = peer_1_config.burnchain.stable_confirmations as u64; + let stable_confs = peer_1_config.chain_config.burnchain.stable_confirmations as u64; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); @@ -1611,9 +1615,7 @@ fn test_sync_inv_2_peers_unstable() { let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); 
sn.block_height + 1 }; @@ -1641,20 +1643,18 @@ fn test_sync_inv_2_peers_unstable() { // tips must differ { let sn1 = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - let sn2 = - SortitionDB::get_canonical_burn_chain_tip(peer_2.sortdb.as_ref().unwrap().conn()) - .unwrap(); + SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); + let sn2 = SortitionDB::get_canonical_burn_chain_tip( + peer_2.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_ne!(sn1.burn_header_hash, sn2.burn_header_hash); } let num_stable_blocks = num_blocks - stable_confs; let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height + 1 }; diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index a510c992c8..9618cea64b 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -153,7 +153,7 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { // sanity check -- nakamoto begins at height 37 assert_eq!( - peer.config.epochs, + peer.config.chain_config.epochs, Some(StacksEpoch::unit_test_3_0_only(37)) ); @@ -161,8 +161,8 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); assert_eq!(reward_cycle_invs.len(), 10); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); @@ -235,7 +235,7 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { // sanity check -- nakamoto 
begins at height 37 assert_eq!( - peer.config.epochs, + peer.config.chain_config.epochs, Some(StacksEpoch::unit_test_3_0_only(37)) ); @@ -243,8 +243,8 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); assert_eq!(reward_cycle_invs.len(), 8); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); @@ -310,7 +310,7 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { // sanity check -- nakamoto begins at height 37 assert_eq!( - peer.config.epochs, + peer.config.chain_config.epochs, Some(StacksEpoch::unit_test_3_0_only(37)) ); @@ -318,8 +318,8 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); assert_eq!(reward_cycle_invs.len(), 10); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); @@ -624,8 +624,9 @@ fn test_nakamoto_invs_full() { let (peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); eprintln!("{:#?}", &reward_cycle_invs); assert_eq!(reward_cycle_invs.len(), 10); @@ 
-657,8 +658,9 @@ fn test_nakamoto_invs_alternating() { let (peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); eprintln!("{:#?}", &reward_cycle_invs); assert_eq!(reward_cycle_invs.len(), 10); @@ -696,10 +698,11 @@ fn test_nakamoto_invs_sparse() { let (peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); - eprintln!("{:#?}", &reward_cycle_invs); + eprintln!("{reward_cycle_invs:#?}"); assert_eq!(reward_cycle_invs.len(), 12); check_inv_messages(bitvecs, 10, nakamoto_start, reward_cycle_invs); } @@ -731,8 +734,9 @@ fn test_nakamoto_invs_different_anchor_blocks() { let (peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); eprintln!("{:#?}", &reward_cycle_invs); assert_eq!(reward_cycle_invs.len(), 12); @@ -871,15 +875,17 @@ fn test_nakamoto_inv_sync_state_machine() { make_nakamoto_peers_from_invs(function_name!(), &observer, 10, 3, bitvecs.clone(), 1); let mut other_peer = other_peers.pop().unwrap(); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + 
&peer.config.chain_config.burnchain.pox_constants, + ); let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); + let sort_db = peer.chain.sortdb.as_mut().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; let total_rcs = peer .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -909,11 +915,11 @@ fn test_nakamoto_inv_sync_state_machine() { // `observer` std::thread::scope(|s| { s.spawn(|| { - let sortdb = other_peer.sortdb.take().unwrap(); + let sortdb = other_peer.chain.sortdb.take().unwrap(); inv_machine .process_getnakamotoinv_begins(&mut other_peer.network, &sortdb, false) .unwrap(); - other_peer.sortdb = Some(sortdb); + other_peer.chain.sortdb = Some(sortdb); let mut last_learned_rc = 0; loop { @@ -942,11 +948,11 @@ fn test_nakamoto_inv_sync_state_machine() { break; } - let sortdb = other_peer.sortdb.take().unwrap(); + let sortdb = other_peer.chain.sortdb.take().unwrap(); inv_machine .process_getnakamotoinv_begins(&mut other_peer.network, &sortdb, false) .unwrap(); - other_peer.sortdb = Some(sortdb); + other_peer.chain.sortdb = Some(sortdb); } sx.send(true).unwrap(); @@ -995,15 +1001,17 @@ fn test_nakamoto_inv_sync_across_epoch_change() { make_nakamoto_peers_from_invs(function_name!(), &observer, 10, 3, bitvecs, 1); let mut other_peer = other_peers.pop().unwrap(); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); + let sort_db = peer.chain.sortdb.as_mut().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; let total_rcs = peer .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1135,7 +1143,7 @@ fn test_nakamoto_make_tenure_inv_in_forks() { initial_balances, ); 
peer.refresh_burnchain_view(); - peer.mine_malleablized_blocks = false; + peer.chain.mine_malleablized_blocks = false; let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); let mut invgen_no_cache = InvGenerator::new_no_cache().with_tip_ancestor_search_depth(5); @@ -1766,7 +1774,7 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { initial_balances, ); peer.refresh_burnchain_view(); - peer.mine_malleablized_blocks = false; + peer.chain.mine_malleablized_blocks = false; let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); let mut invgen_no_cache = InvGenerator::new_no_cache().with_tip_ancestor_search_depth(5); @@ -2274,7 +2282,7 @@ fn test_nakamoto_make_tenure_inv_from_old_tips() { initial_balances, ); peer.refresh_burnchain_view(); - peer.mine_malleablized_blocks = false; + peer.chain.mine_malleablized_blocks = false; let sortdb = peer.sortdb_ref().reopen().unwrap(); let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); @@ -2371,8 +2379,9 @@ fn test_nakamoto_invs_shadow_blocks() { 0, initial_balances, ); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); let mut expected_ids = vec![]; diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index de617001b7..e3acb17211 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -48,8 +48,8 @@ fn test_mempool_sync_2_peers() { .map(|a| (a.to_account_principal(), 1000000000)) .collect(); - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances; + peer_1_config.chain_config.initial_balances = initial_balances.clone(); + peer_2_config.chain_config.initial_balances = initial_balances; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = 
TestPeer::new(peer_2_config); @@ -59,8 +59,9 @@ fn test_mempool_sync_2_peers() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -154,8 +155,9 @@ fn test_mempool_sync_2_peers() { } let num_burn_blocks = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -314,8 +316,8 @@ fn test_mempool_sync_2_peers_paginated() { .map(|a| (a.to_account_principal(), 1000000000)) .collect(); - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances; + peer_1_config.chain_config.initial_balances = initial_balances.clone(); + peer_2_config.chain_config.initial_balances = initial_balances; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); @@ -325,8 +327,9 @@ fn test_mempool_sync_2_peers_paginated() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -408,8 +411,9 @@ fn test_mempool_sync_2_peers_paginated() { peer_1.mempool = Some(peer_1_mempool); let num_burn_blocks = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -503,8 +507,8 @@ fn test_mempool_sync_2_peers_blacklisted() { .map(|a| 
(a.to_account_principal(), 1000000000)) .collect(); - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances; + peer_1_config.chain_config.initial_balances = initial_balances.clone(); + peer_2_config.chain_config.initial_balances = initial_balances; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); @@ -514,8 +518,9 @@ fn test_mempool_sync_2_peers_blacklisted() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -615,8 +620,9 @@ fn test_mempool_sync_2_peers_blacklisted() { peer_2.mempool = Some(peer_2_mempool); let num_burn_blocks = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -712,8 +718,8 @@ fn test_mempool_sync_2_peers_problematic() { .map(|a| (a.to_account_principal(), 1000000000)) .collect(); - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances; + peer_1_config.chain_config.initial_balances = initial_balances.clone(); + peer_2_config.chain_config.initial_balances = initial_balances; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); @@ -723,8 +729,9 @@ fn test_mempool_sync_2_peers_problematic() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -753,7 +760,7 @@ fn 
test_mempool_sync_2_peers_problematic() { let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx = make_contract_tx( pk, @@ -801,8 +808,9 @@ fn test_mempool_sync_2_peers_problematic() { peer_2.mempool = Some(peer_2_mempool); let num_burn_blocks = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -909,7 +917,7 @@ pub fn test_mempool_storage_nakamoto() { let mut total_blocks = 0; let mut all_txs = vec![]; - let stx_miner_key = peer.miner.nakamoto_miner_key(); + let stx_miner_key = peer.chain.miner.nakamoto_miner_key(); let stx_miner_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -919,8 +927,12 @@ pub fn test_mempool_storage_nakamoto() { .unwrap(); // duplicate handles to the chainstates so we can submit txs - let mut mempool = - MemPoolDB::open_test(false, peer.config.network_id, &peer.chainstate_path).unwrap(); + let mut mempool = MemPoolDB::open_test( + false, + peer.config.chain_config.network_id, + &peer.chain.chainstate_path, + ) + .unwrap(); let (mut chainstate, _) = peer.chainstate().reopen().unwrap(); let sortdb = peer.sortdb().reopen().unwrap(); @@ -935,9 +947,10 @@ pub fn test_mempool_storage_nakamoto() { tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let 
coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); debug!("Next burnchain block: {}", &consensus_hash); @@ -1017,8 +1030,8 @@ pub fn test_mempool_storage_nakamoto() { } let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -1092,15 +1105,17 @@ fn test_mempool_sync_2_peers_nakamoto_paginated() { ); let mut peer_2 = other_peers.pop().unwrap(); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer_1.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer_1.config.chain_config.burnchain.pox_constants, + ); let tip = { - let sort_db = peer_1.sortdb.as_mut().unwrap(); + let sort_db = peer_1.chain.sortdb.as_mut().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; let total_rcs = peer_1 .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1196,8 +1211,9 @@ fn test_mempool_sync_2_peers_nakamoto_paginated() { peer_1.mempool = Some(peer_1_mempool); let num_burn_blocks = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 57859fed31..60f31432ce 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -37,7 +37,7 @@ use stacks_common::types::chainstate::{ StacksPublicKey, TrieHash, }; use stacks_common::types::net::PeerAddress; -use stacks_common::types::{Address, StacksEpochId}; +use 
stacks_common::types::Address; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; @@ -50,8 +50,7 @@ use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; -use crate::chainstate::stacks::address::PoxAddress; -use crate::chainstate::stacks::boot::test::{key_to_stacks_addr, make_pox_4_lockup_chain_id}; +use crate::chainstate::stacks::boot::test::key_to_stacks_addr; use crate::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; @@ -73,7 +72,6 @@ use crate::net::{ PingData, StackerDBPushChunkData, StacksMessage, StacksMessageType, StacksNodeState, }; use crate::util_lib::boot::boot_code_id; -use crate::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; /// One step of a simulated Nakamoto node's bootup procedure. 
#[derive(Debug, PartialEq, Clone)] @@ -109,10 +107,10 @@ pub struct NakamotoBootPlan { impl NakamotoBootPlan { pub fn new(test_name: &str) -> Self { let (test_signers, test_stackers) = TestStacker::common_signing_set(); - let pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let default_config = TestChainstateConfig::default(); Self { test_name: test_name.to_string(), - pox_constants: TestPeerConfig::default().burnchain.pox_constants, + pox_constants: default_config.burnchain.pox_constants, private_key: StacksPrivateKey::from_seed(&[2]), initial_balances: vec![], test_stackers, @@ -121,12 +119,66 @@ impl NakamotoBootPlan { num_peers: 0, add_default_balance: true, malleablized_blocks: true, - network_id: TestPeerConfig::default().network_id, + network_id: default_config.network_id, txindex: false, epochs: None, } } + // Builds a TestChainstateConfig with shared parameters + fn build_nakamoto_chainstate_config(&self) -> TestChainstateConfig { + let mut chainstate_config = TestChainstateConfig::new(&self.test_name); + chainstate_config.network_id = self.network_id; + chainstate_config.txindex = self.txindex; + + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&self.private_key)], + ) + .unwrap(); + + let default_epoch = StacksEpoch::unit_test_3_0_only( + (self.pox_constants.pox_4_activation_height + + self.pox_constants.reward_cycle_length + + 1) + .into(), + ); + chainstate_config.epochs = Some(self.epochs.clone().unwrap_or(default_epoch)); + chainstate_config.initial_balances = vec![]; + if self.add_default_balance { + chainstate_config + .initial_balances + .push((addr.to_account_principal(), 1_000_000_000_000_000_000)); + } + chainstate_config + .initial_balances + .extend(self.initial_balances.clone()); + + let fee_payment_balance = 10_000; + let stacker_balances = self.test_stackers.iter().map(|test_stacker| { + ( + 
PrincipalData::from(key_to_stacks_addr(&test_stacker.stacker_private_key)), + u64::try_from(test_stacker.amount).expect("Stacking amount too large"), + ) + }); + let signer_balances = self.test_stackers.iter().map(|test_stacker| { + ( + PrincipalData::from(key_to_stacks_addr(&test_stacker.signer_private_key)), + fee_payment_balance, + ) + }); + + chainstate_config.initial_balances.extend(stacker_balances); + chainstate_config.initial_balances.extend(signer_balances); + chainstate_config.test_signers = Some(self.test_signers.clone()); + chainstate_config.test_stackers = Some(self.test_stackers.clone()); + chainstate_config.burnchain.pox_constants = self.pox_constants.clone(); + + chainstate_config + } + pub fn with_private_key(mut self, privk: StacksPrivateKey) -> Self { self.private_key = privk; self @@ -266,8 +318,8 @@ impl NakamotoBootPlan { for (i, peer) in other_peers.iter_mut().enumerate() { peer.next_burnchain_block(burn_ops.to_vec()); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); let mut sort_handle = sortdb.index_handle(&sort_tip); @@ -276,10 +328,9 @@ impl NakamotoBootPlan { for block in blocks { debug!( - "Apply block {} (sighash {}) to peer {} ({})", + "Apply block {} (sighash {}) to peer {i} ({})", &block.block_id(), &block.header.signer_signature_hash(), - i, &peer.to_neighbor().addr ); let block_id = block.block_id(); @@ -294,29 +345,25 @@ impl NakamotoBootPlan { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if accepted.is_accepted() { - test_debug!("Accepted Nakamoto block {block_id} to other peer {}", i); - peer.coord.handle_new_nakamoto_stacks_block().unwrap(); - } else { - panic!( - "Did NOT accept Nakamoto block {block_id} to other peer {}", - i - ); - } + assert!( + accepted.is_accepted(), + "Did NOT accept 
Nakamoto block {block_id} to other peer {i}" + ); + test_debug!("Accepted Nakamoto block {block_id} to other peer {i}"); + peer.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); possible_chain_tips.insert(block.block_id()); // process it - peer.coord.handle_new_stacks_block().unwrap(); - peer.coord.handle_new_nakamoto_stacks_block().unwrap(); + peer.chain.coord.handle_new_stacks_block().unwrap(); + peer.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); } for block in malleablized_blocks { debug!( - "Apply malleablized block {} (sighash {}) to peer {} ({})", + "Apply malleablized block {} (sighash {}) to peer {i} ({})", &block.block_id(), &block.header.signer_signature_hash(), - i, &peer.to_neighbor().addr ); let block_id = block.block_id(); @@ -331,28 +378,22 @@ impl NakamotoBootPlan { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if accepted.is_accepted() { - test_debug!( - "Accepted malleablized Nakamoto block {block_id} to other peer {}", - i - ); - peer.coord.handle_new_nakamoto_stacks_block().unwrap(); - } else { - panic!( - "Did NOT accept malleablized Nakamoto block {block_id} to other peer {}", - i - ); - } + assert!( + accepted.is_accepted(), + "Did NOT accept malleablized Nakamoto block {block_id} to other peer {i}" + ); + test_debug!("Accepted malleablized Nakamoto block {block_id} to other peer {i}"); + peer.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); possible_chain_tips.insert(block.block_id()); // process it - peer.coord.handle_new_stacks_block().unwrap(); - peer.coord.handle_new_nakamoto_stacks_block().unwrap(); + peer.chain.coord.handle_new_stacks_block().unwrap(); + peer.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); } - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); peer.refresh_burnchain_view(); assert!(possible_chain_tips.contains(&peer.network.stacks_tip.block_id())); @@ -362,525 +403,67 @@ impl NakamotoBootPlan { 
/// Make a chainstate and transition it into the Nakamoto epoch. /// The node needs to be stacking; otherwise, Nakamoto won't activate. pub fn boot_nakamoto_chainstate( - mut self, + self, observer: Option<&TestEventObserver>, ) -> TestChainstate<'_> { - let mut chainstate_config = TestChainstateConfig::new(&self.test_name); - chainstate_config.txindex = self.txindex; - chainstate_config.network_id = self.network_id; - - let addr = StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(&self.private_key)], - ) - .unwrap(); - - let default_epoch = StacksEpoch::unit_test_3_0_only( - (self.pox_constants.pox_4_activation_height - + self.pox_constants.reward_cycle_length - + 1) - .into(), - ); - chainstate_config.epochs = Some(self.epochs.clone().unwrap_or(default_epoch)); - chainstate_config.initial_balances = vec![]; - if self.add_default_balance { - chainstate_config - .initial_balances - .push((addr.to_account_principal(), 1_000_000_000_000_000_000)); - } - chainstate_config - .initial_balances - .append(&mut self.initial_balances.clone()); - - // Create some balances for test Stackers - // They need their stacking amount + enough to pay fees - let fee_payment_balance = 10_000; - let stacker_balances = self.test_stackers.iter().map(|test_stacker| { - ( - PrincipalData::from(key_to_stacks_addr(&test_stacker.stacker_private_key)), - u64::try_from(test_stacker.amount).expect("Stacking amount too large"), - ) - }); - let signer_balances = self.test_stackers.iter().map(|test_stacker| { - ( - PrincipalData::from(key_to_stacks_addr(&test_stacker.signer_private_key)), - fee_payment_balance, - ) - }); - - chainstate_config.initial_balances.extend(stacker_balances); - chainstate_config.initial_balances.extend(signer_balances); - chainstate_config.test_signers = Some(self.test_signers.clone()); - chainstate_config.test_stackers = Some(self.test_stackers.clone()); - 
chainstate_config.burnchain.pox_constants = self.pox_constants.clone(); - let mut chain = TestChainstate::new_with_observer(chainstate_config.clone(), observer); - + let chainstate_config = self.build_nakamoto_chainstate_config(); + let mut chain = TestChainstate::new_with_observer(chainstate_config, observer); chain.mine_malleablized_blocks = self.malleablized_blocks; - - self.advance_to_nakamoto_chainstate(&mut chain); - chain - } - - /// Bring a TestChainstate into the Nakamoto Epoch - fn advance_to_nakamoto_chainstate(&mut self, chain: &mut TestChainstate) { let mut chain_nonce = 0; - let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&self.private_key)); - let default_pox_addr = - PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes().clone()); - - let mut sortition_height = chain.get_burn_block_height(); - debug!("\n\n======================"); - debug!( - "PoxConstants = {:#?}", - &chain.config.burnchain.pox_constants - ); - debug!("tip = {sortition_height}"); - debug!("========================\n\n"); - - let epoch_25_height = chain - .config - .epochs - .as_ref() - .unwrap() - .iter() - .find(|e| e.epoch_id == StacksEpochId::Epoch25) - .unwrap() - .start_height; - - let epoch_30_height = chain - .config - .epochs - .as_ref() - .unwrap() - .iter() - .find(|e| e.epoch_id == StacksEpochId::Epoch30) - .unwrap() - .start_height; - - // advance to just past pox-4 instantiation - let mut blocks_produced = false; - while sortition_height <= epoch_25_height { - chain.tenure_with_txs(&[], &mut chain_nonce); - sortition_height = chain.get_burn_block_height(); - blocks_produced = true; - } - - // need to produce at least 1 block before making pox-4 lockups: - // the way `burn-block-height` constant works in Epoch 2.5 is such - // that if its the first block produced, this will be 0 which will - // prevent the lockups from being valid. 
- if !blocks_produced { - chain.tenure_with_txs(&[], &mut chain_nonce); - sortition_height = chain.get_burn_block_height(); - } - - debug!("\n\n======================"); - debug!("Make PoX-4 lockups"); - debug!("========================\n\n"); - - let reward_cycle = chain - .config - .burnchain - .block_height_to_reward_cycle(sortition_height) - .unwrap(); - - // Make all the test Stackers stack - let stack_txs: Vec<_> = chain - .config - .test_stackers - .clone() - .unwrap_or_default() - .iter() - .map(|test_stacker| { - let pox_addr = test_stacker - .pox_addr - .clone() - .unwrap_or(default_pox_addr.clone()); - let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); - let signature = make_pox_4_signer_key_signature( - &pox_addr, - &test_stacker.signer_private_key, - reward_cycle.into(), - &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, - chain.config.network_id, - 12, - max_amount, - 1, - ) - .unwrap() - .to_rsv(); - make_pox_4_lockup_chain_id( - &test_stacker.stacker_private_key, - 0, - test_stacker.amount, - &pox_addr, - 12, - &StacksPublicKey::from_private(&test_stacker.signer_private_key), - sortition_height + 1, - Some(signature), - max_amount, - 1, - chain.config.network_id, - ) - }) - .collect(); - - let mut stacks_block = chain.tenure_with_txs(&stack_txs, &mut chain_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(stacks_block, stacks_tip); - - debug!("\n\n======================"); - debug!("Advance to the Prepare Phase"); - debug!("========================\n\n"); - while !chain.config.burnchain.is_in_prepare_phase(sortition_height) { - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); - let old_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - stacks_block = 
chain.tenure_with_txs(&[], &mut chain_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_ne!(old_tip, stacks_tip); - sortition_height = chain.get_burn_block_height(); - } - - debug!("\n\n======================"); - debug!("Advance to Epoch 3.0"); - debug!("========================\n\n"); - - // advance to the start of epoch 3.0 - while sortition_height < epoch_30_height - 1 { - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); - let old_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - chain.tenure_with_txs(&[], &mut chain_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_ne!(old_tip, stacks_tip); - sortition_height = chain.get_burn_block_height(); - } - - debug!("\n\n======================"); - debug!("Welcome to Nakamoto!"); - debug!("========================\n\n"); + chain.advance_to_nakamoto_epoch(&self.private_key, &mut chain_nonce); + chain } /// Make a peer and transition it into the Nakamoto epoch. /// The node needs to be stacking; otherwise, Nakamoto won't activate. 
- fn boot_nakamoto_peers( - mut self, + /// Boot a TestPeer and followers into the Nakamoto epoch + pub fn boot_nakamoto_peers( + self, observer: Option<&TestEventObserver>, ) -> (TestPeer<'_>, Vec>) { let mut peer_config = TestPeerConfig::new(&self.test_name, 0, 0); - peer_config.network_id = self.network_id; + peer_config.chain_config = self.build_nakamoto_chainstate_config(); peer_config.private_key = self.private_key.clone(); - peer_config.txindex = self.txindex; - - let addr = StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(&self.private_key)], - ) - .unwrap(); - - // reward cycles are 5 blocks long - // first 25 blocks are boot-up - // reward cycle 6 instantiates pox-3 - // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation + peer_config.connection_opts.auth_token = Some("password".to_string()); peer_config .stacker_dbs .push(boot_code_id(MINERS_NAME, false)); - peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only( - (self.pox_constants.pox_4_activation_height - + self.pox_constants.reward_cycle_length - + 1) - .into(), - )); - peer_config.initial_balances = vec![]; - if self.add_default_balance { - peer_config - .initial_balances - .push((addr.to_account_principal(), 1_000_000_000_000_000_000)); - } - peer_config - .initial_balances - .append(&mut self.initial_balances.clone()); - peer_config.connection_opts.auth_token = Some("password".to_string()); - // Create some balances for test Stackers - // They need their stacking amount + enough to pay fees - let fee_payment_balance = 10_000; - let stacker_balances = self.test_stackers.iter().map(|test_stacker| { - ( - PrincipalData::from(key_to_stacks_addr(&test_stacker.stacker_private_key)), - u64::try_from(test_stacker.amount).expect("Stacking amount too large"), - ) - }); - let signer_balances = self.test_stackers.iter().map(|test_stacker| { - ( - 
PrincipalData::from(key_to_stacks_addr(&test_stacker.signer_private_key)), - fee_payment_balance, - ) - }); - - peer_config.initial_balances.extend(stacker_balances); - peer_config.initial_balances.extend(signer_balances); - peer_config.test_signers = Some(self.test_signers.clone()); - peer_config.test_stackers = Some(self.test_stackers.clone()); - peer_config.burnchain.pox_constants = self.pox_constants.clone(); let mut peer = TestPeer::new_with_observer(peer_config.clone(), observer); - - peer.mine_malleablized_blocks = self.malleablized_blocks; + peer.chain.mine_malleablized_blocks = self.malleablized_blocks; let mut other_peers = vec![]; for i in 0..self.num_peers { let mut other_config = peer_config.clone(); - other_config.test_name = format!("{}.follower", &peer.config.test_name); + other_config.chain_config.test_name = + format!("{}.follower", &peer_config.chain_config.test_name); other_config.server_port = 0; other_config.http_port = 0; - other_config.test_stackers = peer.config.test_stackers.clone(); + other_config.chain_config.test_stackers = + peer_config.chain_config.test_stackers.clone(); other_config.private_key = StacksPrivateKey::from_seed(&(i as u128).to_be_bytes()); - other_config.add_neighbor(&peer.to_neighbor()); let mut other_peer = TestPeer::new_with_observer(other_config, None); - other_peer.mine_malleablized_blocks = self.malleablized_blocks; - + other_peer.chain.mine_malleablized_blocks = self.malleablized_blocks; other_peers.push(other_peer); } - self.advance_to_nakamoto(&mut peer, &mut other_peers); - (peer, other_peers) - } - - /// Bring a TestPeer into the Nakamoto Epoch - fn advance_to_nakamoto(&mut self, peer: &mut TestPeer, other_peers: &mut [TestPeer]) { let mut peer_nonce = 0; let mut other_peer_nonces = vec![0; other_peers.len()]; - let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&self.private_key)); - let default_pox_addr = - PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes().clone()); - - 
let mut sortition_height = peer.get_burn_block_height(); - debug!("\n\n======================"); - debug!("PoxConstants = {:#?}", &peer.config.burnchain.pox_constants); - debug!("tip = {}", sortition_height); - debug!("========================\n\n"); - - let epoch_25_height = peer - .config - .epochs - .as_ref() - .unwrap() - .iter() - .find(|e| e.epoch_id == StacksEpochId::Epoch25) - .unwrap() - .start_height; - - let epoch_30_height = peer - .config - .epochs - .as_ref() - .unwrap() - .iter() - .find(|e| e.epoch_id == StacksEpochId::Epoch30) - .unwrap() - .start_height; - - // advance to just past pox-4 instantiation - let mut blocks_produced = false; - while sortition_height <= epoch_25_height { - peer.tenure_with_txs(&[], &mut peer_nonce); - for (other_peer, other_peer_nonce) in - other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) - { - other_peer.tenure_with_txs(&[], other_peer_nonce); - } - - sortition_height = peer.get_burn_block_height(); - blocks_produced = true; - } - - // need to produce at least 1 block before making pox-4 lockups: - // the way `burn-block-height` constant works in Epoch 2.5 is such - // that if its the first block produced, this will be 0 which will - // prevent the lockups from being valid. 
- if !blocks_produced { - peer.tenure_with_txs(&[], &mut peer_nonce); - for (other_peer, other_peer_nonce) in - other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) - { - other_peer.tenure_with_txs(&[], other_peer_nonce); - } - - sortition_height = peer.get_burn_block_height(); - } - - debug!("\n\n======================"); - debug!("Make PoX-4 lockups"); - debug!("========================\n\n"); - - let reward_cycle = peer - .config - .burnchain - .block_height_to_reward_cycle(sortition_height) - .unwrap(); - - // Make all the test Stackers stack - let stack_txs: Vec<_> = peer - .config - .test_stackers - .clone() - .unwrap_or_default() - .iter() - .map(|test_stacker| { - let pox_addr = test_stacker - .pox_addr - .clone() - .unwrap_or(default_pox_addr.clone()); - let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); - let signature = make_pox_4_signer_key_signature( - &pox_addr, - &test_stacker.signer_private_key, - reward_cycle.into(), - &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, - peer.config.network_id, - 12, - max_amount, - 1, - ) - .unwrap() - .to_rsv(); - make_pox_4_lockup_chain_id( - &test_stacker.stacker_private_key, - 0, - test_stacker.amount, - &pox_addr, - 12, - &StacksPublicKey::from_private(&test_stacker.signer_private_key), - sortition_height + 1, - Some(signature), - max_amount, - 1, - peer.config.network_id, - ) - }) - .collect(); - - let mut old_tip = peer.network.stacks_tip.clone(); - let mut stacks_block = peer.tenure_with_txs(&stack_txs, &mut peer_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); - if old_tip.block_id() != stacks_tip { - old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; - assert_eq!(old_tip, peer.network.parent_stacks_tip); - } + // Advance 
primary peer and other peers to Nakamoto epoch + peer.chain + .advance_to_nakamoto_epoch(&self.private_key, &mut peer_nonce); for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { - let mut old_tip = other_peer.network.stacks_tip.clone(); - other_peer.tenure_with_txs(&stack_txs, other_peer_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(other_peer.sortdb().conn()) - .unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); - if old_tip.block_id() != stacks_tip { - old_tip.burnchain_height = other_peer.network.parent_stacks_tip.burnchain_height; - assert_eq!(old_tip, other_peer.network.parent_stacks_tip); - } + other_peer + .chain + .advance_to_nakamoto_epoch(&self.private_key, other_peer_nonce); } - debug!("\n\n======================"); - debug!("Advance to the Prepare Phase"); - debug!("========================\n\n"); - while !peer.config.burnchain.is_in_prepare_phase(sortition_height) { - let mut old_tip = peer.network.stacks_tip.clone(); - stacks_block = peer.tenure_with_txs(&[], &mut peer_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); - if old_tip.block_id() != stacks_tip { - old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; - assert_eq!(old_tip, peer.network.parent_stacks_tip); - } - other_peers - .iter_mut() - .zip(other_peer_nonces.iter_mut()) - .for_each(|(peer, nonce)| { - let mut old_tip = peer.network.stacks_tip.clone(); - peer.tenure_with_txs(&[], nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()) - .unwrap(); - let stacks_tip = 
StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); - if old_tip.block_id() != stacks_tip { - old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; - assert_eq!(old_tip, peer.network.parent_stacks_tip); - } - }); - sortition_height = peer.get_burn_block_height(); - } - - debug!("\n\n======================"); - debug!("Advance to Epoch 3.0"); - debug!("========================\n\n"); - - // advance to the start of epoch 3.0 - while sortition_height < epoch_30_height - 1 { - let mut old_tip = peer.network.stacks_tip.clone(); - peer.tenure_with_txs(&[], &mut peer_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); - if old_tip.block_id() != stacks_tip { - old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; - assert_eq!(old_tip, peer.network.parent_stacks_tip); - } - - for (other_peer, other_peer_nonce) in - other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) - { - let mut old_tip = peer.network.stacks_tip.clone(); - other_peer.tenure_with_txs(&[], other_peer_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(other_peer.sortdb().conn()) - .unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); - if old_tip.block_id() != stacks_tip { - old_tip.burnchain_height = - other_peer.network.parent_stacks_tip.burnchain_height; - assert_eq!(old_tip, other_peer.network.parent_stacks_tip); - } - } - sortition_height = peer.get_burn_block_height(); - } - - debug!("\n\n======================"); - debug!("Welcome to Nakamoto!"); - debug!("========================\n\n"); + (peer, other_peers) } pub fn 
boot_into_nakamoto_peers( @@ -926,6 +509,7 @@ impl NakamotoBootPlan { blocks_since_last_tenure, ); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change_extend.clone()); @@ -988,7 +572,7 @@ impl NakamotoBootPlan { .collect(); let malleablized_blocks = - std::mem::replace(&mut peer.malleablized_blocks, vec![]); + std::mem::replace(&mut peer.chain.malleablized_blocks, vec![]); for mblk in malleablized_blocks.iter() { malleablized_block_ids.insert(mblk.block_id()); } @@ -1020,10 +604,11 @@ impl NakamotoBootPlan { last_tenure_change = Some(tenure_change.clone()); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); debug!("\n\nNew tenure: {}\n\n", &consensus_hash); @@ -1087,7 +672,7 @@ impl NakamotoBootPlan { .collect(); let malleablized_blocks = - std::mem::replace(&mut peer.malleablized_blocks, vec![]); + std::mem::replace(&mut peer.chain.malleablized_blocks, vec![]); for mblk in malleablized_blocks.iter() { malleablized_block_ids.insert(mblk.block_id()); } @@ -1111,8 +696,8 @@ impl NakamotoBootPlan { // check that our tenure-extends have been getting applied let (highest_tenure, sort_tip) = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -1149,8 +734,18 @@ impl NakamotoBootPlan { // transaction in `all_blocks` ran to completion if let Some(observer) = observer { let mut observed_blocks = observer.get_blocks(); - let mut block_idx = 
(peer.config.burnchain.pox_constants.pox_4_activation_height - + peer.config.burnchain.pox_constants.reward_cycle_length + let mut block_idx = (peer + .config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height + + peer + .config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length - 25) as usize; // filter out observed blocks that are malleablized @@ -1206,8 +801,8 @@ impl NakamotoBootPlan { // verify that all other peers kept pace with this peer for other_peer in other_peers.iter_mut() { let (other_highest_tenure, other_sort_tip) = { - let chainstate = &mut other_peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = other_peer.sortdb.as_mut().unwrap(); + let chainstate = &mut other_peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = other_peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -2055,8 +1650,8 @@ fn test_update_highest_stacks_height_of_neighbors( old_height.map(|h| (SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080), h)); peer.network.highest_stacks_neighbor = prev_highest_neighbor; - let peer_sortdb = peer.sortdb.take().unwrap(); - let mut peer_stacks_node = peer.stacks_node.take().unwrap(); + let peer_sortdb = peer.chain.sortdb.take().unwrap(); + let mut peer_stacks_node = peer.chain.stacks_node.take().unwrap(); let mut peer_mempool = peer.mempool.take().unwrap(); let rpc_args = RPCHandlerArgsType::make_default(); let mut node_state = StacksNodeState::new( diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs index 496567b31e..874d385c5e 100644 --- a/stackslib/src/net/tests/neighbors.rs +++ b/stackslib/src/net/tests/neighbors.rs @@ -322,7 +322,7 @@ fn test_step_walk_1_neighbor_bad_epoch() { // peer 1 thinks its always epoch 2.0 peer_1_config.peer_version = 0x18000000; - peer_1_config.epochs = 
Some(EpochList::new(&[StacksEpoch { + peer_1_config.chain_config.epochs = Some(EpochList::new(&[StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, end_height: STACKS_EPOCH_MAX, @@ -332,7 +332,7 @@ fn test_step_walk_1_neighbor_bad_epoch() { // peer 2 thinks its always epoch 2.05 peer_2_config.peer_version = 0x18000005; - peer_2_config.epochs = Some(EpochList::new(&[StacksEpoch { + peer_2_config.chain_config.epochs = Some(EpochList::new(&[StacksEpoch { epoch_id: StacksEpochId::Epoch2_05, start_height: 0, end_height: STACKS_EPOCH_MAX, @@ -1401,7 +1401,7 @@ fn test_step_walk_2_neighbors_rekey() { peer_2_config.connection_opts.disable_inv_sync = true; peer_2_config.connection_opts.disable_block_download = true; - let first_block_height = peer_1_config.current_block + 1; + let first_block_height = peer_1_config.chain_config.current_block + 1; // make keys expire soon peer_1_config.private_key_expire = first_block_height + 3; @@ -1500,13 +1500,13 @@ fn test_step_walk_2_neighbors_different_networks() { let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); let peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); - peer_1_config.network_id = peer_2_config.network_id + 1; + peer_1_config.chain_config.network_id = peer_2_config.chain_config.network_id + 1; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); let mut peer_1_neighbor = peer_1.to_neighbor(); - peer_1_neighbor.addr.network_id = peer_2.config.network_id; + peer_1_neighbor.addr.network_id = peer_2.config.chain_config.network_id; peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); peer_2.add_neighbor(&mut peer_1_neighbor, None, true); diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index c293346e42..64db7bd6d0 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -564,12 +564,11 @@ fn 
test_get_blocks_and_microblocks_3_peers_push_available() { peer_configs[2].add_neighbor(&peer_1); }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -581,12 +580,12 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { // cycle, since pushing block/microblock // announcements in reward cycles the remote // peer doesn't know about won't work. - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); if peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -607,10 +606,9 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), Some(stacks_block), @@ -628,7 +626,7 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { // work, and for (Micro)BlocksAvailable messages to be accepted let peer_1_nk = peers[1].to_neighbor().addr; let peer_2_nk = peers[2].to_neighbor().addr; - let bc = peers[1].config.burnchain.clone(); + let bc = peers[1].config.chain_config.burnchain.clone(); match peers[2].network.inv_state { Some(ref mut inv_state) => { if inv_state.get_stats(&peer_1_nk).is_none() { @@ -656,12 +654,11 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { } } - let tip = 
SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -860,12 +857,9 @@ fn push_block( dest ); - let sn = SortitionDB::get_block_snapshot_consensus( - peer.sortdb.as_ref().unwrap().conn(), - &consensus_hash, - ) - .unwrap() - .unwrap(); + let sn = SortitionDB::get_block_snapshot_consensus(peer.sortdb_ref().conn(), &consensus_hash) + .unwrap() + .unwrap(); let consensus_hash = sn.consensus_hash; let msg = StacksMessageType::Blocks(BlocksData { @@ -887,12 +881,9 @@ fn broadcast_block( block.block_hash(), ); - let sn = SortitionDB::get_block_snapshot_consensus( - peer.sortdb.as_ref().unwrap().conn(), - &consensus_hash, - ) - .unwrap() - .unwrap(); + let sn = SortitionDB::get_block_snapshot_consensus(peer.sortdb_ref().conn(), &consensus_hash) + .unwrap() + .unwrap(); let consensus_hash = sn.consensus_hash; let msg = StacksMessageType::Blocks(BlocksData { @@ -1091,12 +1082,11 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( } }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1104,12 +1094,12 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( // build up block data to replicate let mut block_data = vec![]; for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) 
+ .unwrap(); if peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -1129,10 +1119,9 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( peers[i].next_burnchain_block_raw(burn_ops.clone()); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), Some(stacks_block), @@ -1203,8 +1192,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( if let Some((consensus_hash, block, microblocks)) = data_to_push { test_debug!( - "Push block {}/{} and microblocks", - &consensus_hash, + "Push block {consensus_hash}/{} and microblocks", block.block_hash() ); @@ -1399,12 +1387,11 @@ fn test_get_blocks_and_microblocks_upload_blocks_http() { let peer_1 = peer_configs[1].to_neighbor(); }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1416,12 +1403,12 @@ fn test_get_blocks_and_microblocks_upload_blocks_http() { // cycle, since pushing block/microblock // announcements in reward cycles the remote // peer doesn't know about won't work. 
- let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); if peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -1442,10 +1429,9 @@ fn test_get_blocks_and_microblocks_upload_blocks_http() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), Some(stacks_block), @@ -1595,20 +1581,28 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { let initial_balances = vec![ ( PrincipalData::from( - peer_configs[0].spending_account.origin_address().unwrap(), + peer_configs[0] + .chain_config + .spending_account + .origin_address() + .unwrap(), ), 1000000, ), ( PrincipalData::from( - peer_configs[1].spending_account.origin_address().unwrap(), + peer_configs[1] + .chain_config + .spending_account + .origin_address() + .unwrap(), ), 1000000, ), ]; - peer_configs[0].initial_balances = initial_balances.clone(); - peer_configs[1].initial_balances = initial_balances; + peer_configs[0].chain_config.initial_balances = initial_balances.clone(); + peer_configs[1].chain_config.initial_balances = initial_balances; let peer_0 = peer_configs[0].to_neighbor(); let peer_1 = peer_configs[1].to_neighbor(); @@ -1617,12 +1611,11 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { peer_configs[1].add_neighbor(&peer_0); }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + 
.chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1630,12 +1623,12 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { // build up block data to replicate let mut block_data = vec![]; for b in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); if peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -1659,10 +1652,9 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { } } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), Some(stacks_block), @@ -1971,12 +1963,18 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { } let initial_balances = vec![( - PrincipalData::from(peer_configs[0].spending_account.origin_address().unwrap()), + PrincipalData::from( + peer_configs[0] + .chain_config + .spending_account + .origin_address() + .unwrap(), + ), 1000000, )]; for i in 0..peer_configs.len() { - peer_configs[i].initial_balances = initial_balances.clone(); + peer_configs[i].chain_config.initial_balances = initial_balances.clone(); } // connectivity @@ -1988,12 +1986,11 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { } }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -2001,12 +1998,12 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { 
// build up block data to replicate let mut block_data = vec![]; for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); if peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -2026,10 +2023,9 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), @@ -2300,12 +2296,11 @@ fn test_get_blocks_and_microblocks_2_peers_antientropy() { peer_configs[1].add_neighbor(&peer_0); }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -2313,12 +2308,12 @@ fn test_get_blocks_and_microblocks_2_peers_antientropy() { // build up block data to replicate let mut block_data = vec![]; for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); if peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -2338,10 +2333,9 @@ fn test_get_blocks_and_microblocks_2_peers_antientropy() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - 
peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), Some(stacks_block), @@ -2355,10 +2349,8 @@ fn test_get_blocks_and_microblocks_2_peers_antientropy() { for i in 1..peers.len() { peers[i].next_burnchain_block_raw(vec![]); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push((sn.consensus_hash.clone(), None, None)); block_data @@ -2432,12 +2424,11 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { peer_configs[1].add_neighbor(&peer_0); }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -2445,10 +2436,9 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { // build up block data to replicate let mut block_data = vec![]; for block_num in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); let (_, burn_header_hash, consensus_hash) = @@ -2467,10 +2457,9 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { all_sortitions.push(burn_ops.clone()); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + 
SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), Some(stacks_block), @@ -2515,12 +2504,12 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { debug!( "Push at {}, need {}", tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height + - peers[1].config.chain_config.burnchain.first_block_height - 1, *pushed_i ); if tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height + - peers[1].config.chain_config.burnchain.first_block_height - 1 == *pushed_i as u64 { @@ -2543,14 +2532,13 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { *pushed_i += 1; } debug!( - "Sortition at {}, need {}", + "Sortition at {}, need {i}", tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1, - *i + - peers[1].config.chain_config.burnchain.first_block_height + - 1 ); if tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height + - peers[1].config.chain_config.burnchain.first_block_height - 1 == *i as u64 { @@ -2569,7 +2557,7 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { for ((event_id, _neighbor_key), pending) in peers[1].network.pending_messages.iter() { - debug!("Pending at {} is ({}, {})", *i, event_id, pending.len()); + debug!("Pending at {i} is ({event_id}, {})", pending.len()); if !pending.is_empty() { update_sortition = true; } @@ -2732,8 +2720,8 @@ fn process_new_blocks_rejects_problematic_asts() { let initial_balances = vec![(addr.to_account_principal(), 100000000000)]; let mut peer_config = TestPeerConfig::new(function_name!(), 32019, 32020); - peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(EpochList::new(&[ + peer_config.chain_config.initial_balances = initial_balances; + peer_config.chain_config.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -2749,18 +2737,17 @@ fn 
process_new_blocks_rejects_problematic_asts() { network_epoch: PEER_VERSION_EPOCH_2_05, }, ])); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); // activate new AST rules right away let mut peer = TestPeer::new(peer_config); - let sortdb = peer.sortdb.take().unwrap(); - peer.sortdb = Some(sortdb); + let sortdb = peer.chain.sortdb.take().unwrap(); + peer.chain.sortdb = Some(sortdb); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb_ref().conn()).unwrap(); sn.block_height }; @@ -2775,8 +2762,7 @@ fn process_new_blocks_rejects_problematic_asts() { bytes.len() as u64 }; - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb_ref().conn()).unwrap(); let mblock_privk = StacksPrivateKey::random(); @@ -2838,8 +2824,7 @@ fn process_new_blocks_rejects_problematic_asts() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); peer.process_stacks_epoch(&block, &consensus_hash, &[]); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb_ref().conn()).unwrap(); let (burn_ops, bad_block, mut microblocks) = peer.make_tenure( |ref mut miner, @@ -3087,12 +3072,12 @@ fn process_new_blocks_rejects_problematic_asts() { .confirmed_microblocks .push((new_consensus_hash.clone(), vec![bad_mblock], 234)); - let mut sortdb = peer.sortdb.take().unwrap(); + let mut sortdb = peer.chain.sortdb.take().unwrap(); let (processed_blocks, processed_mblocks, relay_mblocks, bad_neighbors) = Relayer::process_new_blocks( &mut network_result, 
&mut sortdb, - &mut peer.stacks_node.as_mut().unwrap().chainstate, + &mut peer.chain.stacks_node.as_mut().unwrap().chainstate, None, ) .unwrap(); @@ -3107,7 +3092,7 @@ fn process_new_blocks_rejects_problematic_asts() { let txs_relayed = Relayer::process_transactions( &mut network_result, &sortdb, - &mut peer.stacks_node.as_mut().unwrap().chainstate, + &mut peer.chain.stacks_node.as_mut().unwrap().chainstate, peer.mempool.as_mut().unwrap(), None, ) @@ -3148,8 +3133,8 @@ fn test_block_pay_to_contract_gated_at_v210() { network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); - peer_config.epochs = Some(epochs); - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -3233,8 +3218,8 @@ fn test_block_pay_to_contract_gated_at_v210() { let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); match Relayer::process_new_anchored_block( &sortdb.index_conn(), &mut node.chainstate, @@ -3247,11 +3232,11 @@ fn test_block_pay_to_contract_gated_at_v210() { } Err(chainstate_error::InvalidStacksBlock(_)) => {} Err(e) => { - panic!("Got unexpected error {:?}", &e); + panic!("Got unexpected error {e:?}"); } }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); } // *now* it should succeed, since tenure 28 was in epoch 2.1 @@ -3259,8 +3244,8 @@ fn test_block_pay_to_contract_gated_at_v210() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = 
peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); match Relayer::process_new_anchored_block( &sortdb.index_conn(), &mut node.chainstate, @@ -3279,8 +3264,8 @@ fn test_block_pay_to_contract_gated_at_v210() { panic!("Got unexpected error {:?}", &e); } }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); } #[test] @@ -3288,7 +3273,13 @@ fn test_block_versioned_smart_contract_gated_at_v210() { let mut peer_config = TestPeerConfig::new(function_name!(), 4248, 4249); let initial_balances = vec![( - PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), + PrincipalData::from( + peer_config + .chain_config + .spending_account + .origin_address() + .unwrap(), + ), 1000000, )]; @@ -3323,9 +3314,9 @@ fn test_block_versioned_smart_contract_gated_at_v210() { }, ]); - peer_config.epochs = Some(epochs); - peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + peer_config.chain_config.initial_balances = initial_balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -3412,8 +3403,8 @@ fn test_block_versioned_smart_contract_gated_at_v210() { let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); match Relayer::process_new_anchored_block( &sortdb.index_conn(), &mut node.chainstate, @@ -3422,16 +3413,16 @@ fn test_block_versioned_smart_contract_gated_at_v210() { 123, ) { Ok(x) => { - eprintln!("{:?}", &stacks_block); + eprintln!("{stacks_block:?}"); panic!("Stored pay-to-contract 
stacks block before epoch 2.1"); } Err(chainstate_error::InvalidStacksBlock(_)) => {} Err(e) => { - panic!("Got unexpected error {:?}", &e); + panic!("Got unexpected error {e:?}"); } }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); } // *now* it should succeed, since tenure 28 was in epoch 2.1 @@ -3439,28 +3430,23 @@ fn test_block_versioned_smart_contract_gated_at_v210() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); + let x = Relayer::process_new_anchored_block( &sortdb.index_conn(), &mut node.chainstate, &consensus_hash, &stacks_block, 123, - ) { - Ok(x) => { - assert_eq!( - x, - BlockAcceptResponse::Accepted, - "Failed to process valid versioned smart contract block" - ); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + ) + .unwrap_or_else(|e| panic!("Got unexpected error {e:?}")); + assert_eq!( + x, + BlockAcceptResponse::Accepted, + "Failed to process valid versioned smart contract block" + ); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); } #[test] @@ -3468,7 +3454,13 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { let mut peer_config = TestPeerConfig::new(function_name!(), 4250, 4251); let initial_balances = vec![( - PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), + PrincipalData::from( + peer_config + .chain_config + .spending_account + .origin_address() + .unwrap(), + ), 1000000, )]; @@ -3503,9 +3495,9 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { }, ]); - peer_config.epochs = Some(epochs); - 
peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + peer_config.chain_config.initial_balances = initial_balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); let versioned_contract_opt: RefCell> = RefCell::new(None); @@ -3599,8 +3591,8 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); // the empty block should be accepted match Relayer::process_new_anchored_block( @@ -3623,7 +3615,7 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { }; // process it - peer.coord.handle_new_stacks_block().unwrap(); + peer.chain.coord.handle_new_stacks_block().unwrap(); // the mempool would reject a versioned contract transaction, since we're not yet at // tenure 28 @@ -3648,16 +3640,16 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { } }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); } // *now* it should succeed, since tenure 28 was in epoch 2.1 let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); match Relayer::process_new_anchored_block( @@ 
-3680,7 +3672,7 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { }; // process it - peer.coord.handle_new_stacks_block().unwrap(); + peer.chain.coord.handle_new_stacks_block().unwrap(); // the mempool would accept a versioned contract transaction, since we're not yet at // tenure 28 @@ -3696,8 +3688,8 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { panic!("will_admit_mempool_tx {:?}", &e); }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); } // TODO: process bans diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index f1c5fdf39f..0196406f6a 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -33,6 +33,7 @@ use crate::chainstate::nakamoto::NakamotoBlockHeader; use crate::chainstate::stacks::test::{make_codec_test_block, make_codec_test_microblock}; use crate::chainstate::stacks::tests::TestStacksNode; use crate::chainstate::stacks::*; +use crate::chainstate::tests::TestChainstate; use crate::core::*; use crate::net::relay::{AcceptedNakamotoBlocks, ProcessedNetReceipts, Relayer}; use crate::net::stackerdb::StackerDBs; @@ -60,13 +61,13 @@ impl ExitedPeer { Self { config: peer.config, network: peer.network, - sortdb: peer.sortdb, - miner: peer.miner, - stacks_node: peer.stacks_node, + sortdb: peer.chain.sortdb, + miner: peer.chain.miner, + stacks_node: peer.chain.stacks_node, relayer: peer.relayer, mempool: peer.mempool, - chainstate_path: peer.chainstate_path, - indexer: peer.indexer, + chainstate_path: peer.chain.chainstate_path, + indexer: peer.chain.indexer, } } @@ -91,7 +92,7 @@ impl ExitedPeer { ibd, 100, &RPCHandlerArgs::default(), - self.config.txindex, + self.config.chain_config.txindex, )?; let receipts_res = self.relayer.process_network_result( self.network.get_local_peer(), @@ -180,8 +181,8 @@ impl SeedNode { ) 
.unwrap(); - let mut test_signers = peer.config.test_signers.take().unwrap(); - let test_stackers = peer.config.test_stackers.take().unwrap(); + let mut test_signers = peer.config.chain_config.test_signers.take().unwrap(); + let test_stackers = peer.config.chain_config.test_stackers.take().unwrap(); let mut all_blocks: Vec = vec![]; @@ -208,9 +209,10 @@ impl SeedNode { tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let num_blocks: usize = (thread_rng().gen::() % 10) + 1; @@ -264,8 +266,8 @@ impl SeedNode { // relay these blocks let local_peer = peer.network.get_local_peer().clone(); - let sortdb = peer.sortdb.take().unwrap(); - let stacks_node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let stacks_node = peer.chain.stacks_node.take().unwrap(); peer.relayer.relay_epoch3_blocks( &local_peer, @@ -276,8 +278,8 @@ impl SeedNode { }], ); - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(stacks_node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(stacks_node); // send the blocks to the unit test as well if comms @@ -291,11 +293,12 @@ impl SeedNode { // if we're starting a new reward cycle, then save the current one let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); + let sort_db = peer.chain.sortdb.as_mut().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; if peer .config + .chain_config .burnchain .is_reward_cycle_start(tip.block_height) { @@ -305,8 +308,8 @@ impl SeedNode { all_blocks.append(&mut blocks); } - peer.config.test_signers = Some(test_signers); - peer.config.test_stackers = Some(test_stackers); + peer.config.chain_config.test_signers = Some(test_signers); + 
peer.config.chain_config.test_stackers = Some(test_stackers); let exited_peer = ExitedPeer::from_test_peer(peer); @@ -534,7 +537,7 @@ fn test_no_buffer_ready_nakamoto_blocks() { let peer_nk = peer.to_neighbor().addr; let mut follower = followers.pop().unwrap(); - let test_path = TestPeer::make_test_path(&follower.config); + let test_path = TestChainstate::make_test_path(&follower.config.chain_config); let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); let mut follower_relayer = Relayer::from_p2p(&mut follower.network, follower_stacker_dbs); @@ -570,8 +573,8 @@ fn test_no_buffer_ready_nakamoto_blocks() { Some(SeedData::Blocks(blocks)) => { debug!("Follower got Nakamoto blocks {:?}", &blocks); - let mut sortdb = follower.sortdb.take().unwrap(); - let mut node = follower.stacks_node.take().unwrap(); + let mut sortdb = follower.chain.sortdb.take().unwrap(); + let mut node = follower.chain.stacks_node.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -728,8 +731,8 @@ fn test_no_buffer_ready_nakamoto_blocks() { )); } - follower.stacks_node = Some(node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(node); + follower.chain.sortdb = Some(sortdb); } Some(SeedData::Exit(exited)) => { debug!("Follower got seed exit"); @@ -739,20 +742,24 @@ fn test_no_buffer_ready_nakamoto_blocks() { } } - follower.coord.handle_new_burnchain_block().unwrap(); - follower.coord.handle_new_stacks_block().unwrap(); - follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + follower.chain.coord.handle_new_burnchain_block().unwrap(); + follower.chain.coord.handle_new_stacks_block().unwrap(); + follower + .chain + .coord + .handle_new_nakamoto_stacks_block() + .unwrap(); } // compare chain tips - let sortdb = follower.sortdb.take().unwrap(); - let stacks_node = follower.stacks_node.take().unwrap(); + let sortdb = 
follower.chain.sortdb.take().unwrap(); + let stacks_node = follower.chain.stacks_node.take().unwrap(); let follower_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let follower_stacks_tip = NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) .unwrap(); - follower.stacks_node = Some(stacks_node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(stacks_node); + follower.chain.sortdb = Some(sortdb); let mut exited_peer = exited_peer.unwrap(); let sortdb = exited_peer.sortdb.take().unwrap(); @@ -785,7 +792,7 @@ fn test_buffer_nonready_nakamoto_blocks() { let peer_nk = peer.to_neighbor().addr; let mut follower = followers.pop().unwrap(); - let test_path = TestPeer::make_test_path(&follower.config); + let test_path = TestChainstate::make_test_path(&follower.config.chain_config); let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); let mut follower_relayer = Relayer::from_p2p(&mut follower.network, follower_stacker_dbs); @@ -850,8 +857,8 @@ fn test_buffer_nonready_nakamoto_blocks() { debug!("Follower got Nakamoto blocks {:?}", &blocks); all_blocks.push(blocks.clone()); - let sortdb = follower.sortdb.take().unwrap(); - let node = follower.stacks_node.take().unwrap(); + let sortdb = follower.chain.sortdb.take().unwrap(); + let node = follower.chain.stacks_node.take().unwrap(); // we will need to buffer this since the sortition for these blocks hasn't been // processed yet @@ -908,8 +915,8 @@ fn test_buffer_nonready_nakamoto_blocks() { true, ); - follower.stacks_node = Some(node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(node); + follower.chain.sortdb = Some(sortdb); } Some(SeedData::Exit(exited)) => { debug!("Follower got seed exit"); @@ -928,8 +935,8 @@ fn test_buffer_nonready_nakamoto_blocks() { } // process the last buffered messages - let mut sortdb = 
follower.sortdb.take().unwrap(); - let mut node = follower.stacks_node.take().unwrap(); + let mut sortdb = follower.chain.sortdb.take().unwrap(); + let mut node = follower.chain.stacks_node.take().unwrap(); if let Some(mut network_result) = network_result.take() { follower_relayer.process_new_epoch3_blocks( @@ -943,8 +950,8 @@ fn test_buffer_nonready_nakamoto_blocks() { ); } - follower.stacks_node = Some(node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(node); + follower.chain.sortdb = Some(sortdb); network_result = follower .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) @@ -957,8 +964,8 @@ fn test_buffer_nonready_nakamoto_blocks() { } if let Some(mut network_result) = network_result.take() { - let mut sortdb = follower.sortdb.take().unwrap(); - let mut node = follower.stacks_node.take().unwrap(); + let mut sortdb = follower.chain.sortdb.take().unwrap(); + let mut node = follower.chain.stacks_node.take().unwrap(); let num_processed = follower_relayer.process_new_epoch3_blocks( follower.network.get_local_peer(), &mut network_result, @@ -969,24 +976,28 @@ fn test_buffer_nonready_nakamoto_blocks() { None, ); info!("Processed {} unsolicited Nakamoto blocks", num_processed); - follower.stacks_node = Some(node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(node); + follower.chain.sortdb = Some(sortdb); } - follower.coord.handle_new_burnchain_block().unwrap(); - follower.coord.handle_new_stacks_block().unwrap(); - follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + follower.chain.coord.handle_new_burnchain_block().unwrap(); + follower.chain.coord.handle_new_stacks_block().unwrap(); + follower + .chain + .coord + .handle_new_nakamoto_stacks_block() + .unwrap(); } // compare chain tips - let sortdb = follower.sortdb.take().unwrap(); - let stacks_node = follower.stacks_node.take().unwrap(); + let sortdb = follower.chain.sortdb.take().unwrap(); + let stacks_node = 
follower.chain.stacks_node.take().unwrap(); let follower_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let follower_stacks_tip = NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) .unwrap(); - follower.stacks_node = Some(stacks_node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(stacks_node); + follower.chain.sortdb = Some(sortdb); let mut exited_peer = exited_peer.unwrap(); let sortdb = exited_peer.sortdb.take().unwrap(); @@ -1024,7 +1035,7 @@ fn test_nakamoto_boot_node_from_block_push() { let peer_nk = peer.to_neighbor().addr; let mut follower = followers.pop().unwrap(); - let test_path = TestPeer::make_test_path(&follower.config); + let test_path = TestChainstate::make_test_path(&follower.config.chain_config); let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); @@ -1074,9 +1085,13 @@ fn test_nakamoto_boot_node_from_block_push() { } } - follower.coord.handle_new_burnchain_block().unwrap(); - follower.coord.handle_new_stacks_block().unwrap(); - follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + follower.chain.coord.handle_new_burnchain_block().unwrap(); + follower.chain.coord.handle_new_stacks_block().unwrap(); + follower + .chain + .coord + .handle_new_nakamoto_stacks_block() + .unwrap(); } // recover exited peer and get its chain tips @@ -1100,8 +1115,8 @@ fn test_nakamoto_boot_node_from_block_push() { .unwrap(); // compare chain tips - let sortdb = follower.sortdb.take().unwrap(); - let stacks_node = follower.stacks_node.take().unwrap(); + let sortdb = follower.chain.sortdb.take().unwrap(); + let stacks_node = follower.chain.stacks_node.take().unwrap(); let follower_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let follower_stacks_tip = NakamotoChainState::get_canonical_block_header( @@ -1109,8 +1124,8 @@ fn 
test_nakamoto_boot_node_from_block_push() { &sortdb, ) .unwrap(); - follower.stacks_node = Some(stacks_node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(stacks_node); + follower.chain.sortdb = Some(sortdb); debug!("{}: Follower sortition tip: {:?}", i, &follower_burn_tip); debug!("{}: Seed sortition tip: {:?}", i, &exited_peer_burn_tip); diff --git a/versions.toml b/versions.toml index 9a0ae15341..21949185d8 100644 --- a/versions.toml +++ b/versions.toml @@ -1,4 +1,4 @@ # Update these values when a new release is created. # `stacks-common/build.rs` will automatically update `versions.rs` with these values. -stacks_node_version = "3.2.0.0.1" -stacks_signer_version = "3.2.0.0.1.1" +stacks_node_version = "3.2.0.0.2" +stacks_signer_version = "3.2.0.0.2.0" From 61f9a62c9019acb3bd92929e01095f6549db6a6c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 7 Oct 2025 13:19:06 -0400 Subject: [PATCH 79/86] chore: update `block-time` --> `stacks-block-time` This is both more clear and more consistent, since it matches `stacks-block-height`. --- CHANGELOG.md | 6 ++++++ .../src/vm/analysis/arithmetic_checker/mod.rs | 2 +- .../src/vm/analysis/type_checker/v2_05/mod.rs | 4 ++-- .../src/vm/analysis/type_checker/v2_1/mod.rs | 2 +- clarity/src/vm/docs/mod.rs | 8 +++---- clarity/src/vm/tests/variables.rs | 16 +++++++------- clarity/src/vm/variables.rs | 4 ++-- .../src/tests/nakamoto_integrations.rs | 21 +++++++++++-------- 8 files changed, 37 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 72de8c0556..619a4571ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
+## Unreleased + +### Changed + +- Renamed Clarity 4's new `block-time` to `stacks-block-time` + ## [3.2.0.0.2] ### Added diff --git a/clarity/src/vm/analysis/arithmetic_checker/mod.rs b/clarity/src/vm/analysis/arithmetic_checker/mod.rs index 83be68ab23..cfacd5b9ee 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/mod.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/mod.rs @@ -143,7 +143,7 @@ impl ArithmeticOnlyChecker<'_> { match native_var { ContractCaller | TxSender | TotalLiquidMicroSTX | BlockHeight | BurnBlockHeight | Regtest | TxSponsor | Mainnet | ChainId | StacksBlockHeight | TenureHeight - | BlockTime | CurrentContract => Err(Error::VariableForbidden(native_var)), + | StacksBlockTime | CurrentContract => Err(Error::VariableForbidden(native_var)), NativeNone | NativeTrue | NativeFalse => Ok(()), } } else { diff --git a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs index 86a10c264d..12686fea8b 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs @@ -330,9 +330,9 @@ fn type_reserved_variable(variable_name: &str) -> Result, NativeFalse => TypeSignature::BoolType, TotalLiquidMicroSTX => TypeSignature::UIntType, Regtest => TypeSignature::BoolType, - TxSponsor | Mainnet | ChainId | StacksBlockHeight | TenureHeight | BlockTime | CurrentContract => { + TxSponsor | Mainnet | ChainId | StacksBlockHeight | TenureHeight | StacksBlockTime | CurrentContract => { return Err(CheckErrors::Expects( - "tx-sponsor, mainnet, chain-id, stacks-block-height, tenure-height, block-time, and current-contract should not reach here in 2.05".into(), + "tx-sponsor, mainnet, chain-id, stacks-block-height, tenure-height, stacks-block-time, and current-contract should not reach here in 2.05".into(), ) .into()) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index 1fa7b19ed3..29f0a8e715 100644 
--- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -1023,7 +1023,7 @@ fn type_reserved_variable( Mainnet => TypeSignature::BoolType, ChainId => TypeSignature::UIntType, CurrentContract => TypeSignature::PrincipalType, - BlockTime => TypeSignature::UIntType, + StacksBlockTime => TypeSignature::UIntType, }; Ok(Some(var_type)) } else { diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 148247e59a..8ab926834a 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -154,13 +154,13 @@ At the start of epoch 3.0, `tenure-height` will return the same value as `block- }; const BLOCK_TIME_KEYWORD: SimpleKeywordAPI = SimpleKeywordAPI { - name: "block-time", - snippet: "block-time", + name: "stacks-block-time", + snippet: "stacks-block-time", output_type: "uint", description: "Returns the Unix timestamp (in seconds) of the current Stacks block. Introduced in Clarity 4. Provides access to the timestamp of the current block, which is not available with `get-stacks-block-info?`.", - example: "(>= block-time u1755820800) ;; returns true if current block timestamp is at or after 2025-07-22.", + example: "(>= stacks-block-time u1755820800) ;; returns true if current block timestamp is at or after 2025-07-22.", }; const TX_SENDER_KEYWORD: SimpleKeywordAPI = SimpleKeywordAPI { @@ -2700,7 +2700,7 @@ pub fn make_keyword_reference(variable: &NativeVariables) -> Option NativeVariables::ChainId => CHAINID_KEYWORD.clone(), NativeVariables::TxSponsor => TX_SPONSOR_KEYWORD.clone(), NativeVariables::CurrentContract => CURRENT_CONTRACT_KEYWORD.clone(), - NativeVariables::BlockTime => BLOCK_TIME_KEYWORD.clone(), + NativeVariables::StacksBlockTime => BLOCK_TIME_KEYWORD.clone(), }; Some(KeywordAPI { name: keyword.name, diff --git a/clarity/src/vm/tests/variables.rs b/clarity/src/vm/tests/variables.rs index 7ce5095846..852533a001 100644 --- a/clarity/src/vm/tests/variables.rs +++ 
b/clarity/src/vm/tests/variables.rs @@ -1110,7 +1110,7 @@ fn test_block_time( epoch: StacksEpochId, mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, ) { - let contract = "(define-read-only (test-func) block-time)"; + let contract = "(define-read-only (test-func) stacks-block-time)"; let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); @@ -1125,11 +1125,11 @@ fn test_block_time( type_check_version(&contract_identifier, &mut exprs, db, true, epoch, version) }); - // block-time should only be available in Clarity 4 + // stacks-block-time should only be available in Clarity 4 if version < ClarityVersion::Clarity4 { let err = analysis.unwrap_err(); assert_eq!( - CheckErrors::UndefinedVariable("block-time".to_string()), + CheckErrors::UndefinedVariable("stacks-block-time".to_string()), *err.err ); } else { @@ -1156,7 +1156,9 @@ fn test_block_time( if version < ClarityVersion::Clarity4 { let err = eval_result.unwrap_err(); assert_eq!( - Error::Unchecked(CheckErrors::UndefinedVariable("block-time".to_string(),)), + Error::Unchecked(CheckErrors::UndefinedVariable( + "stacks-block-time".to_string(), + )), err ); } else { @@ -1173,11 +1175,11 @@ fn test_block_time_in_expressions() { let contract = r#" (define-read-only (time-comparison (threshold uint)) - (>= block-time threshold)) + (>= stacks-block-time threshold)) (define-read-only (time-arithmetic) - (+ block-time u100)) + (+ stacks-block-time u100)) (define-read-only (time-in-response) - (ok block-time)) + (ok stacks-block-time)) "#; let placeholder_context = diff --git a/clarity/src/vm/variables.rs b/clarity/src/vm/variables.rs index cb84336dc9..d84be69215 100644 --- a/clarity/src/vm/variables.rs +++ b/clarity/src/vm/variables.rs @@ -40,7 +40,7 @@ define_versioned_named_enum_with_max!(NativeVariables(ClarityVersion) { ChainId("chain-id", ClarityVersion::Clarity2, None), StacksBlockHeight("stacks-block-height", ClarityVersion::Clarity3, None), 
TenureHeight("tenure-height", ClarityVersion::Clarity3, None), - BlockTime("block-time", ClarityVersion::Clarity4, None), + StacksBlockTime("stacks-block-time", ClarityVersion::Clarity4, None), CurrentContract("current-contract", ClarityVersion::Clarity4, None) }); @@ -140,7 +140,7 @@ pub fn lookup_reserved_variable( let contract = env.contract_context.contract_identifier.clone(); Ok(Some(Value::Principal(PrincipalData::Contract(contract)))) } - NativeVariables::BlockTime => { + NativeVariables::StacksBlockTime => { runtime_cost(ClarityCostFunction::FetchVar, env, 1)?; let block_time = env.global_context.database.get_current_block_time()?; Ok(Some(Value::UInt(u128::from(block_time)))) diff --git a/stacks-node/src/tests/nakamoto_integrations.rs b/stacks-node/src/tests/nakamoto_integrations.rs index 9fe72b8887..ed27609f1c 100644 --- a/stacks-node/src/tests/nakamoto_integrations.rs +++ b/stacks-node/src/tests/nakamoto_integrations.rs @@ -15046,7 +15046,7 @@ fn contract_limit_percentage_mempool_strategy_low_limit() { #[test] #[ignore] -/// Verify the block timestamp using `block-time`. +/// Verify the block timestamp using `stacks-block-time`. fn check_block_time_keyword() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; @@ -15153,25 +15153,25 @@ fn check_block_time_keyword() { let mut sender_nonce = 0; let contract_name = "test-contract"; let contract = r#" -(define-constant deploy-time block-time) +(define-constant deploy-time stacks-block-time) (define-constant deploy-height stacks-block-height) (define-read-only (get-current-time) - block-time + stacks-block-time ) (define-read-only (get-ihh (height uint)) (get-stacks-block-info? id-header-hash height)) (define-read-only (get-time (height uint)) (get-stacks-block-info? time height)) (define-read-only (get-height) stacks-block-height) (define-read-only (get-previous-time (height uint)) (ok (at-block (unwrap! (get-stacks-block-info? 
id-header-hash height) (err u100)) - block-time + stacks-block-time )) ) (define-public (get-current-time-call) - (ok block-time) + (ok stacks-block-time) ) (define-public (get-previous-time-call (height uint)) (ok (at-block (unwrap! (get-stacks-block-info? id-header-hash height) (err u100)) - block-time + stacks-block-time )) ) "#; @@ -15217,7 +15217,7 @@ fn check_block_time_keyword() { let current_time = current_time_value.expect_u128().unwrap(); assert!( current_time > deploy_time, - "block-time should be greater than the time at deployment" + "stacks-block-time should be greater than the time at deployment" ); let previous_time_result = call_read_only( @@ -15288,13 +15288,16 @@ fn check_block_time_keyword() { match contract_call.function_name.as_str() { "get-current-time-call" => { info!("Current time: {}", time); - assert!(time > current_time, "block-time should have advanced"); + assert!( + time > current_time, + "stacks-block-time should have advanced" + ); } "get-previous-time-call" => { info!("Previous time: {}", time); assert_eq!( time, deploy_time, - "block-time should be the same as at deployment" + "stacks-block-time should be the same as at deployment" ); } _ => panic!("Unexpected contract call"), From 90e6d8b78a052d1a51d2c5c48ab7f98accded141 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 7 Oct 2025 10:33:32 -0700 Subject: [PATCH 80/86] use larger runner for release builds --- .github/workflows/github-release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml index d3f20ba763..ccff1185fd 100644 --- a/.github/workflows/github-release.yml +++ b/.github/workflows/github-release.yml @@ -62,7 +62,7 @@ jobs: inputs.node_tag != '' || inputs.signer_tag != '' name: Build Binaries - runs-on: ubuntu-latest + runs-on: ubuntu-latest-m needs: - andon-cord permissions: From b1f0ccce2765d99cb23ea1e7d43e2500286f3dba Mon Sep 17 
00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 7 Oct 2025 11:29:46 -0700 Subject: [PATCH 81/86] update dockerfiles for release --- .github/actions/dockerfiles/Dockerfile.alpine-binary | 2 +- .github/actions/dockerfiles/Dockerfile.debian-binary | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/dockerfiles/Dockerfile.alpine-binary b/.github/actions/dockerfiles/Dockerfile.alpine-binary index 310958faf0..691d39ee3d 100644 --- a/.github/actions/dockerfiles/Dockerfile.alpine-binary +++ b/.github/actions/dockerfiles/Dockerfile.alpine-binary @@ -31,7 +31,7 @@ RUN case "${TAG}" in \ ;; \ *) \ echo "/bin/stacks-node mainnet" > /tmp/command.sh && \ - rm /bin/blockstack-cli /bin/clarity-cli /bin/stacks-events /bin/stacks-inspect \ + rm /bin/stacks-cli /bin/clarity-cli /bin/stacks-inspect \ ;; \ esac && \ chmod +x /tmp/command.sh diff --git a/.github/actions/dockerfiles/Dockerfile.debian-binary b/.github/actions/dockerfiles/Dockerfile.debian-binary index 135cb0319d..83687722d3 100644 --- a/.github/actions/dockerfiles/Dockerfile.debian-binary +++ b/.github/actions/dockerfiles/Dockerfile.debian-binary @@ -31,7 +31,7 @@ RUN case "${TAG}" in \ ;; \ *) \ echo "/bin/stacks-node mainnet" > /tmp/command.sh && \ - rm /bin/blockstack-cli /bin/clarity-cli /bin/stacks-events /bin/stacks-inspect \ + rm /bin/stacks-cli /bin/clarity-cli /bin/stacks-inspect \ ;; \ esac && \ chmod +x /tmp/command.sh From ef3aa827a32fce64df202d37cd0cf614b18dc222 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Wed, 8 Oct 2025 12:08:06 +0200 Subject: [PATCH 82/86] chore: address test compile issue on windows requiring specify generic argument --- stacks-signer/src/client/mod.rs | 2 +- .../src/chainstate/nakamoto/test_signers.rs | 18 +++++++---- .../src/chainstate/nakamoto/tests/mod.rs | 30 +++++++++++++++---- 3 files changed, 37 insertions(+), 13 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs 
index e518ff4a7b..11014a4a6c 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -224,7 +224,7 @@ pub(crate) mod tests { pub fn generate_random_consensus_hash() -> ConsensusHash { let rng = rand::thread_rng(); - let bytes: Vec = rng.sample_iter(Standard).take(20).collect(); + let bytes: Vec = rng.sample_iter::(Standard).take(20).collect(); let mut hash = [0u8; 20]; hash.copy_from_slice(&bytes); ConsensusHash(hash) diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 1aab6b4467..c7a6cb72f8 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -44,8 +44,10 @@ pub struct TestSigners { impl Default for TestSigners { fn default() -> Self { - let aggregate_public_key: Vec = - rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_public_key: Vec = rand::thread_rng() + .sample_iter::(Standard) + .take(33) + .collect(); let num_signers = 5; let threshold = 5 * 7 / 10; @@ -70,8 +72,10 @@ impl TestSigners { /// Internal function to generate aggregate key information fn default_with_signers(signer_keys: Vec) -> Self { - let aggregate_public_key: Vec = - rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_public_key: Vec = rand::thread_rng() + .sample_iter::(Standard) + .take(33) + .collect(); let num_signers = signer_keys.len(); let threshold = u32::try_from(num_signers * 7 / 10).unwrap(); Self { @@ -227,8 +231,10 @@ impl TestSigners { return self.aggregate_public_key.clone(); } - let aggregate_public_key: Vec = - rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_public_key: Vec = rand::thread_rng() + .sample_iter::(Standard) + .take(33) + .collect(); self.aggregate_public_key.clone_from(&aggregate_public_key); aggregate_public_key } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs 
b/stackslib/src/chainstate/nakamoto/tests/mod.rs index d734623333..6e59899a54 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -2270,7 +2270,10 @@ fn parse_vote_for_aggregate_public_key_valid() { let signer_index = thread_rng().next_u64(); let signer_index_arg = Value::UInt(signer_index as u128); - let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key: Vec = rand::thread_rng() + .sample_iter::(Standard) + .take(33) + .collect(); let aggregate_key_arg = Value::buff_from(aggregate_key.clone()).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2316,7 +2319,10 @@ fn parse_vote_for_aggregate_public_key_invalid() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key: Vec = rand::thread_rng() + .sample_iter::(Standard) + .take(33) + .collect(); let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2498,7 +2504,10 @@ fn valid_vote_transaction() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key: Vec = rand::thread_rng() + .sample_iter::(Standard) + .take(33) + .collect(); let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2548,7 +2557,10 @@ fn valid_vote_transaction_malformed_transactions() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let aggregate_key: Vec = 
rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key: Vec = rand::thread_rng() + .sample_iter::(Standard) + .take(33) + .collect(); let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2782,7 +2794,10 @@ fn filter_one_transaction_per_signer_multiple_addresses() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key: Vec = rand::thread_rng() + .sample_iter::(Standard) + .take(33) + .collect(); let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2910,7 +2925,10 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key: Vec = rand::thread_rng() + .sample_iter::(Standard) + .take(33) + .collect(); let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); From 12521f01fbaba315b0d805d147c6c559e9f771c1 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Wed, 8 Oct 2025 12:30:55 +0200 Subject: [PATCH 83/86] refactor: aac update test marf computation --- stackslib/src/chainstate/tests/consensus.rs | 86 ++++++++++----------- 1 file changed, 39 insertions(+), 47 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 3647caf64d..850f6891db 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -38,7 +38,7 @@ use 
crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::{StacksChainState, StacksEpochReceipt}; +use crate::chainstate::stacks::db::{ClarityTx, StacksChainState, StacksEpochReceipt}; use crate::chainstate::stacks::{ Error as ChainstateError, StacksTransaction, TenureChangeCause, MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, @@ -209,8 +209,6 @@ impl From> for ExpectedResult { /// Represents a block to be appended in a test and its expected result. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct TestBlock { - /// Hex representation of the MARF hash for block construction. - pub marf_hash: String, /// Transactions to include in the block pub transactions: Vec, } @@ -430,13 +428,17 @@ impl ConsensusTest<'_> { }; block.header.tx_merkle_root = tx_merkle_root; - // Set the MARF root hash: compute it for success cases, - // or use an all-zero hash for failure cases. - block.header.state_index_root = if test_block.is_success() { - self.compute_block_marf_root_hash(block.header.timestamp, &block.txs) - } else { - TrieHash::from_bytes(&[0; 32]).unwrap() + // Set the MARF root hash or use an all-zero hash in case of failure. + // NOTE: It is expected to fail when trying computing the marf for invalid block/transactions. 
+ /* + let marf_result = self.compute_block_marf_root_hash(block.header.timestamp, &block.txs); + block.header.state_index_root = match marf_result { + Ok(marf) => marf, + Err(_) => TrieHash::from_bytes(&[0; 32]).unwrap(), }; + */ + + block.header.state_index_root = TrieHash::from_bytes(&[0; 32]).unwrap(); self.chain.miner.sign_nakamoto_block(&mut block); let mut signers = self.chain.config.test_signers.clone().unwrap_or_default(); @@ -457,7 +459,7 @@ impl ConsensusTest<'_> { &mut self, block_time: u64, block_txs: &Vec, - ) -> TrieHash { + ) -> Result { let node = self.chain.stacks_node.as_mut().unwrap(); let sortdb = self.chain.sortdb.as_ref().unwrap(); let burndb_conn = sortdb.index_handle_at_tip(); @@ -479,7 +481,26 @@ impl ConsensusTest<'_> { &MINER_BLOCK_CONSENSUS_HASH, &MINER_BLOCK_HEADER_HASH, ); + let result = Self::inner_compute_block_marf_root_hash( + &mut clarity_tx, + block_time, + block_txs, + chain_tip.burn_header_height, + ); + clarity_tx.rollback_block(); + return result; + } + /// This is where the real MARF computation happens. + /// It is extrapolated into an _inner_ method to simplify rollback handling, + /// ensuring that rollback can be applied consistently on both success and failure + /// in the _outer_ method. 
+ fn inner_compute_block_marf_root_hash( + clarity_tx: &mut ClarityTx, + block_time: u64, + block_txs: &Vec, + burn_header_height: u32, + ) -> Result { clarity_tx .connection() .as_free_transaction(|clarity_tx_conn| { @@ -488,22 +509,17 @@ impl ConsensusTest<'_> { Ok(()) }) }) - .expect("MARF: Failure on block metadata setup!"); + .map_err(|e| e.to_string())?; - StacksChainState::process_block_transactions(&mut clarity_tx, block_txs, 0) - .expect("MARF: Failure on processing block transactions!"); + StacksChainState::process_block_transactions(clarity_tx, block_txs, 0) + .map_err(|e| e.to_string())?; - NakamotoChainState::finish_block( - &mut clarity_tx, - None, - false, - chain_tip.burn_header_height, - ) - .expect("MARF: Failure on finishing block!"); + NakamotoChainState::finish_block(clarity_tx, None, false, burn_header_height) + .map_err(|e| e.to_string())?; let trie_hash = clarity_tx.seal(); - clarity_tx.rollback_block(); - return trie_hash; + //clarity_tx.rollback_block(); + Ok(trie_hash) } } @@ -513,28 +529,24 @@ fn test_append_empty_blocks() { epoch_blocks.insert( StacksEpochId::Epoch30, vec![TestBlock { - marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), transactions: vec![], }], ); epoch_blocks.insert( StacksEpochId::Epoch31, vec![TestBlock { - marf_hash: "a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), transactions: vec![], }], ); epoch_blocks.insert( StacksEpochId::Epoch32, vec![TestBlock { - marf_hash: "c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into(), transactions: vec![], }], ); epoch_blocks.insert( StacksEpochId::Epoch33, vec![TestBlock { - marf_hash: "23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into(), transactions: vec![], }], ); @@ -547,34 +559,30 @@ fn test_append_empty_blocks() { insta::assert_ron_snapshot!(result); } -#[test] +//#[test] fn test_append_state_index_root_mismatches() { let mut epoch_blocks = HashMap::new(); 
epoch_blocks.insert( StacksEpochId::Epoch30, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![], }], ); epoch_blocks.insert( StacksEpochId::Epoch31, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![], }], ); epoch_blocks.insert( StacksEpochId::Epoch32, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![], }], ); epoch_blocks.insert( StacksEpochId::Epoch33, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![], }], ); @@ -620,28 +628,24 @@ fn test_append_stx_transfers_success() { epoch_blocks.insert( StacksEpochId::Epoch30, vec![TestBlock { - marf_hash: "63ea49669d2216ebc7e4f8b5e1cd2c99b8aff9806794adf87dcf709c0a244798".into(), transactions: transactions.clone(), }], ); epoch_blocks.insert( StacksEpochId::Epoch31, vec![TestBlock { - marf_hash: "7fc538e605a4a353871c4a655ae850fe9a70c3875b65f2bb42ea3bef5effed2c".into(), transactions: transactions.clone(), }], ); epoch_blocks.insert( StacksEpochId::Epoch32, vec![TestBlock { - marf_hash: "4d5c9a6d07806ac5006137de22b083de66fff7119143dd5cd92e4a457d66e028".into(), transactions: transactions.clone(), }], ); epoch_blocks.insert( StacksEpochId::Epoch33, vec![TestBlock { - marf_hash: "66eed8c0ab31db111a5adcc83d38a7004c6e464e3b9fb9f52ec589bc6d5f2d32".into(), transactions: transactions.clone(), }], ); @@ -678,28 +682,24 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { epoch_blocks.insert( StacksEpochId::Epoch30, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![tx.clone()], }], ); epoch_blocks.insert( StacksEpochId::Epoch31, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: 
vec![tx.clone()], }], ); epoch_blocks.insert( StacksEpochId::Epoch32, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![tx.clone()], }], ); epoch_blocks.insert( StacksEpochId::Epoch33, vec![TestBlock { - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), transactions: vec![tx.clone()], }], ); @@ -732,28 +732,24 @@ fn test_append_block_with_contract_upload_success() { epoch_blocks.insert( StacksEpochId::Epoch30, vec![TestBlock { - marf_hash: "b45acd35f4c48a834a2f898ca8bb6c48416ac6bec9d8a3f3662b61ab97b1edde".into(), transactions: vec![tx.clone()], }], ); epoch_blocks.insert( StacksEpochId::Epoch31, vec![TestBlock { - marf_hash: "521d75234ec6c64f68648b6b0f6f385d89b58efb581211a411e0e88aa71f3371".into(), transactions: vec![tx.clone()], }], ); epoch_blocks.insert( StacksEpochId::Epoch32, vec![TestBlock { - marf_hash: "511e1cc37e83ef3de4ea56962574d6ddd2d8840d24d9238f19eee5a35127df6a".into(), transactions: vec![tx.clone()], }], ); epoch_blocks.insert( StacksEpochId::Epoch33, vec![TestBlock { - marf_hash: "3520c2dd96f7d91e179c4dcd00f3c49c16d6ec21434fb16921922558282eab26".into(), transactions: vec![tx.clone()], }], ); @@ -829,7 +825,6 @@ fn test_append_block_with_contract_call_success() { epoch_blocks.insert( StacksEpochId::Epoch30, vec![TestBlock { - marf_hash: "186c8e49bcfc59bb67ed22f031f009a44681f296392e0f92bed520918ba463ae".into(), transactions: vec![tx_contract_deploy.clone(), tx_contract_call.clone()], }], ); @@ -837,7 +832,6 @@ fn test_append_block_with_contract_call_success() { epoch_blocks.insert( StacksEpochId::Epoch31, vec![TestBlock { - marf_hash: "ad23713f072473cad6a32125ed5fa822bb62bbfae8ed2302209c12d2f1958128".into(), transactions: vec![tx_contract_deploy.clone(), tx_contract_call.clone()], }], ); @@ -845,7 +839,6 @@ fn test_append_block_with_contract_call_success() { epoch_blocks.insert( StacksEpochId::Epoch32, vec![TestBlock { - marf_hash: 
"021bd30b09b5ac6ff34abd11f05244a966af937b584b1752f272cd717bb25f1d".into(), transactions: vec![tx_contract_deploy.clone(), tx_contract_call.clone()], }], ); @@ -853,7 +846,6 @@ fn test_append_block_with_contract_call_success() { epoch_blocks.insert( StacksEpochId::Epoch33, vec![TestBlock { - marf_hash: "416e728daeec4de695c89d15eede8ddb7b85fb4af82daffb1e0d8166a3e93451".into(), transactions: vec![tx_contract_deploy, tx_contract_call], }], ); From 111595a9e2f9d2df65e9a4733975d381f97dc956 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Wed, 8 Oct 2025 15:03:08 +0200 Subject: [PATCH 84/86] feat: add marf_hash to ExpectedBlockOutput --- stackslib/src/chainstate/tests/consensus.rs | 210 ++++++++++-------- ...pend_block_with_contract_call_success.snap | 185 ++++++++++++--- ...tests__consensus__append_empty_blocks.snap | 4 + ...s__append_state_index_root_mismatches.snap | 10 - ...nsensus__append_stx_transfers_success.snap | 4 + 5 files changed, 279 insertions(+), 134 deletions(-) delete mode 100644 stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_state_index_root_mismatches.snap diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 850f6891db..f224f72646 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -166,6 +166,8 @@ pub struct ExpectedTransactionOutput { /// Represents the expected outputs for a block's execution. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ExpectedBlockOutput { + /// The expected block marf + pub marf_hash: TrieHash, /// The expected outputs for each transaction, in input order. pub transactions: Vec, /// The total execution cost of the block. 
@@ -183,8 +185,11 @@ pub enum ExpectedResult { Failure(String), } -impl From> for ExpectedResult { - fn from(result: Result) -> Self { +impl ExpectedResult { + fn create_from( + result: Result, + marf_hash: TrieHash, + ) -> Self { match result { Ok(epoch_receipt) => { let transactions: Vec = epoch_receipt @@ -197,6 +202,7 @@ impl From> for ExpectedResult { .collect(); let total_block_cost = epoch_receipt.anchored_block_cost.clone(); ExpectedResult::Success(ExpectedBlockOutput { + marf_hash, transactions, total_block_cost, }) @@ -367,14 +373,12 @@ impl ConsensusTest<'_> { ); debug!("--------- Appended block: {} ---------", result.is_ok()); - results.push( - result - .map(|(receipt, clarity_commit, _, _)| { - clarity_commit.commit(); - receipt - }) - .into(), - ); + let remapped_result = result.map(|(receipt, clarity_commit, _, _)| { + clarity_commit.commit(); + receipt + }); + let expected_marf = nakamoto_block.header.state_index_root; + results.push(ExpectedResult::create_from(remapped_result, expected_marf)); chainstate_tx.commit().unwrap(); } @@ -430,15 +434,11 @@ impl ConsensusTest<'_> { // Set the MARF root hash or use an all-zero hash in case of failure. // NOTE: It is expected to fail when trying computing the marf for invalid block/transactions. 
- /* let marf_result = self.compute_block_marf_root_hash(block.header.timestamp, &block.txs); block.header.state_index_root = match marf_result { Ok(marf) => marf, Err(_) => TrieHash::from_bytes(&[0; 32]).unwrap(), }; - */ - - block.header.state_index_root = TrieHash::from_bytes(&[0; 32]).unwrap(); self.chain.miner.sign_nakamoto_block(&mut block); let mut signers = self.chain.config.test_signers.clone().unwrap_or_default(); @@ -517,9 +517,7 @@ impl ConsensusTest<'_> { NakamotoChainState::finish_block(clarity_tx, None, false, burn_header_height) .map_err(|e| e.to_string())?; - let trie_hash = clarity_tx.seal(); - //clarity_tx.rollback_block(); - Ok(trie_hash) + Ok(clarity_tx.seal()) } } @@ -559,42 +557,6 @@ fn test_append_empty_blocks() { insta::assert_ron_snapshot!(result); } -//#[test] -fn test_append_state_index_root_mismatches() { - let mut epoch_blocks = HashMap::new(); - epoch_blocks.insert( - StacksEpochId::Epoch30, - vec![TestBlock { - transactions: vec![], - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch31, - vec![TestBlock { - transactions: vec![], - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch32, - vec![TestBlock { - transactions: vec![], - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch33, - vec![TestBlock { - transactions: vec![], - }], - ); - - let test_vector = ConsensusTestVector { - initial_balances: vec![], - epoch_blocks, - }; - let result = ConsensusTest::new(function_name!(), test_vector).run(); - insta::assert_ron_snapshot!(result); -} - #[test] fn test_append_stx_transfers_success() { let sender_privks = [ @@ -759,38 +721,110 @@ fn test_append_block_with_contract_upload_success() { }; let result = ConsensusTest::new(function_name!(), test_vector).run(); - // Example of expecting the same result across all blocks - insta::allow_duplicates! 
{ - for res in result { - // Example of inline snapshot - insta::assert_ron_snapshot!(res, @r" - Success(ExpectedBlockOutput( - transactions: [ - ExpectedTransactionOutput( - return_type: Response(ResponseData( - committed: true, - data: Bool(true), - )), - cost: ExecutionCost( - write_length: 13, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 8114, - ), - ), - ], - total_block_cost: ExecutionCost( - write_length: 13, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 8114, - ), - )) - "); - } - } + insta::assert_ron_snapshot!(result, @r#" + [ + Success(ExpectedBlockOutput( + marf_hash: "b45acd35f4c48a834a2f898ca8bb6c48416ac6bec9d8a3f3662b61ab97b1edde", + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "521d75234ec6c64f68648b6b0f6f385d89b58efb581211a411e0e88aa71f3371", + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "511e1cc37e83ef3de4ea56962574d6ddd2d8840d24d9238f19eee5a35127df6a", + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), + ), + ], + total_block_cost: ExecutionCost( + 
write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "3520c2dd96f7d91e179c4dcd00f3c49c16d6ec21434fb16921922558282eab26", + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 13, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 8114, + ), + )), + ] + "#); } #[test] @@ -856,9 +890,5 @@ fn test_append_block_with_contract_call_success() { }; let result = ConsensusTest::new(function_name!(), test_vector).run(); - insta::allow_duplicates! { - for res in result { - insta::assert_ron_snapshot!(res); - } - } + insta::assert_ron_snapshot!(result); } diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_call_success.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_call_success.snap index f50380ee57..8aec4b5474 100644 --- a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_call_success.snap +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_call_success.snap @@ -1,41 +1,158 @@ --- source: stackslib/src/chainstate/tests/consensus.rs -expression: res +expression: result --- -Success(ExpectedBlockOutput( - transactions: [ - ExpectedTransactionOutput( - return_type: Response(ResponseData( - committed: true, - data: Bool(true), - )), - cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, +[ + Success(ExpectedBlockOutput( + marf_hash: 
"186c8e49bcfc59bb67ed22f031f009a44681f296392e0f92bed520918ba463ae", + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), ), + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 104, + read_count: 4, + runtime: 12467, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "ad23713f072473cad6a32125ed5fa822bb62bbfae8ed2302209c12d2f1958128", + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 104, + read_count: 4, + runtime: 12467, ), - ExpectedTransactionOutput( - return_type: Response(ResponseData( - committed: true, - data: UInt(1), - )), - cost: ExecutionCost( - write_length: 0, - write_count: 0, - read_length: 103, - read_count: 3, - runtime: 499, + )), + Success(ExpectedBlockOutput( + marf_hash: "021bd30b09b5ac6ff34abd11f05244a966af937b584b1752f272cd717bb25f1d", + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 
1, + runtime: 11968, + ), + ), + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 104, + read_count: 4, + runtime: 12467, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "416e728daeec4de695c89d15eede8ddb7b85fb4af82daffb1e0d8166a3e93451", + transactions: [ + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ExpectedTransactionOutput( + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 104, + read_count: 4, + runtime: 12467, ), - ], - total_block_cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 104, - read_count: 4, - runtime: 12467, - ), -)) + )), +] diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_empty_blocks.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_empty_blocks.snap index 017c5a91da..a1f13d92b8 100644 --- a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_empty_blocks.snap +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_empty_blocks.snap @@ -4,6 +4,7 @@ expression: result --- [ Success(ExpectedBlockOutput( + marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5", transactions: [], total_block_cost: 
ExecutionCost( write_length: 0, @@ -14,6 +15,7 @@ expression: result ), )), Success(ExpectedBlockOutput( + marf_hash: "a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf", transactions: [], total_block_cost: ExecutionCost( write_length: 0, @@ -24,6 +26,7 @@ expression: result ), )), Success(ExpectedBlockOutput( + marf_hash: "c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f", transactions: [], total_block_cost: ExecutionCost( write_length: 0, @@ -34,6 +37,7 @@ expression: result ), )), Success(ExpectedBlockOutput( + marf_hash: "23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78", transactions: [], total_block_cost: ExecutionCost( write_length: 0, diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_state_index_root_mismatches.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_state_index_root_mismatches.snap deleted file mode 100644 index 1d73839536..0000000000 --- a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_state_index_root_mismatches.snap +++ /dev/null @@ -1,10 +0,0 @@ ---- -source: stackslib/src/chainstate/tests/consensus.rs -expression: result ---- -[ - Failure("Block ef45bfa44231d9e7aff094b53cfd48df0456067312f169a499354c4273a66fe3 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5"), - Failure("Block a14d0b5c8d3c49554aeb462a8fe019718195789fa1dcd642059b75e41f0ce9cc state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf"), - Failure("Block f8120b4a632ee1d49fbbde3e01289588389cd205cab459a4493a7d58d2dc18ed state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 
c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f"), - Failure("Block 4dcb48b684d105ff0e0ab8becddd4a2d5623cc8b168aacf9c455e20b3e610e63 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78"), -] diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers_success.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers_success.snap index dd47089fbb..c4be6d8a74 100644 --- a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers_success.snap +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers_success.snap @@ -4,6 +4,7 @@ expression: result --- [ Success(ExpectedBlockOutput( + marf_hash: "63ea49669d2216ebc7e4f8b5e1cd2c99b8aff9806794adf87dcf709c0a244798", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( @@ -54,6 +55,7 @@ expression: result ), )), Success(ExpectedBlockOutput( + marf_hash: "7fc538e605a4a353871c4a655ae850fe9a70c3875b65f2bb42ea3bef5effed2c", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( @@ -104,6 +106,7 @@ expression: result ), )), Success(ExpectedBlockOutput( + marf_hash: "4d5c9a6d07806ac5006137de22b083de66fff7119143dd5cd92e4a457d66e028", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( @@ -154,6 +157,7 @@ expression: result ), )), Success(ExpectedBlockOutput( + marf_hash: "66eed8c0ab31db111a5adcc83d38a7004c6e464e3b9fb9f52ec589bc6d5f2d32", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( From 67ce9302a567976ff8d0e89a6226966955a2f2fa Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 9 Oct 2025 10:39:21 -0400 Subject: [PATCH 85/86] Pass error from 
validate_normal_nakamoto_block_burnchain in accept_block up Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/nakamoto/mod.rs | 60 +++++++++++------------- 1 file changed, 27 insertions(+), 33 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 9d3bded117..0d6bc89ba5 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2478,28 +2478,23 @@ impl NakamotoChainState { reward_set: &RewardSet, obtain_method: NakamotoBlockObtainMethod, ) -> Result { - test_debug!("Consider Nakamoto block {}", &block.block_id()); + let block_id = block.block_id(); + test_debug!("Consider Nakamoto block {block_id}"); // do nothing if we already have this block - if Self::get_block_header(headers_conn, &block.header.block_id())?.is_some() { - debug!("Already have block {}", &block.header.block_id()); + if Self::get_block_header(headers_conn, &block_id)?.is_some() { + debug!("Already have block {block_id}"); return Ok(false); } // if this is the first tenure block, then make sure it's well-formed block.is_wellformed_tenure_start_block().map_err(|_| { - warn!( - "Block {} is not a well-formed first tenure block", - &block.block_id() - ); + warn!("Block {block_id} is not a well-formed first tenure block"); ChainstateError::InvalidStacksBlock("Not a well-formed first-tenure block".into()) })?; // if this is a tenure-extend block, then make sure it's well-formed block.is_wellformed_tenure_extend_block().map_err(|_| { - warn!( - "Block {} is not a well-formed tenure-extend block", - &block.block_id() - ); + warn!("Block {block_id} is not a well-formed tenure-extend block"); ChainstateError::InvalidStacksBlock("Not a well-formed tenure-extend block".into()) })?; @@ -2510,51 +2505,50 @@ impl NakamotoChainState { if block.is_shadow_block() { // this block is already present in the staging DB, so just perform some prefunctory // validation (since they're constructed a priori to 
be valid) - if let Err(e) = Self::validate_shadow_nakamoto_block_burnchain( + Self::validate_shadow_nakamoto_block_burnchain( staging_db_tx.conn(), db_handle, expected_burn_opt, block, config.mainnet, config.chain_id, - ) { + ) + .unwrap_or_else(|e| { error!("Unacceptable shadow Nakamoto block"; - "stacks_block_id" => %block.block_id(), - "error" => ?e + "stacks_block_id" => %block_id, + "error" => ?e ); panic!("Unacceptable shadow Nakamoto block"); - } - + }); return Ok(false); } // this block must be consistent with its miner's leader-key and block-commit, and must // contain only transactions that are valid in this epoch. - if let Err(e) = Self::validate_normal_nakamoto_block_burnchain( + Self::validate_normal_nakamoto_block_burnchain( staging_db_tx.conn(), db_handle, expected_burn_opt, block, config.mainnet, config.chain_id, - ) { + ) + .inspect_err(|e| { warn!("Unacceptable Nakamoto block; will not store"; - "stacks_block_id" => %block.block_id(), - "error" => ?e + "stacks_block_id" => %block_id, + "error" => ?e ); - return Ok(false); - }; + })?; - let signing_weight = match block.header.verify_signer_signatures(reward_set) { - Ok(x) => x, - Err(e) => { + let signing_weight = block + .header + .verify_signer_signatures(reward_set) + .inspect_err(|e| { warn!("Received block, but the signer signatures are invalid"; - "block_id" => %block.block_id(), - "error" => ?e, + "block_id" => %block_id, + "error" => ?e, ); - return Err(e); - } - }; + })?; // if we pass all the tests, then along the way, we will have verified (in // Self::validate_nakamoto_block_burnchain) that the consensus hash of this block is on the @@ -2569,9 +2563,9 @@ impl NakamotoChainState { obtain_method, )?; if ret { - test_debug!("Stored Nakamoto block {}", &block.block_id()); + test_debug!("Stored Nakamoto block {block_id}"); } else { - test_debug!("Did NOT store Nakamoto block {}", &block.block_id()); + test_debug!("Did NOT store Nakamoto block {block_id}"); } Ok(ret) } From 
e7340e814edae0ccdb4e889cf2d27cffa468ca52 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 9 Oct 2025 13:57:34 -0400 Subject: [PATCH 86/86] Implement process_pushed_next_ready_block to enable calling process_next_nakamoto_block Signed-off-by: Jacinta Ferrant --- .../src/chainstate/nakamoto/tests/node.rs | 99 +++++ stackslib/src/chainstate/tests/consensus.rs | 380 +++++++----------- ...pend_block_with_contract_call_success.snap | 8 +- ...tests__consensus__append_empty_blocks.snap | 48 +-- ...nsensus__append_stx_transfers_success.snap | 8 +- 5 files changed, 261 insertions(+), 282 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index e7a6135d18..794def1106 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -1060,6 +1060,105 @@ impl TestStacksNode { let cost = builder.tenure_finish(tenure_tx).unwrap(); Ok((block, size, cost)) } + + /// Insert a staging Nakamoto block as a pushed block and + /// then process it as the next ready block + /// NOTE: Will panic if called with unprocessed staging + /// blocks already in the queue. 
+ pub fn process_pushed_next_ready_block<'a>( + stacks_node: &mut TestStacksNode, + sortdb: &mut SortitionDB, + miner: &mut TestMiner, + tenure_id_consensus_hash: &ConsensusHash, + coord: &mut ChainsCoordinator< + 'a, + TestEventObserver, + (), + OnChainRewardSetProvider<'a, TestEventObserver>, + (), + (), + BitcoinIndexer, + >, + nakamoto_block: NakamotoBlock, + ) -> Result, ChainstateError> { + // Before processeding, make sure the caller did not accidentally construct a test with unprocessed blocks already in the queue + let nakamoto_blocks_db = stacks_node.chainstate.nakamoto_blocks_db(); + assert!(nakamoto_blocks_db + .next_ready_nakamoto_block(stacks_node.chainstate.db()) + .unwrap().is_none(), "process_pushed_next_ready_block can only be called if the staging blocks queue is empty"); + + let tenure_sn = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), tenure_id_consensus_hash)? + .ok_or_else(|| ChainstateError::NoSuchBlockError)?; + + let cycle = sortdb + .pox_constants + .block_height_to_reward_cycle(sortdb.first_block_height, tenure_sn.block_height) + .unwrap(); + + // Get the reward set + let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + let reward_set = load_nakamoto_reward_set( + miner + .burnchain + .block_height_to_reward_cycle(sort_tip_sn.block_height) + .expect("FATAL: no reward cycle for sortition"), + &sort_tip_sn.sortition_id, + &miner.burnchain, + &mut stacks_node.chainstate, + &nakamoto_block.header.parent_block_id, + sortdb, + &OnChainRewardSetProvider::new(), + ) + .expect("Failed to load reward set") + .expect("Expected a reward set") + .0 + .known_selected_anchor_block_owned() + .expect("Unknown reward set"); + + let block_id = nakamoto_block.block_id(); + + debug!( + "Process Nakamoto block {block_id} ({:?}", + &nakamoto_block.header + ); + + let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; + let mut sort_handle = sortdb.index_handle(&sort_tip); + + // Force the block to be 
added to the nakamoto_staging_blocks table + let config = stacks_node.chainstate.config(); + let (headers_conn, staging_db_tx) = + stacks_node.chainstate.headers_conn_and_staging_tx_begin()?; + let accepted = NakamotoChainState::accept_block( + &config, + &nakamoto_block, + &mut sort_handle, + &staging_db_tx, + headers_conn, + &reward_set, + NakamotoBlockObtainMethod::Pushed, + )?; + staging_db_tx.commit()?; + debug!("Accepted Nakamoto block {}", &nakamoto_block.block_id()); + // Actually attempt to process the accepted block added to nakamoto_staging_blocks + // Will attempt to execute the transactions via a call to append_block + let res = NakamotoChainState::process_next_nakamoto_block( + &mut coord.chain_state_db, + &mut coord.sortition_db, + &coord.canonical_sortition_tip.clone().expect( + "FAIL: processing a new Stacks block, but don't have a canonical sortition tip", + ), + coord.dispatcher, + coord.config.txindex, + )?; + if res.is_some() { + // If we successfully processed the block, make sure we append the block to our current tenure + // so subsequent blocks do not attempt to reorg it. 
+ stacks_node.add_nakamoto_extended_blocks(vec![nakamoto_block]); + } + Ok(res) + } } /// Get the Nakamoto parent linkage data for building atop the last-produced tenure or diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index f224f72646..2526ceaf1a 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -37,8 +37,8 @@ use stacks_common::bitvec::BitVec; use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; -use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::{ClarityTx, StacksChainState, StacksEpochReceipt}; +use crate::chainstate::stacks::tests::TestStacksNode; use crate::chainstate::stacks::{ Error as ChainstateError, StacksTransaction, TenureChangeCause, MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, @@ -54,6 +54,13 @@ pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; +const EPOCHS_TO_TEST: [StacksEpochId; 4] = [ + StacksEpochId::Epoch30, + StacksEpochId::Epoch31, + StacksEpochId::Epoch32, + StacksEpochId::Epoch33, +]; + /// The private key for the faucet account. 
pub const FAUCET_PRIV_KEY: LazyCell = LazyCell::new(|| { StacksPrivateKey::from_hex("510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101") @@ -298,9 +305,15 @@ impl ConsensusTest<'_> { let tenure_change_tx = self.chain.miner.make_nakamoto_tenure_change(tenure_change); let coinbase_tx = self.chain.miner.make_nakamoto_coinbase(None, vrf_proof); - let _blocks_and_sizes = - self.chain - .make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)); + let blocks_and_sizes = self + .chain + .make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)) + .unwrap(); + assert_eq!( + blocks_and_sizes.len(), + 1, + "Mined more than one Nakamoto block" + ); let burn_block_height = self.chain.get_burn_block_height(); current_epoch = SortitionDB::get_stacks_epoch(self.chain.sortdb().conn(), burn_block_height) @@ -331,59 +344,38 @@ impl ConsensusTest<'_> { for (i, block) in epoch_blocks.iter().enumerate() { debug!("--------- Running block {i} for epoch {epoch:?} ---------"); let (nakamoto_block, block_size) = self.construct_nakamoto_block(&block); - let sortdb = self.chain.sortdb.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let chain_tip = NakamotoChainState::get_canonical_block_header( - self.chain.stacks_node().chainstate.db(), + stacks_node.chainstate.db(), &sortdb, ) .unwrap() .unwrap(); let pox_constants = PoxConstants::test_default(); + let sig_hash = nakamoto_block.header.signer_signature_hash(); debug!( - "--------- Appending block {} ---------", - nakamoto_block.header.signer_signature_hash(); + "--------- Processing block {sig_hash} ---------"; "block" => ?nakamoto_block ); - { - let (mut chainstate_tx, clarity_instance) = self - .chain - .stacks_node() - .chainstate - .chainstate_tx_begin() - .unwrap(); - - let mut burndb_conn = sortdb.index_handle_at_tip(); - - let result = NakamotoChainState::append_block( - &mut chainstate_tx, - clarity_instance, - &mut 
burndb_conn, - &chain_tip.consensus_hash, - &pox_constants, - &chain_tip, - &chain_tip.burn_header_hash, - chain_tip.burn_header_height, - chain_tip.burn_header_timestamp, - &nakamoto_block, - block_size.try_into().unwrap(), - nakamoto_block.header.burn_spent, - 1500, - &RewardSet::empty(), - false, - ); - - debug!("--------- Appended block: {} ---------", result.is_ok()); - let remapped_result = result.map(|(receipt, clarity_commit, _, _)| { - clarity_commit.commit(); - receipt - }); - let expected_marf = nakamoto_block.header.state_index_root; - results.push(ExpectedResult::create_from(remapped_result, expected_marf)); - chainstate_tx.commit().unwrap(); - } - + let expected_marf = nakamoto_block.header.state_index_root; + let res = TestStacksNode::process_pushed_next_ready_block( + &mut stacks_node, + &mut sortdb, + &mut self.chain.miner, + &chain_tip.consensus_hash, + &mut self.chain.coord, + nakamoto_block.clone(), + ); + debug!( + "--------- Processed block: {sig_hash} ---------"; + "block" => ?nakamoto_block + ); + let remapped_result = res.map(|receipt| receipt.unwrap()).into(); + results.push(ExpectedResult::create_from(remapped_result, expected_marf)); // Restore chainstate for the next block self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); } } results @@ -523,31 +515,13 @@ impl ConsensusTest<'_> { #[test] fn test_append_empty_blocks() { + let empty_test_blocks = vec![TestBlock { + transactions: vec![], + }]; let mut epoch_blocks = HashMap::new(); - epoch_blocks.insert( - StacksEpochId::Epoch30, - vec![TestBlock { - transactions: vec![], - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch31, - vec![TestBlock { - transactions: vec![], - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch32, - vec![TestBlock { - transactions: vec![], - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch33, - vec![TestBlock { - transactions: vec![], - }], - ); + for epoch in EPOCHS_TO_TEST { + epoch_blocks.insert(epoch, 
empty_test_blocks.clone()); + } let test_vector = ConsensusTestVector { initial_balances: vec![], @@ -564,53 +538,42 @@ fn test_append_stx_transfers_success() { StacksPrivateKey::from_hex(SK_2).unwrap(), StacksPrivateKey::from_hex(SK_3).unwrap(), ]; + let total_epochs = EPOCHS_TO_TEST.len() as u64; let send_amount = 1_000; let tx_fee = 180; + // initialize balances let mut initial_balances = Vec::new(); - let transactions: Vec<_> = sender_privks - .iter() - .map(|sender_privk| { - initial_balances.push(( - StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender_privk)).into(), - send_amount + tx_fee, - )); - // Interestingly, it doesn't seem to care about nonce... - make_stacks_transfer_tx( - sender_privk, - 0, - tx_fee, - CHAIN_ID_TESTNET, - &boot_code_addr(false).into(), - send_amount, - ) - }) - .collect(); + for sender_privk in &sender_privks { + let sender_addr = + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender_privk)).into(); + // give them enough to cover all transfers across all epochs + initial_balances.push((sender_addr, (send_amount + tx_fee) * total_epochs)); + } + // build transactions per epoch, incrementing nonce per sender let mut epoch_blocks = HashMap::new(); - epoch_blocks.insert( - StacksEpochId::Epoch30, - vec![TestBlock { - transactions: transactions.clone(), - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch31, - vec![TestBlock { - transactions: transactions.clone(), - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch32, - vec![TestBlock { - transactions: transactions.clone(), - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch33, - vec![TestBlock { - transactions: transactions.clone(), - }], - ); + let mut nonces = vec![0u64; sender_privks.len()]; // track nonce per sender + + for epoch in EPOCHS_TO_TEST { + let transactions: Vec<_> = sender_privks + .iter() + .enumerate() + .map(|(i, sender_privk)| { + let tx = make_stacks_transfer_tx( + sender_privk, + nonces[i], // use current nonce + tx_fee, + 
CHAIN_ID_TESTNET, + &boot_code_addr(false).into(), + send_amount, + ); + nonces[i] += 1; // increment for next epoch + tx + }) + .collect(); + + epoch_blocks.insert(epoch, vec![TestBlock { transactions }]); + } let test_vector = ConsensusTestVector { initial_balances, @@ -639,32 +602,13 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { ); let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - + let test_blocks = vec![TestBlock { + transactions: vec![tx.clone()], + }]; let mut epoch_blocks = HashMap::new(); - epoch_blocks.insert( - StacksEpochId::Epoch30, - vec![TestBlock { - transactions: vec![tx.clone()], - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch31, - vec![TestBlock { - transactions: vec![tx.clone()], - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch32, - vec![TestBlock { - transactions: vec![tx.clone()], - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch33, - vec![TestBlock { - transactions: vec![tx.clone()], - }], - ); + for epoch in EPOCHS_TO_TEST { + epoch_blocks.insert(epoch, test_blocks.clone()); + } let test_vector = ConsensusTestVector { initial_balances: vec![], @@ -676,45 +620,35 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { #[test] fn test_append_block_with_contract_upload_success() { - let contract_name = "test-contract"; - let contract_content = "(/ 1 1)"; - let tx_fee = (contract_content.len() * 100) as u64; + // build transactions per epoch, incrementing nonce per sender + let mut epoch_blocks = HashMap::new(); - let tx_bytes = make_contract_publish( - &FAUCET_PRIV_KEY, - 0, - tx_fee, - CHAIN_ID_TESTNET, - contract_name, - &contract_content, - ); - let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + EPOCHS_TO_TEST + .into_iter() + .enumerate() + .for_each(|(nonce, epoch)| { + // Can't deploy to the same contract location so make sure contract name changes + let contract_name = format!("test-contract-{nonce}"); + let 
contract_content = "(/ 1 1)"; + let tx_fee = (contract_content.len() * 100) as u64; + + let tx_bytes = make_contract_publish( + &FAUCET_PRIV_KEY, + nonce as u64, + tx_fee, + CHAIN_ID_TESTNET, + &contract_name, + contract_content, + ); + let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + epoch_blocks.insert( + epoch, + vec![TestBlock { + transactions: vec![tx], + }], + ); + }); - let mut epoch_blocks = HashMap::new(); - epoch_blocks.insert( - StacksEpochId::Epoch30, - vec![TestBlock { - transactions: vec![tx.clone()], - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch31, - vec![TestBlock { - transactions: vec![tx.clone()], - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch32, - vec![TestBlock { - transactions: vec![tx.clone()], - }], - ); - epoch_blocks.insert( - StacksEpochId::Epoch33, - vec![TestBlock { - transactions: vec![tx.clone()], - }], - ); let test_vector = ConsensusTestVector { initial_balances: vec![], epoch_blocks, @@ -724,7 +658,7 @@ fn test_append_block_with_contract_upload_success() { insta::assert_ron_snapshot!(result, @r#" [ Success(ExpectedBlockOutput( - marf_hash: "b45acd35f4c48a834a2f898ca8bb6c48416ac6bec9d8a3f3662b61ab97b1edde", + marf_hash: "ace4d5c5ffb440418fb30fe1999769ab7fff5a243b775b9961a1dfa77d7a1fab", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( @@ -749,7 +683,7 @@ fn test_append_block_with_contract_upload_success() { ), )), Success(ExpectedBlockOutput( - marf_hash: "521d75234ec6c64f68648b6b0f6f385d89b58efb581211a411e0e88aa71f3371", + marf_hash: "cf7a58c3c15ae61b0861a77a9909e9b05fe35a8d23f974461fd1317693413d3c", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( @@ -774,7 +708,7 @@ fn test_append_block_with_contract_upload_success() { ), )), Success(ExpectedBlockOutput( - marf_hash: "511e1cc37e83ef3de4ea56962574d6ddd2d8840d24d9238f19eee5a35127df6a", + marf_hash: "ad7f9b2130fda2ca8f5c75237755ab7055f69f91d937b2d0653d52f515765e6f", 
transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( @@ -799,7 +733,7 @@ fn test_append_block_with_contract_upload_success() { ), )), Success(ExpectedBlockOutput( - marf_hash: "3520c2dd96f7d91e179c4dcd00f3c49c16d6ec21434fb16921922558282eab26", + marf_hash: "25eff57753c490824fc0205b4493d7073e378f0d4648810454cc7e06276fe7da", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( @@ -830,59 +764,45 @@ fn test_append_block_with_contract_upload_success() { #[test] fn test_append_block_with_contract_call_success() { let tx_fee = (FOO_CONTRACT.len() * 100) as u64; - - let tx_bytes = make_contract_publish( - &FAUCET_PRIV_KEY, - 0, - tx_fee, - CHAIN_ID_TESTNET, - "foo_contract", - FOO_CONTRACT, - ); - let tx_contract_deploy = - StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); - - let tx_bytes = make_contract_call( - &FAUCET_PRIV_KEY, - 1, - 200, - CHAIN_ID_TESTNET, - &to_addr(&FAUCET_PRIV_KEY), - "foo_contract", - "bar", - &[ClarityValue::UInt(1)], - ); - let tx_contract_call = - StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); - + let mut nonce = 0; + // build transactions per epoch, incrementing nonce per sender let mut epoch_blocks = HashMap::new(); - epoch_blocks.insert( - StacksEpochId::Epoch30, - vec![TestBlock { - transactions: vec![tx_contract_deploy.clone(), tx_contract_call.clone()], - }], - ); - - epoch_blocks.insert( - StacksEpochId::Epoch31, - vec![TestBlock { - transactions: vec![tx_contract_deploy.clone(), tx_contract_call.clone()], - }], - ); - - epoch_blocks.insert( - StacksEpochId::Epoch32, - vec![TestBlock { - transactions: vec![tx_contract_deploy.clone(), tx_contract_call.clone()], - }], - ); - - epoch_blocks.insert( - StacksEpochId::Epoch33, - vec![TestBlock { - transactions: vec![tx_contract_deploy, tx_contract_call], - }], - ); + EPOCHS_TO_TEST.into_iter().for_each(|epoch| { + // we need to change the contract name across deploys since same sender 
+ let contract_name = format!("foo_contract_{nonce}"); + let tx_bytes = make_contract_publish( + &FAUCET_PRIV_KEY, + nonce, + tx_fee, + CHAIN_ID_TESTNET, + &contract_name, + FOO_CONTRACT, + ); + nonce += 1; + let tx_contract_deploy = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + + let tx_bytes = make_contract_call( + &FAUCET_PRIV_KEY, + nonce, + 200, + CHAIN_ID_TESTNET, + &to_addr(&FAUCET_PRIV_KEY), + &contract_name, + "bar", + &[ClarityValue::UInt(1)], + ); + nonce += 1; + let tx_contract_call = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + + epoch_blocks.insert( + epoch, + vec![TestBlock { + transactions: vec![tx_contract_deploy, tx_contract_call], + }], + ); + }); let test_vector = ConsensusTestVector { initial_balances: vec![], diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_call_success.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_call_success.snap index 8aec4b5474..2c231d7026 100644 --- a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_call_success.snap +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_block_with_contract_call_success.snap @@ -4,7 +4,7 @@ expression: result --- [ Success(ExpectedBlockOutput( - marf_hash: "186c8e49bcfc59bb67ed22f031f009a44681f296392e0f92bed520918ba463ae", + marf_hash: "2149237f0e2a3407eed8733d38bce3db1f3ee1c14ed903c21f59546773174f4f", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( @@ -42,7 +42,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "ad23713f072473cad6a32125ed5fa822bb62bbfae8ed2302209c12d2f1958128", + marf_hash: "4742e535aebef843720867558b9e2be6148e95157f1fc259e24d162b6c5b78b0", transactions: [ ExpectedTransactionOutput( 
return_type: Response(ResponseData( @@ -80,7 +80,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "021bd30b09b5ac6ff34abd11f05244a966af937b584b1752f272cd717bb25f1d", + marf_hash: "75b37d37b1f171eb01fa71a1629e5cab10f2c5cb852b2532b0d4bd311bc94960", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( @@ -118,7 +118,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "416e728daeec4de695c89d15eede8ddb7b85fb4af82daffb1e0d8166a3e93451", + marf_hash: "eabaa1042075ab7afd7721584a590ee8f8542ad4743adc41ed3b1dbe9078a5b4", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_empty_blocks.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_empty_blocks.snap index a1f13d92b8..ea2c09ad3e 100644 --- a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_empty_blocks.snap +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_empty_blocks.snap @@ -3,48 +3,8 @@ source: stackslib/src/chainstate/tests/consensus.rs expression: result --- [ - Success(ExpectedBlockOutput( - marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5", - transactions: [], - total_block_cost: ExecutionCost( - write_length: 0, - write_count: 0, - read_length: 0, - read_count: 0, - runtime: 0, - ), - )), - Success(ExpectedBlockOutput( - marf_hash: "a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf", - transactions: [], - total_block_cost: ExecutionCost( - write_length: 0, - write_count: 0, - read_length: 0, - read_count: 0, - runtime: 0, - ), - )), - Success(ExpectedBlockOutput( - marf_hash: "c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f", - transactions: [], - total_block_cost: ExecutionCost( - write_length: 0, - write_count: 
0, - read_length: 0, - read_count: 0, - runtime: 0, - ), - )), - Success(ExpectedBlockOutput( - marf_hash: "23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78", - transactions: [], - total_block_cost: ExecutionCost( - write_length: 0, - write_count: 0, - read_length: 0, - read_count: 0, - runtime: 0, - ), - )), + Failure("Invalid Nakamoto block: failed static transaction checks"), + Failure("Invalid Nakamoto block: failed static transaction checks"), + Failure("Invalid Nakamoto block: failed static transaction checks"), + Failure("Invalid Nakamoto block: failed static transaction checks"), ] diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers_success.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers_success.snap index c4be6d8a74..ef8280179a 100644 --- a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers_success.snap +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers_success.snap @@ -4,7 +4,7 @@ expression: result --- [ Success(ExpectedBlockOutput( - marf_hash: "63ea49669d2216ebc7e4f8b5e1cd2c99b8aff9806794adf87dcf709c0a244798", + marf_hash: "cc77d584dea4a29e4d15efffc3306a0e6513d5b516903121c3c149cd85600d5d", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( @@ -55,7 +55,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "7fc538e605a4a353871c4a655ae850fe9a70c3875b65f2bb42ea3bef5effed2c", + marf_hash: "8e80ece06d148b967241484040d26041c817ad9d8753a5d8d2afd284d0e172bc", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( @@ -106,7 +106,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "4d5c9a6d07806ac5006137de22b083de66fff7119143dd5cd92e4a457d66e028", + marf_hash: 
"aeb567f75a6a551252cedbbd882060d46dda38f0d949431b503fd435664338da", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData( @@ -157,7 +157,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "66eed8c0ab31db111a5adcc83d38a7004c6e464e3b9fb9f52ec589bc6d5f2d32", + marf_hash: "39a1ec92bc388262902593e82da7af6e0cc12412bd566974cebb7f7e9f4e67ce", transactions: [ ExpectedTransactionOutput( return_type: Response(ResponseData(