diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs
index c1af5530416..3f63b97b3fb 100644
--- a/clarity/src/vm/database/sqlite.rs
+++ b/clarity/src/vm/database/sqlite.rs
@@ -17,7 +17,7 @@
 use rusqlite::{params, Connection, OptionalExtension};
 use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, TrieHash};
 use stacks_common::types::sqlite::NO_PARAMS;
-use stacks_common::util::db::tx_busy_handler;
+use stacks_common::util::db::{tx_busy_handler, SqlEncoded};
 use stacks_common::util::hash::Sha512Trunc256Sum;
 
 use super::clarity_store::{make_contract_hash_key, ContractCommitment};
@@ -147,7 +147,7 @@ impl SqliteConnection {
         value: &str,
     ) -> Result<()> {
         let key = format!("clr-meta::{contract_hash}::{key}");
-        let params = params![bhh, key, value];
+        let params = params![bhh.sqlhex(), key, value];
 
         if let Err(e) = conn.execute(
             "INSERT INTO metadata_table (blockhash, key, value) VALUES (?, ?, ?)",
             params,
@@ -164,7 +164,7 @@ impl SqliteConnection {
         from: &StacksBlockId,
         to: &StacksBlockId,
     ) -> Result<()> {
-        let params = params![to, from];
+        let params = params![to.sqlhex(), from.sqlhex()];
         if let Err(e) = conn.execute(
             "UPDATE metadata_table SET blockhash = ? WHERE blockhash = ?",
             params,
@@ -178,7 +178,7 @@ impl SqliteConnection {
     pub fn drop_metadata(conn: &Connection, from: &StacksBlockId) -> Result<()> {
         if let Err(e) = conn.execute(
             "DELETE FROM metadata_table WHERE blockhash = ?",
-            params![from],
+            params![from.sqlhex()],
         ) {
             error!("Failed to drop metadata from {from}: {e:?}");
             return Err(InterpreterError::DBError(SQL_FAIL_MESSAGE.into()).into());
@@ -193,7 +193,7 @@ impl SqliteConnection {
         key: &str,
     ) -> Result<Option<String>> {
         let key = format!("clr-meta::{contract_hash}::{key}");
-        let params = params![bhh, key];
+        let params = params![bhh.sqlhex(), key];
 
         match conn
             .query_row(
diff --git a/contrib/stacks-inspect/src/main.rs b/contrib/stacks-inspect/src/main.rs
index e225149bb97..496a77b9ac9 100644
--- a/contrib/stacks-inspect/src/main.rs
+++ b/contrib/stacks-inspect/src/main.rs
@@ -58,6 +58,7 @@ use stacks_common::types::chainstate::{
 };
 use stacks_common::types::net::{PeerAddress, PeerHost};
 use stacks_common::types::sqlite::NO_PARAMS;
+use stacks_common::util::db::SqlEncoded;
 use stacks_common::util::hash::{Hash160, hex_bytes, to_hex};
 use stacks_common::util::retry::LogReader;
 use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
@@ -942,7 +943,7 @@ check if the associated microblocks can be downloaded
                 println!("{cur_burn}, {cur_tip}");
                 let (next_burn, next_tip) = match conn.query_row("SELECT parent_burn_header_hash, parent_anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ? and burn_header_hash = ?",
-                    params![cur_tip, cur_burn], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) {
+                    params![cur_tip.sqlhex(), cur_burn.sqlhex()], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) {
                     Ok(x) => x,
                     Err(e) => {
                         match e {
diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs
index 0f45dc12b94..ae35fb691e9 100644
--- a/stacks-common/src/types/chainstate.rs
+++ b/stacks-common/src/types/chainstate.rs
@@ -454,7 +454,7 @@ impl StacksMessageCodec for StacksWorkScore {
 
 impl_byte_array_message_codec!(TrieHash, TRIEHASH_ENCODED_SIZE as u32);
 impl_byte_array_message_codec!(Sha512Trunc256Sum, 32);
-
+impl_byte_array_message_codec!(VRFSeed, 32);
 impl_byte_array_message_codec!(ConsensusHash, 20);
 impl_byte_array_message_codec!(Hash160, 20);
 impl_byte_array_message_codec!(BurnchainHeaderHash, 32);
diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs
index da629dfde25..0f97f6a5ee4 100644
--- a/stacks-common/src/util/db.rs
+++ b/stacks-common/src/util/db.rs
@@ -15,14 +15,17 @@
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 use std::backtrace::Backtrace;
+use std::convert::TryFrom;
+use std::io::{Read, Write};
 use std::sync::{LazyLock, Mutex};
 use std::thread;
 use std::time::Instant;
 
 use hashbrown::HashMap;
 use rand::{thread_rng, Rng};
-use rusqlite::Connection;
+use rusqlite::{Connection, Row};
 
+use crate::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec};
 use crate::util::sleep_ms;
 
 /// Keep track of DB locks, for deadlock debugging
@@ -91,3 +94,73 @@ pub fn tx_busy_handler(run_count: i32) -> bool {
     sleep_ms(sleep_time_ms);
     true
 }
+
+/// We use one of a few different encodings for columns that store "byte-string-y" data. That is,
+/// data that is either a byte string, or data that is composed of many byte strings. At the time
+/// of this writing, all byte-string-y data are stored as hex strings. As part of a system-wide
+/// migration process, these fields will be moved over to a binary representation or a SIP-003
+/// representation to save disk space.
+///
+/// The first byte in a DB-stored byte-string-y column identifies which codec to use, as detailed
+/// below. The absence of one of these bytes means to use the legacy codec (i.e. hex string or
+/// JSON, depending on the struct). The byte values are not ASCII printable, which ensures that
+/// their presence unambiguously identifies which codec to use.
+#[repr(C)]
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub enum ColumnEncoding {
+    /// The following data is a SIP-003 blob
+    SIP003 = 0x00,
+}
+
+/// Conversion from a u8
+impl TryFrom<u8> for ColumnEncoding {
+    type Error = crate::codec::Error;
+
+    fn try_from(val: u8) -> Result<Self, Self::Error> {
+        match val {
+            0x00 => Ok(Self::SIP003),
+            _ => Err(Self::Error::DeserializeError(format!(
+                "Invalid ColumnEncoding {:02x}",
+                val
+            ))),
+        }
+    }
+}
+
+impl ColumnEncoding {
+    /// Convert to u8 value
+    pub fn as_u8(&self) -> u8 {
+        match self {
+            Self::SIP003 => 0x00,
+        }
+    }
+}
+
+impl StacksMessageCodec for ColumnEncoding {
+    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
+        write_next(fd, &self.as_u8())
+    }
+
+    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
+        let byte: u8 = read_next(fd)?;
+        Self::try_from(byte)
+    }
+}
+
+/// This is an alternative to rusqlite's ToSql and FromSql traits which takes an optional encoding.
+/// If the encoding is None, then the implementation should return a hex string's ASCII bytes.
+pub trait SqlEncoded {
+    fn sql_encoded(&self, encoding: Option<ColumnEncoding>) -> Vec<u8>;
+    fn sql_decoded(
+        row: &Row,
+        column_name: &str,
+        encoding: Option<ColumnEncoding>,
+    ) -> Result<Self, CodecError>
+    where
+        Self: Sized;
+
+    #[inline]
+    fn sqlhex(&self) -> Vec<u8> {
+        self.sql_encoded(None)
+    }
+}
diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs
index cbf602cfa1c..6d53316cec9 100644
--- a/stacks-common/src/util/macros.rs
+++ b/stacks-common/src/util/macros.rs
@@ -27,7 +27,7 @@ macro_rules! iterable_enum {
     ($Name:ident { $($Variant:ident,)* }) =>
     {
         pub enum $Name {
-            $($Variant),*,
+            $($Variant),*,
         }
         impl $Name {
             pub const ALL: &'static [$Name] = &[$($Name::$Variant),*];
@@ -731,25 +731,132 @@ macro_rules! fmax {
 }
 
 #[cfg(feature = "rusqlite")]
+#[macro_export]
 macro_rules! impl_byte_array_rusqlite_only {
     ($thing:ident) => {
         impl rusqlite::types::FromSql for $thing {
             fn column_result(
                 value: rusqlite::types::ValueRef,
             ) -> rusqlite::types::FromSqlResult<Self> {
-                let hex_str = value.as_str()?;
+                use $crate::codec::StacksMessageCodec;
+                let byte_slice = value.as_bytes()
+                    .map_err(|e| {
+                        error!("Failed to load column result as bytes: {:?}", &e);
+                        e
+                    })?;
+                let mut cursor = byte_slice;
+
+                // NB: This is a match statement so that if we add more encodings, this won't
+                // compile without a corresponding alteration.
+                match $crate::util::db::ColumnEncoding::consensus_deserialize(&mut cursor) {
+                    Ok($crate::util::db::ColumnEncoding::SIP003) => {
+                        // there's a designated encoding. Honor it.
+                        let inst = $thing::consensus_deserialize(&mut cursor)
+                            .map_err(|e| {
+                                error!("Failed to deserialize column from bytes: {:?}, {:?}", &cursor, &e);
+                                rusqlite::types::FromSqlError::InvalidType
+                            })?;
+                        return Ok(inst);
+                    }
+                    Err(_e) => {
+                        // byte code is not recognized, so this must be a hex string
+                    }
+                }
+
+                // no designated encoding byte, so this must be a hex string.
+                // try to decode it as such (but error out if this is not a valid hex string)
+                let hex_str = str::from_utf8(&byte_slice)
+                    .map_err(|e| {
+                        error!("Failed to interpret byte string as ASCII hex: {:?}", &e);
+                        rusqlite::types::FromSqlError::InvalidType
+                    })?;
+
                 let byte_str = $crate::util::hash::hex_bytes(hex_str)
-                    .map_err(|_e| rusqlite::types::FromSqlError::InvalidType)?;
+                    .map_err(|e| {
+                        error!("Failed to decode hex string {:?}: {:?}", &hex_str, &e);
+                        rusqlite::types::FromSqlError::InvalidType
+                    })?;
+
                 let inst = $thing::from_bytes(&byte_str)
-                    .ok_or(rusqlite::types::FromSqlError::InvalidType)?;
+                    .ok_or_else(|| {
+                        error!("Failed to decode bytes to value: {:?}", &byte_str);
+                        rusqlite::types::FromSqlError::InvalidType
+                    })?;
+
                 Ok(inst)
             }
         }
 
-        impl rusqlite::types::ToSql for $thing {
-            fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput> {
-                let hex_str = self.to_hex();
-                Ok(hex_str.into())
+        impl $crate::util::db::SqlEncoded for $thing {
+            fn sql_encoded(&self, encoding: Option<$crate::util::db::ColumnEncoding>) -> Vec<u8> {
+                use $crate::codec::StacksMessageCodec;
+                match encoding {
+                    None => {
+                        // hex string
+                        let hex_str = self.to_hex();
+                        hex_str.as_bytes().to_vec()
+                    },
+                    Some($crate::util::db::ColumnEncoding::SIP003) => {
+                        // SIP003 byte string
+                        let bytes = self.serialize_to_vec();
+                        let mut ret = vec![0; bytes.len() + 1];
+
+                        // SAFETY: ret has enough bytes allocated
+                        ret[0] = $crate::util::db::ColumnEncoding::SIP003.as_u8();
+                        ret[1..].copy_from_slice(&bytes);
+                        ret
+                    }
+                }
+            }
+
+            fn sql_decoded(row: &rusqlite::Row, column_name: &str, encoding: Option<$crate::util::db::ColumnEncoding>) -> Result<Self, $crate::codec::Error> {
+                use $crate::codec::StacksMessageCodec;
+                match encoding {
+                    None => {
+                        // expect hex string
+                        let hex_bin_str = match row.get_ref(column_name)
+                            .map_err(|e| $crate::codec::Error::DeserializeError(format!("DB error loading hex-encoded column '{column_name}': {e:?}")))?
+                        {
+                            rusqlite::types::ValueRef::Text(bytes) => bytes,
+                            rusqlite::types::ValueRef::Blob(bytes) => bytes,
+                            _ => {
+                                return Err($crate::codec::Error::DeserializeError(format!("DB error reading hex-encoded column '{column_name}': neither Text nor Blob affinity")));
+                            }
+                        };
+
+                        let hex_str = str::from_utf8(hex_bin_str)
+                            .map_err(|e| $crate::codec::Error::DeserializeError(format!("UTF-8 error decoding hex-encoded bytes from '{column_name}': {e:?}")))?;
+
+                        let byte_str = $crate::util::hash::hex_bytes(&hex_str)
+                            .map_err(|e| $crate::codec::Error::DeserializeError(format!("Hex error reading hex-encoded column '{column_name}': {e:?}")))?;
+                        let inst = $thing::from_bytes(&byte_str)
+                            .ok_or_else(|| $crate::codec::Error::DeserializeError(format!("Instantiation error from {} bytes of hex-encoded column '{column_name}'", &byte_str.len())))?;
+                        Ok(inst)
+                    }
+                    Some($crate::util::db::ColumnEncoding::SIP003) => {
+                        // expect a SIP003 byte string, with a 1-byte prefix
+                        let byte_str = match row.get_ref(column_name)
+                            .map_err(|e| $crate::codec::Error::DeserializeError(format!("DB error loading SIP003-encoded column '{column_name}': {e:?}")))?
+                        {
+                            rusqlite::types::ValueRef::Text(bytes) => bytes,
+                            rusqlite::types::ValueRef::Blob(bytes) => bytes,
+                            _ => {
+                                return Err($crate::codec::Error::DeserializeError(format!("DB error reading SIP003-encoded column '{column_name}': neither Text nor Blob affinity")));
+                            }
+                        };
+
+                        let Some(encoding_byte) = byte_str.get(0) else {
+                            return Err($crate::codec::Error::DeserializeError(format!("Zero-length bytestring for SIP003-encoded column '{column_name}'")));
+                        };
+                        if *encoding_byte != $crate::util::db::ColumnEncoding::SIP003.as_u8() {
+                            return Err($crate::codec::Error::DeserializeError(format!("Column '{column_name}' is not SIP003-encoded; got encoding-byte value {:02x}", encoding_byte)));
+                        }
+
+                        // SAFETY: byte_str.len() >= 1 due to above checks
+                        let inst = $thing::consensus_deserialize(&mut &byte_str[1..])?;
+                        Ok(inst)
+                    }
+                }
             }
         }
     };
diff --git a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs
index 506a75d2d67..94e9831fadb 100644
--- a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs
+++ b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs
@@ -36,8 +36,8 @@ use stacks::burnchains::{
 };
 use stacks::chainstate::burn::db::sortdb::SortitionDB;
 use stacks::chainstate::burn::operations::{
-    BlockstackOperationType, DelegateStxOp, LeaderBlockCommitOp, LeaderKeyRegisterOp, PreStxOp,
-    StackStxOp, TransferStxOp, VoteForAggregateKeyOp,
+    BlockstackOperationType, BurnOpMemo, DelegateStxOp, LeaderBlockCommitOp, LeaderKeyRegisterOp,
+    PreStxOp, StackStxOp, TransferStxOp, VoteForAggregateKeyOp,
 };
 #[cfg(test)]
 use stacks::chainstate::burn::Opcodes;
@@ -2559,7 +2559,7 @@ mod tests {
             parent_vtxindex: 1,    // 0x0001
             key_block_ptr: 1432,   // 0x00000598
             key_vtxindex: 1,       // 0x0001
-            memo: vec![11],        // 0x5a >> 3
+            memo: vec![11].into(), // 0x5a >> 3
 
             burn_fee: 110_000, //relevant for fee calculation when sending the tx
             input: (Txid([0x00; 32]), 0),
@@ -2699,7 +2699,7 @@ mod tests {
             public_key: VRFPublicKey::from_private(
                 &VRFPrivateKey::from_bytes(&[0u8; 32]).unwrap(),
             ),
-            memo: vec![],
+            memo: vec![].into(),
             txid: Txid([3u8; 32]),
             vtxindex: 0,
             block_height: 1,
@@ -2845,7 +2845,7 @@ mod tests {
             parent_vtxindex: 1,    // 0x0001
             key_block_ptr: 1432,   // 0x00000598
             key_vtxindex: 1,       // 0x0001
-            memo: vec![11],        // 0x5a >> 3
+            memo: vec![11].into(), // 0x5a >> 3
 
             burn_fee: 0,
             input: (Txid([0x00; 32]), 0),
diff --git a/stacks-node/src/nakamoto_node/relayer.rs b/stacks-node/src/nakamoto_node/relayer.rs
index 59691d39fa8..c09b167d9b2 100644
--- a/stacks-node/src/nakamoto_node/relayer.rs
+++ b/stacks-node/src/nakamoto_node/relayer.rs
@@ -31,7 +31,7 @@ use stacks::chainstate::burn::operations::leader_block_commit::{
     RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS,
 };
 use stacks::chainstate::burn::operations::{
-    BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp,
+    BlockstackOperationType, BurnOpMemo, LeaderBlockCommitOp, LeaderKeyRegisterOp,
 };
 use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash};
 use stacks::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients;
@@ -988,7 +988,7 @@ impl RelayerThread {
     ) -> BlockstackOperationType {
         BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp {
             public_key: vrf_public_key,
-            memo: miner_pkh.as_bytes().to_vec(),
+            memo: miner_pkh.as_bytes().to_vec().into(),
             consensus_hash: consensus_hash.clone(),
             vtxindex: 0,
             txid: Txid([0u8; 32]),
@@ -1202,7 +1202,7 @@ impl RelayerThread {
             key_block_ptr: u32::try_from(key.block_height)
                 .expect("FATAL: burn block height exceeded u32"),
u32"), key_vtxindex: u16::try_from(key.op_vtxindex).expect("FATAL: vtxindex exceeded u16"), - memo: vec![STACKS_EPOCH_LATEST_MARKER], + memo: vec![STACKS_EPOCH_LATEST_MARKER].into(), new_seed: VRFSeed::from_proof(&tip_vrf_proof), parent_block_ptr: u32::try_from(commit_parent_block_burn_height) .expect("FATAL: burn block height exceeded u32"), @@ -2345,7 +2345,7 @@ pub mod test { "1da75863a7e1ef86f0f550d92b1f77dc60af23694b884b2816b703137ff94e71", ) .unwrap(), - memo: pubkey_hash.as_ref().to_vec(), + memo: pubkey_hash.as_ref().to_vec().into(), }; let path = "/tmp/vrf_key.json"; save_activated_vrf_key(path, &key); @@ -2369,7 +2369,7 @@ pub mod test { "1da75863a7e1ef86f0f550d92b1f77dc60af23694b884b2816b703137ff94e71", ) .unwrap(), - memo: pubkey_hash.as_ref().to_vec(), + memo: pubkey_hash.as_ref().to_vec().into(), }; let path = "/tmp/vrf_key.json"; save_activated_vrf_key(path, &key); diff --git a/stacks-node/src/neon_node.rs b/stacks-node/src/neon_node.rs index a21fb7a5ffe..55c7553adcc 100644 --- a/stacks-node/src/neon_node.rs +++ b/stacks-node/src/neon_node.rs @@ -168,7 +168,7 @@ use stacks::chainstate::burn::operations::leader_block_commit::{ RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS, }; use stacks::chainstate::burn::operations::{ - BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, + BlockstackOperationType, BurnOpMemo, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; @@ -1085,7 +1085,7 @@ impl BlockMinerThread { apparent_sender: sender, key_block_ptr: key.block_height as u32, key_vtxindex: key.op_vtxindex as u16, - memo: vec![STACKS_EPOCH_3_0_MARKER], + memo: vec![STACKS_EPOCH_3_0_MARKER].into(), new_seed: vrf_seed, parent_block_ptr, parent_vtxindex, @@ -4988,7 +4988,7 @@ impl StacksNode { block_height: 1, op_vtxindex: 1, vrf_public_key, - memo: vec![], + memo: vec![].into(), }) } else { // Warn the user that they need to set up a miner key diff --git a/stacks-node/src/node.rs b/stacks-node/src/node.rs index 1711dc297a7..51aa175c425 100644 --- a/stacks-node/src/node.rs +++ b/stacks-node/src/node.rs @@ -12,7 +12,7 @@ use stacks::chainstate::burn::operations::leader_block_commit::{ RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS, }; use stacks::chainstate::burn::operations::{ - BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, + BlockstackOperationType, BurnOpMemo, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::stacks::address::PoxAddress; @@ -987,7 +987,7 @@ impl Node { BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, - memo: vec![], + memo: vec![].into(), consensus_hash, vtxindex: 1, txid, @@ -1042,7 +1042,7 @@ impl Node { apparent_sender: self.keychain.get_burnchain_signer(), key_block_ptr: key.block_height as u32, key_vtxindex: key.op_vtxindex as u16, - memo: vec![STACKS_EPOCH_2_1_MARKER], + memo: vec![STACKS_EPOCH_2_1_MARKER].into(), new_seed: vrf_seed, parent_block_ptr, parent_vtxindex, diff --git a/stacks-node/src/tests/epoch_205.rs b/stacks-node/src/tests/epoch_205.rs index 32d9bf793ab..1aa36ac91d8 100644 --- a/stacks-node/src/tests/epoch_205.rs +++ b/stacks-node/src/tests/epoch_205.rs @@ -6,7 +6,9 @@ use clarity::vm::types::PrincipalData; use clarity::vm::ContractName; use stacks::burnchains::{Burnchain, Txid}; use stacks::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; 
-use stacks::chainstate::burn::operations::{BlockstackOperationType, LeaderBlockCommitOp}; +use stacks::chainstate::burn::operations::{ + BlockstackOperationType, BurnOpMemo, LeaderBlockCommitOp, +}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{ @@ -614,7 +616,7 @@ fn transition_empty_blocks() { apparent_sender: keychain.get_burnchain_signer(), key_block_ptr, key_vtxindex, - memo: vec![0], // bad epoch marker + memo: vec![0].into(), // bad epoch marker new_seed: VRFSeed([0x11; 32]), parent_block_ptr: 0, parent_vtxindex: 0, diff --git a/stacks-node/src/tests/epoch_21.rs b/stacks-node/src/tests/epoch_21.rs index 16e1dbfc389..dc1541b0a2e 100644 --- a/stacks-node/src/tests/epoch_21.rs +++ b/stacks-node/src/tests/epoch_21.rs @@ -14,7 +14,7 @@ use stacks::chainstate::burn::operations::leader_block_commit::{ BURN_BLOCK_MINED_AT_MODULUS, OUTPUTS_PER_COMMIT, }; use stacks::chainstate::burn::operations::{ - BlockstackOperationType, LeaderBlockCommitOp, PreStxOp, TransferStxOp, + BlockstackOperationType, BurnOpMemo, LeaderBlockCommitOp, PreStxOp, TransferStxOp, }; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::stacks::address::PoxAddress; @@ -688,7 +688,7 @@ fn transition_fixes_bitcoin_rigidity() { sender: spender_stx_addr.clone(), recipient: recipient_addr.clone(), transfered_ustx: 100_000, - memo: vec![], + memo: vec![].into(), // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -838,7 +838,7 @@ fn transition_fixes_bitcoin_rigidity() { sender: spender_stx_addr.clone(), recipient: recipient_addr.clone(), transfered_ustx: 100_000, - memo: vec![], + memo: vec![].into(), // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -911,7 +911,7 @@ fn transition_fixes_bitcoin_rigidity() { sender: spender_2_stx_addr.clone(), recipient: recipient_addr.clone(), transfered_ustx: 100_000, - memo: vec![], + memo: vec![].into(), // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -977,7 +977,7 @@ fn transition_fixes_bitcoin_rigidity() { sender: spender_stx_addr.clone(), recipient: recipient_addr.clone(), transfered_ustx: 123, - memo: vec![], + memo: vec![].into(), // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -1880,7 +1880,7 @@ fn transition_empty_blocks() { apparent_sender: keychain.get_burnchain_signer(), key_block_ptr, key_vtxindex, - memo: vec![0], // bad epoch marker + memo: vec![0].into(), // bad epoch marker new_seed: VRFSeed([0x11; 32]), parent_block_ptr: 0, parent_vtxindex: 0, diff --git a/stacks-node/src/tests/nakamoto_integrations.rs b/stacks-node/src/tests/nakamoto_integrations.rs index 6450bead843..667481ef14f 100644 --- a/stacks-node/src/tests/nakamoto_integrations.rs +++ b/stacks-node/src/tests/nakamoto_integrations.rs @@ -42,7 +42,7 @@ use serial_test::serial; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ - BlockstackOperationType, DelegateStxOp, PreStxOp, StackStxOp, TransferStxOp, + BlockstackOperationType, BurnOpMemo, DelegateStxOp, PreStxOp, StackStxOp, TransferStxOp, VoteForAggregateKeyOp, }; use stacks::chainstate::coordinator::comm::CoordinatorChannels; @@ -107,6 +107,7 @@ use stacks_common::types::{ set_test_coinbase_schedule, set_test_sip_031_emission_schedule, CoinbaseInterval, SIP031EmissionInterval, StacksPublicKeyBuffer, }; +use stacks_common::util::db::SqlEncoded; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; 
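// ---------------------------------------------------------------------------
// Illustrative aside (not part of this diff): every memo edit in the
// surrounding test hunks follows one mechanical pattern. The new BurnOpMemo
// type evidently wraps the old Vec<u8> memo field and provides
// From<Vec<u8>> -- an inference from these call sites, not a quoted
// definition -- so existing literals only need a trailing `.into()`:
fn _burn_op_memo_sketch() {
    use stacks::chainstate::burn::operations::BurnOpMemo;

    // Replaces the old `memo: vec![0x80]` field initializer.
    let epoch_marker: BurnOpMemo = vec![0x80].into();
    // Empty memos convert the same way, as in `memo: vec![].into()`.
    let empty: BurnOpMemo = vec![].into();
    let _ = (epoch_marker, empty);
}
// ---------------------------------------------------------------------------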
diff --git a/stacks-node/src/tests/nakamoto_integrations.rs b/stacks-node/src/tests/nakamoto_integrations.rs
index 6450bead843..667481ef14f 100644
--- a/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/stacks-node/src/tests/nakamoto_integrations.rs
@@ -42,7 +42,7 @@ use serial_test::serial;
 use stacks::burnchains::{MagicBytes, Txid};
 use stacks::chainstate::burn::db::sortdb::SortitionDB;
 use stacks::chainstate::burn::operations::{
-    BlockstackOperationType, DelegateStxOp, PreStxOp, StackStxOp, TransferStxOp,
+    BlockstackOperationType, BurnOpMemo, DelegateStxOp, PreStxOp, StackStxOp, TransferStxOp,
     VoteForAggregateKeyOp,
 };
 use stacks::chainstate::coordinator::comm::CoordinatorChannels;
@@ -107,6 +107,7 @@ use stacks_common::types::{
     set_test_coinbase_schedule, set_test_sip_031_emission_schedule, CoinbaseInterval,
     SIP031EmissionInterval, StacksPublicKeyBuffer,
 };
+use stacks_common::util::db::SqlEncoded;
 use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum};
 use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey};
 use stacks_common::util::{get_epoch_time_secs, sleep_ms};
@@ -4994,7 +4995,7 @@ fn burn_ops_integration_test() {
         sender: stacker_addr_1.clone(),
         recipient: stacker_addr_2.clone(),
         transfered_ustx: 10000,
-        memo: vec![],
+        memo: vec![].into(),
         txid: Txid([0u8; 32]),
         vtxindex: 0,
         block_height: 0,
@@ -10444,7 +10445,7 @@ fn sip029_coinbase_change() {
     }
     let coinbase = {
         let sql = "SELECT coinbase FROM payments WHERE consensus_hash = ?1";
-        let args = rusqlite::params![&sn.consensus_hash];
+        let args = rusqlite::params![&sn.consensus_hash.sqlhex()];
         let Some(coinbase) = chainstate
             .db()
             .query_row(sql, args, |r| {
diff --git a/stacks-node/src/tests/neon_integrations.rs b/stacks-node/src/tests/neon_integrations.rs
index b11bf78af94..a593edca778 100644
--- a/stacks-node/src/tests/neon_integrations.rs
+++ b/stacks-node/src/tests/neon_integrations.rs
@@ -21,7 +21,7 @@ use stacks::burnchains::db::BurnchainDB;
 use stacks::burnchains::{Address, Burnchain, PoxConstants, Txid};
 use stacks::chainstate::burn::db::sortdb::SortitionDB;
 use stacks::chainstate::burn::operations::{
-    BlockstackOperationType, DelegateStxOp, PreStxOp, StackStxOp, TransferStxOp,
+    BlockstackOperationType, BurnOpMemo, DelegateStxOp, PreStxOp, StackStxOp, TransferStxOp,
     VoteForAggregateKeyOp,
 };
 use stacks::chainstate::burn::ConsensusHash;
@@ -76,6 +76,7 @@ use stacks_common::types::chainstate::{
     BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId,
 };
 use stacks_common::types::StacksPublicKeyBuffer;
+use stacks_common::util::db::SqlEncoded;
 use stacks_common::util::hash::{bytes_to_hex, hex_bytes, to_hex, Hash160};
 use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
 use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, sleep_ms};
@@ -2058,7 +2059,7 @@ fn stx_transfer_btc_integration_test() {
         sender: spender_stx_addr,
         recipient: recipient_addr.clone(),
         transfered_ustx: 100_000,
-        memo: vec![],
+        memo: vec![].into(),
         // to be filled in
         txid: Txid([0u8; 32]),
         vtxindex: 0,
@@ -2128,7 +2129,7 @@ fn stx_transfer_btc_integration_test() {
         sender: spender_2_stx_addr,
         recipient: recipient_addr.clone(),
         transfered_ustx: 100_000,
-        memo: vec![],
+        memo: vec![].into(),
         // to be filled in
         txid: Txid([0u8; 32]),
         vtxindex: 0,
@@ -7084,7 +7085,7 @@ fn atlas_stress_integration_test() {
                 let indexes = query_rows::<u64, _>(
                     &atlasdb.conn,
                     "SELECT attachment_index FROM attachment_instances WHERE index_block_hash = ?1",
-                    &[ibh],
+                    params![ibh.sqlhex()],
                 )
                 .unwrap();
                 if !indexes.is_empty() {
@@ -7095,7 +7096,7 @@ fn atlas_stress_integration_test() {
                 let mut hashes = query_row_columns::<Hash160, _>(
                     &atlasdb.conn,
                     "SELECT content_hash FROM attachment_instances WHERE index_block_hash = ?1 AND attachment_index = ?2",
-                    params![ibh, u64_to_sql(*index).unwrap()],
+                    params![ibh.sqlhex(), u64_to_sql(*index).unwrap()],
                     "content_hash")
                 .unwrap();
                 if !hashes.is_empty() {
diff --git a/stacks-node/src/tests/signer/v0.rs b/stacks-node/src/tests/signer/v0.rs
index e329b5b3cfb..53288613e5d 100644
--- a/stacks-node/src/tests/signer/v0.rs
+++ b/stacks-node/src/tests/signer/v0.rs
@@ -39,7 +39,7 @@ use stacks::address::AddressHashMode;
 use stacks::burnchains::Txid;
 use stacks::chainstate::burn::db::sortdb::SortitionDB;
 use stacks::chainstate::burn::operations::{
-    BlockstackOperationType, LeaderBlockCommitOp, PreStxOp, TransferStxOp,
+    BlockstackOperationType, BurnOpMemo, LeaderBlockCommitOp, PreStxOp, TransferStxOp,
 };
 use stacks::chainstate::burn::ConsensusHash;
 use stacks::chainstate::coordinator::comm::CoordinatorChannels;
@@ -83,6 +83,7 @@ use stacks::util_lib::signed_structured_data::pox4::{
 };
 use stacks_common::bitvec::BitVec;
 use stacks_common::types::chainstate::TrieHash;
+use stacks_common::util::db::SqlEncoded;
 use stacks_common::util::sleep_ms;
 use stacks_signer::chainstate::v1::SortitionsView;
 use stacks_signer::chainstate::ProposalEvalConfig;
@@ -3794,7 +3795,7 @@ fn tx_replay_btc_on_stx_invalidation() {
         sender: sender_addr,
         recipient: recipient_addr.clone(),
         transfered_ustx: recipient_balance.into(),
-        memo: vec![],
+        memo: vec![].into(),
         txid: Txid([0u8; 32]),
         vtxindex: 0,
         block_height: 0,
@@ -12229,7 +12230,7 @@ fn no_reorg_due_to_successive_block_validation_ok() {
     let mut rows = stmt
         .query(rusqlite::params![
-            block_n_1_prime_signature_hash,
+            block_n_1_prime_signature_hash.sqlhex(),
             config.stacks_address
         ])
         .unwrap();
diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs
index b0e0cc03bd1..b60a6f3f1d4 100644
--- a/stacks-signer/src/signerdb.rs
+++ b/stacks-signer/src/signerdb.rs
@@ -37,6 +37,7 @@ use rusqlite::{params, Connection, Error as SqliteError, OpenFlags, OptionalExtension};
 use serde::{Deserialize, Serialize};
 use stacks_common::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec};
 use stacks_common::types::chainstate::ConsensusHash;
+use stacks_common::util::db::SqlEncoded;
 use stacks_common::util::get_epoch_time_secs;
 use stacks_common::util::hash::Sha512Trunc256Sum;
 use stacks_common::util::secp256k1::MessageSignature;
@@ -1123,7 +1124,7 @@ impl SignerDb {
     pub fn has_signed_block_in_tenure(&self, tenure: &ConsensusHash) -> Result<bool, DBError> {
         let query =
             "SELECT block_info FROM blocks WHERE consensus_hash = ? AND signed_over = 1 LIMIT 1";
-        let result: Option<String> = query_row(&self.db, query, [tenure])?;
+        let result: Option<String> = query_row(&self.db, query, [tenure.sqlhex()])?;
 
         Ok(result.is_some())
     }
@@ -1134,7 +1135,7 @@ impl SignerDb {
         tenure: &ConsensusHash,
     ) -> Result<Option<BlockInfo>, DBError> {
         let query = "SELECT block_info FROM blocks WHERE consensus_hash = ? AND signed_over = 1 ORDER BY stacks_height ASC LIMIT 1";
-        let result: Option<String> = query_row(&self.db, query, [tenure])?;
+        let result: Option<String> = query_row(&self.db, query, [tenure.sqlhex()])?;
 
         try_deserialize(result)
     }
@@ -1145,7 +1146,7 @@ impl SignerDb {
         tenure: &ConsensusHash,
     ) -> Result<u64, DBError> {
         let query = "SELECT COALESCE((MAX(stacks_height) - MIN(stacks_height) + 1), 0) AS block_count FROM blocks WHERE consensus_hash = ?1 AND state = ?2";
-        let args = params![tenure, &BlockState::GloballyAccepted.to_string()];
+        let args = params![tenure.sqlhex(), &BlockState::GloballyAccepted.to_string()];
         let block_count_opt: Option<u64> = query_row(&self.db, query, args)?;
         match block_count_opt {
             Some(block_count) => Ok(block_count),
@@ -1160,7 +1161,7 @@ impl SignerDb {
     ) -> Result<Option<BlockInfo>, DBError> {
         let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND state IN (?2, ?3) ORDER BY stacks_height DESC LIMIT 1";
         let args = params![
-            tenure,
+            tenure.sqlhex(),
             &BlockState::GloballyAccepted.to_string(),
             &BlockState::LocallyAccepted.to_string()
         ];
@@ -1175,7 +1176,7 @@ impl SignerDb {
         tenure: &ConsensusHash,
     ) -> Result<Option<BlockInfo>, DBError> {
         let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND state = ?2 ORDER BY stacks_height DESC LIMIT 1";
-        let args = params![tenure, &BlockState::GloballyAccepted.to_string()];
+        let args = params![tenure.sqlhex(), &BlockState::GloballyAccepted.to_string()];
 
         let result: Option<String> = query_row(&self.db, query, args)?;
         try_deserialize(result)
@@ -1195,7 +1196,7 @@ impl SignerDb {
             ORDER BY burn_block_height DESC
             LIMIT 1;
         "#;
-        let args = params![tenure, &BlockState::GloballyAccepted.to_string()];
+        let args = params![tenure.sqlhex(), &BlockState::GloballyAccepted.to_string()];
         let result: Option<u64> = query_row(&self.db, query, args)?;
         Ok(result.map(|signed_self| UNIX_EPOCH + Duration::from_secs(signed_self)))
     }
@@ -1232,11 +1233,11 @@ impl SignerDb {
         self.db.execute(
             "INSERT OR REPLACE INTO burn_blocks (block_hash, consensus_hash, block_height, received_time, parent_burn_block_hash) VALUES (?1, ?2, ?3, ?4, ?5)",
             params![
-                burn_hash,
-                consensus_hash,
+                burn_hash.sqlhex(),
+                consensus_hash.sqlhex(),
                 u64_to_sql(burn_height)?,
                 u64_to_sql(received_ts)?,
-                parent_burn_block_hash,
+                parent_burn_block_hash.sqlhex(),
             ],
         )?;
         Ok(())
     }
@@ -1249,7 +1250,9 @@ impl SignerDb {
         burn_hash: &BurnchainHeaderHash,
     ) -> Result<Option<u64>, DBError> {
         let query = "SELECT received_time FROM burn_blocks WHERE block_hash = ? LIMIT 1";
-        let Some(receive_time_i64) = query_row::<i64, _>(&self.db, query, &[burn_hash])? else {
+        let Some(receive_time_i64) =
+            query_row::<i64, _>(&self.db, query, params![burn_hash.sqlhex()])?
+        else {
             return Ok(None);
         };
         let receive_time = u64::try_from(receive_time_i64).map_err(|e| {
@@ -1266,7 +1269,8 @@ impl SignerDb {
         ch: &ConsensusHash,
     ) -> Result<Option<u64>, DBError> {
         let query = "SELECT received_time FROM burn_blocks WHERE consensus_hash = ? LIMIT 1";
-        let Some(receive_time_i64) = query_row::<i64, _>(&self.db, query, &[ch])? else {
+        let Some(receive_time_i64) = query_row::<i64, _>(&self.db, query, params![ch.sqlhex()])?
+        else {
             return Ok(None);
         };
         let receive_time = u64::try_from(receive_time_i64).map_err(|e| {
@@ -1283,7 +1287,7 @@ impl SignerDb {
     ) -> Result<BurnBlockInfo, DBError> {
         let query = "SELECT block_hash, block_height, consensus_hash, parent_burn_block_hash FROM burn_blocks WHERE block_hash = ?";
-        let args = params![burn_block_hash];
+        let args = params![burn_block_hash.sqlhex()];
 
         query_row(&self.db, query, args)?.ok_or(DBError::NotFoundError)
     }
@@ -1291,7 +1295,7 @@ impl SignerDb {
     /// Lookup the burn block for a given consensus hash.
     pub fn get_burn_block_by_ch(&self, ch: &ConsensusHash) -> Result<BurnBlockInfo, DBError> {
         let query = "SELECT block_hash, block_height, consensus_hash, parent_burn_block_hash FROM burn_blocks WHERE consensus_hash = ?";
-        let args = params![ch];
+        let args = params![ch.sqlhex()];
 
         query_row(&self.db, query, args)?.ok_or(DBError::NotFoundError)
     }
@@ -1326,7 +1330,7 @@ impl SignerDb {
             &block_info.signed_over,
             &broadcasted,
             u64_to_sql(block_info.block.header.chain_length)?,
-            block_info.block.header.consensus_hash.to_hex(),
+            block_info.block.header.consensus_hash.sqlhex(),
             &block_info.valid,
             &block_info.state.to_string(),
             &block_info.signed_group,
             &block_info.signed_self,
@@ -1361,13 +1365,13 @@ impl SignerDb {
     ) -> Result<(), DBError> {
         // Remove any block rejection entry for this signer and block hash
         let del_qry = "DELETE FROM block_rejection_signer_addrs WHERE signer_signature_hash = ?1 AND signer_addr = ?2";
-        let del_args = params![block_sighash, signer_addr.to_string()];
+        let del_args = params![block_sighash.sqlhex(), signer_addr.to_string()];
         self.db.execute(del_qry, del_args)?;
 
         // Insert the block signature
         let qry = "INSERT OR IGNORE INTO block_signatures (signer_signature_hash, signer_addr, signature) VALUES (?1, ?2, ?3);";
         let args = params![
-            block_sighash,
+            block_sighash.sqlhex(),
             signer_addr.to_string(),
             serde_json::to_string(signature).map_err(DBError::SerializationError)?
         ];
@@ -1396,7 +1400,7 @@ impl SignerDb {
         block_sighash: &Sha512Trunc256Sum,
     ) -> Result<Vec<MessageSignature>, DBError> {
         let qry = "SELECT signature FROM block_signatures WHERE signer_signature_hash = ?1";
-        let args = params![block_sighash];
+        let args = params![block_sighash.sqlhex()];
         let sigs_txt: Vec<String> = query_rows(&self.db, qry, args)?;
         sigs_txt
             .into_iter()
@@ -1413,7 +1417,7 @@ impl SignerDb {
     ) -> Result<bool, DBError> {
         // If this signer/block already has a signature, do not allow a rejection
        let sig_qry = "SELECT EXISTS(SELECT 1 FROM block_signatures WHERE signer_signature_hash = ?1 AND signer_addr = ?2)";
-        let sig_args = params![block_sighash, addr.to_string()];
+        let sig_args = params![block_sighash.sqlhex(), addr.to_string()];
         let exists = self.db.query_row(sig_qry, sig_args, |row| row.get(0))?;
         if exists {
             warn!("Cannot add block rejection because a signature already exists.";
@@ -1426,7 +1430,7 @@ impl SignerDb {
 
         // Check if a row exists for this sighash/signer combo
         let qry = "SELECT reject_code FROM block_rejection_signer_addrs WHERE signer_signature_hash = ?1 AND signer_addr = ?2 LIMIT 1";
-        let args = params![block_sighash, addr.to_string()];
+        let args = params![block_sighash.sqlhex(), addr.to_string()];
         let existing_code: Option<u64> =
             self.db.query_row(qry, args, |row| row.get(0)).optional()?;
@@ -1445,7 +1449,7 @@ impl SignerDb {
             Some(_) => {
                 // Row exists but with different reject_reason, update it
                 let update_qry = "UPDATE block_rejection_signer_addrs SET reject_code = ?1 WHERE signer_signature_hash = ?2 AND signer_addr = ?3";
-                let update_args = params![reject_code, block_sighash, addr.to_string()];
+                let update_args = params![reject_code, block_sighash.sqlhex(), addr.to_string()];
                 self.db.execute(update_qry, update_args)?;
                 debug!("Updated block rejection reason.";
                     "signer_signature_hash" => %block_sighash,
@@ -1457,7 +1461,7 @@ impl SignerDb {
             None => {
                 // Row does not exist, insert it
                 let insert_qry = "INSERT INTO block_rejection_signer_addrs (signer_signature_hash, signer_addr, reject_code) VALUES (?1, ?2, ?3)";
-                let insert_args = params![block_sighash, addr.to_string(), reject_code];
+                let insert_args = params![block_sighash.sqlhex(), addr.to_string(), reject_code];
                 self.db.execute(insert_qry, insert_args)?;
                 debug!("Inserted block rejection.";
                     "signer_signature_hash" => %block_sighash,
@@ -1476,7 +1480,7 @@ impl SignerDb {
     ) -> Result<Vec<(StacksAddress, u64)>, DBError> {
         let qry = "SELECT signer_addr, reject_code FROM block_rejection_signer_addrs WHERE signer_signature_hash = ?1";
-        let args = params![block_sighash];
+        let args = params![block_sighash.sqlhex()];
 
         let mut stmt = self.db.prepare(qry)?;
         let rows = stmt.query_map(args, |row| {
@@ -1511,7 +1515,7 @@ impl SignerDb {
         ts: u64,
     ) -> Result<(), DBError> {
         let qry = "UPDATE blocks SET broadcasted = ?1 WHERE signer_signature_hash = ?2";
-        let args = params![u64_to_sql(ts)?, block_sighash];
+        let args = params![u64_to_sql(ts)?, block_sighash.sqlhex()];
 
         debug!("Marking block {} as broadcasted at {}", block_sighash, ts);
         self.db.execute(qry, args)?;
@@ -1525,7 +1529,7 @@ impl SignerDb {
     ) -> Result<Option<u64>, DBError> {
         let qry = "SELECT IFNULL(broadcasted,0) AS broadcasted FROM blocks WHERE signer_signature_hash = ?";
-        let args = params![block_sighash];
+        let args = params![block_sighash.sqlhex()];
 
         let Some(broadcasted): Option<u64> = query_row(&self.db, qry, args)? else {
             return Ok(None);
         };
@@ -1582,7 +1586,7 @@ impl SignerDb {
     /// Return the start time (epoch time in seconds) and the processing time in milliseconds of the tenure (identified by consensus_hash).
     fn get_tenure_times(&self, tenure: &ConsensusHash) -> Result<(u64, u64), DBError> {
         let query = "SELECT tenure_change, proposed_time, validation_time_ms FROM blocks WHERE consensus_hash = ?1 AND state = ?2 ORDER BY stacks_height DESC";
-        let args = params![tenure, BlockState::GloballyAccepted.to_string()];
+        let args = params![tenure.sqlhex(), BlockState::GloballyAccepted.to_string()];
         let mut stmt = self.db.prepare(query)?;
         let rows = stmt.query_map(args, |row| {
             let tenure_change_block: bool = row.get(0)?;
@@ -1668,7 +1672,7 @@ impl SignerDb {
         last_activity_time: u64,
     ) -> Result<(), DBError> {
         debug!("Updating last activity for tenure"; "consensus_hash" => %tenure, "last_activity_time" => last_activity_time);
-        self.db.execute("INSERT OR REPLACE INTO tenure_activity (consensus_hash, last_activity_time) VALUES (?1, ?2)", params![tenure, u64_to_sql(last_activity_time)?])?;
+        self.db.execute("INSERT OR REPLACE INTO tenure_activity (consensus_hash, last_activity_time) VALUES (?1, ?2)", params![tenure.sqlhex(), u64_to_sql(last_activity_time)?])?;
         Ok(())
     }
@@ -1676,7 +1680,9 @@ impl SignerDb {
     pub fn get_last_activity_time(&self, tenure: &ConsensusHash) -> Result<Option<u64>, DBError> {
         let query =
             "SELECT last_activity_time FROM tenure_activity WHERE consensus_hash = ? LIMIT 1";
-        let Some(last_activity_time_i64) = query_row::<i64, _>(&self.db, query, &[tenure])? else {
+        let Some(last_activity_time_i64) =
+            query_row::<i64, _>(&self.db, query, params![tenure.sqlhex()])?
+        else {
             return Ok(None);
         };
         let last_activity_time = u64::try_from(last_activity_time_i64).map_err(|e| {
@@ -1721,7 +1727,7 @@ impl SignerDb {
                 VALUES (?1, ?2, ?3)",
             params![
                 address.to_string(),
-                burn_block_consensus_hash,
+                burn_block_consensus_hash.sqlhex(),
                 u64_to_sql(received_ts)?,
             ],
         )?;
@@ -1819,7 +1825,7 @@ impl SignerDb {
         "#;
 
         let mut stmt = self.db.prepare(query)?;
-        let rows = stmt.query_map(params![ch], |row| {
+        let rows = stmt.query_map(params![ch.sqlhex()], |row| {
             let signer_addr: String = row.get(0)?;
             let received_time: i64 = row.get(1)?;
             Ok((signer_addr, received_time))
@@ -1861,7 +1867,7 @@ impl SignerDb {
         address: &StacksAddress,
     ) -> Result<(), DBError> {
         let qry = "INSERT OR REPLACE INTO block_pre_commits (signer_signature_hash, signer_addr) VALUES (?1, ?2);";
-        let args = params![block_sighash, address.to_string()];
+        let args = params![block_sighash.sqlhex(), address.to_string()];
 
         debug!("Inserting block pre-commit.";
             "signer_signature_hash" => %block_sighash,
@@ -1886,7 +1892,7 @@ impl SignerDb {
             .db
             .query_row(
                 qry_check,
-                params![block_sighash, address.to_string()],
+                params![block_sighash.sqlhex(), address.to_string()],
                 |row| row.get(0),
             )
             .optional()?;
@@ -1900,7 +1906,7 @@ impl SignerDb {
         block_sighash: &Sha512Trunc256Sum,
     ) -> Result<Vec<StacksAddress>, DBError> {
         let qry = "SELECT signer_addr FROM block_pre_commits WHERE signer_signature_hash = ?1";
-        let args = params![block_sighash];
+        let args = params![block_sighash.sqlhex()];
         let addrs_txt: Vec<String> = query_rows(&self.db, qry, args)?;
 
         let res: Result<Vec<StacksAddress>, _> = addrs_txt
@@ -1977,7 +1983,7 @@ impl SignerDb {
         sighash: &Sha512Trunc256Sum,
     ) -> Result<bool, DBError> {
         let qry = "SELECT signer_signature_hash FROM block_validations_pending WHERE signer_signature_hash = ?1";
-        let args = params![sighash.to_string()];
+        let args = params![sighash.sqlhex()];
         let sighash_opt: Option<String> = query_row(&self.db, qry, args)?;
         Ok(sighash_opt.is_some())
     }
@@ -3297,7 +3303,7 @@ pub mod tests {
         conn.execute(
             "INSERT OR REPLACE INTO burn_blocks (block_hash, block_height, received_time) VALUES (?1, ?2, ?3)",
             params![
-                burn_hash,
+                burn_hash.sqlhex(),
                u64_to_sql(burn_height.into()).unwrap(),
                u64_to_sql(received_ts + i as u64).unwrap(), // Ensure increasing received_time
            ]
        )
        .unwrap();
@@ -3306,8 +3312,8 @@ pub mod tests {
        conn.execute(
            "INSERT OR REPLACE INTO burn_blocks (block_hash, consensus_hash, block_height, received_time) VALUES (?1, ?2, ?3, ?4)",
            params![
-                burn_hash,
-                consensus_hash,
+                burn_hash.sqlhex(),
+                consensus_hash.sqlhex(),
                u64_to_sql(burn_height.into()).unwrap(),
                u64_to_sql(received_ts + i as u64).unwrap(), // Ensure increasing received_time
            ]
diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs
index c46baf007c0..f8a1583f23d 100644
--- a/stackslib/src/burnchains/bitcoin/spv.rs
+++ b/stackslib/src/burnchains/bitcoin/spv.rs
@@ -27,6 +27,7 @@ use stacks_common::deps_common::bitcoin::network::serialize::BitcoinHash;
 use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash;
 use stacks_common::types::chainstate::BurnchainHeaderHash;
 use stacks_common::types::sqlite::NO_PARAMS;
+use stacks_common::util::db::SqlEncoded;
 use stacks_common::util::get_epoch_time_secs;
 use stacks_common::util::uint::Uint256;
 
@@ -685,7 +686,7 @@ impl SpvClient {
         query_row(
             &self.headers_db,
             "SELECT height FROM headers WHERE hash = ?1",
-            &[burn_header_hash],
+            params![burn_header_hash.sqlhex()],
         )
         .map_err(|e| e.into())
     }
@@ -754,7 +755,7 @@ impl SpvClient {
             header.bits,
             header.nonce,
             u64_to_sql(height)?,
-            BurnchainHeaderHash::from_bitcoin_hash(&header.bitcoin_hash()),
+            BurnchainHeaderHash::from_bitcoin_hash(&header.bitcoin_hash()).sqlhex(),
         ];
 
         tx.execute(sql, args)
diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs
index 99e835855ce..7b570dabc69 100644
--- a/stackslib/src/burnchains/db.rs
+++ b/stackslib/src/burnchains/db.rs
@@ -22,6 +22,7 @@ use rusqlite::{params, Connection, OpenFlags, Row, Transaction};
 use serde_json;
 use stacks_common::types::chainstate::BurnchainHeaderHash;
 use stacks_common::types::sqlite::NO_PARAMS;
+use stacks_common::util::db::SqlEncoded;
 
 use crate::burnchains::{
     Burnchain, BurnchainBlock, BurnchainBlockHeader, Error as BurnchainError, Txid,
@@ -351,8 +352,8 @@ impl BurnchainDBTransaction<'_> {
                VALUES (?, ?, ?, ?, ?)";
         let args = params![
             u64_to_sql(header.block_height)?,
-            header.block_hash,
-            header.parent_block_hash,
+            header.block_hash.sqlhex(),
+            header.parent_block_hash.sqlhex(),
             u64_to_sql(header.num_txs)?,
             u64_to_sql(header.timestamp)?,
         ];
@@ -382,8 +383,8 @@ impl BurnchainDBTransaction<'_> {
         let sql = "UPDATE block_commit_metadata SET anchor_block = ?1 WHERE burn_block_hash = ?2 AND txid = ?3";
         let args = params![
             u64_to_sql(target_reward_cycle)?,
-            block_commit.burn_header_hash,
-            block_commit.txid,
+            block_commit.burn_header_hash.sqlhex(),
+            block_commit.txid.sqlhex(),
         ];
         match self.sql_tx.execute(sql, args) {
             Ok(_) => {
@@ -417,8 +418,8 @@ impl BurnchainDBTransaction<'_> {
                VALUES (?1, ?2, ?3, ?4, ?5, ?6)";
         let mut stmt = self.sql_tx.prepare(commit_metadata_sql)?;
         let args = params![
-            bcm.burn_block_hash,
-            bcm.txid,
+            bcm.burn_block_hash.sqlhex(),
+            bcm.txid.sqlhex(),
             u64_to_sql(bcm.block_height)?,
             bcm.vtxindex,
             opt_u64_to_sql(bcm.anchor_block)?,
@@ -439,7 +440,11 @@ impl BurnchainDBTransaction<'_> {
         for op in block_ops.iter() {
             let serialized_op =
                 serde_json::to_string(op).expect("Failed to serialize parsed BlockstackOp");
-            let args = params![block_header.block_hash, op.txid_ref(), serialized_op];
+            let args = params![
+                block_header.block_hash.sqlhex(),
+                op.txid_ref().sqlhex(),
+                serialized_op
+            ];
             stmt.execute(args)?;
         }
@@ -727,7 +732,7 @@ impl BurnchainDB {
     pub fn has_burnchain_block(&self, block: &BurnchainHeaderHash) -> Result<bool, BurnchainError> {
         let qry = "SELECT 1 FROM burnchain_db_block_headers WHERE block_hash = ?1";
-        let res: Option<i64> = query_row(&self.conn, qry, &[block])?;
+        let res: Option<i64> = query_row(&self.conn, qry, params![block.sqlhex()])?;
         Ok(res.is_some())
     }
@@ -740,7 +745,7 @@ impl BurnchainDB {
             return Ok(None);
         };
         let qry = "SELECT * FROM burnchain_db_block_headers WHERE block_hash = ?1";
-        let args = params![hdr.block_hash];
+        let args = params![hdr.block_hash.sqlhex()];
         let res: Option<BurnchainBlockHeader> = query_row(conn, qry, args)?;
         Ok(res)
     }
@@ -753,9 +758,9 @@ impl BurnchainDB {
             "SELECT * FROM burnchain_db_block_headers WHERE block_hash = ? LIMIT 1";
         let block_ops_qry = "SELECT DISTINCT * FROM burnchain_db_block_ops WHERE block_hash = ?";
 
-        let block_header = query_row(conn, block_header_qry, params![block])?
+        let block_header = query_row(conn, block_header_qry, params![block.sqlhex()])?
             .ok_or_else(|| BurnchainError::UnknownBlock(block.clone()))?;
-        let block_ops = query_rows(conn, block_ops_qry, params![block])?;
+        let block_ops = query_rows(conn, block_ops_qry, params![block.sqlhex()])?;
 
         Ok(BurnchainBlockData {
             header: block_header,
@@ -770,7 +775,7 @@ impl BurnchainDB {
     ) -> Option<BlockstackOperationType> {
         let qry =
             "SELECT DISTINCT op FROM burnchain_db_block_ops WHERE txid = ?1 AND block_hash = ?2";
-        let args = params![txid, burn_header_hash];
+        let args = params![txid.sqlhex(), burn_header_hash.sqlhex()];
 
         match query_row(conn, qry, args) {
             Ok(res) => res,
@@ -789,7 +794,7 @@ impl BurnchainDB {
         txid: &Txid,
     ) -> Option<BlockstackOperationType> {
         let qry = "SELECT DISTINCT op FROM burnchain_db_block_ops WHERE txid = ?1";
-        let args = params![txid];
+        let args = params![txid.sqlhex()];
 
         let ops: Vec<BlockstackOperationType> =
             query_rows(&self.conn, qry, args).expect("FATAL: burnchain DB query error");
@@ -862,7 +867,7 @@ impl BurnchainDB {
         txid: &Txid,
     ) -> Result<bool, DBError> {
         let sql = "SELECT 1 FROM block_commit_metadata WHERE anchor_block IS NOT NULL AND burn_block_hash = ?1 AND txid = ?2";
-        let args = params![burn_header_hash, txid];
+        let args = params![burn_header_hash.sqlhex(), txid.sqlhex()];
         query_row(conn, sql, args)?.ok_or(DBError::NotFoundError)
     }
@@ -930,7 +935,10 @@ impl BurnchainDB {
     ) -> Result<Option<(LeaderBlockCommitOp, BlockCommitMetadata)>, DBError> {
         let sql = "SELECT * FROM block_commit_metadata WHERE anchor_block = ?1 AND burn_block_hash = ?2";
-        let args = params![u64_to_sql(reward_cycle)?, anchor_block_burn_header_hash];
+        let args = params![
+            u64_to_sql(reward_cycle)?,
+            anchor_block_burn_header_hash.sqlhex()
+        ];
         if let Some(commit_metadata) = query_row::<BlockCommitMetadata, _>(conn, sql, args)?
         {
             let commit = BurnchainDB::get_block_commit(
                 conn,
@@ -1010,7 +1018,7 @@ impl BurnchainDB {
         vtxindex: u16,
     ) -> Result<Option<LeaderBlockCommitOp>, DBError> {
         let qry = "SELECT txid FROM block_commit_metadata WHERE block_height = ?1 AND vtxindex = ?2 AND burn_block_hash = ?3";
-        let args = params![block_ptr, vtxindex, header_hash];
+        let args = params![block_ptr, vtxindex, header_hash.sqlhex()];
         let txid = match query_row(conn, qry, args) {
             Ok(Some(txid)) => txid,
             Ok(None) => {
@@ -1056,7 +1064,7 @@ impl BurnchainDB {
         burn_block_hash: &BurnchainHeaderHash,
         txid: &Txid,
     ) -> Result<Option<BlockCommitMetadata>, DBError> {
-        let args = params![burn_block_hash, txid];
+        let args = params![burn_block_hash.sqlhex(), txid.sqlhex()];
         query_row_panic(
             conn,
             "SELECT * FROM block_commit_metadata WHERE burn_block_hash = ?1 AND txid = ?2",
diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs
index ed444853a7e..59506879143 100644
--- a/stackslib/src/burnchains/mod.rs
+++ b/stackslib/src/burnchains/mod.rs
@@ -702,7 +702,7 @@ pub struct BurnchainStateTransition {
 /// -- the new burn distribution
 /// -- the sequence of valid blockstack operations that went into it
 /// -- the set of previously-accepted leader VRF keys consumed
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
 pub struct BurnchainStateTransitionOps {
     pub accepted_ops: Vec<BlockstackOperationType>,
     pub consumed_leader_keys: Vec<LeaderKeyRegisterOp>,
diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs
index 817375dd62d..c40419d70ab 100644
--- a/stackslib/src/burnchains/tests/burnchain.rs
+++ b/stackslib/src/burnchains/tests/burnchain.rs
@@ -90,7 +90,7 @@ fn test_process_block_ops() {
             &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap(),
         )
         .unwrap(),
-        memo: vec![1, 2, 3, 4, 5],
+        memo: vec![1, 2, 3, 4, 5].into(),
 
         txid: Txid::from_bytes(
             &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562").unwrap(),
@@ -110,7 +110,7 @@ fn test_process_block_ops() {
             &hex_bytes("bb519494643f79f1dea0350e6fb9a1da88dfdb6137117fc2523824a8aa44fe1c").unwrap(),
         )
         .unwrap(),
-        memo: vec![1, 2, 3, 4, 5],
+        memo: vec![1, 2, 3, 4, 5].into(),
 
         txid: Txid::from_bytes(
             &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7").unwrap(),
@@ -130,7 +130,7 @@ fn test_process_block_ops() {
             &hex_bytes("de8af7037e522e65d2fe2d63fb1b764bfea829df78b84444338379df13144a02").unwrap(),
         )
         .unwrap(),
-        memo: vec![1, 2, 3, 4, 5],
+        memo: vec![1, 2, 3, 4, 5].into(),
 
         txid: Txid::from_bytes(
             &hex_bytes("eb54704f71d4a2d1128d60ffccced547054b52250ada6f3e7356165714f44d4c").unwrap(),
@@ -157,7 +157,7 @@ fn test_process_block_ops() {
         parent_vtxindex: 0,
         key_block_ptr: 123,
         key_vtxindex: 456,
-        memo: vec![0x80],
+        memo: vec![0x80].into(),
 
         burn_fee: 12345,
         input: (Txid([0; 32]), 0),
@@ -196,7 +196,7 @@ fn test_process_block_ops() {
         parent_vtxindex: 0,
         key_block_ptr: 122,
         key_vtxindex: 457,
-        memo: vec![0x80],
+        memo: vec![0x80].into(),
 
         burn_fee: 12345,
         input: (Txid([0; 32]), 0),
@@ -235,7 +235,7 @@ fn test_process_block_ops() {
         parent_vtxindex: 0,
         key_block_ptr: 121,
         key_vtxindex: 10,
-        memo: vec![0x80],
+        memo: vec![0x80].into(),
 
         burn_fee: 23456,
         input: (Txid([0; 32]), 0),
@@ -775,7 +775,7 @@ fn test_burn_snapshot_sequence() {
             parent_vtxindex: (if i == 1 { 0 } else { 2 * (i - 1) }) as u16,
             key_block_ptr: (first_block_height + (i as u64)) as u32,
             key_vtxindex: (2 * (i - 1) + 1) as u16,
-            memo: vec![i],
+            memo: vec![i].into(),
 
             burn_fee: i as u64,
             input: (Txid([0; 32]), 0),
@@ -818,7 +818,7 @@ fn test_burn_snapshot_sequence() {
                 &hex_bytes(&leader_public_keys[i as usize]).unwrap(),
             )
             .unwrap(),
-            memo: vec![0, 0, 0, 0, i],
+            memo: vec![0, 0, 0, 0, i].into(),
 
             txid: Txid::from_bytes(&[
                 i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs
index 7a2b035ba96..95850e9cb27 100644
--- a/stackslib/src/burnchains/tests/db.rs
+++ b/stackslib/src/burnchains/tests/db.rs
@@ -23,6 +23,7 @@ use stacks_common::deps_common::bitcoin::blockdata::transaction::Transaction as BtcTx;
 use stacks_common::deps_common::bitcoin::network::serialize::deserialize;
 use stacks_common::types::chainstate::StacksAddress;
 use stacks_common::types::sqlite::NO_PARAMS;
+use stacks_common::util::db::SqlEncoded;
 use stacks_common::util::hash::*;
 
 use super::*;
@@ -57,7 +58,7 @@ impl BurnchainDB {
         use rusqlite::params;
 
         let sql = "SELECT op FROM burnchain_db_block_ops WHERE block_hash = ?1";
-        let args = params![block_hash];
+        let args = params![block_hash.sqlhex()];
         let mut ops: Vec<BlockstackOperationType> = query_rows(&self.conn, sql, args)?;
         ops.sort_by_key(|op| op.vtxindex());
 
         Ok(ops)
     }
@@ -525,7 +526,7 @@ pub fn make_simple_block_commit(
         parent_vtxindex: 0,
         key_block_ptr: 0,
         key_vtxindex: 0,
-        memo: vec![0],
+        memo: vec![0].into(),
 
         commit_outs: vec![
             PoxAddress::standard_burn_address(false),
@@ -1099,7 +1100,7 @@ fn burnchain_db_migration_v2_to_v3() -> Result<(), BurnchainError> {
     let sample_txid = "txid1".to_string();
     conn.execute(
         "INSERT INTO burnchain_db_block_headers (block_height, block_hash, parent_block_hash, num_txs, timestamp) VALUES (?, ?, ?, ?, ?)",
-        params![1, &sample_block_hash, &sample_parent_block_hash, 1, 1234567890],
+        params![1, &sample_block_hash.sqlhex(), &sample_parent_block_hash.sqlhex(), 1, 1234567890],
     )?;
     conn.execute(
         "INSERT INTO affirmation_maps (weight, affirmation_map) VALUES (?, ?)",
     )?;
     conn.execute(
         "INSERT INTO block_commit_metadata (burn_block_hash, txid, block_height, vtxindex, affirmation_id, anchor_block, anchor_block_descendant) VALUES (?, ?, ?, ?, ?, ?, ?)",
-        params![&sample_block_hash, &sample_txid, 1, 0, 0, None::<u64>, None::<u64>],
+        params![&sample_block_hash.sqlhex(), &sample_txid, 1, 0, 0, None::<u64>, None::<u64>],
     )?;
 
     // Create BurnchainDB using connect to trigger migration code
@@ -1153,7 +1154,7 @@ fn burnchain_db_migration_v2_to_v3() -> Result<(), BurnchainError> {
     let header: Option<BurnchainBlockHeader> = query_row(
         &db.conn,
         "SELECT * FROM burnchain_db_block_headers WHERE block_hash = ?",
-        params![&sample_block_hash],
+        params![&sample_block_hash.sqlhex()],
     )?;
     assert!(
         header.is_some(),
@@ -1162,7 +1163,7 @@ fn burnchain_db_migration_v2_to_v3() -> Result<(), BurnchainError> {
     let metadata: Option<String> = query_row(
         &db.conn,
         "SELECT txid FROM block_commit_metadata WHERE burn_block_hash = ?",
-        params![&sample_block_hash],
+        params![&sample_block_hash.sqlhex()],
     )?;
     assert_eq!(
         metadata,
diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs
index ff6cf995f81..97309196c89 100644
--- a/stackslib/src/burnchains/tests/mod.rs
+++ b/stackslib/src/burnchains/tests/mod.rs
@@ -537,7 +537,7 @@ impl TestBurnchainBlock {
         txop.txid =
             Txid::from_test_data(txop.block_height, txop.vtxindex, &txop.burn_header_hash, 0);
-        txop.memo = vec![epoch_marker << 3];
+        txop.memo = vec![epoch_marker << 3].into();
 
         self.txs
             .push(BlockstackOperationType::LeaderBlockCommit(txop.clone()));
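// ---------------------------------------------------------------------------
// Illustrative aside (not part of this diff): the FromSql impl generated by
// the reworked impl_byte_array_rusqlite_only! macro first looks for a
// ColumnEncoding tag byte and only falls back to hex decoding when none is
// present, so one reader accepts both the legacy TEXT form and a
// SIP003-framed BLOB for the same column. A sketch under those assumptions,
// using Txid (which gets the generated impls in the next file) and a
// hypothetical in-memory table:
fn _mixed_encoding_read_sketch() -> rusqlite::Result<()> {
    use rusqlite::{params, Connection};
    use stacks::burnchains::Txid;
    use stacks_common::util::db::{ColumnEncoding, SqlEncoded};

    let conn = Connection::open_in_memory()?;
    conn.execute("CREATE TABLE t (txid BLOB)", [])?;

    let txid = Txid([0x22; 32]);
    // Legacy write: 64 ASCII hex bytes, no tag byte.
    conn.execute("INSERT INTO t (txid) VALUES (?1)", params![txid.sqlhex()])?;
    // SIP003 write: the 0x00 tag byte plus the 32-byte serialized body.
    conn.execute(
        "INSERT INTO t (txid) VALUES (?1)",
        params![txid.sql_encoded(Some(ColumnEncoding::SIP003))],
    )?;

    // Both rows decode to the same value through the single FromSql impl.
    let mut stmt = conn.prepare("SELECT txid FROM t")?;
    let decoded: Vec<Txid> = stmt
        .query_map([], |row| row.get(0))?
        .collect::<rusqlite::Result<_>>()?;
    assert!(decoded.iter().all(|t| t == &txid));
    Ok(())
}
// ---------------------------------------------------------------------------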
diff --git a/stackslib/src/chainstate/burn/db/mod.rs b/stackslib/src/chainstate/burn/db/mod.rs
index 15e5baf1304..0fd248447ec 100644
--- a/stackslib/src/chainstate/burn/db/mod.rs
+++ b/stackslib/src/chainstate/burn/db/mod.rs
@@ -34,14 +34,18 @@ pub mod sortdb;
 
 pub type DBConn = Connection;
 
-impl_byte_array_from_column!(Txid);
+impl_byte_array_rusqlite_only!(Txid);
+impl_byte_array_rusqlite_only!(OpsHash);
+impl_byte_array_rusqlite_only!(SortitionHash);
+
+impl_byte_array_from_column_only!(Txid);
 impl_byte_array_from_column_only!(ConsensusHash);
 impl_byte_array_from_column_only!(Hash160);
 impl_byte_array_from_column_only!(BlockHeaderHash);
 impl_byte_array_from_column_only!(VRFSeed);
-impl_byte_array_from_column!(OpsHash);
+impl_byte_array_from_column_only!(OpsHash);
 impl_byte_array_from_column_only!(BurnchainHeaderHash);
-impl_byte_array_from_column!(SortitionHash);
+impl_byte_array_from_column_only!(SortitionHash);
 impl_byte_array_from_column_only!(Sha512Trunc256Sum);
 impl_byte_array_from_column_only!(VRFProof);
 impl_byte_array_from_column_only!(TrieHash);
diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs
index 58fea322125..ec7581b2412 100644
--- a/stackslib/src/chainstate/burn/db/processing.rs
+++ b/stackslib/src/chainstate/burn/db/processing.rs
@@ -370,7 +370,7 @@ mod tests {
                 "a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a",
             )
             .unwrap(),
-            memo: vec![1, 2, 3, 4, 5],
+            memo: vec![1, 2, 3, 4, 5].into(),
 
             txid: Txid::from_bytes_be(
                 &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562")
                     .unwrap(),
@@ -393,7 +393,7 @@ mod tests {
             parent_vtxindex: 0,
             key_block_ptr: 101,
             key_vtxindex: 400,
-            memo: vec![0x80],
+            memo: vec![0x80].into(),
 
             apparent_sender: BurnchainSigner("hello-world".to_string()),
             commit_outs: vec![],
diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs
index d6e74570521..f54908c17d4 100644
--- a/stackslib/src/chainstate/burn/db/sortdb.rs
+++ b/stackslib/src/chainstate/burn/db/sortdb.rs
@@ -28,6 +28,7 @@ use stacks_common::types::chainstate::{
 };
 use stacks_common::types::sqlite::NO_PARAMS;
 use stacks_common::types::StacksPublicKeyBuffer;
+use stacks_common::util::db::{ColumnEncoding, SqlEncoded};
 use stacks_common::util::hash::{hex_bytes, to_hex, Sha512Trunc256Sum};
 use stacks_common::util::vrf::*;
 
@@ -39,8 +40,8 @@ use crate::chainstate::burn::operations::leader_block_commit::{
     MissedBlockCommit, RewardSetInfo, OUTPUTS_PER_COMMIT,
 };
 use crate::chainstate::burn::operations::{
-    BlockstackOperationType, DelegateStxOp, LeaderBlockCommitOp, LeaderKeyRegisterOp, StackStxOp,
-    TransferStxOp, VoteForAggregateKeyOp,
+    BlockstackOperationType, BurnOpMemo, DelegateStxOp, LeaderBlockCommitOp, LeaderKeyRegisterOp,
+    StackStxOp, TransferStxOp, VoteForAggregateKeyOp,
 };
 use crate::chainstate::burn::{
     BlockSnapshot, ConsensusHash, ConsensusHashExtensions, OpsHash, SortitionHash,
@@ -100,7 +101,7 @@ impl FromRow<BurnchainHeaderHash> for BurnchainHeaderHash {
 impl FromRow<MissedBlockCommit> for MissedBlockCommit {
     fn from_row(row: &Row) -> Result<MissedBlockCommit, db_error> {
         let intended_sortition = SortitionId::from_column(row, "intended_sortition_id")?;
-        let input_json: String = row.get_unwrap("input");
+        let input_json: String = row.get("input")?;
         let input = serde_json::from_str(&input_json).map_err(db_error::SerializationError)?;
         let txid = Txid::from_column(row, "txid")?;
@@ -121,8 +122,8 @@
             BurnchainHeaderHash::from_column(row, "parent_burn_header_hash")?;
         let consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?;
         let ops_hash = OpsHash::from_column(row, "ops_hash")?;
-        let total_burn_str: String = row.get_unwrap("total_burn");
-        let sortition: bool = row.get_unwrap("sortition");
+        let total_burn_str: String = row.get("total_burn")?;
+        let sortition: bool = row.get("sortition")?;
         let sortition_hash = SortitionHash::from_column(row, "sortition_hash")?;
         let winning_block_txid = Txid::from_column(row, "winning_block_txid")?;
         let winning_stacks_block_hash =
             BlockHeaderHash::from_column(row, "winning_stacks_block_hash")?;
         let num_sortitions = u64::from_column(row, "num_sortitions")?;
 
         // information we learn about the stacks block this snapshot committed to
-        let stacks_block_accepted: bool = row.get_unwrap("stacks_block_accepted");
+        let stacks_block_accepted: bool = row.get("stacks_block_accepted")?;
         let stacks_block_height = u64::from_column(row, "stacks_block_height")?;
         let arrival_index = u64::from_column(row, "arrival_index")?;
@@ -146,9 +147,9 @@
         // identifiers derived from PoX forking state
         let sortition_id = SortitionId::from_column(row, "sortition_id")?;
         let parent_sortition_id = SortitionId::from_column(row, "parent_sortition_id")?;
-        let pox_valid = row.get_unwrap("pox_valid");
+        let pox_valid = row.get("pox_valid")?;
 
-        let accumulated_coinbase_ustx_str: String = row.get_unwrap("accumulated_coinbase_ustx");
+        let accumulated_coinbase_ustx_str: String = row.get("accumulated_coinbase_ustx")?;
         let accumulated_coinbase_ustx = accumulated_coinbase_ustx_str
             .parse::<u128>()
             .expect("DB CORRUPTION: failed to parse stored value");
@@ -196,16 +197,12 @@
 impl FromRow<LeaderKeyRegisterOp> for LeaderKeyRegisterOp {
     fn from_row(row: &Row) -> Result<LeaderKeyRegisterOp, db_error> {
         let txid = Txid::from_column(row, "txid")?;
-        let vtxindex: u32 = row.get_unwrap("vtxindex");
+        let vtxindex: u32 = row.get("vtxindex")?;
         let block_height = u64::from_column(row, "block_height")?;
         let burn_header_hash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?;
         let consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?;
         let public_key = VRFPublicKey::from_column(row, "public_key")?;
-        let memo_hex: String = row.get_unwrap("memo");
-
-        let memo_bytes = hex_bytes(&memo_hex).map_err(|_e| db_error::ParseError)?;
-
-        let memo = memo_bytes.to_vec();
+        let memo = BurnOpMemo::from_column(row, "memo")?;
 
         let leader_key_row = LeaderKeyRegisterOp {
             txid,
@@ -225,27 +222,23 @@
 impl FromRow<LeaderBlockCommitOp> for LeaderBlockCommitOp {
     fn from_row(row: &Row) -> Result<LeaderBlockCommitOp, db_error> {
         let txid = Txid::from_column(row, "txid")?;
-        let vtxindex: u32 = row.get_unwrap("vtxindex");
+        let vtxindex: u32 = row.get("vtxindex")?;
         let block_height = u64::from_column(row, "block_height")?;
         let burn_header_hash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?;
         let block_header_hash = BlockHeaderHash::from_column(row, "block_header_hash")?;
         let new_seed = VRFSeed::from_column(row, "new_seed")?;
-        let parent_block_ptr: u32 = row.get_unwrap("parent_block_ptr");
-        let parent_vtxindex: u16 = row.get_unwrap("parent_vtxindex");
-        let key_block_ptr: u32 = row.get_unwrap("key_block_ptr");
-        let key_vtxindex: u16 = row.get_unwrap("key_vtxindex");
-        let memo_hex: String = row.get_unwrap("memo");
-        let burn_fee_str: String = row.get_unwrap("burn_fee");
-        let input_json: String = row.get_unwrap("input");
-        let apparent_sender_json: String = row.get_unwrap("apparent_sender");
-        let sunset_burn_str: String = row.get_unwrap("sunset_burn");
-
-        let commit_outs = serde_json::from_value(row.get_unwrap("commit_outs"))
-            .expect("Unparseable value stored to database");
-
-        let memo_bytes = hex_bytes(&memo_hex).map_err(|_e| db_error::ParseError)?;
-
-        let memo = memo_bytes.to_vec();
+        let parent_block_ptr: u32 = row.get("parent_block_ptr")?;
+        let parent_vtxindex: u16 = row.get("parent_vtxindex")?;
+        let key_block_ptr: u32 = row.get("key_block_ptr")?;
+        let key_vtxindex: u16 = row.get("key_vtxindex")?;
+        let memo = BurnOpMemo::from_column(row, "memo")?;
+        let burn_fee_str: String = row.get("burn_fee")?;
+        let input_json: String = row.get("input")?;
+        let apparent_sender_json: String = row.get("apparent_sender")?;
+        let sunset_burn_str: String = row.get("sunset_burn")?;
+
+        let commit_outs =
+            serde_json::from_value(row.get("commit_outs")?).map_err(|_| db_error::ParseError)?;
 
         let input = serde_json::from_str(&input_json).map_err(db_error::SerializationError)?;
@@ -260,9 +253,9 @@
             .parse::<u64>()
             .expect("DB Corruption: Sunset burn is not parseable as u64");
 
-        let burn_parent_modulus: u8 = row.get_unwrap("burn_parent_modulus");
+        let burn_parent_modulus: u8 = row.get("burn_parent_modulus")?;
 
-        let punished_str: Option<String> = row.get_unwrap("punished");
+        let punished_str: Option<String> = row.get("punished")?;
         let punished = punished_str
             .as_deref()
             .map(serde_json::from_str)
@@ -298,16 +291,16 @@
 impl FromRow<StackStxOp> for StackStxOp {
     fn from_row(row: &Row) -> Result<StackStxOp, db_error> {
         let txid = Txid::from_column(row, "txid")?;
-        let vtxindex: u32 = row.get_unwrap("vtxindex");
+        let vtxindex: u32 = row.get("vtxindex")?;
         let block_height = u64::from_column(row, "block_height")?;
         let burn_header_hash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?;
 
         let sender = StacksAddress::from_column(row, "sender_addr")?;
         let reward_addr = PoxAddress::from_column(row, "reward_addr")?;
-        let stacked_ustx_str: String = row.get_unwrap("stacked_ustx");
-        let stacked_ustx = u128::from_str_radix(&stacked_ustx_str, 10)
-            .expect("CORRUPTION: bad u128 written to sortdb");
-        let num_cycles = row.get_unwrap("num_cycles");
+        let stacked_ustx_str: String = row.get("stacked_ustx")?;
+        let stacked_ustx =
+            u128::from_str_radix(&stacked_ustx_str, 10).map_err(|_| db_error::ParseError)?;
+        let num_cycles = row.get("num_cycles")?;
         let signing_key_str_opt: Option<String> = row.get("signer_key")?;
         let signer_key = match signing_key_str_opt {
             Some(key_str) => serde_json::from_str(&key_str).ok(),
@@ -341,17 +334,17 @@
 impl FromRow<DelegateStxOp> for DelegateStxOp {
     fn from_row(row: &Row) -> Result<DelegateStxOp, db_error> {
         let txid = Txid::from_column(row, "txid")?;
-        let vtxindex: u32 = row.get_unwrap("vtxindex");
+        let vtxindex: u32 = row.get("vtxindex")?;
         let block_height = u64::from_column(row, "block_height")?;
         let burn_header_hash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?;
 
         let sender = StacksAddress::from_column(row, "sender_addr")?;
         let delegate_to = StacksAddress::from_column(row, "delegate_to")?;
-        let reward_addr_str: String = row.get_unwrap("reward_addr");
+        let reward_addr_str: String = row.get("reward_addr")?;
         let reward_addr = serde_json::from_str(&reward_addr_str)
             .expect("CORRUPTION: DB stored bad transition ops");
 
-        let delegated_ustx_str: String = row.get_unwrap("delegated_ustx");
+        let delegated_ustx_str: String = row.get("delegated_ustx")?;
         let delegated_ustx = u128::from_str_radix(&delegated_ustx_str, 10)
             .expect("CORRUPTION: bad u128 written to sortdb");
         let until_burn_height = u64::from_column(row, "until_burn_height")?;
@@ -373,17 +366,16 @@ impl FromRow<DelegateStxOp> for DelegateStxOp
{ impl FromRow for TransferStxOp { fn from_row(row: &Row) -> Result { let txid = Txid::from_column(row, "txid")?; - let vtxindex: u32 = row.get_unwrap("vtxindex"); + let vtxindex: u32 = row.get("vtxindex")?; let block_height = u64::from_column(row, "block_height")?; let burn_header_hash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?; let sender = StacksAddress::from_column(row, "sender_addr")?; let recipient = StacksAddress::from_column(row, "recipient_addr")?; - let transfered_ustx_str: String = row.get_unwrap("transfered_ustx"); - let transfered_ustx = u128::from_str_radix(&transfered_ustx_str, 10) - .expect("CORRUPTION: bad u128 written to sortdb"); - let memo_hex: String = row.get_unwrap("memo"); - let memo = hex_bytes(&memo_hex).map_err(|_| db_error::Corruption)?; + let transfered_ustx_str: String = row.get("transfered_ustx")?; + let transfered_ustx = + u128::from_str_radix(&transfered_ustx_str, 10).map_err(|_| db_error::ParseError)?; + let memo = BurnOpMemo::from_column(row, "memo")?; Ok(TransferStxOp { txid, @@ -401,20 +393,20 @@ impl FromRow for TransferStxOp { impl FromRow for VoteForAggregateKeyOp { fn from_row(row: &Row) -> Result { let txid = Txid::from_column(row, "txid")?; - let vtxindex: u32 = row.get_unwrap("vtxindex"); + let vtxindex: u32 = row.get("vtxindex")?; let block_height = u64::from_column(row, "block_height")?; let burn_header_hash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?; let sender = StacksAddress::from_column(row, "sender_addr")?; - let aggregate_key_str: String = row.get_unwrap("aggregate_key"); - let aggregate_key: StacksPublicKeyBuffer = serde_json::from_str(&aggregate_key_str) - .expect("CORRUPTION: DB stored bad transition ops"); - let round: u32 = row.get_unwrap("round"); + let aggregate_key_str: String = row.get("aggregate_key")?; + let aggregate_key: StacksPublicKeyBuffer = + serde_json::from_str(&aggregate_key_str).map_err(|_| db_error::ParseError)?; + let round: u32 = row.get("round")?; let reward_cycle = u64::from_column(row, "reward_cycle")?; - let signer_index: u16 = row.get_unwrap("signer_index"); - let signer_key_str: String = row.get_unwrap("signer_key"); - let signer_key: StacksPublicKeyBuffer = serde_json::from_str(&signer_key_str) - .expect("CORRUPTION: DB stored bad transition ops"); + let signer_index: u16 = row.get("signer_index")?; + let signer_key_str: String = row.get("signer_key")?; + let signer_key: StacksPublicKeyBuffer = + serde_json::from_str(&signer_key_str).map_err(|_| db_error::ParseError)?; Ok(VoteForAggregateKeyOp { txid, @@ -462,15 +454,15 @@ impl FromRow for AcceptedStacksBlockHeader { impl FromRow for StacksEpoch { fn from_row(row: &Row) -> Result { - let epoch_id_u32: u32 = row.get_unwrap("epoch_id"); + let epoch_id_u32: u32 = row.get("epoch_id")?; let epoch_id = StacksEpochId::try_from(epoch_id_u32).map_err(|_| db_error::ParseError)?; let start_height = u64::from_column(row, "start_block_height")?; let end_height = u64::from_column(row, "end_block_height")?; - let network_epoch: u8 = row.get_unwrap("network_epoch"); + let network_epoch: u8 = row.get("network_epoch")?; - let block_limit = row.get_unwrap("block_limit"); + let block_limit = row.get("block_limit")?; Ok(StacksEpoch { epoch_id, start_height, @@ -481,7 +473,11 @@ impl FromRow for StacksEpoch { } } -pub const SORTITION_DB_VERSION: u32 = 10; +pub const SORTITION_DB_VERSION: u32 = 11; + +const SORTITION_DB_DBCONFIG_TABLE: &str = "CREATE TABLE db_config(version TEXT PRIMARY KEY);"; +const SORTITION_DB_DBCONFIG_ENCODING: &str = + 
"ALTER TABLE db_config ADD encoding INTEGER DEFAULT NULL;"; const SORTITION_DB_INITIAL_SCHEMA: &[&str] = &[ r#" @@ -615,7 +611,7 @@ const SORTITION_DB_INITIAL_SCHEMA: &[&str] = &[ PRIMARY KEY(txid, intended_sortition_id) );"#, - "CREATE TABLE db_config(version TEXT PRIMARY KEY);", + SORTITION_DB_DBCONFIG_TABLE, ]; const SORTITION_DB_SCHEMA_2: &[&str] = &[r#" @@ -721,6 +717,14 @@ static SORTITION_DB_SCHEMA_9: &[&str] = &[r#"ALTER TABLE block_commits ADD punished TEXT DEFAULT NULL;"#]; static SORTITION_DB_SCHEMA_10: &[&str] = &[r#"DROP TABLE IF EXISTS ast_rule_heights;"#]; +static SORTITION_DB_SCHEMA_11: &[&str] = &[ + "DROP TABLE db_config", + SORTITION_DB_DBCONFIG_TABLE, + SORTITION_DB_DBCONFIG_ENCODING, +]; +static SORTITION_DB_SCHEMA_DBCONFIG: &[&str] = + &[SORTITION_DB_DBCONFIG_TABLE, SORTITION_DB_DBCONFIG_ENCODING]; + const LAST_SORTITION_DB_INDEX: &str = "index_block_commits_by_sender"; const SORTITION_DB_INDEXES: &[&str] = &[ "CREATE INDEX IF NOT EXISTS snapshots_block_hashes ON snapshots(block_height,index_root,winning_stacks_block_hash);", @@ -772,6 +776,8 @@ pub struct SortitionDB { pub pox_constants: PoxConstants, /// Path on disk from which this DB was opened (caller-given; not resolved). pub path: String, + /// Encoding to use when loading or storing columns + pub(crate) column_encoding: Option, } #[derive(Clone)] @@ -779,6 +785,8 @@ pub struct SortitionDBTxContext { pub first_block_height: u64, pub pox_constants: PoxConstants, pub dryrun: bool, + /// Encoding to use when loading or storing columns + pub(crate) column_encoding: Option, } #[derive(Clone)] @@ -787,6 +795,8 @@ pub struct SortitionHandleContext { pub pox_constants: PoxConstants, pub chain_tip: SortitionId, pub dryrun: bool, + /// Encoding to use when loading or storing columns + pub(crate) column_encoding: Option, } pub type SortitionDBConn<'a> = IndexDBConn<'a, SortitionDBTxContext, SortitionId>; @@ -827,8 +837,9 @@ pub fn get_block_commit_by_txid( sort_id: &SortitionId, txid: &Txid, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND txid = ?2 LIMIT 1"; - let args = params![sort_id, txid]; + let args = params![sort_id.sql_encoded(encoding), txid.sql_encoded(encoding)]; query_row(conn, qry, args) } @@ -1016,6 +1027,9 @@ pub trait SortitionHandle { /// a transaction, this should point to the open transaction. fn sqlite(&self) -> &Connection; + /// Returns the column encoding to use, if any + fn column_encoding(&self) -> Option; + /// Returns the snapshot of the burnchain block at burnchain height `block_height`. /// Returns None if there is no block at this height. fn get_block_snapshot_by_height( @@ -1089,7 +1103,7 @@ pub trait SortitionHandle { ) -> Result { let earliest_block_height_opt = self.sqlite().query_row( "SELECT block_height FROM snapshots WHERE winning_stacks_block_hash = ? 
ORDER BY block_height ASC LIMIT 1", - &[potential_ancestor], + params![potential_ancestor.sql_encoded(self.column_encoding())], |row| Ok(u64::from_row(row).expect("Expected u64 in database"))) .optional()?; @@ -1205,6 +1219,7 @@ impl<'a> SortitionHandleTx<'a> { first_block_height: conn.first_block_height, pox_constants: conn.pox_constants.clone(), dryrun: conn.dryrun, + column_encoding: conn.column_encoding, }, ); @@ -1261,7 +1276,9 @@ impl<'a> SortitionHandleTx<'a> { let qry = "SELECT * FROM leader_keys WHERE sortition_id = ?1 AND block_height = ?2 AND vtxindex = ?3 LIMIT 2"; let args = params![ - ancestor_snapshot.sortition_id, + ancestor_snapshot + .sortition_id + .sql_encoded(self.context.column_encoding), u64_to_sql(key_block_height)?, key_vtxindex, ]; @@ -1537,6 +1554,10 @@ impl SortitionHandle for SortitionHandleTx<'_> { self.tx() } + fn column_encoding(&self) -> Option { + self.context.column_encoding + } + fn tip(&self) -> SortitionId { self.context.chain_tip.clone() } @@ -1568,6 +1589,10 @@ impl SortitionHandle for SortitionHandleConn<'_> { self.conn() } + fn column_encoding(&self) -> Option { + self.context.column_encoding + } + fn tip(&self) -> SortitionId { self.context.chain_tip.clone() } @@ -1737,7 +1762,7 @@ impl SortitionHandleTx<'_> { sortition_id: &SortitionId, ) -> Result<(Vec, u128), db_error> { let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; - let args = params![sortition_id]; + let args = params![sortition_id.sql_encoded(self.context.column_encoding)]; let pox_addrs_json: String = query_row(self, sql, args)?.ok_or(db_error::NotFoundError)?; let pox_addrs: (Vec, u128) = @@ -1812,7 +1837,7 @@ impl SortitionHandleTx<'_> { } /// Update the canonical Stacks tip - fn update_canonical_stacks_tip( + pub(crate) fn update_canonical_stacks_tip( &mut self, sort_id: &SortitionId, consensus_hash: &ConsensusHash, @@ -1821,9 +1846,9 @@ impl SortitionHandleTx<'_> { ) -> Result<(), db_error> { let sql = "INSERT OR REPLACE INTO stacks_chain_tips (sortition_id,consensus_hash,block_hash,block_height) VALUES (?1,?2,?3,?4)"; let args = params![ - sort_id, - consensus_hash, - stacks_block_hash, + sort_id.sql_encoded(self.context.column_encoding), + consensus_hash.sql_encoded(self.context.column_encoding), + stacks_block_hash.sql_encoded(self.context.column_encoding), u64_to_sql(stacks_block_height)?, ]; self.execute(sql, args)?; @@ -1878,7 +1903,7 @@ impl SortitionHandleTx<'_> { // votes). 
let current_sortition_tip : Option<(ConsensusHash, BlockHeaderHash, u64)> = self.query_row_and_then( "SELECT consensus_hash,block_hash,block_height FROM stacks_chain_tips WHERE sortition_id = ?1 ORDER BY block_height DESC LIMIT 1", - rusqlite::params![&burn_tip.sortition_id], + rusqlite::params![&burn_tip.sortition_id.sql_encoded(self.context.column_encoding)], |row| Ok((row.get_unwrap(0), row.get_unwrap(1), (u64::try_from(row.get_unwrap::<_, i64>(2)).expect("FATAL: block height too high")))) ).optional()?; @@ -1932,8 +1957,8 @@ impl SortitionHandleTx<'_> { let args = params![ u64_to_sql(stacks_block_height)?, u64_to_sql(arrival_index + 1)?, - consensus_hash, - stacks_block_hash, + consensus_hash.sql_encoded(self.context.column_encoding), + stacks_block_hash.sql_encoded(self.context.column_encoding), ]; debug!( @@ -2058,6 +2083,7 @@ impl<'a> SortitionHandleConn<'a> { first_block_height: connection.context.first_block_height, pox_constants: connection.context.pox_constants.clone(), dryrun: connection.context.dryrun, + column_encoding: connection.context.column_encoding, }, )) } @@ -2517,7 +2543,7 @@ impl<'a> SortitionHandleConn<'a> { sortition_id: &SortitionId, ) -> Result<(Vec, u128), db_error> { let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; - let args = params![sortition_id]; + let args = params![sortition_id.sql_encoded(self.context.column_encoding)]; let pox_addrs_json: String = query_row(self, sql, args)?.ok_or(db_error::NotFoundError)?; let pox_addrs: (Vec, u128) = @@ -2561,6 +2587,7 @@ impl SortitionDB { first_block_height: self.first_block_height, pox_constants: self.pox_constants.clone(), dryrun: self.dryrun, + column_encoding: self.column_encoding, }, ); Ok(index_tx) @@ -2574,6 +2601,7 @@ impl SortitionDB { first_block_height: self.first_block_height, pox_constants: self.pox_constants.clone(), dryrun: self.dryrun, + column_encoding: self.column_encoding, }, ) } @@ -2586,6 +2614,7 @@ impl SortitionDB { chain_tip: chain_tip.clone(), pox_constants: self.pox_constants.clone(), dryrun: self.dryrun, + column_encoding: self.column_encoding, }, ) } @@ -2639,6 +2668,7 @@ impl SortitionDB { chain_tip: chain_tip.clone(), pox_constants: self.pox_constants.clone(), dryrun: self.dryrun, + column_encoding: self.column_encoding, }, )) } @@ -2674,6 +2704,7 @@ impl SortitionDB { let (first_block_height, first_burn_header_hash) = SortitionDB::get_first_block_height_and_hash(marf.sqlite_conn())?; + let encoding = SortitionDB::get_column_encoding(&marf.sqlite_conn())?; let mut db = SortitionDB { path: path.to_string(), marf, @@ -2682,6 +2713,7 @@ impl SortitionDB { pox_constants, first_block_height, first_burn_header_hash, + column_encoding: encoding, }; db.check_schema_version_or_error()?; @@ -2740,6 +2772,7 @@ impl SortitionDB { first_block_height, pox_constants, first_burn_header_hash: first_burn_hash.clone(), + column_encoding: migrator.as_ref().map(|m| m.get_column_encoding()).flatten(), }; if create_flag { @@ -2836,11 +2869,12 @@ impl SortitionDB { let db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; SortitionDB::apply_schema_9(&db_tx, epochs_ref)?; SortitionDB::apply_schema_10(&db_tx)?; + SortitionDB::apply_schema_11(&db_tx, None)?; db_tx.commit()?; self.add_indexes()?; - debug!("Instantiated SortDB"); + debug!("Instantiated SortDB with default encoding"); Ok(()) } @@ -2948,8 +2982,12 @@ impl SortitionDB { txid: &Txid, sortition_id: &SortitionId, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT * FROM 
block_commits WHERE txid = ?1 AND sortition_id = ?2"; - let args = params![txid, sortition_id]; + let args = params![ + txid.sql_encoded(encoding), + sortition_id.sql_encoded(encoding) + ]; query_row(conn, qry, args) } @@ -2960,8 +2998,12 @@ impl SortitionDB { txid: &Txid, sortition_id: &SortitionId, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT parent_sortition_id AS sortition_id FROM block_commit_parents WHERE block_commit_parents.block_commit_txid = ?1 AND block_commit_parents.block_commit_sortition_id = ?2"; - let args = params![txid, sortition_id]; + let args = params![ + txid.sql_encoded(encoding), + sortition_id.sql_encoded(encoding) + ]; query_row(conn, qry, args) } @@ -2976,8 +3018,9 @@ impl SortitionDB { conn: &DBConn, bhh: &BurnchainHeaderHash, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT * FROM snapshots WHERE burn_header_hash = ?1"; - query_rows(conn, qry, &[bhh]) + query_rows(conn, qry, params![bhh.sql_encoded(encoding)]) } /// Get all snapshots for a burn block height, even if they're not on the canonical PoX fork @@ -3011,7 +3054,11 @@ impl SortitionDB { #[cfg_attr(test, mutants::skip)] pub fn get_consensus_hash_height(&self, ch: &ConsensusHash) -> Result, db_error> { let qry = "SELECT block_height FROM snapshots WHERE consensus_hash = ?1"; - let mut heights: Vec = query_rows(self.conn(), qry, &[ch])?; + let mut heights: Vec = query_rows( + self.conn(), + qry, + params![ch.sql_encoded(self.column_encoding)], + )?; if let Some(height) = heights.pop() { for next_height in heights { if height != next_height { @@ -3082,45 +3129,95 @@ impl SortitionDB { Ok(version) } + /// Get the database column encoding for byte strings, given a DB connection + pub(crate) fn get_column_encoding( + conn: &Connection, + ) -> Result, db_error> { + let Some(version) = Self::get_schema_version(conn)? else { + // schema predates specific encoding scheme, so use the default + return Ok(None); + }; + + let Some(encoding_u8): Option = conn + .query_row( + "SELECT encoding FROM db_config WHERE version = ?1 AND encoding IS NOT NULL", + params![version], + |row| row.get(0), + ) + .optional()? + else { + return Ok(None); + }; + + Ok(ColumnEncoding::try_from(encoding_u8).ok()) + } + + /// Set the database column encoding. + /// Only used during migration. + pub(crate) fn set_column_encoding( + conn: &Connection, + encoding: Option, + ) -> Result<(), db_error> { + conn.execute( + "UPDATE db_config SET encoding = ?1", + params![&encoding.map(|enc| enc.as_u8())], + )?; + Ok(()) + } + + /// Update the database schema with the given version and column encoding rules. + /// Returns Ok(()) on success + /// Returns Err(..) 
on error + fn update_db_version( + tx: &DBTx, + version: u32, + encoding: Option, + ) -> Result<(), db_error> { + // drop and recreate the table each time so we have the latest schema + tx.execute("DROP TABLE db_config", NO_PARAMS)?; + for sql_exec in SORTITION_DB_SCHEMA_DBCONFIG { + tx.execute_batch(sql_exec)?; + } + tx.execute( + "INSERT INTO db_config (version,encoding) VALUES (?1,?2)", + params![version, encoding.map(|enc| enc.as_u8())], + )?; + Ok(()) + } + fn apply_schema_2(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + info!("Migrating sortition DB to schema 2..."); for sql_exec in SORTITION_DB_SCHEMA_2 { tx.execute_batch(sql_exec)?; } SortitionDB::validate_and_insert_epochs(tx, epochs)?; - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["2"], - )?; - + Self::update_db_version(tx, 2, None)?; Ok(()) } fn apply_schema_3(tx: &DBTx) -> Result<(), db_error> { + info!("Migrating sortition DB to schema 3..."); for sql_exec in SORTITION_DB_SCHEMA_3 { tx.execute_batch(sql_exec)?; } - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["3"], - )?; + Self::update_db_version(tx, 3, None)?; Ok(()) } fn apply_schema_4(tx: &DBTx) -> Result<(), db_error> { + info!("Migrating sortition DB to schema 4..."); for sql_exec in SORTITION_DB_SCHEMA_4 { tx.execute_batch(sql_exec)?; } - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["4"], - )?; + Self::update_db_version(tx, 4, None)?; Ok(()) } #[cfg_attr(test, mutants::skip)] fn apply_schema_5(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + info!("Migrating sortition DB to schema 5..."); // the schema 5 changes simply **replace** the contents of the epochs table // by dropping all the current rows and then revalidating and inserting // `epochs` @@ -3129,44 +3226,31 @@ impl SortitionDB { } SortitionDB::validate_and_insert_epochs(tx, epochs)?; - - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["5"], - )?; - + Self::update_db_version(tx, 5, None)?; Ok(()) } #[cfg_attr(test, mutants::skip)] fn apply_schema_6(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + info!("Migrating sortition DB to schema 6..."); for sql_exec in SORTITION_DB_SCHEMA_6 { tx.execute_batch(sql_exec)?; } SortitionDB::validate_and_insert_epochs(tx, epochs)?; - - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["6"], - )?; - + Self::update_db_version(tx, 6, None)?; Ok(()) } #[cfg_attr(test, mutants::skip)] fn apply_schema_7(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + info!("Migrating sortition DB to schema 7..."); for sql_exec in SORTITION_DB_SCHEMA_7 { tx.execute_batch(sql_exec)?; } SortitionDB::validate_and_insert_epochs(tx, epochs)?; - - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["7"], - )?; - + Self::update_db_version(tx, 7, None)?; Ok(()) } @@ -3191,13 +3275,14 @@ impl SortitionDB { &mut self, canonical_tip: &BlockSnapshot, ) -> Result<(), db_error> { + let encoding = self.column_encoding; let first_block_height = self.first_block_height; let tx = self.tx_begin()?; // skip if this step was done if table_exists(&tx, "stacks_chain_tips")? 
{ let sql = "SELECT 1 FROM stacks_chain_tips WHERE sortition_id = ?1"; - let args = params![canonical_tip.sortition_id]; + let args = params![canonical_tip.sortition_id.sql_encoded(encoding)]; if let Ok(Some(_)) = query_row::(&tx, sql, args) { info!("`stacks_chain_tips` appears to have been populated already; skipping this step"); return Ok(()); @@ -3214,9 +3299,11 @@ impl SortitionDB { for snapshot in snapshots.into_iter() { let sql = "INSERT OR REPLACE INTO stacks_chain_tips (sortition_id,consensus_hash,block_hash,block_height) VALUES (?1,?2,?3,?4)"; let args = params![ - snapshot.sortition_id, - snapshot.canonical_stacks_tip_consensus_hash, - snapshot.canonical_stacks_tip_hash, + snapshot.sortition_id.sql_encoded(encoding), + snapshot + .canonical_stacks_tip_consensus_hash + .sql_encoded(encoding), + snapshot.canonical_stacks_tip_hash.sql_encoded(encoding), u64_to_sql(snapshot.canonical_stacks_tip_height)?, ]; tx.execute(sql, args)?; @@ -3265,6 +3352,7 @@ impl SortitionDB { &mut self, mut migrator: Option, ) -> Result<(), db_error> { + info!("Migrating sortition DB to schema 8..."); let canonical_tip = SortitionDB::get_canonical_burn_chain_tip(self.conn())?; // port over `stacks_chain_tips` table @@ -3281,10 +3369,7 @@ impl SortitionDB { } let tx = self.tx_begin()?; - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["8"], - )?; + Self::update_db_version(&tx, 8, None)?; tx.commit()?; Ok(()) @@ -3292,30 +3377,31 @@ impl SortitionDB { #[cfg_attr(test, mutants::skip)] fn apply_schema_9(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + info!("Migrating sortition DB to schema 9..."); for sql_exec in SORTITION_DB_SCHEMA_9 { tx.execute_batch(sql_exec)?; } SortitionDB::validate_and_replace_epochs(tx, epochs)?; - - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["9"], - )?; - + Self::update_db_version(tx, 9, None)?; Ok(()) } fn apply_schema_10(tx: &DBTx) -> Result<(), db_error> { + info!("Migrating sortition DB to schema 10..."); for sql_exec in SORTITION_DB_SCHEMA_10 { tx.execute_batch(sql_exec)?; } + Self::update_db_version(tx, 10, None)?; + Ok(()) + } - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["10"], - )?; - + fn apply_schema_11(tx: &DBTx, encoding: Option) -> Result<(), db_error> { + info!("Migrating sortition DB to schema 11..."); + for sql_exec in SORTITION_DB_SCHEMA_11 { + tx.execute_batch(sql_exec)?; + } + Self::update_db_version(tx, 11, encoding)?; Ok(()) } @@ -3384,6 +3470,11 @@ impl SortitionDB { let tx = self.tx_begin()?; SortitionDB::apply_schema_10(tx.deref())?; tx.commit()?; + } else if version == 10 { + let encoding = self.column_encoding; + let tx = self.tx_begin()?; + SortitionDB::apply_schema_11(tx.deref(), encoding)?; + tx.commit()?; } else if version == SORTITION_DB_VERSION { // this transaction is almost never needed let validated_epochs = StacksEpoch::validate_epochs(epochs); @@ -3392,6 +3483,8 @@ impl SortitionDB { return Ok(()); } + info!("Updating sortition DB epochs..."); + // epochs are out of date let tx = self.tx_begin()?; SortitionDB::validate_and_replace_epochs(&tx, epochs)?; @@ -3421,6 +3514,7 @@ impl SortitionDB { { let index_path = db_mkdirs(path)?; let marf = SortitionDB::open_index(&index_path)?; + let column_encoding = SortitionDB::get_column_encoding(marf.sqlite_conn())?; let mut db = SortitionDB { path: path.to_string(), marf, @@ -3429,8 +3523,13 @@ impl SortitionDB { first_block_height: migrator.get_burnchain().first_block_height, first_burn_header_hash: 
migrator.get_burnchain().first_block_hash.clone(), pox_constants: migrator.get_burnchain().pox_constants.clone(), + column_encoding, }; - db.check_schema_version_and_update(epochs, Some(migrator)) + db.check_schema_version_and_update(epochs, Some(migrator))?; + + let column_encoding = SortitionDB::get_column_encoding(db.conn())?; + db.column_encoding = column_encoding; + Ok(()) } else { debug!("SortitionDB is at the latest schema"); Ok(()) @@ -3468,9 +3567,10 @@ impl SortitionDB { if !rc_info.is_reward_info_known() { return Ok(()); } + let encoding = SortitionDB::get_column_encoding(&sort_tx)?; let sql = "REPLACE INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; let rc_json = serde_json::to_string(rc_info).map_err(db_error::SerializationError)?; - let args = params![sortition_id, rc_json]; + let args = params![sortition_id.sql_encoded(encoding), rc_json]; sort_tx.execute(sql, args)?; Ok(()) } @@ -3516,8 +3616,9 @@ impl SortitionDB { sortdb: &DBConn, sortition_id: &SortitionId, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(&sortdb)?; let sql = "SELECT reward_set FROM preprocessed_reward_sets WHERE sortition_id = ?1"; - let args = params![sortition_id]; + let args = params![sortition_id.sql_encoded(encoding)]; let reward_set_opt: Option = sortdb.query_row(sql, args, |row| row.get(0)).optional()?; @@ -3564,6 +3665,7 @@ impl SortitionDBConn<'_> { chain_tip: chain_tip.clone(), pox_constants: self.context.pox_constants.clone(), dryrun: self.context.dryrun, + column_encoding: self.context.column_encoding, }, ) } @@ -3733,7 +3835,7 @@ impl SortitionDBConn<'_> { sortition_id: &SortitionId, ) -> Result<(Vec, u128), db_error> { let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; - let args = params![sortition_id]; + let args = params![sortition_id.sql_encoded(self.context.column_encoding)]; let pox_addrs_json: String = query_row(self.conn(), sql, args)?.ok_or(db_error::NotFoundError)?; @@ -3853,17 +3955,25 @@ impl SortitionDB { burnchain_header_hash: &BurnchainHeaderHash, ) -> Result, BurnchainError> { let qry = "SELECT sortition_id FROM snapshots WHERE burn_header_hash = ? AND pox_valid = 1"; - query_row(self.conn(), qry, &[burnchain_header_hash]).map_err(BurnchainError::from) + query_row( + self.conn(), + qry, + params![burnchain_header_hash.sql_encoded(self.column_encoding)], + ) + .map_err(BurnchainError::from) } fn get_block_height( conn: &Connection, sortition_id: &SortitionId, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT block_height FROM snapshots WHERE sortition_id = ? LIMIT 1"; - conn.query_row(qry, &[sortition_id], |row| row.get(0)) - .optional() - .map_err(db_error::from) + conn.query_row(qry, params![sortition_id.sql_encoded(encoding)], |row| { + row.get(0) + }) + .optional() + .map_err(db_error::from) } /// Is the given block an expected PoX anchor in this sortition history? 
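For reference, the dual representation that the `sql_encoded(..)` call sites throughout these hunks rely on can be sketched as follows. This is an illustration, not the patch's code: `sql_encode_bytes` is a hypothetical stand-in for the trait method, and only `ColumnEncoding`, its `as_u8` accessor, and `to_hex` are taken from the surrounding context.

use stacks_common::util::db::ColumnEncoding;
use stacks_common::util::hash::to_hex;

/// Hypothetical helper showing the two on-disk forms a byte-array column can
/// take, keyed off the Option<ColumnEncoding> that the queries in this file
/// thread through from get_column_encoding(..).
fn sql_encode_bytes(bytes: &[u8], encoding: Option<ColumnEncoding>) -> Vec<u8> {
    match encoding {
        // Legacy databases (encoding column absent or NULL): store the
        // ASCII bytes of the hex string, e.g. b"deadbeef".
        None => to_hex(bytes).into_bytes(),
        // Schema-11 databases: a one-byte codec tag followed by the value's
        // SIP-003 serialization (for the fixed-width hash newtypes in this
        // file, that serialization is just the raw bytes).
        Some(enc) => {
            let mut out = Vec::with_capacity(1 + bytes.len());
            out.push(enc.as_u8());
            out.extend_from_slice(bytes);
            out
        }
    }
}

Under this scheme, passing `None` writes rows byte-identical to what the pre-migration code produced, which appears to be why every query here fetches the `Option<ColumnEncoding>` from the connection or transaction context rather than assuming the new format.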
@@ -3912,12 +4022,13 @@ impl SortitionDB { canonical_stacks_height: u64, stacks_block_accepted: Option, ) -> Result<(), BurnchainError> { + let encoding = SortitionDB::get_column_encoding(tx)?; if let Some(stacks_block_accepted) = stacks_block_accepted { let args = params![ - sortition_id, + sortition_id.sql_encoded(encoding), u64_to_sql(canonical_stacks_height)?, - canonical_stacks_bhh, - canonical_stacks_ch, + canonical_stacks_bhh.sql_encoded(encoding), + canonical_stacks_ch.sql_encoded(encoding), stacks_block_accepted, ]; tx.execute( @@ -3926,10 +4037,10 @@ impl SortitionDB { )?; } else { let args = params![ - sortition_id, + sortition_id.sql_encoded(encoding), u64_to_sql(canonical_stacks_height)?, - canonical_stacks_bhh, - canonical_stacks_ch, + canonical_stacks_bhh.sql_encoded(encoding), + canonical_stacks_ch.sql_encoded(encoding), ]; tx.execute( "UPDATE snapshots SET pox_valid = 1, canonical_stacks_tip_height = ?2, canonical_stacks_tip_hash = ?3, canonical_stacks_tip_consensus_hash = ?4 WHERE sortition_id = ?1", @@ -3956,6 +4067,8 @@ impl SortitionDB { G: FnMut(&mut SortitionDBTx), { let mut db_tx = self.tx_begin()?; + let encoding = SortitionDB::get_column_encoding(&db_tx)?; + let mut queue = vec![burn_block.clone()]; while let Some(header) = queue.pop() { @@ -3964,7 +4077,9 @@ impl SortitionDB { let mut stmt = db_tx.prepare( "SELECT DISTINCT burn_header_hash FROM snapshots WHERE parent_burn_header_hash = ?", )?; - for next_header in stmt.query_map(&[&header], |row| row.get(0))? { + for next_header in + stmt.query_map(&[&header.sql_encoded(encoding)], |row| row.get(0))? + { queue.push(next_header?); } } @@ -3977,7 +4092,7 @@ impl SortitionDB { let to_invalidate: Vec = query_rows( &db_tx, "SELECT * FROM snapshots WHERE parent_burn_header_hash = ?1", - &[&header], + &[&header.sql_encoded(encoding)], )?; for invalid in to_invalidate { debug!("Invalidate child of {}: {:?}", &header, &invalid); @@ -3993,7 +4108,7 @@ impl SortitionDB { canonical_stacks_tip_consensus_hash = "0000000000000000000000000000000000000000", stacks_block_accepted = 0 WHERE parent_burn_header_hash = ?"#, - &[&header], + &[&header.sql_encoded(encoding)], )?; } @@ -4059,7 +4174,7 @@ impl SortitionDB { let sql_transition_ops = "SELECT accepted_ops, consumed_keys FROM snapshot_transition_ops WHERE sortition_id = ?"; let transition_ops = self .conn() - .query_row(sql_transition_ops, &[id], |row| { + .query_row(sql_transition_ops, params![id.sql_encoded(self.column_encoding)], |row| { let accepted_ops: String = row.get_unwrap(0); let consumed_leader_keys: String = row.get_unwrap(1); Ok(BurnchainStateTransitionOps { @@ -4070,7 +4185,7 @@ impl SortitionDB { }) }) .optional()? - .expect("CORRUPTION: DB stored BlockSnapshot, but not the transition ops"); + .unwrap_or_else(|| panic!("CORRUPTION: DB stored BlockSnapshot for {id} (encoding {:?}), but not the transition ops", &self.column_encoding)); Ok(Some((snapshot, transition_ops))) } @@ -4422,10 +4537,11 @@ impl SortitionDB { conn: &Connection, burn_header_hash: &BurnchainHeaderHash, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; query_rows( conn, "SELECT * FROM stack_stx WHERE burn_header_hash = ? 
ORDER BY vtxindex", - &[burn_header_hash], + params![burn_header_hash.sql_encoded(encoding)], ) } @@ -4436,10 +4552,11 @@ impl SortitionDB { conn: &Connection, burn_header_hash: &BurnchainHeaderHash, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; query_rows( conn, "SELECT * FROM delegate_stx WHERE burn_header_hash = ? ORDER BY vtxindex", - &[burn_header_hash], + params![burn_header_hash.sql_encoded(encoding)], ) } @@ -4450,10 +4567,11 @@ impl SortitionDB { conn: &Connection, burn_header_hash: &BurnchainHeaderHash, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; query_rows( conn, "SELECT * FROM vote_for_aggregate_key WHERE burn_header_hash = ? ORDER BY vtxindex", - &[burn_header_hash], + params![burn_header_hash.sql_encoded(encoding)], ) } @@ -4464,10 +4582,11 @@ impl SortitionDB { conn: &Connection, burn_header_hash: &BurnchainHeaderHash, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; query_rows( conn, "SELECT * FROM transfer_stx WHERE burn_header_hash = ? ORDER BY vtxindex", - &[burn_header_hash], + params![burn_header_hash.sql_encoded(encoding)], ) } @@ -4476,8 +4595,9 @@ impl SortitionDB { conn: &Connection, burnchain_header_hash: &BurnchainHeaderHash, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let sql = "SELECT parent_burn_header_hash AS burn_header_hash FROM snapshots WHERE burn_header_hash = ?1"; - let args = params![burnchain_header_hash]; + let args = params![burnchain_header_hash.sql_encoded(encoding)]; let mut rows = query_rows::(conn, sql, args)?; // there can be more than one if there was a PoX reorg. If so, make sure they're _all the @@ -4566,11 +4686,12 @@ impl SortitionDB { conn: &Connection, tip: &BlockSnapshot, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let mut cursor = tip.clone(); loop { let result_at_tip : Option<(ConsensusHash, BlockHeaderHash, u64)> = conn.query_row_and_then( "SELECT consensus_hash,block_hash,block_height FROM stacks_chain_tips WHERE sortition_id = ? 
ORDER BY block_height DESC LIMIT 1", - &[&cursor.sortition_id], + &[&cursor.sortition_id.sql_encoded(encoding)], |row| Ok((row.get_unwrap(0), row.get_unwrap(1), (u64::try_from(row.get_unwrap::<_, i64>(2)).expect("FATAL: block height too high")))) ).optional()?; if let Some(stacks_tip) = result_at_tip { @@ -4654,8 +4775,9 @@ impl SortitionDB { conn: &Connection, consensus_hash: &ConsensusHash, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT burn_header_hash FROM snapshots WHERE consensus_hash = ?1 AND pox_valid = 1 LIMIT 1"; - let args = [&consensus_hash]; + let args = [&consensus_hash.sql_encoded(encoding)]; query_row_panic(conn, qry, &args, || { format!( "FATAL: multiple block snapshots for the same block with consensus hash {}", @@ -4668,8 +4790,9 @@ impl SortitionDB { conn: &Connection, consensus_hash: &ConsensusHash, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT sortition_id FROM snapshots WHERE consensus_hash = ?1 AND pox_valid = 1 LIMIT 1"; - let args = [&consensus_hash]; + let args = [&consensus_hash.sql_encoded(encoding)]; query_row_panic(conn, qry, &args, || { format!( "FATAL: multiple block snapshots for the same block with consensus hash {}", @@ -4684,8 +4807,9 @@ impl SortitionDB { conn: &Connection, consensus_hash: &ConsensusHash, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT * FROM snapshots WHERE consensus_hash = ?1"; - let args = [&consensus_hash]; + let args = [&consensus_hash.sql_encoded(encoding)]; query_row_panic(conn, qry, &args, || { format!( "FATAL: multiple block snapshots for the same block with consensus hash {}", @@ -4699,8 +4823,9 @@ impl SortitionDB { conn: &Connection, consensus_hash: &ConsensusHash, ) -> Result { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT 1 FROM snapshots WHERE consensus_hash = ?1"; - let args = [&consensus_hash]; + let args = [&consensus_hash.sql_encoded(encoding)]; let res: Option = query_row_panic(conn, qry, &args, || { format!( "FATAL: multiple block snapshots for the same block with consensus hash {}", @@ -4716,8 +4841,9 @@ impl SortitionDB { conn: &Connection, sortition_id: &SortitionId, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT * FROM snapshots WHERE sortition_id = ?1"; - let args = [&sortition_id]; + let args = [&sortition_id.sql_encoded(encoding)]; query_row_panic(conn, qry, &args, || { format!("FATAL: multiple block snapshots for the same block {sortition_id}") }) @@ -4730,10 +4856,21 @@ impl SortitionDB { /// Get the first snapshot pub fn get_first_block_snapshot(conn: &Connection) -> Result { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT * FROM snapshots WHERE consensus_hash = ?1"; - let result = query_row_panic(conn, qry, &[&ConsensusHash::empty()], || { - "FATAL: multiple first-block snapshots".into() - })?; + + test_debug!( + "encoding = {:?}, consensus-hash: {:?}", + &encoding, + &ConsensusHash::empty().sql_encoded(encoding) + ); + + let result = query_row_panic( + conn, + qry, + params![&ConsensusHash::empty().sql_encoded(encoding)], + || "FATAL: multiple first-block snapshots".into(), + )?; match result { None => { // should never happen @@ -4747,8 +4884,9 @@ impl SortitionDB { pub(crate) fn get_first_block_height_and_hash( conn: &Connection, ) -> Result<(u64, BurnchainHeaderHash), db_error> { + let encoding = 
SortitionDB::get_column_encoding(conn)?; let sql = "SELECT block_height, burn_header_hash FROM snapshots WHERE consensus_hash = ?1"; - let args = params![ConsensusHash::empty()]; + let args = params![ConsensusHash::empty().sql_encoded(encoding)]; let mut stmt = conn.prepare(sql)?; let mut rows = stmt.query(args)?; if let Some(row) = rows.next()? { @@ -4889,8 +5027,9 @@ impl SortitionDB { conn: &Connection, sortition: &SortitionId, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 ORDER BY vtxindex ASC"; - let args = params![sortition]; + let args = params![sortition.sql_encoded(encoding)]; query_rows(conn, qry, args) } @@ -4901,8 +5040,9 @@ impl SortitionDB { conn: &Connection, sortition: &SortitionId, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT * FROM missed_commits WHERE intended_sortition_id = ?1"; - let args = params![sortition]; + let args = params![sortition.sql_encoded(encoding)]; query_rows(conn, qry, args) } @@ -4913,8 +5053,9 @@ impl SortitionDB { conn: &Connection, sortition: &SortitionId, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT * FROM leader_keys WHERE sortition_id = ?1 ORDER BY vtxindex ASC"; - let args = params![sortition]; + let args = params![sortition.sql_encoded(encoding)]; query_rows(conn, qry, args) } @@ -4925,10 +5066,14 @@ impl SortitionDB { conn: &Connection, sortition: &SortitionId, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT vtxindex FROM block_commits WHERE sortition_id = ?1 AND txid = ( SELECT winning_block_txid FROM snapshots WHERE sortition_id = ?2 LIMIT 1) LIMIT 1"; - let args = params![sortition, sortition]; + let args = params![ + sortition.sql_encoded(encoding), + sortition.sql_encoded(encoding) + ]; conn.query_row(qry, args, |row| row.get(0)) .optional() .map_err(db_error::from) @@ -5009,8 +5154,13 @@ impl SortitionDB { ) -> Result, db_error> { assert!(block_height < BLOCK_HEIGHT_MAX); + let encoding = SortitionDB::get_column_encoding(conn)?; let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND block_height = ?2 AND vtxindex = ?3 LIMIT 2"; - let args = params![sortition, u64_to_sql(block_height)?, vtxindex]; + let args = params![ + sortition.sql_encoded(encoding), + u64_to_sql(block_height)?, + vtxindex + ]; query_row_panic(conn, qry, args, || { format!( "Multiple parent blocks at {},{} in {}", @@ -5029,6 +5179,7 @@ impl SortitionDB { key_vtxindex: u32, tip: &SortitionId, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(ic)?; assert!(key_block_height < BLOCK_HEIGHT_MAX); let ancestor_snapshot = match SortitionDB::get_ancestor_snapshot(ic, key_block_height, tip)? 
{ @@ -5040,7 +5191,7 @@ impl SortitionDB { let qry = "SELECT * FROM leader_keys WHERE sortition_id = ?1 AND block_height = ?2 AND vtxindex = ?3 LIMIT 2"; let args = params![ - ancestor_snapshot.sortition_id, + ancestor_snapshot.sortition_id.sql_encoded(encoding), u64_to_sql(key_block_height)?, key_vtxindex, ]; @@ -5060,6 +5211,7 @@ impl SortitionDB { consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result, db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let (sortition_id, winning_txid) = match SortitionDB::get_block_snapshot_consensus( conn, consensus_hash, @@ -5077,7 +5229,11 @@ impl SortitionDB { }; let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND block_header_hash = ?2 AND txid = ?3"; - let args = params![sortition_id, block_hash, winning_txid]; + let args = params![ + sortition_id.sql_encoded(encoding), + block_hash.sql_encoded(encoding), + winning_txid.sql_encoded(encoding) + ]; query_row_panic(conn, qry, args, || { format!("FATAL: multiple block commits for {}", &block_hash) }) @@ -5309,8 +5465,8 @@ impl SortitionHandleTx<'_> { // look at stacks_chain_tips table let res: Result<_, db_error> = self.deref().query_row_and_then( "SELECT consensus_hash,block_hash,block_height FROM stacks_chain_tips WHERE sortition_id = ? ORDER BY block_height DESC LIMIT 1", - &[&parent_snapshot.sortition_id], - |row| Ok((row.get_unwrap(0), row.get_unwrap(1), (u64::try_from(row.get_unwrap::<_, i64>(2)).expect("FATAL: block height too high")))) + &[&parent_snapshot.sortition_id.sql_encoded(self.context.column_encoding)], + |row| Ok((row.get(0)?, row.get(1)?, (u64::try_from(row.get::<_, i64>(2)?).expect("FATAL: block height too high")))) ); let ( canonical_stacks_tip_consensus_hash, @@ -5390,23 +5546,24 @@ impl SortitionHandleTx<'_> { } #[cfg(any(test, feature = "testing"))] - fn store_burn_distribution( + pub(crate) fn store_burn_distribution( &mut self, new_sortition: &SortitionId, transition: &BurnchainStateTransition, ) { let create = "CREATE TABLE IF NOT EXISTS snapshot_burn_distributions (sortition_id TEXT PRIMARY KEY, data TEXT NOT NULL);"; self.execute(create, NO_PARAMS).unwrap(); - let sql = "INSERT INTO snapshot_burn_distributions (sortition_id, data) VALUES (?, ?)"; + let sql = + "INSERT OR REPLACE INTO snapshot_burn_distributions (sortition_id, data) VALUES (?, ?)"; let args = params![ - new_sortition, + new_sortition.sql_encoded(self.context.column_encoding), serde_json::to_string(&transition.burn_dist).unwrap(), ]; self.execute(sql, args).unwrap(); } #[cfg(not(any(test, feature = "testing")))] - fn store_burn_distribution( + pub(crate) fn store_burn_distribution( &mut self, _new_sortition: &SortitionId, _transition: &BurnchainStateTransition, @@ -5420,7 +5577,7 @@ impl SortitionHandleTx<'_> { ) -> Result<(), db_error> { let sql = "INSERT INTO snapshot_transition_ops (sortition_id, accepted_ops, consumed_keys) VALUES (?, ?, ?)"; let args = params![ - new_sortition, + new_sortition.sql_encoded(self.context.column_encoding), serde_json::to_string(&transition.accepted_ops).unwrap(), serde_json::to_string(&transition.consumed_leader_keys).unwrap(), ]; @@ -5439,7 +5596,7 @@ impl SortitionHandleTx<'_> { } /// Store a blockstack burnchain operation - fn store_burnchain_transaction( + pub(crate) fn store_burnchain_transaction( &mut self, blockstack_op: &BlockstackOperationType, sort_id: &SortitionId, @@ -5516,14 +5673,18 @@ impl SortitionHandleTx<'_> { assert!(leader_key.block_height < BLOCK_HEIGHT_MAX); let args = params![ - leader_key.txid, + 
leader_key.txid.sql_encoded(self.context.column_encoding), leader_key.vtxindex, u64_to_sql(leader_key.block_height)?, - leader_key.burn_header_hash, - leader_key.consensus_hash, + leader_key + .burn_header_hash + .sql_encoded(self.context.column_encoding), + leader_key + .consensus_hash + .sql_encoded(self.context.column_encoding), leader_key.public_key.to_hex(), - to_hex(&leader_key.memo), - sort_id, + leader_key.memo.sql_encoded(self.context.column_encoding), + sort_id.sql_encoded(self.context.column_encoding), ]; self.execute("INSERT INTO leader_keys (txid, vtxindex, block_height, burn_header_hash, consensus_hash, public_key, memo, sortition_id) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", args)?; @@ -5534,10 +5695,11 @@ impl SortitionHandleTx<'_> { /// Insert a stack-stx op fn insert_stack_stx(&mut self, op: &StackStxOp) -> Result<(), db_error> { let args = params![ - op.txid, + op.txid.sql_encoded(self.context.column_encoding), op.vtxindex, u64_to_sql(op.block_height)?, - op.burn_header_hash, + op.burn_header_hash + .sql_encoded(self.context.column_encoding), op.sender.to_string(), op.reward_addr.to_db_string(), op.stacked_ustx.to_string(), @@ -5555,10 +5717,11 @@ impl SortitionHandleTx<'_> { /// Insert a delegate-stx op fn insert_delegate_stx(&mut self, op: &DelegateStxOp) -> Result<(), db_error> { let args = params![ - op.txid, + op.txid.sql_encoded(self.context.column_encoding), op.vtxindex, u64_to_sql(op.block_height)?, - op.burn_header_hash, + op.burn_header_hash + .sql_encoded(self.context.column_encoding), op.sender.to_string(), op.delegate_to.to_string(), serde_json::to_string(&op.reward_addr).unwrap(), @@ -5577,10 +5740,11 @@ impl SortitionHandleTx<'_> { op: &VoteForAggregateKeyOp, ) -> Result<(), db_error> { let args = params![ - op.txid, + op.txid.sql_encoded(self.context.column_encoding), op.vtxindex, u64_to_sql(op.block_height)?, - op.burn_header_hash, + op.burn_header_hash + .sql_encoded(self.context.column_encoding), op.sender.to_string(), serde_json::to_string(&op.aggregate_key).unwrap(), op.round, @@ -5597,14 +5761,15 @@ impl SortitionHandleTx<'_> { /// Insert a transfer-stx op fn insert_transfer_stx(&mut self, op: &TransferStxOp) -> Result<(), db_error> { let args = params![ - op.txid, + op.txid.sql_encoded(self.context.column_encoding), op.vtxindex, u64_to_sql(op.block_height)?, - op.burn_header_hash, + op.burn_header_hash + .sql_encoded(self.context.column_encoding), op.sender.to_string(), op.recipient.to_string(), op.transfered_ustx.to_string(), - to_hex(&op.memo), + op.memo.sql_encoded(self.context.column_encoding), ]; self.execute("REPLACE INTO transfer_stx (txid, vtxindex, block_height, burn_header_hash, sender_addr, recipient_addr, transfered_ustx, memo) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", args)?; @@ -5645,20 +5810,26 @@ impl SortitionHandleTx<'_> { } let args = params![ - block_commit.txid, + block_commit.txid.sql_encoded(self.context.column_encoding), block_commit.vtxindex, u64_to_sql(block_commit.block_height)?, - block_commit.burn_header_hash, - block_commit.block_header_hash, - block_commit.new_seed, + block_commit + .burn_header_hash + .sql_encoded(self.context.column_encoding), + block_commit + .block_header_hash + .sql_encoded(self.context.column_encoding), + block_commit + .new_seed + .sql_encoded(self.context.column_encoding), block_commit.parent_block_ptr, block_commit.parent_vtxindex, block_commit.key_block_ptr, block_commit.key_vtxindex, - to_hex(&block_commit.memo[..]), + block_commit.memo.sql_encoded(self.context.column_encoding), 
block_commit.burn_fee.to_string(), tx_input_str, - sort_id, + sort_id.sql_encoded(self.context.column_encoding), serde_json::to_value(&block_commit.commit_outs).unwrap(), block_commit.sunset_burn.to_string(), apparent_sender_str, @@ -5669,7 +5840,11 @@ impl SortitionHandleTx<'_> { self.execute("INSERT INTO block_commits (txid, vtxindex, block_height, burn_header_hash, block_header_hash, new_seed, parent_block_ptr, parent_vtxindex, key_block_ptr, key_vtxindex, memo, burn_fee, input, sortition_id, commit_outs, sunset_burn, apparent_sender, burn_parent_modulus, punished) \ VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19)", args)?; - let parent_args = params![sort_id, block_commit.txid, parent_sortition_id]; + let parent_args = params![ + sort_id.sql_encoded(self.context.column_encoding), + block_commit.txid.sql_encoded(self.context.column_encoding), + parent_sortition_id.sql_encoded(self.context.column_encoding) + ]; debug!( "Parent sortition of {},{},{} is {} (parent at {},{})", @@ -5692,12 +5867,20 @@ impl SortitionHandleTx<'_> { } /// Insert a missed block commit - fn insert_missed_block_commit(&mut self, op: &MissedBlockCommit) -> Result<(), db_error> { + pub(crate) fn insert_missed_block_commit( + &mut self, + op: &MissedBlockCommit, + ) -> Result<(), db_error> { // serialize tx input to JSON let tx_input_str = serde_json::to_string(&op.input).map_err(db_error::SerializationError)?; - let args = params![op.txid, op.intended_sortition, tx_input_str]; + let args = params![ + op.txid.sql_encoded(self.context.column_encoding), + op.intended_sortition + .sql_encoded(self.context.column_encoding), + tx_input_str + ]; self.execute( "INSERT OR REPLACE INTO missed_commits (txid, intended_sortition_id, input) \ @@ -5741,7 +5924,9 @@ impl SortitionHandleTx<'_> { let all_valid_sortitions: Vec = query_rows( self, "SELECT 1 FROM snapshots WHERE burn_header_hash = ?1 AND pox_valid = 1 LIMIT 1", - &[&snapshot.burn_header_hash], + &[&snapshot + .burn_header_hash + .sql_encoded(self.context.column_encoding)], )?; if !all_valid_sortitions.is_empty() { error!("FATAL: Tried to insert snapshot {:?}, but already have pox-valid sortition for {:?}", &snapshot, &snapshot.burn_header_hash); @@ -5751,30 +5936,55 @@ impl SortitionHandleTx<'_> { let args = params![ u64_to_sql(snapshot.block_height)?, - snapshot.burn_header_hash, + snapshot + .burn_header_hash + .sql_encoded(self.context.column_encoding), u64_to_sql(snapshot.burn_header_timestamp)?, - snapshot.parent_burn_header_hash, - snapshot.consensus_hash, - snapshot.ops_hash, + snapshot + .parent_burn_header_hash + .sql_encoded(self.context.column_encoding), + snapshot + .consensus_hash + .sql_encoded(self.context.column_encoding), + snapshot.ops_hash.sql_encoded(self.context.column_encoding), snapshot.total_burn.to_string(), snapshot.sortition, - snapshot.sortition_hash, - snapshot.winning_block_txid, - snapshot.winning_stacks_block_hash, - snapshot.index_root, + snapshot + .sortition_hash + .sql_encoded(self.context.column_encoding), + snapshot + .winning_block_txid + .sql_encoded(self.context.column_encoding), + snapshot + .winning_stacks_block_hash + .sql_encoded(self.context.column_encoding), + snapshot + .index_root + .sql_encoded(self.context.column_encoding), u64_to_sql(snapshot.num_sortitions)?, snapshot.stacks_block_accepted, u64_to_sql(snapshot.stacks_block_height)?, u64_to_sql(snapshot.arrival_index)?, u64_to_sql(snapshot.canonical_stacks_tip_height)?, - snapshot.canonical_stacks_tip_hash, - 
snapshot.canonical_stacks_tip_consensus_hash, - snapshot.sortition_id, - snapshot.parent_sortition_id, + snapshot + .canonical_stacks_tip_hash + .sql_encoded(self.context.column_encoding), + snapshot + .canonical_stacks_tip_consensus_hash + .sql_encoded(self.context.column_encoding), + snapshot + .sortition_id + .sql_encoded(self.context.column_encoding), + snapshot + .parent_sortition_id + .sql_encoded(self.context.column_encoding), snapshot.pox_valid, snapshot.accumulated_coinbase_ustx.to_string(), pox_payouts_json, - snapshot.miner_pk_hash, + snapshot + .miner_pk_hash + .as_ref() + .map(|miner_pk| miner_pk.sql_encoded(self.context.column_encoding)), ]; self.execute("INSERT INTO snapshots \ @@ -6363,8 +6573,8 @@ impl SortitionHandleTx<'_> { best_height: u64, ) -> Result<(), db_error> { let args = params![ - best_chh, - best_bhh, + best_chh.sql_encoded(self.context.column_encoding), + best_bhh.sql_encoded(self.context.column_encoding), u64_to_sql(best_height)?, u64_to_sql(tip.block_height)?, ]; @@ -6467,7 +6677,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: next_txid(), vtxindex, @@ -6727,6 +6937,7 @@ pub mod tests { first_block_height, first_burn_header_hash: first_burn_hash.clone(), pox_constants: PoxConstants::test_default(), + column_encoding: None, }; if create_flag { @@ -6763,6 +6974,7 @@ pub mod tests { sql_pragma(self.conn(), "journal_mode", &"WAL")?; sql_pragma(self.conn(), "foreign_keys", &true)?; + let column_encoding = self.column_encoding; let mut db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; // create first (sentinel) snapshot @@ -6803,26 +7015,38 @@ pub mod tests { let args = params![ u64_to_sql(first_snapshot.block_height)?, - first_snapshot.burn_header_hash, + first_snapshot.burn_header_hash.sql_encoded(column_encoding), u64_to_sql(first_snapshot.burn_header_timestamp)?, - first_snapshot.parent_burn_header_hash, - first_snapshot.consensus_hash, - first_snapshot.ops_hash, + first_snapshot + .parent_burn_header_hash + .sql_encoded(column_encoding), + first_snapshot.consensus_hash.sql_encoded(column_encoding), + first_snapshot.ops_hash.sql_encoded(column_encoding), first_snapshot.total_burn.to_string(), first_snapshot.sortition, - first_snapshot.sortition_hash, - first_snapshot.winning_block_txid, - first_snapshot.winning_stacks_block_hash, - first_snapshot.index_root, + first_snapshot.sortition_hash.sql_encoded(column_encoding), + first_snapshot + .winning_block_txid + .sql_encoded(column_encoding), + first_snapshot + .winning_stacks_block_hash + .sql_encoded(column_encoding), + first_snapshot.index_root.sql_encoded(column_encoding), u64_to_sql(first_snapshot.num_sortitions)?, first_snapshot.stacks_block_accepted, u64_to_sql(first_snapshot.stacks_block_height)?, u64_to_sql(first_snapshot.arrival_index)?, u64_to_sql(first_snapshot.canonical_stacks_tip_height)?, - first_snapshot.canonical_stacks_tip_hash, - first_snapshot.canonical_stacks_tip_consensus_hash, - first_snapshot.sortition_id, - first_snapshot.parent_sortition_id, + first_snapshot + .canonical_stacks_tip_hash + .sql_encoded(column_encoding), + first_snapshot + .canonical_stacks_tip_consensus_hash + .sql_encoded(column_encoding), + first_snapshot.sortition_id.sql_encoded(column_encoding), + first_snapshot + .parent_sortition_id + .sql_encoded(column_encoding), first_snapshot.pox_valid, first_snapshot.accumulated_coinbase_ustx.to_string(), pox_payouts_json, @@ -6858,8 +7082,14 @@ pub mod tests { bhh: &BlockHeaderHash, height: 
u64, ) -> Result<(), db_error> { + let encoding = SortitionDB::get_column_encoding(conn)?; let tip = SortitionDB::get_canonical_burn_chain_tip(conn)?; - let args = params![ch, bhh, u64_to_sql(height)?, tip.sortition_id]; + let args = params![ + ch.sql_encoded(encoding), + bhh.sql_encoded(encoding), + u64_to_sql(height)?, + tip.sortition_id.sql_encoded(encoding) + ]; conn.execute("UPDATE snapshots SET canonical_stacks_tip_consensus_hash = ?1, canonical_stacks_tip_hash = ?2, canonical_stacks_tip_height = ?3 WHERE sortition_id = ?4", args) .map_err(db_error::SqliteError)?; @@ -6883,10 +7113,11 @@ pub mod tests { txid: &Txid, ) -> Result, db_error> { // leader key? + let encoding = SortitionDB::get_column_encoding(conn)?; let leader_key_sql = "SELECT * FROM leader_keys WHERE txid = ?1 LIMIT 1"; - let args = [&txid]; + let args = params![txid.sql_encoded(encoding)]; - let leader_key_res = query_row_panic(conn, leader_key_sql, &args, || { + let leader_key_res = query_row_panic(conn, leader_key_sql, args, || { "Multiple leader keys with same txid".to_string() })?; if let Some(leader_key) = leader_key_res { @@ -6896,7 +7127,7 @@ pub mod tests { // block commit? let block_commit_sql = "SELECT * FROM block_commits WHERE txid = ?1 LIMIT 1"; - let block_commit_res = query_row_panic(conn, block_commit_sql, &args, || { + let block_commit_res = query_row_panic(conn, block_commit_sql, args, || { "Multiple block commits with same txid".to_string() })?; if let Some(block_commit) = block_commit_res { @@ -7079,7 +7310,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -7158,7 +7389,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -7186,7 +7417,7 @@ pub mod tests { parent_vtxindex: 0x5150, key_block_ptr: (block_height + 1) as u32, key_vtxindex: vtxindex as u16, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -7381,7 +7612,7 @@ pub mod tests { ) .unwrap(), public_key: public_key.clone(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -7873,7 +8104,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -7901,7 +8132,7 @@ pub mod tests { parent_vtxindex: 0x4342, key_block_ptr: (block_height + 1) as u32, key_vtxindex: vtxindex as u16, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -10080,7 +10311,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -10109,7 +10340,7 @@ pub mod tests { parent_vtxindex: 0, key_block_ptr: (block_height + 1) as u32, key_vtxindex: vtxindex as u16, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -10152,7 +10383,7 @@ pub mod tests { parent_vtxindex: genesis_block_commit.vtxindex as u16, key_block_ptr: (block_height + 1) as u32, key_vtxindex: vtxindex as u16, - memo: vec![0x80], + memo: 
vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -10195,7 +10426,7 @@ pub mod tests { parent_vtxindex: block_commit_1.vtxindex as u16, key_block_ptr: (block_height + 1) as u32, key_vtxindex: vtxindex as u16, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -10238,7 +10469,7 @@ pub mod tests { parent_vtxindex: genesis_block_commit.vtxindex as u16, key_block_ptr: (block_height + 1) as u32, key_vtxindex: vtxindex as u16, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 1, @@ -10640,7 +10871,7 @@ pub mod tests { sender: StacksAddress::new(1, Hash160([1u8; 20])).unwrap(), recipient: StacksAddress::new(2, Hash160([2u8; 20])).unwrap(), transfered_ustx: 123, - memo: vec![0x00, 0x01, 0x02, 0x03, 0x04], + memo: vec![0x00, 0x01, 0x02, 0x03, 0x04].into(), txid: Txid([0x01; 32]), vtxindex: 1, @@ -10738,7 +10969,7 @@ pub mod tests { sender: StacksAddress::new(1, Hash160([1u8; 20])).unwrap(), recipient: StacksAddress::new(2, Hash160([2u8; 20])).unwrap(), transfered_ustx: 123, - memo: vec![0x00, 0x01, 0x02, 0x03, 0x04], + memo: vec![0x00, 0x01, 0x02, 0x03, 0x04].into(), txid: Txid([0x01; 32]), vtxindex: 1, diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index f3abb66a06e..e18c9f2808f 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -503,7 +503,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: vrf_ident, key_vtxindex: 0, - memo: vec![], + memo: vec![].into(), burn_fee, input: (input_txid, 3), apparent_sender: BurnchainSigner::new_p2pkh(&StacksPublicKey::new()), @@ -803,7 +803,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -828,7 +828,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7") @@ -853,7 +853,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("eb54704f71d4a2d1128d60ffccced547054b52250ada6f3e7356165714f44d4c") @@ -885,7 +885,7 @@ mod tests { parent_vtxindex: 456, key_block_ptr: 123, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 12345, input: (Txid([0; 32]), 0), @@ -931,7 +931,7 @@ mod tests { parent_vtxindex: 111, key_block_ptr: 122, key_vtxindex: 457, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 12345, input: (Txid([0; 32]), 0), @@ -977,7 +977,7 @@ mod tests { parent_vtxindex: 111, key_block_ptr: 121, key_vtxindex: 10, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 23456, input: (Txid([0; 32]), 0), diff --git a/stackslib/src/chainstate/burn/mod.rs b/stackslib/src/chainstate/burn/mod.rs index a7b4c98a864..6cd9b5133bf 100644 --- a/stackslib/src/chainstate/burn/mod.rs +++ b/stackslib/src/chainstate/burn/mod.rs @@ -47,12 +47,14 @@ pub struct OpsHash(pub [u8; 32]); impl_array_newtype!(OpsHash, u8, 32); impl_array_hexstring_fmt!(OpsHash); impl_byte_array_newtype!(OpsHash, u8, 32); +impl_byte_array_message_codec!(OpsHash, 32); // rolling hash of PoW outputs to mix with the VRF seed on sortition pub struct SortitionHash(pub [u8; 32]); impl_array_newtype!(SortitionHash, u8, 32); impl_array_hexstring_fmt!(SortitionHash); 
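OpsHash and SortitionHash pick up a consensus codec here (VRFSeed gets the same treatment in stacks-common) because the SIP003 column form serializes values through StacksMessageCodec. A sketch of what `impl_byte_array_message_codec!` presumably expands to for a 32-byte newtype; the macro itself is not shown in this diff:

```rust
use std::io::{Read, Write};

use stacks_common::codec::{Error as CodecError, StacksMessageCodec};

pub struct Hash32(pub [u8; 32]); // stand-in for OpsHash / SortitionHash

impl StacksMessageCodec for Hash32 {
    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
        // A fixed-size byte array consensus-serializes as its raw bytes.
        fd.write_all(&self.0).map_err(CodecError::WriteError)
    }

    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Hash32, CodecError> {
        let mut buf = [0u8; 32];
        fd.read_exact(&mut buf).map_err(CodecError::ReadError)?;
        Ok(Hash32(buf))
    }
}
```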
impl_byte_array_newtype!(SortitionHash, u8, 32); +impl_byte_array_message_codec!(SortitionHash, 32); #[derive(Debug, Clone, PartialEq)] #[repr(u8)] diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 61852b64d0d..b07ce41295b 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -105,7 +105,7 @@ impl LeaderBlockCommitOp { key_vtxindex: paired_key.vtxindex as u16, parent_block_ptr: 0, parent_vtxindex: 0, - memo: vec![0x00], + memo: vec![0x00].into(), burn_fee, input: input.clone(), block_header_hash: block_header_hash.clone(), @@ -140,7 +140,7 @@ impl LeaderBlockCommitOp { key_vtxindex, parent_block_ptr: parent_block_height, parent_vtxindex, - memo: vec![], + memo: vec![].into(), burn_fee, input: input.clone(), block_header_hash: block_header_hash.clone(), @@ -433,7 +433,7 @@ impl LeaderBlockCommitOp { parent_vtxindex: data.parent_vtxindex, key_block_ptr: data.key_block_ptr, key_vtxindex: data.key_vtxindex, - memo: vec![data.memo], + memo: vec![data.memo].into(), burn_parent_modulus: data.burn_parent_modulus, commit_outs, @@ -507,7 +507,7 @@ pub struct RewardSetInfo { pub allow_nakamoto_punishment: bool, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct MissedBlockCommit { pub txid: Txid, pub input: (Txid, u32), @@ -1136,6 +1136,7 @@ mod tests { use stacks_common::deps_common::bitcoin::blockdata::transaction::{Transaction, TxOut}; use stacks_common::deps_common::bitcoin::network::serialize::{deserialize, serialize_hex}; use stacks_common::types::chainstate::{BlockHeaderHash, SortitionId, StacksAddress, VRFSeed}; + use stacks_common::util::db::ColumnEncoding; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::*; use stacks_common::util::vrf::VRFPublicKey; @@ -1722,7 +1723,7 @@ mod tests { parent_vtxindex: 0x5051, key_block_ptr: 0x60616263, key_vtxindex: 0x7071, - memo: vec![0x1f], + memo: vec![0x1f].into(), commit_outs: vec![ PoxAddress::Standard( StacksAddress::new(26, Hash160::empty()).unwrap(), None ), @@ -1738,7 +1739,8 @@ mod tests { block_height, burn_parent_modulus: ((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash, - treatment: vec![], }) + treatment: vec![], + }) }, OpFixture { // invalid -- wrong opcode @@ -1914,7 +1916,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -1936,7 +1938,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7") @@ -1965,7 +1967,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: 124, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -2121,7 +2123,7 @@ mod tests { parent_vtxindex: 444, key_block_ptr: 124, key_vtxindex: 457, - memo: vec![STACKS_EPOCH_2_1_MARKER], + memo: vec![STACKS_EPOCH_2_1_MARKER].into(), commit_outs: vec![], burn_fee: 12345, @@ -2172,7 +2174,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: 124, key_vtxindex: 457, - memo: vec![STACKS_EPOCH_2_1_MARKER], + memo: vec![STACKS_EPOCH_2_1_MARKER].into(), commit_outs: vec![], burn_fee: 12345, @@ -2223,7 +2225,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: 
124, key_vtxindex: 456, - memo: vec![STACKS_EPOCH_2_1_MARKER], + memo: vec![STACKS_EPOCH_2_1_MARKER].into(), commit_outs: vec![], burn_fee: 12345, @@ -2274,7 +2276,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: 124, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -2337,7 +2339,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: 124, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -2452,7 +2454,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -2474,7 +2476,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7") @@ -2504,7 +2506,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: 124, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -2656,7 +2658,7 @@ mod tests { parent_vtxindex: 456, key_block_ptr: 1, key_vtxindex: 457, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -2707,7 +2709,7 @@ mod tests { parent_vtxindex: 444, key_block_ptr: 2, key_vtxindex: 400, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -2759,7 +2761,7 @@ mod tests { key_block_ptr: 124, key_vtxindex: 457, commit_outs: vec![], - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 12345, input: (Txid([0; 32]), 0), @@ -2809,7 +2811,7 @@ mod tests { parent_vtxindex: 444, key_block_ptr: 124, key_vtxindex: 457, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -2862,7 +2864,7 @@ mod tests { parent_vtxindex: 444, key_block_ptr: 124, key_vtxindex: 457, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -2913,7 +2915,7 @@ mod tests { parent_vtxindex: 444, key_block_ptr: 124, key_vtxindex: 457, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 0, @@ -2964,7 +2966,7 @@ mod tests { parent_vtxindex: 444, key_block_ptr: 124, key_vtxindex: 457, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -3015,7 +3017,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: 124, key_vtxindex: 457, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -3066,7 +3068,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: 124, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -3146,6 +3148,10 @@ mod tests { panic!("Cannot evaluate"); } + fn column_encoding(&self) -> Option { + None + } + fn get_nakamoto_tip( &self, ) -> Result, db_error> { @@ -3190,7 +3196,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: 124, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -3508,7 +3514,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid([0x01; 32]), vtxindex: 456, block_height: first_block_height + 1, @@ -3524,7 +3530,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: leader_key.block_height as u32, key_vtxindex: leader_key.vtxindex as u16, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, @@ -3554,7 +3560,7 @@ mod 
tests { parent_vtxindex: 0, key_block_ptr: leader_key.block_height as u32, key_vtxindex: leader_key.vtxindex as u16, - memo: vec![STACKS_EPOCH_2_05_MARKER], + memo: vec![STACKS_EPOCH_2_05_MARKER].into(), commit_outs: vec![], burn_fee: 12345, @@ -3584,7 +3590,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: leader_key.block_height as u32, key_vtxindex: leader_key.vtxindex as u16, - memo: vec![STACKS_EPOCH_2_05_MARKER + 1], + memo: vec![STACKS_EPOCH_2_05_MARKER + 1].into(), commit_outs: vec![], burn_fee: 12345, @@ -3614,7 +3620,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: leader_key.block_height as u32, key_vtxindex: leader_key.vtxindex as u16, - memo: vec![STACKS_EPOCH_2_05_MARKER - 1], + memo: vec![STACKS_EPOCH_2_05_MARKER - 1].into(), commit_outs: vec![], burn_fee: 12345, @@ -3644,7 +3650,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: leader_key.block_height as u32, key_vtxindex: leader_key.vtxindex as u16, - memo: vec![], + memo: vec![].into(), commit_outs: vec![], burn_fee: 12345, @@ -3674,7 +3680,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: leader_key.block_height as u32, key_vtxindex: leader_key.vtxindex as u16, - memo: vec![STACKS_EPOCH_2_1_MARKER], + memo: vec![STACKS_EPOCH_2_1_MARKER].into(), commit_outs: vec![], burn_fee: 12345, @@ -3704,7 +3710,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: leader_key.block_height as u32, key_vtxindex: leader_key.vtxindex as u16, - memo: vec![STACKS_EPOCH_2_1_MARKER + 1], + memo: vec![STACKS_EPOCH_2_1_MARKER + 1].into(), commit_outs: vec![], burn_fee: 12345, @@ -3734,7 +3740,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: leader_key.block_height as u32, key_vtxindex: leader_key.vtxindex as u16, - memo: vec![STACKS_EPOCH_2_1_MARKER - 1], + memo: vec![STACKS_EPOCH_2_1_MARKER - 1].into(), commit_outs: vec![], burn_fee: 12345, @@ -3764,7 +3770,7 @@ mod tests { parent_vtxindex: 0, key_block_ptr: leader_key.block_height as u32, key_vtxindex: leader_key.vtxindex as u16, - memo: vec![], + memo: vec![].into(), commit_outs: vec![], burn_fee: 12345, diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 11cfffac91f..22bcda0e98a 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -27,13 +27,13 @@ use stacks_common::util::vrf::VRFPublicKey; use crate::burnchains::{Burnchain, BurnchainBlockHeader, BurnchainTransaction}; use crate::chainstate::burn::db::sortdb::SortitionHandleTx; -use crate::chainstate::burn::operations::{Error as op_error, LeaderKeyRegisterOp}; +use crate::chainstate::burn::operations::{BurnOpMemo, Error as op_error, LeaderKeyRegisterOp}; use crate::chainstate::burn::{ConsensusHash, Opcodes}; pub struct ParsedData { pub consensus_hash: ConsensusHash, pub public_key: VRFPublicKey, - pub memo: Vec, + pub memo: BurnOpMemo, } impl LeaderKeyRegisterOp { @@ -43,7 +43,7 @@ impl LeaderKeyRegisterOp { LeaderKeyRegisterOp { public_key: public_key.clone(), - memo: vec![], + memo: vec![].into(), // will be filled in consensus_hash: ConsensusHash([0u8; 20]), @@ -77,8 +77,8 @@ impl LeaderKeyRegisterOp { new_memo .get_mut(0..self.memo.len()) .expect("FATAL: improper handling of key_register op memo") - .copy_from_slice(&self.memo); - self.memo = new_memo; + .copy_from_slice(&self.memo.0[..]); + self.memo = new_memo.into(); } self.memo .get_mut(0..20) @@ -123,7 +123,7 @@ impl LeaderKeyRegisterOp { Some(ParsedData { consensus_hash, 
public_key: pubkey, - memo: memo.to_vec(), + memo: memo.to_vec().into(), }) } @@ -192,7 +192,7 @@ impl StacksMessageCodec for LeaderKeyRegisterOp { _ => self.memo.get(0..25), } .expect("FATAL: improper memo serialization"); - fd.write_all(&memo).map_err(codec_error::WriteError)?; + fd.write_all(memo).map_err(codec_error::WriteError)?; Ok(()) } @@ -281,7 +281,7 @@ pub mod tests { result: Some(LeaderKeyRegisterOp { consensus_hash: ConsensusHash::from_bytes(&hex_bytes("2222222222222222222222222222222222222222").unwrap()).unwrap(), public_key: VRFPublicKey::from_bytes(&hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap()).unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be(&hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562").unwrap()).unwrap(), vtxindex, @@ -295,7 +295,7 @@ pub mod tests { result: Some(LeaderKeyRegisterOp { consensus_hash: ConsensusHash::from_bytes(&hex_bytes("2222222222222222222222222222222222222222").unwrap()).unwrap(), public_key: VRFPublicKey::from_bytes(&hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap()).unwrap(), - memo: vec![], + memo: vec![].into(), txid: Txid::from_bytes_be(&hex_bytes("2fbf8d5be32dce49790d203ba59acbb0929d5243413174ff5d26a5c6f23dea65").unwrap()).unwrap(), vtxindex, @@ -486,7 +486,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -620,7 +620,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes( @@ -649,7 +649,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes( diff --git a/stackslib/src/chainstate/burn/operations/mod.rs b/stackslib/src/chainstate/burn/operations/mod.rs index f9528fb579f..0fa8b5a6f2e 100644 --- a/stackslib/src/chainstate/burn/operations/mod.rs +++ b/stackslib/src/chainstate/burn/operations/mod.rs @@ -14,12 +14,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::convert::From; +use std::io::{Read, Write}; +use std::ops::{Index, IndexMut}; +use std::slice::SliceIndex; use std::{error, fmt}; -use clarity::vm::types::PrincipalData; use serde::de::Error as DeError; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_json::json; +use stacks_common::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, VRFSeed, }; @@ -46,6 +50,91 @@ mod test; /// This module contains all burn-chain operations +/// Unused OP_RETURN bytes in some payloads. +/// The implementation is just featureful enough that we can use the +/// impl_byte_array_rusqlite_only!() macro. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct BurnOpMemo(pub Vec<u8>); + +impl StacksMessageCodec for BurnOpMemo { + fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.0) + } + + fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> { + let inner: Vec<u8> = read_next(fd)?; + Ok(Self(inner)) + } +} + +impl BurnOpMemo { + pub fn from_bytes(bytes: &[u8]) -> Option<BurnOpMemo> { + Some(BurnOpMemo(bytes.to_vec())) + } + + pub fn get<I: SliceIndex<[u8]>>(&self, idx: I) -> Option<&<I as SliceIndex<[u8]>>::Output> { + self.0.get(idx) + } + + pub fn get_mut<I: SliceIndex<[u8]>>( + &mut self, + idx: I, + ) -> Option<&mut <I as SliceIndex<[u8]>>::Output> { + self.0.get_mut(idx) + } + + pub fn len(&self) -> usize { + self.0.len() + } + + pub fn as_slice(&self) -> &[u8] { + self.0.as_slice() + } + + pub fn first(&self) -> Option<&u8> { + self.0.first() + } + + pub fn to_hex(&self) -> String { + to_hex(&self.0) + } +} + +impl From<Vec<u8>> for BurnOpMemo { + fn from(v: Vec<u8>) -> Self { + Self(v) + } +} + +impl Index<usize> for BurnOpMemo { + type Output = u8; + fn index(&self, idx: usize) -> &Self::Output { + self.0.index(idx) + } +} + +impl IndexMut<usize> for BurnOpMemo { + fn index_mut(&mut self, idx: usize) -> &mut Self::Output { + self.0.index_mut(idx) + } +} + +impl serde::Serialize for BurnOpMemo { + fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { + self.0.serialize(s) + } +} + +impl<'de> serde::Deserialize<'de> for BurnOpMemo { + fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { + let inner = Vec::<u8>::deserialize(d)?; + Ok(Self(inner)) + } +} + +impl_byte_array_rusqlite_only!(BurnOpMemo); +impl_byte_array_from_column_only!(BurnOpMemo); + #[derive(Debug)] pub enum Error { /// Failed to parse the operation from the burnchain transaction @@ -169,7 +258,7 @@ pub struct TransferStxOp { pub sender: StacksAddress, pub recipient: StacksAddress, pub transfered_ustx: u128, - pub memo: Vec<u8>, + pub memo: BurnOpMemo, // common to all transactions pub txid: Txid, // transaction ID @@ -221,7 +310,7 @@ pub struct LeaderBlockCommitOp { pub parent_vtxindex: u16, // offset in the parent block where the parent block hash can be found pub key_block_ptr: u32, // pointer to the block that contains the leader key registration pub key_vtxindex: u16, // offset in the block where the leader key can be found - pub memo: Vec<u8>, // extra unused byte + pub memo: BurnOpMemo, // extra unused byte /// how many burn tokens (e.g.
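The accessor surface above (`From<Vec<u8>>`, `Index`, `get`/`len`/`as_slice`/`to_hex`) is what keeps the rest of this patch mechanical: every former `Vec<u8>` memo literal just gains an `.into()`. A quick check of the ergonomics, using only the methods declared in this hunk:

```rust
use crate::chainstate::burn::operations::BurnOpMemo;

fn memo_ergonomics() {
    // From<Vec<u8>> is why `memo: vec![0x80].into()` works at every call site.
    let memo: BurnOpMemo = vec![0x80, 0x81].into();
    assert_eq!(memo.len(), 2);
    assert_eq!(memo[0], 0x80); // Index<usize>
    assert_eq!(memo.as_slice(), &[0x80, 0x81]);
    assert_eq!(memo.first(), Some(&0x80));
    assert_eq!(memo.to_hex(), "8081");
}
```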
satoshis) were committed to produce this block pub burn_fee: u64, @@ -264,7 +353,7 @@ fn default_treatment() -> Vec { pub struct LeaderKeyRegisterOp { pub consensus_hash: ConsensusHash, // consensus hash at time of issuance pub public_key: VRFPublicKey, // EdDSA public key - pub memo: Vec, // extra bytes in the op-return + pub memo: BurnOpMemo, // extra bytes in the op-return // common to all transactions pub txid: Txid, // transaction ID @@ -308,40 +397,6 @@ pub struct VoteForAggregateKeyOp { pub burn_header_hash: BurnchainHeaderHash, // hash of the burn chain block header } -fn hex_ser_memo(bytes: &[u8], s: S) -> Result { - let inst = to_hex(bytes); - s.serialize_str(inst.as_str()) -} - -fn hex_deser_memo<'de, D: serde::Deserializer<'de>>(d: D) -> Result, D::Error> { - let inst_str = String::deserialize(d)?; - hex_bytes(&inst_str).map_err(serde::de::Error::custom) -} - -fn hex_serialize(bhh: &BurnchainHeaderHash, s: S) -> Result { - let inst = bhh.to_hex(); - s.serialize_str(inst.as_str()) -} - -fn hex_deserialize<'de, D: serde::Deserializer<'de>>( - d: D, -) -> Result { - let inst_str = String::deserialize(d)?; - BurnchainHeaderHash::from_hex(&inst_str).map_err(serde::de::Error::custom) -} - -fn principal_serialize(pd: &PrincipalData, s: S) -> Result { - let inst = pd.to_string(); - s.serialize_str(inst.as_str()) -} - -fn principal_deserialize<'de, D: serde::Deserializer<'de>>( - d: D, -) -> Result { - let inst_str = String::deserialize(d)?; - PrincipalData::parse(&inst_str).map_err(serde::de::Error::custom) -} - #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum BlockstackOperationType { LeaderKeyRegister(LeaderKeyRegisterOp), @@ -742,7 +797,7 @@ impl BlockstackOperationType { "transfer_stx": { "burn_block_height": op.block_height, "burn_header_hash": &op.burn_header_hash.to_hex(), - "memo": memo_serialize(&op.memo), + "memo": memo_serialize(&op.memo.0[..]), "recipient": stacks_addr_serialize(&op.recipient), "sender": stacks_addr_serialize(&op.sender), "transfered_ustx": op.transfered_ustx, diff --git a/stackslib/src/chainstate/burn/operations/test/mod.rs b/stackslib/src/chainstate/burn/operations/test/mod.rs index 331d2bb6c22..b74859e8695 100644 --- a/stackslib/src/chainstate/burn/operations/test/mod.rs +++ b/stackslib/src/chainstate/burn/operations/test/mod.rs @@ -180,7 +180,7 @@ fn serde_blockstack_ops() { recipient: StacksAddress::new(0, Hash160([6u8; 20])) .expect("Unable to create StacksAddress"), transfered_ustx: 20, - memo: vec![], + memo: vec![].into(), txid: Txid([3u8; 32]), vtxindex: 1, block_height: 20, @@ -226,7 +226,7 @@ fn serde_blockstack_ops() { parent_vtxindex: 2, key_block_ptr: 3, key_vtxindex: 4, - memo: vec![], + memo: vec![].into(), burn_fee: 5, vtxindex: 1, input: (Txid([1u8; 32]), 1), @@ -250,7 +250,7 @@ fn serde_blockstack_ops() { LeaderKeyRegisterOp { consensus_hash: ConsensusHash([0u8; 20]), public_key: VRFPublicKey::from_private(&VRFPrivateKey::new()), - memo: vec![], + memo: vec![].into(), txid: Txid([3u8; 32]), vtxindex: 0, block_height: 1, diff --git a/stackslib/src/chainstate/burn/operations/test/serialization.rs b/stackslib/src/chainstate/burn/operations/test/serialization.rs index 8d20811c59b..fdc1ba73faf 100644 --- a/stackslib/src/chainstate/burn/operations/test/serialization.rs +++ b/stackslib/src/chainstate/burn/operations/test/serialization.rs @@ -20,7 +20,7 @@ fn test_serialization_transfer_stx_op() { sender, recipient, transfered_ustx: 10, - memo: vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05], + memo: vec![0x00, 0x01, 0x02, 0x03, 
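Deleting `hex_ser_memo`/`hex_deser_memo` (and the other one-off serde helpers) is possible because BurnOpMemo's serde impls delegate straight to the inner `Vec<u8>`. One consequence worth calling out, assuming serde_json as the sink: the type's own JSON form is an integer array rather than a hex string (the burnchain-op event payload above still goes through `memo_serialize` for its hex form):

```rust
use crate::chainstate::burn::operations::BurnOpMemo;

fn memo_serde_round_trip() {
    let memo = BurnOpMemo(vec![1, 2, 3]);
    // Serialize/Deserialize delegate to the inner Vec<u8>.
    let json = serde_json::to_string(&memo).unwrap();
    assert_eq!(json, "[1,2,3]");
    let back: BurnOpMemo = serde_json::from_str(&json).unwrap();
    assert_eq!(back, memo);
}
```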
0x04, 0x05].into(), txid: Txid([10u8; 32]), vtxindex: 10, block_height: 10, diff --git a/stackslib/src/chainstate/burn/operations/transfer_stx.rs b/stackslib/src/chainstate/burn/operations/transfer_stx.rs index 9d12a539609..df20ed79178 100644 --- a/stackslib/src/chainstate/burn/operations/transfer_stx.rs +++ b/stackslib/src/chainstate/burn/operations/transfer_stx.rs @@ -20,13 +20,15 @@ use stacks_common::codec::{write_next, Error as codec_error, StacksMessageCodec} use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksAddress}; use crate::burnchains::{BurnchainBlockHeader, BurnchainTransaction, Txid}; -use crate::chainstate::burn::operations::{parse_u128_from_be, Error as op_error, TransferStxOp}; +use crate::chainstate::burn::operations::{ + parse_u128_from_be, BurnOpMemo, Error as op_error, TransferStxOp, +}; use crate::chainstate::burn::Opcodes; // return type from parse_data below struct ParsedData { transfered_ustx: u128, - memo: Vec, + memo: BurnOpMemo, } impl TransferStxOp { @@ -40,7 +42,7 @@ impl TransferStxOp { sender: sender.clone(), recipient: recipient.clone(), transfered_ustx, - memo: vec![], + memo: vec![].into(), // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -82,7 +84,7 @@ impl TransferStxOp { } let transfered_ustx = parse_u128_from_be(data.get(0..16)?).unwrap(); - let memo = Vec::from(data.get(16..)?); + let memo = Vec::from(data.get(16..)?).into(); Some(ParsedData { transfered_ustx, @@ -203,7 +205,8 @@ impl StacksMessageCodec for TransferStxOp { write_next(fd, &(Opcodes::TransferStx as u8))?; fd.write_all(&self.transfered_ustx.to_be_bytes()) .map_err(codec_error::WriteError)?; - fd.write_all(&self.memo).map_err(codec_error::WriteError)?; + fd.write_all(self.memo.as_slice()) + .map_err(codec_error::WriteError)?; Ok(()) } @@ -300,6 +303,6 @@ mod tests { ) ); assert_eq!(op.transfered_ustx, u128::from_be_bytes([1; 16])); - assert_eq!(op.memo, vec![1; 61]); + assert_eq!(op.memo, vec![1; 61].into()); } } diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs index e6449824a8e..baf6bd0c35c 100644 --- a/stackslib/src/chainstate/burn/sortition.rs +++ b/stackslib/src/chainstate/burn/sortition.rs @@ -1115,7 +1115,7 @@ mod test { parent_vtxindex: 0, key_block_ptr: 0, key_vtxindex: 0, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 100, diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index f25b2e5fc20..3067b6c3fdc 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -20,10 +20,12 @@ use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex}; use clarity::vm::costs::ExecutionCost; +use rusqlite::params; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksBlockId, + BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksBlockId, TrieHash, VRFSeed, }; +use stacks_common::util::db::ColumnEncoding; use stacks_common::util::get_epoch_time_secs; pub use self::comm::CoordinatorCommunication; @@ -35,8 +37,8 @@ use crate::burnchains::{ }; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleTx}; use crate::chainstate::burn::operations::leader_block_commit::RewardSetInfo; -use crate::chainstate::burn::operations::BlockstackOperationType; -use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use crate::chainstate::burn::operations::{BlockstackOperationType, BurnOpMemo}; +use 
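For reference while reading the `parse_data` change above: the TransferStx OP_RETURN payload after the opcode is a fixed 16-byte big-endian u128 followed by a free-form memo, and the new BurnOpMemo simply wraps that tail. A condensed restatement of the layout:

```rust
use crate::chainstate::burn::operations::parse_u128_from_be;

fn transfer_stx_payload_layout() {
    // bytes 0..16: transfered_ustx (big-endian u128); bytes 16..: memo
    let mut data = 123u128.to_be_bytes().to_vec();
    data.extend_from_slice(&[0xde, 0xad]);
    assert_eq!(parse_u128_from_be(&data[0..16]).unwrap(), 123);
    assert_eq!(&data[16..], &[0xde, 0xad]);
}
```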
crate::chainstate::burn::{BlockSnapshot, ConsensusHash, OpsHash, SortitionHash}; use crate::chainstate::coordinator::comm::{ ArcCounterCoordinatorNotices, CoordinatorEvents, CoordinatorNotices, CoordinatorReceivers, }; @@ -1834,11 +1836,95 @@ pub fn check_chainstate_db_versions( /// Sortition DB migrator. /// This is an opaque struct that is meant to assist migrating an epoch 2.1-2.4 chainstate to epoch -/// 2.5. It will not work for 2.5 to 3.0+ +/// 2.5 for nodes that are not yet in epoch 2.5. +/// It will also re-encode all hex strings and JSON blobs into SIP003 byte strings if asked. pub struct SortitionDBMigrator { chainstate: Option<StacksChainState>, burnchain: Burnchain, burnchain_db: BurnchainDB, + column_encoding: Option<ColumnEncoding>, +} + +/// Macro to create a function to change the encoding of a given list of rows with the given types. +/// Each type must implement SqlEncoded. +macro_rules! impl_table_reencode { + ($func_name:ident, $table_name:expr; $($column_name:expr => $type:ty),+) => { + fn $func_name(tx: &DBTx, current_encoding: Option<ColumnEncoding>, new_encoding: Option<ColumnEncoding>) -> Result<(), DBError> { + use crate::stacks_common::util::db::SqlEncoded; + + info!("Reencoding table `{}`...", $table_name); + + let mut column_names : Vec<String> = vec![]; + $({ + column_names.push($column_name.into()); + })+ + + let qry = format!("SELECT rowid,{} FROM {}", column_names.join(","), $table_name); + let mut stmt = tx.prepare(&qry)?; + let mut rows = stmt.query([])?; + + let mut reencoded_rows : Vec<(i64, Vec<Vec<u8>>)> = vec![]; + let mut loaded_rows = 0usize; + + while let Some(row) = rows.next()? { + let mut decoded_row = vec![]; + let rowid: i64 = row.get("rowid")?; + $({ + let decoded = <$type>::sql_decoded(&row, $column_name, current_encoding)?; + let reencoded = decoded.sql_encoded(new_encoding); + + test_debug!("Column '{}' value '{:?}' re-encoded from encoding {:?} to '{:?}' (encoding {:?})", $column_name, &decoded, current_encoding, &reencoded, new_encoding); + decoded_row.push(reencoded); + })+ + + reencoded_rows.push((rowid, decoded_row)); + + loaded_rows = loaded_rows.saturating_add(1); + if loaded_rows % 100 == 0 { + info!("Decoded {loaded_rows}..."); + } + } + + debug!("Decoded {} rows from `{}`", loaded_rows, $table_name); + + let mut field_counter = 1i64; + let qry = format!("UPDATE {} SET {} WHERE rowid = ?{}", + // table name + $table_name, + // SET list + { + column_names + .iter() + .map(|column_name| { + let ret = format!("{} = ?{}", column_name, field_counter); + field_counter += 1; + ret + }) + .collect::<Vec<_>>() + .join(", ") + }, + // rowid + field_counter + ); + + let mut stmt = tx.prepare(&qry)?; + + for (row_count, (rowid, reencoded_row)) in reencoded_rows.into_iter().enumerate() { + for (i, arg) in reencoded_row.iter().enumerate() { + // NB: SQLite's parameter indexing is 1-indexed + stmt.raw_bind_parameter(i + 1, arg)?; + } + stmt.raw_bind_parameter(reencoded_row.len() + 1, rowid)?; + let _ = stmt.raw_execute()?; + + if row_count > 0 && row_count % 100 == 0 { + info!("Stored {} reencoded rows...", row_count); + } + } + + Ok(()) + } + } } impl SortitionDBMigrator { @@ -1862,14 +1948,26 @@ impl SortitionDBMigrator { chainstate: Some(chainstate), burnchain, burnchain_db, + column_encoding: None, }) } + /// Additive constructor for column encoding + pub fn with_column_encoding(mut self, encoding: ColumnEncoding) -> Self { + self.column_encoding = Some(encoding); + self + } + /// Get the burnchain reference pub fn get_burnchain(&self) -> &Burnchain { &self.burnchain } + /// Get the default column encoding + pub fn get_column_encoding(&self) -> Option<ColumnEncoding> { + self.column_encoding + } + /// Regenerate a reward cycle. Do this by re-calculating the RewardSetInfo for the given /// reward cycle. This should store the preprocessed reward cycle info to the sortition DB. pub fn regenerate_reward_cycle_info( @@ -1916,6 +2014,153 @@ impl SortitionDBMigrator { .expect("FATAL: No reward cycle info calculated at a reward-cycle start"); Ok(rc_info) } + + impl_table_reencode!(reencode_block_commit_parents, + "block_commit_parents"; + "block_commit_txid" => Txid, + "block_commit_sortition_id" => SortitionId, + "parent_sortition_id" => SortitionId + ); + + // TODO: commit_outs + // TODO: input + // TODO: punished + impl_table_reencode!(reencode_block_commits, + "block_commits"; + "txid" => Txid, + "burn_header_hash" => BurnchainHeaderHash, + "sortition_id" => SortitionId, + "block_header_hash" => BlockHeaderHash, + "new_seed" => VRFSeed, + "memo" => BurnOpMemo + ); + + // TODO: sender_addr + // TODO: delegate_to + // TODO: reward_addr + impl_table_reencode!(reencode_delegate_stx, + "delegate_stx"; + "txid" => Txid, + "burn_header_hash" => BurnchainHeaderHash + ); + + // TODO: public_key + impl_table_reencode!(reencode_leader_keys, + "leader_keys"; + "txid" => Txid, + "burn_header_hash" => BurnchainHeaderHash, + "sortition_id" => SortitionId, + "consensus_hash" => ConsensusHash, + "memo" => BurnOpMemo + ); + + // TODO: input + impl_table_reencode!(reencode_missed_commits, + "missed_commits"; + "txid" => Txid, + "intended_sortition_id" => SortitionId + ); + + // TODO: reward_set + impl_table_reencode!(reencode_preprocessed_reward_sets, + "preprocessed_reward_sets"; + "sortition_id" => SortitionId + ); + + // TODO: accepted_ops + // TODO: consumed_keys + impl_table_reencode!(reencode_snapshot_transition_ops, + "snapshot_transition_ops"; + "sortition_id" => SortitionId + ); + + // TODO: total_burn + // TODO: pox_payouts + impl_table_reencode!(reencode_snapshots, + "snapshots"; + "burn_header_hash" => BurnchainHeaderHash, + "sortition_id" => SortitionId, + "parent_sortition_id" => SortitionId, + "parent_burn_header_hash" => BurnchainHeaderHash, + "consensus_hash" => ConsensusHash, + "ops_hash" => OpsHash, + "sortition_hash" => SortitionHash, + "winning_block_txid" => Txid, + "winning_stacks_block_hash" => BlockHeaderHash, + "index_root" => TrieHash, + "canonical_stacks_tip_hash" => BlockHeaderHash, + "canonical_stacks_tip_consensus_hash" => ConsensusHash + ); + + // TODO: sender_addr + // TODO: reward_addr + // TODO: signer_key + // TODO: max_amount + impl_table_reencode!(reencode_stack_stx, + "stack_stx"; + "burn_header_hash" => BurnchainHeaderHash + ); + + impl_table_reencode!(reencode_stacks_chain_tips, + "stacks_chain_tips"; + "sortition_id" => SortitionId, + "consensus_hash" => ConsensusHash, + "block_hash" => BlockHeaderHash + ); + + // TODO: sender_addr + // TODO: recipient_addr + // TODO: transferred_ustx + impl_table_reencode!(reencode_transfer_stx, + "transfer_stx"; + "burn_header_hash" => BurnchainHeaderHash, + "memo" => BurnOpMemo + ); + + // TODO: sender_addr + // TODO: aggregate_key + // TODO: signer_key + impl_table_reencode!(reencode_vote_for_aggregate_key, + "vote_for_aggregate_key"; + "txid" => Txid, + "burn_header_hash" => BurnchainHeaderHash + ); + + /// Reencode all hex strings and JSON blobs into SIP003 data + pub fn reencode_tables(&self, sortdb: &mut SortitionDB) -> Result<(), DBError> { + // sanity check -- we must not yet have the given encoding + let db_encoding =
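Each `impl_table_reencode!` invocation above expands to the same two-pass shape: read every row under the current encoding, then rewrite it by rowid under the new one. A stripped-down single-column version of that shape, self-contained over rusqlite and assuming (as elsewhere in this patch) a hex-TEXT legacy form and a 0x00-tagged SIP003 blob; the generated functions additionally handle multiple typed columns, SqlEncoded decoding, and progress logging:

```rust
use rusqlite::{params, Transaction};

fn reencode_hex_column_to_sip003(
    tx: &Transaction,
    table: &str,
    column: &str,
) -> rusqlite::Result<()> {
    // Pass 1: load every row, decoding the legacy hex TEXT value.
    let mut rows: Vec<(i64, Vec<u8>)> = vec![];
    {
        let mut stmt = tx.prepare(&format!("SELECT rowid,{column} FROM {table}"))?;
        let mut res = stmt.query([])?;
        while let Some(row) = res.next()? {
            let rowid: i64 = row.get(0)?;
            let hex: String = row.get(1)?;
            let mut bytes = vec![0x00u8]; // assumed SIP003 tag byte
            bytes.extend((0..hex.len()).step_by(2).map(|i| {
                // sketch only: real code should surface malformed hex as an error
                u8::from_str_radix(&hex[i..i + 2], 16).unwrap()
            }));
            rows.push((rowid, bytes));
        }
    }
    // Pass 2: write the re-encoded value back, keyed by rowid.
    let mut stmt = tx.prepare(&format!("UPDATE {table} SET {column} = ?1 WHERE rowid = ?2"))?;
    for (rowid, bytes) in rows {
        stmt.execute(params![bytes, rowid])?;
    }
    Ok(())
}
```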
SortitionDB::get_column_encoding(sortdb.conn())?; + let new_encoding = self.column_encoding; + if db_encoding == new_encoding { + return Ok(()); + } + if db_encoding.is_some() { + // some other unsupported encoding + return Err(DBError::NotImplemented); + } + + let tx = sortdb.tx_begin()?; + tx.execute("PRAGMA defer_foreign_keys = 1", params![])?; + Self::reencode_block_commits(&tx, db_encoding, new_encoding)?; + Self::reencode_block_commit_parents(&tx, db_encoding, new_encoding)?; + Self::reencode_delegate_stx(&tx, db_encoding, new_encoding)?; + Self::reencode_leader_keys(&tx, db_encoding, new_encoding)?; + Self::reencode_missed_commits(&tx, db_encoding, new_encoding)?; + Self::reencode_preprocessed_reward_sets(&tx, db_encoding, new_encoding)?; + Self::reencode_snapshots(&tx, db_encoding, new_encoding)?; + Self::reencode_snapshot_transition_ops(&tx, db_encoding, new_encoding)?; + Self::reencode_stack_stx(&tx, db_encoding, new_encoding)?; + Self::reencode_stacks_chain_tips(&tx, db_encoding, new_encoding)?; + Self::reencode_transfer_stx(&tx, db_encoding, new_encoding)?; + Self::reencode_vote_for_aggregate_key(&tx, db_encoding, new_encoding)?; + tx.execute("PRAGMA defer_foreign_keys = 0", params![])?; + + SortitionDB::set_column_encoding(&tx, new_encoding)?; + tx.commit()?; + + sortdb.column_encoding = new_encoding; + Ok(()) + } } /// Migrate all databases to their latest schemas. diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index ac39b68aafc..f7ca12fa61f 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -27,7 +27,7 @@ use clarity::vm::errors::Error as InterpreterError; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::Value; use lazy_static::lazy_static; -use rusqlite::Connection; +use rusqlite::{params, Connection}; use stacks_common::address; use stacks_common::address::AddressHashMode; use stacks_common::consts::CHAIN_ID_TESTNET; @@ -38,12 +38,15 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, }; use stacks_common::types::StacksPublicKeyBuffer; -use stacks_common::util::hash::Hash160; +use stacks_common::util::db::SqlEncoded; +use stacks_common::util::hash::{hex_bytes, Hash160}; +use stacks_common::util::uint::Uint256; use stacks_common::util::vrf::*; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::burnchains::db::*; use crate::burnchains::*; +use crate::chainstate::burn::db::sortdb::tests::test_append_snapshot; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::distribution::BurnSamplePoint; use crate::chainstate::burn::operations::leader_block_commit::*; @@ -73,6 +76,25 @@ lazy_static! { pub static ref STACKS_BLOCK_HEADERS: Arc = Arc::new(AtomicU64::new(1)); } +impl SortitionHandleTx<'_> { + /// Write or replace a burnchain state transition. + /// Used for testing only, so we can test reencoding. 
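`reencode_tables` runs the whole rewrite in a single transaction with foreign-key enforcement deferred, since rows in tables such as block_commit_parents reference sortition ids that are themselves rewritten mid-flight. Driving it looks like this, condensed from the test harness later in this patch (constructor arguments, including the trailing None, exactly as the harness passes them):

```rust
use stacks_common::util::db::ColumnEncoding;

use crate::burnchains::Burnchain;
use crate::chainstate::burn::db::sortdb::SortitionDB;
use crate::chainstate::coordinator::SortitionDBMigrator;
use crate::chainstate::stacks::db::StacksChainState;

// Condensed from test_sortitiondb_reencoding; error handling elided.
fn migrate_to_sip003(
    burnchain: Burnchain,
    chainstate: &StacksChainState,
    sortdb: &mut SortitionDB,
) {
    let migrator = SortitionDBMigrator::new(burnchain, &chainstate.root_path, None)
        .unwrap()
        .with_column_encoding(ColumnEncoding::SIP003);
    migrator.reencode_tables(sortdb).unwrap();
    // A second call is a no-op: reencode_tables returns early once the DB
    // already reports the target encoding.
    migrator.reencode_tables(sortdb).unwrap();
}
```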
+ fn store_or_replace_transition_ops( + &mut self, + new_sortition: &SortitionId, + transition: &BurnchainStateTransition, + ) { + let sql = "INSERT OR REPLACE INTO snapshot_transition_ops (sortition_id, accepted_ops, consumed_keys) VALUES (?, ?, ?)"; + let args = params![ + new_sortition.sql_encoded(self.context.column_encoding), + serde_json::to_string(&transition.accepted_ops).unwrap(), + serde_json::to_string(&transition.consumed_leader_keys).unwrap(), + ]; + self.execute(sql, args).unwrap(); + self.store_burn_distribution(new_sortition, transition); + } +} + fn test_path(name: &str) -> String { format!( "/tmp/stacks-node-tests/coordinator-tests/{}/{}", @@ -136,7 +158,7 @@ pub fn produce_burn_block<'a, I: Iterator>( fn get_burn_distribution(conn: &Connection, sortition: &SortitionId) -> Vec { conn.query_row( "SELECT data FROM snapshot_burn_distributions WHERE sortition_id = ?", - &[sortition], + params![sortition.sqlhex()], |row| { let data_str: String = row.get_unwrap(0); Ok(serde_json::from_str(&data_str).unwrap()) @@ -306,7 +328,7 @@ pub fn setup_states_with_epochs( LeaderKeyRegisterOp { public_key, consensus_hash, - memo, + memo: memo.into(), vtxindex, block_height, burn_header_hash, @@ -673,7 +695,7 @@ fn make_genesis_block_with_recipients( ), key_block_ptr: 1, // all registers happen in block height 1 key_vtxindex: (1 + key_index) as u16, - memo: vec![STACKS_EPOCH_2_4_MARKER], + memo: vec![STACKS_EPOCH_2_4_MARKER].into(), new_seed: VRFSeed::from_proof(&proof), commit_outs, @@ -945,7 +967,7 @@ fn make_stacks_block_with_input( ), key_block_ptr: 1, // all registers happen in block height 1 key_vtxindex: (1 + key_index) as u16, - memo: vec![STACKS_EPOCH_2_4_MARKER], + memo: vec![STACKS_EPOCH_2_4_MARKER].into(), new_seed: VRFSeed::from_proof(&proof), commit_outs, @@ -3126,7 +3148,7 @@ fn test_stx_transfer_btc_ops() { sender: stacker.clone(), recipient: recipient.clone(), transfered_ustx: transfer_amt, - memo: vec![], + memo: vec![].into(), txid: next_txid(), vtxindex: 5, block_height: 0, @@ -3138,7 +3160,7 @@ fn test_stx_transfer_btc_ops() { sender: recipient.clone(), recipient: stacker.clone(), transfered_ustx: transfer_amt + 1, - memo: vec![], + memo: vec![].into(), txid: next_txid(), vtxindex: 5, block_height: 0, @@ -6465,3 +6487,729 @@ fn test_check_chainstate_db_versions() { // should fail in epoch 2.05 assert!(!check_chainstate_db_versions(&[epoch_2_05], &sortdb_path, &chainstate_path).unwrap()); } + +/// Macro for tests to verify that a particular struct's SqlEncoded implementation can load its +/// legacy representation and its SIP003 representation. +/// Pertains to structures besides those declared by impl_byte_array_newtype!(). +macro_rules! 
impl_reencoding_test { + ($func_name:ident, $type:ty, $db_sample:expr, $expected:expr, $encoding:expr) => { + #[test] + fn $func_name() { + let pathdir = test_path(function_name!()); + let _ = std::fs::remove_dir_all(&pathdir); + std::fs::create_dir_all(&pathdir).unwrap(); + let path = format!("{}/test.db", &pathdir); + + test_debug!("Create test database '{}'...", &path); + + let mut db = $crate::util_lib::db::sqlite_open( + &path, + rusqlite::OpenFlags::SQLITE_OPEN_CREATE + | rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE, + false, + ) + .unwrap(); + let sql = "CREATE TABLE test(instance TEXT NOT NULL)"; + { + let tx = $crate::util_lib::db::tx_begin_immediate(&mut db).unwrap(); + tx.execute(sql, rusqlite::params![]).unwrap(); + tx.commit().unwrap(); + } + + test_debug!("Insert old form..."); + + // insert old form + let sql = "INSERT INTO test (instance) VALUES (?1)"; + { + let tx = $crate::util_lib::db::tx_begin_immediate(&mut db).unwrap(); + let mut stmt = tx.prepare(sql).unwrap(); + stmt.raw_bind_parameter(1, $db_sample).unwrap(); + stmt.raw_execute().unwrap(); + drop(stmt); + tx.commit().unwrap(); + } + + test_debug!("Query old form..."); + + // query old form + let sql = "SELECT instance FROM test"; + { + let mut stmt = db.prepare(sql).unwrap(); + let mut rows = stmt.query(params![]).unwrap(); + let row = rows.next().unwrap().unwrap(); + let val = <$type>::sql_decoded(&row, "instance", None).unwrap(); + + assert_eq!($expected, val); + } + + test_debug!("Reencode form..."); + + // reencode + let sql = "UPDATE test SET instance = ?1 WHERE instance = ?2"; + { + let tx = $crate::util_lib::db::tx_begin_immediate(&mut db).unwrap(); + let mut stmt = tx.prepare(sql).unwrap(); + stmt.raw_bind_parameter(1, $expected.sql_encoded($encoding)) + .unwrap(); + stmt.raw_bind_parameter(2, $db_sample).unwrap(); + stmt.raw_execute().unwrap(); + drop(stmt); + tx.commit().unwrap(); + } + + test_debug!("Query new form..."); + + // query new form + let sql = "SELECT instance FROM test"; + { + let mut stmt = db.prepare(sql).unwrap(); + let mut rows = stmt.query(params![]).unwrap(); + let row = rows.next().unwrap().unwrap(); + let val = <$type>::sql_decoded(&row, "instance", $encoding).unwrap(); + + assert_eq!($expected, val); + } + } + }; +} + +impl_reencoding_test!( + test_burn_op_memo_legacy, + BurnOpMemo, + "010203", + BurnOpMemo(vec![1, 2, 3]), + None +); +impl_reencoding_test!( + test_burn_op_memo_sip003, + BurnOpMemo, + "010203", + BurnOpMemo(vec![1, 2, 3]), + Some(ColumnEncoding::SIP003) +); + +/// Generic test harness for verifying that data stored to the sortition DB can be loaded after a +/// reencoding +fn test_sortitiondb_reencoding(test_name: &str, storer: S, mut loader: L) +where + S: FnOnce(&mut SortitionDB), + L: FnMut(&mut SortitionDB) -> R, + R: std::cmp::PartialEq + std::fmt::Debug, +{ + let path = &test_path(test_name); + let _ = std::fs::remove_dir_all(path); + + let sortdb_path = format!("{}/sortdb", &path); + let burnchain_path = format!("{}/burnchain", &path); + + std::fs::create_dir_all(&burnchain_path).unwrap(); + + let pox_constants = PoxConstants::new( + 5, + 3, + 3, + 25, + 5, + u64::MAX, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); + let burnchain = Burnchain::regtest(&burnchain_path); + let _burnchain_blocks_db = + BurnchainDB::connect(&burnchain.get_burnchaindb_path(), &burnchain, true).unwrap(); + + let mut boot_data = ChainStateBootData::new(&burnchain, vec![], None); + + let post_flight_callback = move |clarity_tx: &mut ClarityTx| { + let contract = 
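The two invocations above pin down both representations of one sample: "010203" is the legacy hex TEXT stored in the column, and BurnOpMemo(vec![1, 2, 3]) is what `sql_decoded` must produce from it both before the rewrite (encoding None) and after (encoding Some(SIP003)). The fixture pair reduces to this equality:

```rust
use stacks_common::util::hash::hex_bytes;

use crate::chainstate::burn::operations::BurnOpMemo;

fn burn_op_memo_fixture() {
    // The legacy column value is the hex TEXT "010203"; decoded, it must
    // equal the expected memo regardless of which encoding a test exercises.
    let expected = BurnOpMemo(vec![1, 2, 3]);
    let decoded = BurnOpMemo::from_bytes(&hex_bytes("010203").unwrap()).unwrap();
    assert_eq!(decoded, expected);
}
```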
boot_code_id("pox", false); + let sender = PrincipalData::from(contract.clone()); + + clarity_tx.connection().as_transaction(|conn| { + conn.run_contract_call( + &sender, + None, + &contract, + "set-burnchain-parameters", + &[ + Value::UInt(burnchain.first_block_height as u128), + Value::UInt(burnchain.pox_constants.prepare_length as u128), + Value::UInt(burnchain.pox_constants.reward_cycle_length as u128), + Value::UInt(burnchain.pox_constants.pox_rejection_fraction as u128), + ], + |_, _| None, + None, + ) + .expect("Failed to set burnchain parameters in PoX contract"); + }); + }; + + boot_data.post_flight_callback = Some(Box::new(post_flight_callback)); + + let (chainstate, _) = StacksChainState::open_and_exec( + false, + 0x80000000, + &format!("{path}/chainstate/"), + Some(&mut boot_data), + None, + ) + .unwrap(); + + let mut sortdb = SortitionDB::connect( + &sortdb_path, + 100, + &BurnchainHeaderHash([0x00; 32]), + 0, + &StacksEpoch::all(0, 0, 0), + pox_constants, + None, + true, + ) + .unwrap(); + + storer(&mut sortdb); + let obj = loader(&mut sortdb); + + let migrator = SortitionDBMigrator::new(burnchain, &chainstate.root_path, None) + .unwrap() + .with_column_encoding(ColumnEncoding::SIP003); + + migrator.reencode_tables(&mut sortdb).unwrap(); + + let reencoded_obj = loader(&mut sortdb); + assert_eq!(obj, reencoded_obj); +} + +/// Test that the SortitionDBMigrator can reencode the first snapshot +#[test] +fn test_first_snapshot_reencoding() { + test_sortitiondb_reencoding( + "first_snapshot", + |_sortdb| {}, + |sortdb| SortitionDB::get_first_block_snapshot(sortdb.conn()).unwrap(), + ); +} + +/// Test that the SortitionDB migrator can reencode an arbitrary snapshot +#[test] +fn test_snapshot_reencoding() { + test_sortitiondb_reencoding( + "snapshot", + |sortdb| { + let first_snapshot = SortitionDB::get_first_block_snapshot(sortdb.conn()).unwrap(); + let snapshot = BlockSnapshot { + accumulated_coinbase_ustx: 0, + pox_valid: true, + block_height: first_snapshot.block_height + 1, + burn_header_timestamp: get_epoch_time_secs(), + burn_header_hash: BurnchainHeaderHash([0x01; 32]), + sortition_id: SortitionId([0x02; 32]), + parent_sortition_id: first_snapshot.sortition_id.clone(), + parent_burn_header_hash: first_snapshot.burn_header_hash.clone(), + consensus_hash: ConsensusHash([0x03; 20]), + ops_hash: OpsHash([0x04; 32]), + total_burn: 0, + sortition: true, + sortition_hash: SortitionHash([0x05; 32]), + winning_block_txid: Txid([0x06; 32]), + winning_stacks_block_hash: BlockHeaderHash([0x07; 32]), + index_root: TrieHash([0x08; 32]), + num_sortitions: first_snapshot.num_sortitions + 1, + stacks_block_accepted: false, + stacks_block_height: 0, + arrival_index: 0, + canonical_stacks_tip_height: 0, + canonical_stacks_tip_hash: BlockHeaderHash([0x09; 32]), + canonical_stacks_tip_consensus_hash: ConsensusHash([0x0a; 20]), + miner_pk_hash: None, + }; + let mut tx = SortitionHandleTx::begin(sortdb, &first_snapshot.sortition_id).unwrap(); + let _index_root = tx + .append_chain_tip_snapshot(&first_snapshot, &snapshot, &[], &[], None, None, None) + .unwrap(); + tx.commit().unwrap(); + }, + |sortdb| { + SortitionDB::get_block_snapshot(sortdb.conn(), &SortitionId([0x02; 32])) + .unwrap() + .unwrap() + }, + ); +} + +/// Test that the SortitionDB migrator can reencode: +/// * a block-commit +/// * a block-commit parent record +/// * a VRF key registration +#[test] +fn test_block_commit_reencoding() { + let block_height = 100; + let vtxindex = 456; + let leader_key = LeaderKeyRegisterOp { + 
consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("2222222222222222222222222222222222222222").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap(), + ) + .unwrap(), + memo: vec![1, 2, 3, 4, 5].into(), + + txid: Txid::from_bytes_be( + &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562").unwrap(), + ) + .unwrap(), + vtxindex, + block_height: block_height + 1, + burn_header_hash: BurnchainHeaderHash([0x01; 32]), + }; + + let block_commit = LeaderBlockCommitOp { + sunset_burn: 0, + block_header_hash: BlockHeaderHash::from_bytes( + &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222").unwrap(), + ) + .unwrap(), + new_seed: VRFSeed::from_bytes( + &hex_bytes("3333333333333333333333333333333333333333333333333333333333333333").unwrap(), + ) + .unwrap(), + parent_block_ptr: 0x43424140, + parent_vtxindex: 0x5150, + key_block_ptr: (block_height + 1) as u32, + key_vtxindex: vtxindex as u16, + memo: vec![0x80].into(), + + commit_outs: vec![], + burn_fee: 12345, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner::mock_parts( + AddressHashMode::SerializeP2PKH, + 1, + vec![StacksPublicKey::from_hex( + "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", + ) + .unwrap()], + ), + + txid: Txid([0x55; 32]), + vtxindex, + block_height: block_height + 2, + burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: BurnchainHeaderHash([0x03; 32]), + treatment: vec![], + }; + + test_sortitiondb_reencoding( + "block_commit", + |sortdb| { + let snapshot = test_append_snapshot( + sortdb, + BurnchainHeaderHash([0x01; 32]), + &[BlockstackOperationType::LeaderKeyRegister( + leader_key.clone(), + )], + ); + + let snapshot_consumed = test_append_snapshot( + sortdb, + BurnchainHeaderHash([0x03; 32]), + &[BlockstackOperationType::LeaderBlockCommit( + block_commit.clone(), + )], + ); + + // sanity check + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&tip.sortition_id); + let commit = sort_handle + .get_block_commit_by_txid(&tip.sortition_id, &Txid([0x55; 32])) + .unwrap() + .unwrap(); + assert_eq!(commit, block_commit); + + // check parent commit + { + let conn = sortdb.conn(); + let sql = "SELECT block_commit_txid, block_commit_sortition_id, parent_sortition_id FROM block_commit_parents"; + let mut stmt = conn.prepare(sql).unwrap(); + let mut qry = stmt.query(params![]).unwrap(); + let row = qry.next().unwrap().unwrap(); + let parent_commit_txid: Txid = row.get("block_commit_txid").unwrap(); + let parent_commit_sort_id: SortitionId = + row.get("block_commit_sortition_id").unwrap(); + let parent_sortition_id: SortitionId = row.get("parent_sortition_id").unwrap(); + + assert_eq!(parent_commit_sort_id, tip.sortition_id); + assert_eq!(parent_commit_txid, commit.txid); + assert_eq!(parent_sortition_id, SortitionId([0x00; 32])); + } + + // check leader key registration + { + let ic = sortdb.index_conn(); + let key_register = SortitionDB::get_leader_key_at( + &ic, + commit.key_block_ptr.into(), + commit.key_vtxindex.into(), + &tip.sortition_id, + ) + .unwrap() + .unwrap(); + assert_eq!(key_register, leader_key); + } + + test_debug!("Stored leader key and block commit"); + }, + |sortdb| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&tip.sortition_id); + let 
commit = sort_handle + .get_block_commit_by_txid(&tip.sortition_id, &Txid([0x55; 32])) + .unwrap() + .unwrap(); + + // check parent as well + { + let conn = sortdb.conn(); + let sql = "SELECT block_commit_txid, block_commit_sortition_id, parent_sortition_id FROM block_commit_parents"; + let mut stmt = conn.prepare(sql).unwrap(); + let mut qry = stmt.query(params![]).unwrap(); + let row = qry.next().unwrap().unwrap(); + let parent_commit_txid: Txid = row.get("block_commit_txid").unwrap(); + let parent_commit_sort_id: SortitionId = + row.get("block_commit_sortition_id").unwrap(); + let parent_sortition_id: SortitionId = row.get("parent_sortition_id").unwrap(); + + assert_eq!(parent_commit_sort_id, tip.sortition_id); + assert_eq!(parent_commit_txid, commit.txid); + assert_eq!(parent_sortition_id, SortitionId([0x00; 32])); + } + // check leader key registration as well + { + let ic = sortdb.index_conn(); + let key_register = SortitionDB::get_leader_key_at( + &ic, + commit.key_block_ptr.into(), + commit.key_vtxindex.into(), + &tip.sortition_id, + ) + .unwrap() + .unwrap(); + assert_eq!(key_register, leader_key); + } + commit + }, + ); +} + +/// Test reencoding for +/// * delegate-stx +/// * stack-stx +/// * transfer-stx +/// * vote-aggregate-key +#[test] +fn test_burnchain_ops_reencoding() { + let block_height = 100; + let first_burn_hash = BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + let vote_pubkey = StacksPublicKey::from_hex( + "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", + ) + .unwrap(); + let vote_key: StacksPublicKeyBuffer = vote_pubkey.to_bytes_compressed().as_slice().into(); + + let fixture_ops = vec![ + BlockstackOperationType::TransferStx(TransferStxOp { + sender: StacksAddress::new(1, Hash160([1u8; 20])).unwrap(), + recipient: StacksAddress::new(2, Hash160([2u8; 20])).unwrap(), + transfered_ustx: 123, + memo: vec![0x00, 0x01, 0x02, 0x03, 0x04].into(), + + txid: Txid([0x01; 32]), + vtxindex: 1, + block_height, + burn_header_hash: first_burn_hash.clone(), + }), + BlockstackOperationType::StackStx(StackStxOp { + sender: StacksAddress::new(3, Hash160([3u8; 20])).unwrap(), + reward_addr: PoxAddress::Standard( + StacksAddress::new(4, Hash160([4u8; 20])).unwrap(), + None, + ), + stacked_ustx: 456, + num_cycles: 6, + signer_key: Some(StacksPublicKeyBuffer([0x02; 33])), + max_amount: Some(u128::MAX), + auth_id: Some(0u32), + + txid: Txid([0x02; 32]), + vtxindex: 2, + block_height, + burn_header_hash: first_burn_hash.clone(), + }), + BlockstackOperationType::DelegateStx(DelegateStxOp { + sender: StacksAddress::new(6, Hash160([6u8; 20])).unwrap(), + delegate_to: StacksAddress::new(7, Hash160([7u8; 20])).unwrap(), + reward_addr: Some(( + 123, + PoxAddress::Standard( + StacksAddress::new(8, Hash160([8u8; 20])).unwrap(), + Some(AddressHashMode::SerializeP2PKH), + ), + )), + delegated_ustx: 789, + until_burn_height: Some(1000), + + txid: Txid([0x04; 32]), + vtxindex: 3, + block_height, + burn_header_hash: first_burn_hash.clone(), + }), + BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { + sender: StacksAddress::new(6, Hash160([6u8; 20])).unwrap(), + aggregate_key: vote_key.clone(), + signer_key: vote_key, + round: 1, + reward_cycle: 2, + signer_index: 3, + + txid: Txid([0x05; 32]), + vtxindex: 4, + block_height, + burn_header_hash: first_burn_hash.clone(), + }), + ]; + + test_sortitiondb_reencoding( + "burnchain_ops", + |sortdb| { + let mut tx = sortdb.tx_begin_at_tip(); + for op 
in fixture_ops.iter() { + tx.store_burnchain_transaction(op, &SortitionId::stubbed(&first_burn_hash)) + .unwrap(); + } + tx.commit().unwrap(); + }, + |sortdb| { + let ops = SortitionDB::get_transfer_stx_ops(sortdb.conn(), &first_burn_hash).unwrap(); + assert_eq!(ops.len(), 1); + assert_eq!( + BlockstackOperationType::TransferStx(ops[0].clone()), + fixture_ops[0] + ); + + let ops = SortitionDB::get_stack_stx_ops(sortdb.conn(), &first_burn_hash).unwrap(); + assert_eq!(ops.len(), 1); + assert_eq!( + BlockstackOperationType::StackStx(ops[0].clone()), + fixture_ops[1] + ); + + let ops = SortitionDB::get_delegate_stx_ops(sortdb.conn(), &first_burn_hash).unwrap(); + assert_eq!(ops.len(), 1); + assert_eq!( + BlockstackOperationType::DelegateStx(ops[0].clone()), + fixture_ops[2] + ); + + let ops = SortitionDB::get_vote_for_aggregate_key_ops(sortdb.conn(), &first_burn_hash) + .unwrap(); + assert_eq!(ops.len(), 1); + assert_eq!( + BlockstackOperationType::VoteForAggregateKey(ops[0].clone()), + fixture_ops[3] + ); + }, + ); +} + +/// Test reencoding of missed_commits +#[test] +fn test_missed_commits_reencoding() { + let missed_commit = MissedBlockCommit { + txid: Txid([0x01; 32]), + input: (Txid([0x02; 32]), 1), + intended_sortition: SortitionId([0x03; 32]), + }; + test_sortitiondb_reencoding( + "missed_commits", + |sortdb| { + let mut tx = SortitionHandleTx::begin(sortdb, &SortitionId([0x03; 32])).unwrap(); + tx.insert_missed_block_commit(&missed_commit).unwrap(); + tx.commit().unwrap(); + }, + |sortdb| { + let mut missed = SortitionDB::get_missed_commits_by_intended( + sortdb.conn(), + &SortitionId([0x03; 32]), + ) + .unwrap(); + assert_eq!(missed.len(), 1); + missed.pop().unwrap() + }, + ); +} + +/// Test reencoding of preprocessed reward sets +#[test] +fn test_preprocessed_reward_sets_reencoding() { + let reward_cycle_info = RewardCycleInfo { + reward_cycle: 0, + anchor_status: PoxAnchorBlockStatus::NotSelected, + }; + test_sortitiondb_reencoding( + "preprocessed_reward_set", + |sortdb| { + let mut tx = sortdb.tx_begin().unwrap(); + SortitionDB::store_preprocessed_reward_set( + &mut tx, + &SortitionId([0x01; 32]), + &reward_cycle_info, + ) + .unwrap(); + tx.commit().unwrap(); + }, + |sortdb| { + let preprocessed_reward_set = + SortitionDB::get_preprocessed_reward_set(sortdb.conn(), &SortitionId([0x01; 32])) + .unwrap() + .unwrap(); + preprocessed_reward_set + }, + ); +} + +/// Test reencoding of snapshot_transition_ops +#[test] +fn test_transition_ops_reencoding() { + let block_height = 100; + let vtxindex = 456; + let leader_key = LeaderKeyRegisterOp { + consensus_hash: ConsensusHash::from_bytes( + &hex_bytes("2222222222222222222222222222222222222222").unwrap(), + ) + .unwrap(), + public_key: VRFPublicKey::from_bytes( + &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap(), + ) + .unwrap(), + memo: vec![1, 2, 3, 4, 5].into(), + + txid: Txid::from_bytes_be( + &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562").unwrap(), + ) + .unwrap(), + vtxindex, + block_height: block_height + 1, + burn_header_hash: BurnchainHeaderHash([0x01; 32]), + }; + let block_commit = LeaderBlockCommitOp { + sunset_burn: 0, + block_header_hash: BlockHeaderHash::from_bytes( + &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222").unwrap(), + ) + .unwrap(), + new_seed: VRFSeed::from_bytes( + &hex_bytes("3333333333333333333333333333333333333333333333333333333333333333").unwrap(), + ) + .unwrap(), + parent_block_ptr: 0x43424140, + 
parent_vtxindex: 0x5150, + key_block_ptr: (block_height + 1) as u32, + key_vtxindex: vtxindex as u16, + memo: vec![0x80].into(), + + commit_outs: vec![], + burn_fee: 12345, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner::mock_parts( + AddressHashMode::SerializeP2PKH, + 1, + vec![StacksPublicKey::from_hex( + "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", + ) + .unwrap()], + ), + + txid: Txid([0x55; 32]), + vtxindex, + block_height: block_height + 2, + burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: BurnchainHeaderHash([0x03; 32]), + treatment: vec![], + }; + + let burn_sample_point = BurnSamplePoint { + burns: 123, + median_burn: 456, + frequency: 3, + range_start: Uint256::from_u128(789), + range_end: Uint256::from_u128(123456789), + candidate: block_commit.clone(), + }; + let missed_commit = MissedBlockCommit { + txid: Txid([0x01; 32]), + input: (Txid([0x02; 32]), 1), + intended_sortition: SortitionId([0x03; 32]), + }; + + let transition = BurnchainStateTransition { + burn_dist: vec![burn_sample_point], + accepted_ops: vec![BlockstackOperationType::LeaderBlockCommit( + block_commit.clone(), + )], + consumed_leader_keys: vec![leader_key.clone()], + windowed_block_commits: vec![vec![block_commit.clone()]], + windowed_missed_commits: vec![vec![missed_commit.clone()]], + }; + test_sortitiondb_reencoding( + "snapshot_transition_ops", + |sortdb| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let mut tx = SortitionHandleTx::begin(sortdb, &tip.sortition_id).unwrap(); + tx.store_or_replace_transition_ops(&tip.sortition_id, &transition); + tx.commit().unwrap(); + }, + |sortdb| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let (_, transition_ops) = sortdb + .get_sortition_result(&tip.sortition_id) + .unwrap() + .unwrap(); + transition_ops + }, + ); +} + +#[test] +fn test_stacks_chain_tips_reencoding() { + test_sortitiondb_reencoding( + "stacks_chain_tips", + |sortdb| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let mut tx = SortitionHandleTx::begin(sortdb, &tip.sortition_id).unwrap(); + tx.update_canonical_stacks_tip( + &tip.sortition_id, + &ConsensusHash([0x22; 20]), + &BlockHeaderHash([0x33; 32]), + 4, + ) + .unwrap(); + tx.commit().unwrap(); + }, + |sortdb| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let (ch, bhh, height) = + SortitionDB::get_canonical_nakamoto_tip_hash_and_height(sortdb.conn(), &tip) + .unwrap() + .unwrap(); + (ch, bhh, height) + }, + ); +} diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 6eb9ad2b60c..bc98538ae9c 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -3428,7 +3428,7 @@ fn test_stacks_on_burnchain_ops() { sender: addr.clone(), recipient: recipient_addr.clone(), transfered_ustx: 1, - memo: vec![0x2], + memo: vec![0x2].into(), // mocked txid: Txid([i | 0x40; 32]), diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 0d6bc89ba5f..b11db3453fe 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -37,6 +37,7 @@ use stacks_common::types::chainstate::{ StacksPrivateKey, StacksPublicKey, TrieHash, VRFSeed, }; use stacks_common::types::{PrivateKey, SIP031EmissionInterval, 
StacksEpochId}; +use stacks_common::util::db::SqlEncoded; use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; @@ -2578,7 +2579,7 @@ impl NakamotoChainState { ) -> Result, ChainstateError> { let qry = "SELECT total_tenure_cost FROM nakamoto_block_headers WHERE index_block_hash = ?"; chainstate_conn - .query_row(qry, &[block], |row| row.get(0)) + .query_row(qry, params![block.sqlhex()], |row| row.get(0)) .optional() .map_err(ChainstateError::from) } @@ -2590,7 +2591,7 @@ impl NakamotoChainState { ) -> Result, ChainstateError> { let qry = "SELECT total_tenure_cost FROM nakamoto_block_headers WHERE index_block_hash = ?"; chainstate_conn - .query_row(qry, &[block], |row| row.get(0)) + .query_row(qry, params![block.sqlhex()], |row| row.get(0)) .optional() .map_err(ChainstateError::from) } @@ -2603,7 +2604,7 @@ impl NakamotoChainState { ) -> Result, ChainstateError> { let qry = "SELECT tenure_tx_fees FROM nakamoto_block_headers WHERE index_block_hash = ?"; let tx_fees_str: Option = chainstate_conn - .query_row(qry, &[block], |row| row.get(0)) + .query_row(qry, params![block.sqlhex()], |row| row.get(0)) .optional()?; tx_fees_str .map(|x| x.parse()) @@ -2655,7 +2656,7 @@ impl NakamotoChainState { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT version FROM nakamoto_block_headers WHERE index_block_hash = ?1"; - let args = rusqlite::params![index_block_hash]; + let args = rusqlite::params![index_block_hash.sqlhex()]; let mut stmt = chainstate_conn.prepare(sql)?; let result = stmt .query_row(args, |row| { @@ -2676,7 +2677,7 @@ impl NakamotoChainState { let mut result = query_row_columns( chainstate_conn, sql, - &[&index_block_hash], + &[&index_block_hash.sqlhex()], "parent_block_id", )?; if result.len() > 1 { @@ -2693,7 +2694,7 @@ impl NakamotoChainState { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_block_headers WHERE index_block_hash = ?1"; - let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { + let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash.sqlhex()], || { "FATAL: multiple rows for the same block hash".to_string() })?; Ok(result) @@ -2705,7 +2706,7 @@ impl NakamotoChainState { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT consensus_hash FROM nakamoto_block_headers WHERE index_block_hash = ?1"; - let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { + let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash.sqlhex()], || { "FATAL: multiple rows for the same block hash".to_string() })?; Ok(result) @@ -2717,7 +2718,7 @@ impl NakamotoChainState { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT * FROM block_headers WHERE index_block_hash = ?1"; - let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { + let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash.sqlhex()], || { "FATAL: multiple rows for the same block hash".to_string() })?; @@ -2745,7 +2746,7 @@ impl NakamotoChainState { ) -> Result { let sql = "SELECT 1 FROM nakamoto_block_headers WHERE index_block_hash = ?1"; let result: Option = - query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { + query_row_panic(chainstate_conn, sql, &[&index_block_hash.sqlhex()], || { "FATAL: multiple rows for the same block 
hash".to_string() })?; if result.is_some() { @@ -2759,7 +2760,7 @@ impl NakamotoChainState { // check epoch 2 let sql = "SELECT 1 FROM block_headers WHERE index_block_hash = ?1"; let result: Option = - query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { + query_row_panic(chainstate_conn, sql, &[&index_block_hash.sqlhex()], || { "FATAL: multiple rows for the same block hash".to_string() })?; @@ -2773,7 +2774,7 @@ impl NakamotoChainState { ) -> Result { let sql = "SELECT 1 FROM block_headers WHERE index_block_hash = ?1"; let result: Option = - query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { + query_row_panic(chainstate_conn, sql, &[&index_block_hash.sqlhex()], || { "FATAL: multiple rows for the same block hash".to_string() })?; @@ -2920,7 +2921,7 @@ impl NakamotoChainState { WHERE h.consensus_hash = ?1 ORDER BY h.block_height DESC, h.timestamp "; - let args = params![tenure_id]; + let args = params![tenure_id.sqlhex()]; let out = query_rows(db, qry, args)?; if !out.is_empty() { return Ok(out); @@ -3033,7 +3034,7 @@ impl NakamotoChainState { WHERE h.burn_header_hash = ?1 ORDER BY h.block_height DESC, h.timestamp "; - let args = params![tenure_block_hash]; + let args = params![tenure_block_hash.sqlhex()]; let out = query_rows(db, qry, args)?; if !out.is_empty() { return Ok(out); @@ -3154,7 +3155,7 @@ impl NakamotoChainState { block_hash: &BlockHeaderHash, ) -> Result, ChainstateError> { let sql = "SELECT processed, orphaned FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args = params![consensus_hash, block_hash]; + let args = params![consensus_hash.sqlhex(), block_hash.sqlhex()]; let Some((processed, orphaned)) = query_row_panic(&staging_blocks_conn, sql, args, || { "FATAL: multiple rows for the same consensus hash and block hash".to_string() }) @@ -3193,7 +3194,7 @@ impl NakamotoChainState { tenure_start_block_id: &StacksBlockId, ) -> Result, ChainstateError> { let sql = r#"SELECT IFNULL(vrf_proof,"") FROM nakamoto_block_headers WHERE index_block_hash = ?1"#; - let args = params![tenure_start_block_id]; + let args = params![tenure_start_block_id.sqlhex()]; let proof_bytes: Option = query_row(chainstate_conn, sql, args)?; if let Some(bytes) = proof_bytes { if bytes.is_empty() { @@ -3235,7 +3236,7 @@ impl NakamotoChainState { let epoch_2_qry = "SELECT block_height FROM block_headers WHERE index_block_hash = ?1"; let opt_height: Option = chainstate_conn .sqlite() - .query_row(epoch_2_qry, &[block], |row| row.get(0)) + .query_row(epoch_2_qry, params![block.sqlhex()], |row| row.get(0)) .optional()?; opt_height .map(u64::try_from) @@ -3329,9 +3330,9 @@ impl NakamotoChainState { let args = params![ u64_to_sql(*stacks_block_height)?, - index_root, - consensus_hash, - burn_header_hash, + index_root.sqlhex(), + consensus_hash.sqlhex(), + burn_header_hash.sqlhex(), burn_header_height, u64_to_sql(*burn_header_timestamp)?, block_size_str, @@ -3339,30 +3340,33 @@ impl NakamotoChainState { header.version, u64_to_sql(header.chain_length)?, u64_to_sql(header.burn_spent)?, - header.miner_signature, + header.miner_signature.sqlhex(), signer_signature, - header.tx_merkle_root, - header.state_index_root, + header.tx_merkle_root.sqlhex(), + header.state_index_root.sqlhex(), u64_to_sql(header.timestamp)?, - block_hash, - index_block_hash, + block_hash.sqlhex(), + index_block_hash.sqlhex(), block_cost, total_tenure_cost, &tenure_tx_fees.to_string(), - &header.parent_block_id, + &header.parent_block_id.sqlhex(), if tenure_changed { &1i64 } else { &0i64 
}, &vrf_proof_bytes.as_ref(), &header.pox_treatment, &height_in_tenure, - tip_info.burn_view.as_ref().ok_or_else(|| { - error!( - "Attempted to store nakamoto block header information without burnchain view"; - "block_id" => %index_block_hash, - ); - ChainstateError::DBError(DBError::Other( - "Nakamoto block StacksHeaderInfo did not set burnchain view".into(), - )) - })?, + tip_info.burn_view + .as_ref() + .map(|ch| ch.sqlhex()) + .ok_or_else(|| { + error!( + "Attempted to store nakamoto block header information without burnchain view"; + "block_id" => %index_block_hash, + ); + ChainstateError::DBError(DBError::Other( + "Nakamoto block StacksHeaderInfo did not set burnchain view".into(), + )) + })?, ]; chainstate_tx.execute( @@ -3648,7 +3652,7 @@ impl NakamotoChainState { if applied_epoch_transition { debug!("Block {} applied an epoch transition", &index_block_hash); let sql = "INSERT INTO epoch_transitions (block_id) VALUES (?)"; - let args = params![index_block_hash]; + let args = params![index_block_hash.sqlhex()]; headers_tx.deref_mut().execute(sql, args)?; } @@ -3661,7 +3665,7 @@ impl NakamotoChainState { reward_set: &RewardSet, ) -> Result<(), ChainstateError> { let sql = "INSERT INTO nakamoto_reward_sets (index_block_hash, reward_set) VALUES (?, ?)"; - let args = params![block_id, reward_set.metadata_serialize(),]; + let args = params![block_id.sqlhex(), reward_set.metadata_serialize(),]; tx.execute(sql, args)?; Ok(()) } @@ -3672,7 +3676,7 @@ impl NakamotoChainState { ) -> Result, ChainstateError> { let sql = "SELECT reward_set FROM nakamoto_reward_sets WHERE index_block_hash = ?"; chainstate_db - .query_row(sql, &[block_id], |row| { + .query_row(sql, params![block_id.sqlhex()], |row| { let reward_set: String = row.get(0)?; let reward_set = RewardSet::metadata_deserialize(&reward_set) .map_err(|s| FromSqlError::Other(s.into()))?; @@ -3711,7 +3715,7 @@ impl NakamotoChainState { let txid = tx_receipt.transaction.txid(); let tx_hex = tx_receipt.transaction.serialize_to_dbstring(); let result = tx_receipt.result.to_string(); - let params = params![txid, block_id, tx_hex, result]; + let params = params![txid.sqlhex(), block_id.sqlhex(), tx_hex, result]; if let Err(e) = stacks_db_tx.execute(insert, params) { warn!("Failed to record TX: {}", e); } @@ -3723,7 +3727,7 @@ impl NakamotoChainState { txid: &Txid, ) -> Result, ChainstateError> { let sql = "SELECT index_block_hash, tx_hex, result FROM transactions WHERE txid = ?"; - let args = params![txid]; + let args = params![txid.sqlhex()]; let mut stmt = conn.prepare(sql)?; Ok(stmt diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index 52973fb113d..be2e319b871 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -42,6 +42,7 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, }; +use stacks_common::util::db::SqlEncoded; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::VRFProof; @@ -760,7 +761,7 @@ impl NakamotoStagingBlocksConnRef<'_> { ) -> Result { let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ?1 AND obtain_method = ?2"; let args = params![ - index_block_hash, + index_block_hash.sqlhex(), &NakamotoBlockObtainMethod::Shadow.to_string() ]; let res: Option = query_row(self, qry, args)?; @@ -777,7 +778,7 @@ impl NakamotoStagingBlocksConnRef<'_> { ) -> 
Result { let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND obtain_method = ?2"; let args = rusqlite::params![ - consensus_hash, + consensus_hash.sqlhex(), NakamotoBlockObtainMethod::Shadow.to_string() ]; let present: Option = query_row(self, qry, args)?; @@ -797,7 +798,7 @@ impl NakamotoStagingBlocksConnRef<'_> { ch: &ConsensusHash, ) -> Result, ChainstateError> { let qry = "SELECT data FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND obtain_method = ?2 ORDER BY height DESC LIMIT 1"; - let args = params![ch, &NakamotoBlockObtainMethod::Shadow.to_string()]; + let args = params![ch.sqlhex(), &NakamotoBlockObtainMethod::Shadow.to_string()]; let res: Option> = query_row(self, qry, args)?; let Some(block_bytes) = res else { return Ok(None); @@ -824,7 +825,7 @@ impl NakamotoStagingBlocksTx<'_> { // is this block stored already? let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args = params![block_id]; + let args = params![block_id.sqlhex()]; let present: Option = query_row(self, qry, args)?; if present.is_some() { return Ok(()); @@ -832,7 +833,7 @@ impl NakamotoStagingBlocksTx<'_> { // this tenure must be empty, or it must be a shadow tenure let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1"; - let args = rusqlite::params![&shadow_block.header.consensus_hash]; + let args = rusqlite::params![&shadow_block.header.consensus_hash.sqlhex()]; let present: Option = query_row(self, qry, args)?; if present.is_some() && !self @@ -847,7 +848,7 @@ impl NakamotoStagingBlocksTx<'_> { // there must not be a block at this height in this tenure let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND height = ?2"; let args = rusqlite::params![ - &shadow_block.header.consensus_hash, + &shadow_block.header.consensus_hash.sqlhex(), u64_to_sql(shadow_block.header.chain_length)? 
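+            // u64_to_sql range-checks the u64 height into i64, SQLite's native integer width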
]; let present: Option = query_row(self, qry, args)?; diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index a378a68ec7a..3d9d79341b0 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -22,6 +22,7 @@ use rusqlite::blob::Blob; use rusqlite::{params, Connection, OpenFlags, OptionalExtension}; use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash, StacksBlockId}; use stacks_common::types::sqlite::NO_PARAMS; +use stacks_common::util::db::SqlEncoded; use stacks_common::util::get_epoch_time_secs; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; @@ -274,7 +275,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result { let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args = params![index_block_hash]; + let args = params![index_block_hash.sqlhex()]; let res: Option = query_row(self, qry, args)?; Ok(res.is_some()) } @@ -290,7 +291,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { block_hash: &BlockHeaderHash, ) -> Result, ChainstateError> { let sql = "SELECT index_block_hash,processed,orphaned,signing_weight FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2 ORDER BY signing_weight DESC, index_block_hash LIMIT 1"; - let args = params![consensus_hash, block_hash]; + let args = params![consensus_hash.sqlhex(), block_hash.sqlhex()]; let mut stmt = self.deref().prepare(sql)?; Ok(stmt @@ -311,7 +312,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT rowid FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args = params![index_block_hash]; + let args = params![index_block_hash.sqlhex()]; let res: Option = query_row(self, sql, args)?; Ok(res) } @@ -323,7 +324,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT consensus_hash,parent_block_id FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args = params![index_block_hash]; + let args = params![index_block_hash.sqlhex()]; let mut stmt = self.deref().prepare(sql)?; Ok(stmt @@ -346,7 +347,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let qry = "SELECT data FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args = params![index_block_hash]; + let args = params![index_block_hash.sqlhex()]; let res: Option> = query_row(self, qry, args)?; let Some(block_bytes) = res else { return Ok(None); @@ -401,7 +402,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let qry = "SELECT length(data) FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args = params![index_block_hash]; + let args = params![index_block_hash.sqlhex()]; let res = query_row(self, qry, args)? 
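+            // SQL length(data) is reported as a signed i64, hence the checked conversion below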
.map(|size: i64| u64::try_from(size).expect("FATAL: block size exceeds i64::MAX")); Ok(res) @@ -417,7 +418,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let qry = "SELECT data FROM nakamoto_staging_blocks WHERE is_tenure_start = 1 AND consensus_hash = ?1"; - let args = params![consensus_hash]; + let args = params![consensus_hash.sqlhex()]; let block_data: Vec> = query_rows(self, qry, args)?; Ok(block_data .into_iter() @@ -441,7 +442,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { ) -> Result, ChainstateError> { let qry = "SELECT data FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND processed = 1"; - let args = params![consensus_hash]; + let args = params![consensus_hash.sqlhex()]; let block_data: Vec> = query_rows(self, qry, args)?; Ok(block_data .into_iter() @@ -535,7 +536,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { consensus_hash: &ConsensusHash, ) -> Result { let sql = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND burn_attachable = 1"; - let args = rusqlite::params![consensus_hash]; + let args = rusqlite::params![consensus_hash.sqlhex()]; let res: Option = query_row(self, sql, args)?; Ok(res.is_some()) } @@ -551,7 +552,7 @@ impl NakamotoStagingBlocksTx<'_> { WHERE index_block_hash = ?1"; self.execute( clear_staged_block, - params![block, u64_to_sql(get_epoch_time_secs())?], + params![block.sqlhex(), u64_to_sql(get_epoch_time_secs())?], )?; Ok(()) @@ -564,14 +565,14 @@ impl NakamotoStagingBlocksTx<'_> { let update_dependents = "UPDATE nakamoto_staging_blocks SET orphaned = 1 WHERE parent_block_id = ?"; - self.execute(update_dependents, &[&block])?; + self.execute(update_dependents, &[&block.sqlhex()])?; let clear_staged_block = "UPDATE nakamoto_staging_blocks SET processed = 1, processed_time = ?2, orphaned = 1 WHERE index_block_hash = ?1"; self.execute( clear_staged_block, - params![block, u64_to_sql(get_epoch_time_secs())?], + params![block.sqlhex(), u64_to_sql(get_epoch_time_secs())?], )?; Ok(()) @@ -585,7 +586,7 @@ impl NakamotoStagingBlocksTx<'_> { ) -> Result<(), ChainstateError> { let update_dependents = "UPDATE nakamoto_staging_blocks SET burn_attachable = 1 WHERE consensus_hash = ?"; - self.execute(update_dependents, &[consensus_hash])?; + self.execute(update_dependents, params![consensus_hash.sqlhex()])?; Ok(()) } @@ -642,15 +643,15 @@ impl NakamotoStagingBlocksTx<'_> { data ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)", params![ - &block.header.block_hash(), - &block.header.consensus_hash, - &block.header.parent_block_id, + &block.header.block_hash().sqlhex(), + &block.header.consensus_hash.sqlhex(), + &block.header.parent_block_id.sqlhex(), &tenure_start, if burn_attachable { 1 } else { 0 }, 0, 0, u64_to_sql(block.header.chain_length)?, - &block.block_id(), + &block.block_id().sqlhex(), 0, obtain_method.to_string(), signing_weight, @@ -672,7 +673,7 @@ impl NakamotoStagingBlocksTx<'_> { ) -> Result { let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args = rusqlite::params![consensus_hash, block_hash]; + let args = rusqlite::params![consensus_hash.sqlhex(), block_hash.sqlhex()]; let present: Option = query_row(self, qry, args)?; Ok(present.is_some()) } @@ -709,8 +710,8 @@ impl NakamotoStagingBlocksTx<'_> { &block.serialize_to_vec(), &signing_weight, &obtain_method.to_string(), - &block.header.consensus_hash, - &block.header.block_hash(), + &block.header.consensus_hash.sqlhex(), + 
&block.header.block_hash().sqlhex(), ])?; Ok(()) } diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index c3ca481d964..9efef1a50cf 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -65,6 +65,7 @@ use rusqlite::{params, Connection}; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, MINER_REWARD_MATURITY}; use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash, StacksBlockId}; use stacks_common::types::StacksEpochId; +use stacks_common::util::db::SqlEncoded; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle, SortitionHandleConn}; use crate::chainstate::burn::BlockSnapshot; @@ -421,12 +422,12 @@ impl NakamotoChainState { // NOTE: this is checked with check_nakamoto_tenure() assert_eq!(block_header.consensus_hash, tenure.tenure_consensus_hash); let args = params![ - tenure.tenure_consensus_hash, - tenure.prev_tenure_consensus_hash, - tenure.burn_view_consensus_hash, + tenure.tenure_consensus_hash.sqlhex(), + tenure.prev_tenure_consensus_hash.sqlhex(), + tenure.burn_view_consensus_hash.sqlhex(), tenure.cause.as_u8(), - block_header.block_hash(), - block_header.block_id(), + block_header.block_hash().sqlhex(), + block_header.block_id().sqlhex(), u64_to_sql(coinbase_height)?, tenure.previous_tenure_blocks, ]; @@ -451,7 +452,7 @@ impl NakamotoChainState { ) -> Result<(), ChainstateError> { tx.execute( "DELETE FROM nakamoto_tenure_events WHERE tenure_id_consensus_hash = ?1", - &[ch], + params![ch.sqlhex()], )?; Ok(()) } @@ -476,7 +477,7 @@ impl NakamotoChainState { ) -> Result { // at least one block in this tenure let sql = "SELECT height_in_tenure FROM nakamoto_block_headers WHERE index_block_hash = ?1"; - let count = match query_int(chainstate_conn, sql, &[block_id]) { + let count = match query_int(chainstate_conn, sql, params![block_id.sqlhex()]) { Ok(count_i64) => { let count: u32 = count_i64 .try_into() @@ -498,7 +499,10 @@ impl NakamotoChainState { ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_tenure_events WHERE burn_view_consensus_hash = ?1 AND block_id = ?2"; - let args = rusqlite::params![tenure_id.burn_view_consensus_hash, tenure_id.block_id]; + let args = rusqlite::params![ + tenure_id.burn_view_consensus_hash.sqlhex(), + tenure_id.block_id.sqlhex() + ]; Ok(query_row(headers_conn, sql, args)?) } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 6e59899a54c..76dead9a473 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -34,6 +34,7 @@ use stacks_common::types::chainstate::{ StacksPublicKey, StacksWorkScore, TrieHash, VRFSeed, }; use stacks_common::types::{Address, PrivateKey, StacksEpoch, StacksEpochId}; +use stacks_common::util::db::SqlEncoded; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{hex_bytes, Hash160, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; @@ -87,7 +88,8 @@ impl NakamotoStagingBlocksConnRef<'_> { let mut cursor = tip.clone(); let qry = "SELECT data FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; loop { - let Some(block_data): Option> = query_row(self, qry, params![cursor])? else { + let Some(block_data): Option> = query_row(self, qry, params![cursor.sqlhex()])? 
+ else { break; }; let block = NakamotoBlock::consensus_deserialize(&mut block_data.as_slice())?; @@ -2024,7 +2026,7 @@ fn test_make_miners_stackerdb_config() { let miner = LeaderKeyRegisterOp { consensus_hash: last_snapshot.consensus_hash.clone(), public_key: vrf_pubkey, - memo: miner_hash160.0.to_vec(), + memo: miner_hash160.0.to_vec().into(), txid: Txid([id; 32]), vtxindex: 1 + (id as u32), block_height: last_snapshot.block_height + 1, @@ -2082,7 +2084,7 @@ fn test_make_miners_stackerdb_config() { // miners take turns winning key_block_ptr: miner.block_height as u32, key_vtxindex: miner.vtxindex as u16, - memo: vec![STACKS_EPOCH_3_0_MARKER], + memo: vec![STACKS_EPOCH_3_0_MARKER].into(), commit_outs: vec![], burn_fee: 12345, diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 3a21ebb0867..d52881e07b9 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -1255,7 +1255,7 @@ mod test { .unwrap(), ) .unwrap(), - memo: vec![1, 2, 3, 4, 5], + memo: vec![1, 2, 3, 4, 5].into(), txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -1276,7 +1276,7 @@ mod test { parent_vtxindex: 0, key_block_ptr: leader_key.block_height as u32, key_vtxindex: leader_key.vtxindex as u16, - memo: vec![0x80], + memo: vec![0x80].into(), commit_outs: vec![], burn_fee: 12345, diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index d09063fdb66..0459c899eaa 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -18,6 +18,7 @@ use clarity::types::chainstate::TenureBlockId; use clarity::vm::types::*; use rusqlite::{params, Row}; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::util::db::SqlEncoded; use crate::chainstate::stacks::db::*; use crate::chainstate::stacks::{Error, *}; @@ -449,10 +450,10 @@ impl StacksChainState { let args = params![ block_reward.address.to_string(), block_reward.recipient.to_string(), - block_reward.block_hash, - block_reward.consensus_hash, - block_reward.parent_block_hash, - block_reward.parent_consensus_hash, + block_reward.block_hash.sqlhex(), + block_reward.consensus_hash.sqlhex(), + block_reward.parent_block_hash.sqlhex(), + block_reward.parent_consensus_hash.sqlhex(), block_reward.coinbase.to_string(), db_tx_fees_anchored.to_string(), db_tx_fees_streamed.to_string(), @@ -461,7 +462,7 @@ impl StacksChainState { u64_to_sql(block_reward.stacks_block_height)?, true, 0i64, - index_block_hash, + index_block_hash.sqlhex(), payment_type, "0".to_string(), ]; @@ -544,8 +545,8 @@ impl StacksChainState { reward.tx_fees_anchored.to_string(), reward.tx_fees_streamed_confirmed.to_string(), reward.tx_fees_streamed_produced.to_string(), - parent_block_id, - child_block_id, + parent_block_id.sqlhex(), + child_block_id.sqlhex(), ]; tx.execute(sql, args) @@ -645,7 +646,7 @@ impl StacksChainState { child_block_id: &TenureBlockId, ) -> Result, Error> { let sql = "SELECT * FROM matured_rewards WHERE parent_index_block_hash = ?1 AND child_index_block_hash = ?2 AND vtxindex = 0"; - let args = params![parent_block_id.0, child_block_id.0]; + let args = params![parent_block_id.0.sqlhex(), child_block_id.0.sqlhex()]; let ret: Vec = query_rows(conn, sql, args).map_err(Error::DBError)?; Ok(ret) } @@ -711,7 +712,7 @@ impl StacksChainState { ) -> Result, Error> { let qry = "SELECT * FROM payments WHERE 
index_block_hash = ?1 ORDER BY vtxindex ASC".to_string(); - let args = params![index_block_hash]; + let args = params![index_block_hash.sqlhex()]; let rows = query_rows::(conn, &qry, args).map_err(Error::DBError)?; test_debug!("{} rewards in {}", rows.len(), index_block_hash); @@ -734,8 +735,8 @@ impl StacksChainState { let qry = "SELECT * FROM payments WHERE block_hash = ?1 AND consensus_hash = ?2 ORDER BY vtxindex ASC".to_string(); let args = params![ - ancestor_info.anchored_header.block_hash(), - ancestor_info.consensus_hash, + ancestor_info.anchored_header.block_hash().sqlhex(), + ancestor_info.consensus_hash.sqlhex(), ]; let rows = query_rows::(tx, &qry, args).map_err(Error::DBError)?; test_debug!( @@ -769,7 +770,7 @@ impl StacksChainState { let qry = "SELECT * FROM payments WHERE consensus_hash = ?1 AND block_hash = ?2 AND miner = 1" .to_string(); - let args = params![consensus_hash, stacks_block_hash,]; + let args = params![consensus_hash.sqlhex(), stacks_block_hash.sqlhex()]; let mut rows = query_rows::(conn, &qry, args).map_err(Error::DBError)?; let len = rows.len(); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index f5d62cd5096..4210b292bd5 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -36,6 +36,7 @@ use stacks_common::bitvec::BitVec; use stacks_common::codec::MAX_MESSAGE_LEN; use stacks_common::types::chainstate::{BurnchainHeaderHash, SortitionId, StacksBlockId}; use stacks_common::types::sqlite::NO_PARAMS; +use stacks_common::util::db::SqlEncoded; use stacks_common::util::hash::to_hex; use stacks_common::util::retry::BoundReader; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; @@ -752,7 +753,7 @@ impl StacksChainState { for (consensus_hash, block_hash) in blocks.into_iter() { let list_microblock_sql = "SELECT * FROM staging_microblocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 ORDER BY sequence".to_string(); - let list_microblock_args = params![block_hash, consensus_hash]; + let list_microblock_args = params![block_hash.sqlhex(), consensus_hash.sqlhex()]; let microblocks = query_rows::( blocks_conn, &list_microblock_sql, @@ -910,7 +911,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result>, Error> { let sql = format!("SELECT block_data FROM {table} WHERE block_hash = ?1"); - let args = [&block_hash]; + let args = [&block_hash.sqlhex()]; let mut blobs = StacksChainState::load_block_data_blobs(block_conn, &sql, &args)?; let len = blobs.len(); match len { @@ -947,7 +948,7 @@ impl StacksChainState { minimum_block_height: i64, ) -> bool { let sql = "SELECT 1 FROM staging_blocks WHERE microblock_pubkey_hash = ?1 AND height >= ?2"; - let args = params![pubkey_hash, minimum_block_height]; + let args = params![pubkey_hash.sqlhex(), minimum_block_height]; block_conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -963,7 +964,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND orphaned = 0 AND processed = 0"; - let args = params![block_hash, consensus_hash]; + let args = params![block_hash.sqlhex(), consensus_hash.sqlhex()]; let mut rows = query_rows::(block_conn, sql, args).map_err(Error::DBError)?; let len = rows.len(); @@ -992,7 +993,7 @@ impl StacksChainState { index_block_hash: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE index_block_hash = ?1 
AND orphaned = 0"; - let args = params![index_block_hash]; + let args = params![index_block_hash.sqlhex()]; query_row::(block_conn, sql, args).map_err(Error::DBError) } @@ -1003,7 +1004,7 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT parent_microblock_hash FROM staging_blocks WHERE index_block_hash = ?1 AND orphaned = 0"; block_conn - .query_row(sql, &[index_block_hash], |row| row.get(0)) + .query_row(sql, params![index_block_hash.sqlhex()], |row| row.get(0)) .optional() .map_err(|e| Error::DBError(db_error::from(e))) } @@ -1042,7 +1043,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT microblock_pubkey_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND processed = 0 AND orphaned = 0"; - let args = params![block_hash, consensus_hash]; + let args = params![block_hash.sqlhex(), consensus_hash.sqlhex()]; query_one_row_column( block_conn, sql, @@ -1095,7 +1096,7 @@ impl StacksChainState { microblock_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 AND orphaned = 0 LIMIT 1"; - let args = params![parent_index_block_hash, microblock_hash]; + let args = params![parent_index_block_hash.sqlhex(), microblock_hash.sqlhex()]; query_row::(blocks_conn, sql, args).map_err(Error::DBError) } @@ -1108,7 +1109,7 @@ impl StacksChainState { index_microblock_hash: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE index_microblock_hash = ?1 AND orphaned = 0 LIMIT 1"; - let args = params![index_microblock_hash]; + let args = params![index_microblock_hash.sqlhex()]; query_row::(blocks_conn, sql, args).map_err(Error::DBError) } @@ -1313,7 +1314,7 @@ impl StacksChainState { "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence >= ?2 AND sequence < ?3 AND orphaned = 0 ORDER BY sequence ASC" }; - let args = params![parent_index_block_hash, start_seq, last_seq]; + let args = params![parent_index_block_hash.sqlhex(), start_seq, last_seq]; let staging_microblocks = query_rows::(blocks_conn, sql, args).map_err(Error::DBError)?; @@ -1444,7 +1445,7 @@ impl StacksChainState { pub fn get_parent(&self, stacks_block: &StacksBlockId) -> Result { let sql = "SELECT parent_block_id FROM block_headers WHERE index_block_hash = ?"; self.db() - .query_row(sql, &[stacks_block], |row| row.get(0)) + .query_row(sql, params![stacks_block.sqlhex()], |row| row.get(0)) .map_err(|e| Error::from(db_error::from(e))) } @@ -1459,7 +1460,7 @@ impl StacksChainState { // find all blocks that we have that could be this block's parent let sql = "SELECT * FROM snapshots WHERE winning_stacks_block_hash = ?1"; let possible_parent_snapshots = - query_rows::(&sort_handle, sql, &[parent_block_hash])?; + query_rows::(&sort_handle, sql, params![parent_block_hash.sqlhex()])?; for possible_parent in possible_parent_snapshots.into_iter() { let burn_ancestor = sort_handle.get_block_snapshot(&possible_parent.burn_header_hash)?; @@ -1495,8 +1496,11 @@ impl StacksChainState { // find all blocks that we have that could be this block's parent let sql = "SELECT * FROM snapshots WHERE winning_stacks_block_hash = ?1"; - let possible_parent_snapshots = - query_rows::(&sort_handle, sql, &[&header.parent_block])?; + let possible_parent_snapshots = query_rows::( + &sort_handle, + sql, + params![header.parent_block.sqlhex()], + )?; for possible_parent in possible_parent_snapshots.into_iter() { let burn_ancestor = 
sort_handle.get_block_snapshot(&possible_parent.burn_header_hash)?; @@ -1545,7 +1549,10 @@ impl StacksChainState { // if this block has an unprocessed staging parent, then it's not attachable until its parent is. let has_unprocessed_parent_sql = "SELECT anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND processed = 0 AND orphaned = 0 LIMIT 1"; let has_parent_sql = "SELECT anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 LIMIT 1"; - let has_parent_args = params![block.header.parent_block, parent_consensus_hash]; + let has_parent_args = params![ + block.header.parent_block.sqlhex(), + parent_consensus_hash.sqlhex() + ]; let has_unprocessed_parent_rows = query_row_columns::( tx, has_unprocessed_parent_sql, @@ -1598,20 +1605,20 @@ impl StacksChainState { VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17)"; let args = params![ - block_hash, - block.header.parent_block, - consensus_hash, - parent_consensus_hash, - block.header.parent_microblock, + block_hash.sqlhex(), + block.header.parent_block.sqlhex(), + consensus_hash.sqlhex(), + parent_consensus_hash.sqlhex(), + block.header.parent_microblock.sqlhex(), block.header.parent_microblock_sequence, - block.header.microblock_pubkey_hash, + block.header.microblock_pubkey_hash.sqlhex(), u64_to_sql(block.header.total_work.work)?, attachable, 0, 0, u64_to_sql(commit_burn)?, u64_to_sql(sortition_burn)?, - index_block_hash, + index_block_hash.sqlhex(), u64_to_sql(get_epoch_time_secs())?, 0, u64_to_sql(download_time)?, @@ -1626,7 +1633,7 @@ impl StacksChainState { // this should be done across all burnchains. let children_sql = "UPDATE staging_blocks SET attachable = 0 WHERE parent_anchored_block_hash = ?1"; - let children_args = [&block_hash]; + let children_args = [&block_hash.sqlhex()]; tx.execute(children_sql, &children_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -1671,12 +1678,12 @@ impl StacksChainState { // store microblock metadata let sql = "INSERT OR REPLACE INTO staging_microblocks (anchored_block_hash, consensus_hash, index_block_hash, microblock_hash, parent_hash, index_microblock_hash, sequence, processed, orphaned) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)"; let args = params![ - parent_anchored_block_hash, - parent_consensus_hash, - index_block_hash, - microblock.block_hash(), - microblock.header.prev_block, - index_microblock_hash, + parent_anchored_block_hash.sqlhex(), + parent_consensus_hash.sqlhex(), + index_block_hash.sqlhex(), + microblock.block_hash().sqlhex(), + microblock.header.prev_block.sqlhex(), + index_microblock_hash.sqlhex(), microblock.header.sequence, 0, 0, @@ -1689,7 +1696,7 @@ impl StacksChainState { let block_sql = "INSERT OR REPLACE INTO staging_microblocks_data \ (block_hash, block_data) VALUES (?1, ?2)"; - let block_args = params![microblock.block_hash(), microblock_bytes]; + let block_args = params![microblock.block_hash().sqlhex(), microblock_bytes]; tx.execute(block_sql, block_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -1743,7 +1750,7 @@ impl StacksChainState { consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result, Error> { - StacksChainState::read_one_i64(blocks_conn, "SELECT processed FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2", &[block_hash, consensus_hash]) + StacksChainState::read_one_i64(blocks_conn, "SELECT processed FROM staging_blocks WHERE anchored_block_hash = ?1 AND 
consensus_hash = ?2", params![block_hash.sqlhex(), consensus_hash.sqlhex()]) .and_then(|processed| { let Some(processed_head) = processed else { // if empty, return false @@ -1761,7 +1768,7 @@ impl StacksChainState { query_rows::( blocks_conn, "SELECT consensus_hash FROM staging_blocks WHERE anchored_block_hash = ?1", - &[block_hash], + params![block_hash.sqlhex()], ) .map_err(|e| e.into()) } @@ -1772,7 +1779,7 @@ impl StacksChainState { consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result { - StacksChainState::read_one_i64(blocks_conn, "SELECT orphaned FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2", &[block_hash, consensus_hash]) + StacksChainState::read_one_i64(blocks_conn, "SELECT orphaned FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2", params![block_hash.sqlhex(), consensus_hash.sqlhex()]) .and_then(|orphaned| { let Some(orphaned_head) = orphaned else { // if empty, return false @@ -1792,7 +1799,7 @@ impl StacksChainState { parent_block_hash: &BlockHeaderHash, microblock_hash: &BlockHeaderHash, ) -> Result, Error> { - StacksChainState::read_one_i64(self.db(), "SELECT processed FROM staging_microblocks WHERE anchored_block_hash = ?1 AND microblock_hash = ?2 AND consensus_hash = ?3", &[&parent_block_hash, microblock_hash, &parent_consensus_hash]) + StacksChainState::read_one_i64(self.db(), "SELECT processed FROM staging_microblocks WHERE anchored_block_hash = ?1 AND microblock_hash = ?2 AND consensus_hash = ?3", params![&parent_block_hash.sqlhex(), microblock_hash.sqlhex(), &parent_consensus_hash.sqlhex()]) .and_then(|processed| { let Some(processed_head) = processed else { return Ok(None) @@ -1838,7 +1845,10 @@ impl StacksChainState { }; let sql = "SELECT 1 FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 AND processed = 1 AND orphaned = 0"; - let args = params![parent_index_block_hash, parent_microblock_hash]; + let args = params![ + parent_index_block_hash.sqlhex(), + parent_microblock_hash.sqlhex() + ]; let res = self .db() .query_row(sql, args, |_r| Ok(())) @@ -1861,7 +1871,7 @@ impl StacksChainState { ) -> Result { StacksChainState::read_one_i64(self.db(), "SELECT staging_microblocks.processed FROM staging_blocks JOIN staging_microblocks ON staging_blocks.parent_anchored_block_hash = staging_microblocks.anchored_block_hash AND staging_blocks.parent_consensus_hash = staging_microblocks.consensus_hash - WHERE staging_blocks.index_block_hash = ?1 AND staging_microblocks.microblock_hash = ?2 AND staging_microblocks.orphaned = 0", &[child_index_block_hash, &parent_microblock_hash]) + WHERE staging_blocks.index_block_hash = ?1 AND staging_microblocks.microblock_hash = ?2 AND staging_microblocks.orphaned = 0", params![child_index_block_hash.sqlhex(), &parent_microblock_hash.sqlhex()]) .and_then(|processed| { let Some(processed_head) = processed else { // if empty, return false @@ -2133,7 +2143,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let qry = "SELECT consensus_hash FROM staging_blocks WHERE anchored_block_hash = ?1"; - let args = params![block_hash]; + let args = params![block_hash.sqlhex()]; query_rows(conn, qry, args).map_err(|e| e.into()) } @@ -2154,7 +2164,7 @@ impl StacksChainState { tx: &mut DBTx, microblock_hash: &BlockHeaderHash, ) -> Result<(), Error> { - let args = [µblock_hash]; + let args = [µblock_hash.sqlhex()]; // copy into the invalidated_microblocks_data table let copy_sql = "INSERT OR REPLACE INTO 
invalidated_microblocks_data SELECT * FROM staging_microblocks_data WHERE block_hash = ?1"; @@ -2177,16 +2187,17 @@ ) -> Result<(), Error> { // This block is orphaned let update_block_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 1, attachable = 0 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let update_block_args = params![consensus_hash, anchored_block_hash]; + let update_block_args = params![consensus_hash.sqlhex(), anchored_block_hash.sqlhex()]; // All descendants of this processed block are never attachable. // Indicate this by marking all children as orphaned (but not processed), across all burnchain forks. let update_children_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 0, attachable = 0 WHERE parent_consensus_hash = ?1 AND parent_anchored_block_hash = ?2"; - let update_children_args = params![consensus_hash, anchored_block_hash]; + let update_children_args = params![consensus_hash.sqlhex(), anchored_block_hash.sqlhex()]; // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let find_orphaned_microblocks_args = params![consensus_hash, anchored_block_hash]; + let find_orphaned_microblocks_args = + params![consensus_hash.sqlhex(), anchored_block_hash.sqlhex()]; let orphaned_microblock_hashes = query_row_columns::<BlockHeaderHash, _>( tx, find_orphaned_microblocks_sql, @@ -2196,7 +2207,8 @@ // drop microblocks (this processes them) let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let update_microblock_children_args = params![consensus_hash, anchored_block_hash]; + let update_microblock_children_args = + params![consensus_hash.sqlhex(), anchored_block_hash.sqlhex()]; tx.execute(update_block_sql, update_block_args)?; @@ -2243,7 +2255,7 @@ ); let sql = "DELETE FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 1 AND processed = 1"; - let args = params![consensus_hash, anchored_block_hash]; + let args = params![consensus_hash.sqlhex(), anchored_block_hash.sqlhex()]; tx.execute(sql, args)?; @@ -2267,7 +2279,7 @@ accept: bool, ) -> Result<(), Error> { let sql = "SELECT * FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 0".to_string(); - let args = params![consensus_hash, anchored_block_hash]; + let args = params![consensus_hash.sqlhex(), anchored_block_hash.sqlhex()]; let has_stored_block = StacksChainState::has_stored_block( tx, @@ -2281,7 +2293,7 @@ None => { // not an error if this block was already orphaned let orphan_sql = "SELECT * FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 1".to_string(); - let orphan_args = params![consensus_hash, anchored_block_hash]; + let orphan_args = params![consensus_hash.sqlhex(), anchored_block_hash.sqlhex()]; let orphan_rows = query_rows::<StagingBlock, _>(tx, &orphan_sql, orphan_args) .map_err(Error::DBError)?; if orphan_rows.len() == 1 { @@ -2332,8 +2344,8 @@ let update_sql = "UPDATE staging_blocks SET processed = 1, processed_time = ?1 WHERE consensus_hash = ?2 AND anchored_block_hash = ?3".to_string(); let update_args = params![ u64_to_sql(get_epoch_time_secs())?, - consensus_hash, - anchored_block_hash, + consensus_hash.sqlhex(), + 
anchored_block_hash.sqlhex(), ]; tx.execute(&update_sql, update_args) @@ -2345,7 +2357,7 @@ impl StacksChainState { let update_children_sql = "UPDATE staging_blocks SET attachable = 1 WHERE parent_anchored_block_hash = ?1" .to_string(); - let update_children_args = [&anchored_block_hash]; + let update_children_args = [&anchored_block_hash.sqlhex()]; tx.execute(&update_children_sql, &update_children_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -2398,11 +2410,12 @@ impl StacksChainState { &index_block_hash ); let update_block_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 1, attachable = 0 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string(); - let update_block_args = params![consensus_hash, anchored_block_hash]; + let update_block_args = params![consensus_hash.sqlhex(), anchored_block_hash.sqlhex()]; // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let find_orphaned_microblocks_args = params![consensus_hash, anchored_block_hash]; + let find_orphaned_microblocks_args = + params![consensus_hash.sqlhex(), anchored_block_hash.sqlhex()]; let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, @@ -2419,7 +2432,8 @@ impl StacksChainState { &index_block_hash ); let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string(); - let update_microblock_children_args = params![consensus_hash, anchored_block_hash]; + let update_microblock_children_args = + params![consensus_hash.sqlhex(), anchored_block_hash.sqlhex()]; tx.execute(&update_block_sql, update_block_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -2455,7 +2469,11 @@ impl StacksChainState { ) -> Result<(), Error> { // find offending sequence let seq_sql = "SELECT sequence FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND microblock_hash = ?3 AND processed = 0 AND orphaned = 0".to_string(); - let seq_args = params![consensus_hash, anchored_block_hash, invalid_block_hash]; + let seq_args = params![ + consensus_hash.sqlhex(), + anchored_block_hash.sqlhex(), + invalid_block_hash.sqlhex() + ]; let seq = match query_int::<_>(tx, &seq_sql, seq_args) { Ok(seq) => seq, Err(e) => match e { @@ -2476,7 +2494,7 @@ impl StacksChainState { // drop staging children at and beyond the invalid block let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE anchored_block_hash = ?1 AND sequence >= ?2".to_string(); - let update_microblock_children_args = params![anchored_block_hash, seq]; + let update_microblock_children_args = params![anchored_block_hash.sqlhex(), seq]; tx.execute( &update_microblock_children_sql, @@ -2486,7 +2504,7 @@ impl StacksChainState { // find all orphaned microblocks hashes, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE anchored_block_hash = ?1 AND sequence >= ?2"; - let find_orphaned_microblocks_args = params![anchored_block_hash, seq]; + let find_orphaned_microblocks_args = params![anchored_block_hash.sqlhex(), seq]; let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, @@ -2503,7 +2521,7 @@ impl StacksChainState { for mblock_hash in orphaned_microblock_hashes.iter() { // orphan any staging blocks that 
build on the now-invalid microblocks let update_block_children_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 0, attachable = 0 WHERE parent_microblock_hash = ?1".to_string(); - let update_block_children_args = [&mblock_hash]; + let update_block_children_args = [&mblock_hash.sqlhex()]; tx.execute(&update_block_children_sql, &update_block_children_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -2541,7 +2559,11 @@ impl StacksChainState { test_debug!("Set {}-{} processed", &parent_index_hash, &mblock_hash); // confirm this microblock - let args = params![parent_consensus_hash, parent_block_hash, mblock_hash]; + let args = params![ + parent_consensus_hash.sqlhex(), + parent_block_hash.sqlhex(), + mblock_hash.sqlhex() + ]; tx.execute(sql, args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -2591,7 +2613,7 @@ impl StacksChainState { }; let parent_index_block_hash = StacksBlockHeader::make_index_block_hash(&parent_consensus_hash, &parent_block_hash); - StacksChainState::read_one_i64(self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence = ?2", &[&parent_index_block_hash, &seq]) + StacksChainState::read_one_i64(self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence = ?2", &[&parent_index_block_hash.sqlhex(), &seq]) .and_then(|processed| { let Some(processed_head) = processed else { // if empty, return false @@ -2608,7 +2630,7 @@ impl StacksChainState { index_microblock_hash: &StacksBlockId, ) -> Result { let sql = "SELECT 1 FROM staging_microblocks WHERE index_microblock_hash = ?1 AND processed = 1 AND orphaned = 0"; - let args = params![index_microblock_hash]; + let args = params![index_microblock_hash.sqlhex()]; let res = conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -2662,7 +2684,7 @@ impl StacksChainState { StacksChainState::has_any_i64( self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence >= ?2 LIMIT 1", - &[&parent_index_block_hash, &min_seq] + params![parent_index_block_hash.sqlhex(), &min_seq] ) } @@ -2678,7 +2700,7 @@ impl StacksChainState { StacksChainState::has_any_i64( self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 LIMIT 1", - &[parent_index_block_hash, microblock_hash] + params![parent_index_block_hash.sqlhex(), microblock_hash.sqlhex()] ) } @@ -2692,7 +2714,7 @@ impl StacksChainState { StacksChainState::has_any_i64( self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 LIMIT 1", - &[&parent_index_block_hash], + params![parent_index_block_hash.sqlhex()], ) } @@ -2707,7 +2729,7 @@ impl StacksChainState { "SELECT {},{} FROM staging_blocks WHERE index_block_hash = ?1", consensus_hash_col, anchored_block_col ); - let args = params![index_block_hash]; + let args = params![index_block_hash.sqlhex()]; blocks_db .query_row(&sql, args, |row| { @@ -2759,7 +2781,7 @@ impl StacksChainState { staging_microblocks JOIN staging_microblocks_data \ ON staging_microblocks.microblock_hash = staging_microblocks_data.block_hash \ WHERE staging_microblocks.index_block_hash = ?1 AND staging_microblocks.microblock_hash = ?2"; - let args = params![parent_index_block_hash, microblock_hash,]; + let args = params![parent_index_block_hash.sqlhex(), microblock_hash.sqlhex()]; query_row(blocks_conn, sql, args).map_err(Error::DBError) } @@ -2772,7 +2794,7 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE 
index_block_hash = ?1 ORDER BY sequence" .to_string(); - let args = params![parent_index_block_hash]; + let args = params![parent_index_block_hash.sqlhex()]; let microblock_info = query_rows::(blocks_conn, &sql, args).map_err(Error::DBError)?; Ok(microblock_info) @@ -3051,7 +3073,8 @@ impl StacksChainState { let args = params![StacksBlockHeader::make_index_block_hash( parent_consensus_hash, parent_block_hash, - )]; + ) + .sqlhex()]; let res = block_conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -3793,8 +3816,8 @@ impl StacksChainState { // block in the headers database? let hdr_sql = "SELECT * FROM block_headers WHERE block_hash = ?1 AND consensus_hash = ?2".to_string(); let hdr_args = params![ - candidate.parent_anchored_block_hash, - candidate.parent_consensus_hash, + candidate.parent_anchored_block_hash.sqlhex(), + candidate.parent_consensus_hash.sqlhex(), ]; let hdr_row = query_row_panic::( blocks_tx, @@ -4194,7 +4217,7 @@ impl StacksChainState { &sender.clone().into(), &recipient.clone().into(), transfered_ustx, - &BuffData { data: memo }, + &BuffData { data: memo.clone().0 }, ) }); match result { @@ -6417,7 +6440,7 @@ impl StacksChainState { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args = params![consensus_hash, block_bhh]; + let args = params![consensus_hash.sqlhex(), block_bhh.sqlhex()]; query_row(self.db(), sql, args).map_err(Error::DBError) } @@ -6426,7 +6449,7 @@ impl StacksChainState { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args = params![consensus_hash, block_bhh]; + let args = params![consensus_hash.sqlhex(), block_bhh.sqlhex()]; let Some(staging_block): Option = query_row(self.db(), sql, args).map_err(Error::DBError)? 
else { @@ -6450,8 +6473,8 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; let args = params![ - staging_block.parent_consensus_hash, - staging_block.parent_anchored_block_hash, + staging_block.parent_consensus_hash.sqlhex(), + staging_block.parent_anchored_block_hash.sqlhex(), ]; query_row(self.db(), sql, args).map_err(Error::DBError) } @@ -6463,7 +6486,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT height FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args = params![consensus_hash, block_hash]; + let args = params![consensus_hash.sqlhex(), block_hash.sqlhex()]; query_row(self.db(), sql, args).map_err(Error::DBError) } @@ -10782,7 +10805,7 @@ pub mod test { sender: addr.clone(), recipient: recipient_addr.clone(), transfered_ustx: ((tenure_id + 1) * 1000) as u128, - memo: vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05], + memo: vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05].into(), txid: Txid::from_test_data( tenure_id as u64, diff --git a/stackslib/src/chainstate/stacks/db/headers.rs b/stackslib/src/chainstate/stacks/db/headers.rs index 1be4fef48a0..8ea5f36969d 100644 --- a/stackslib/src/chainstate/stacks/db/headers.rs +++ b/stackslib/src/chainstate/stacks/db/headers.rs @@ -134,24 +134,24 @@ impl StacksChainState { header.version, total_burn_str, total_work_str, - header.proof, - header.parent_block, - header.parent_microblock, + header.proof.sqlhex(), + header.parent_block.sqlhex(), + header.parent_microblock.sqlhex(), header.parent_microblock_sequence, - header.tx_merkle_root, - header.state_index_root, - header.microblock_pubkey_hash, - block_hash, - index_block_hash, - consensus_hash, - burn_header_hash, + header.tx_merkle_root.sqlhex(), + header.state_index_root.sqlhex(), + header.microblock_pubkey_hash.sqlhex(), + block_hash.sqlhex(), + index_block_hash.sqlhex(), + consensus_hash.sqlhex(), + burn_header_hash.sqlhex(), (burn_header_height as i64), (burn_header_timestamp as i64), (block_height as i64), - index_root, + index_root.sqlhex(), anchored_block_cost, block_size_str, - parent_id + parent_id.sqlhex() ]; tx.execute("INSERT INTO block_headers \ @@ -187,7 +187,7 @@ impl StacksChainState { block: &StacksBlockId, ) -> Result, Error> { let qry = "SELECT cost FROM block_headers WHERE index_block_hash = ?"; - conn.query_row(qry, &[block], |row| row.get(0)) + conn.query_row(qry, params![block.sqlhex()], |row| row.get(0)) .optional() .map_err(|e| Error::from(db_error::from(e))) } @@ -198,7 +198,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result { let sql = "SELECT 1 FROM block_headers WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args = params![consensus_hash, block_hash]; + let args = params![consensus_hash.sqlhex(), block_hash.sqlhex()]; match conn.query_row(sql, args, |_| Ok(true)) { Ok(_) => Ok(true), Err(rusqlite::Error::QueryReturnedNoRows) => Ok(false), @@ -214,7 +214,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT * FROM block_headers WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args = params![consensus_hash, block_hash]; + let args = params![consensus_hash.sqlhex(), block_hash.sqlhex()]; query_row_panic(conn, sql, args, || { "FATAL: multiple rows for the same block hash".to_string() }) @@ -228,7 +228,7 @@ impl StacksChainState { index_block_hash: &StacksBlockId, ) -> Result, Error> { let 
sql = "SELECT * FROM block_headers WHERE index_block_hash = ?1"; - query_row_panic(conn, sql, &[&index_block_hash], || { + query_row_panic(conn, sql, &[&index_block_hash.sqlhex()], || { "FATAL: multiple rows for the same block hash".to_string() }) .map_err(Error::DBError) @@ -243,7 +243,7 @@ impl StacksChainState { consensus_hash: &ConsensusHash, ) -> Result, Error> { let sql = "SELECT * FROM block_headers WHERE consensus_hash = ?1"; - query_row_panic(conn, sql, &[&consensus_hash], || { + query_row_panic(conn, sql, &[&consensus_hash.sqlhex()], || { "FATAL: multiple rows for the same consensus hash".to_string() }) .map_err(Error::DBError) @@ -308,7 +308,7 @@ impl StacksChainState { pub fn get_genesis_header_info(conn: &Connection) -> Result { // by construction, only one block can have height 0 in this DB let sql = "SELECT * FROM block_headers WHERE consensus_hash = ?1 AND block_height = 0"; - let args = params![FIRST_BURNCHAIN_CONSENSUS_HASH]; + let args = params![FIRST_BURNCHAIN_CONSENSUS_HASH.sqlhex()]; let row_opt = query_row(conn, sql, args)?; Ok(row_opt.expect("BUG: no genesis header info")) } @@ -319,7 +319,7 @@ impl StacksChainState { block_id: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT parent_block_id FROM block_headers WHERE index_block_hash = ?1 LIMIT 1"; - let args = params![block_id]; + let args = params![block_id.sqlhex()]; let mut rows = query_row_columns::(conn, sql, args, "parent_block_id")?; Ok(rows.pop()) } @@ -327,7 +327,7 @@ impl StacksChainState { /// Is this block present and processed? pub fn has_stacks_block(conn: &Connection, block_id: &StacksBlockId) -> Result { let sql = "SELECT 1 FROM block_headers WHERE index_block_hash = ?1 LIMIT 1"; - let args = params![block_id]; + let args = params![block_id.sqlhex()]; Ok(conn .query_row(sql, args, |_r| Ok(())) .optional() diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index a72f3f9edc9..54f3d452c41 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -37,6 +37,7 @@ use serde::Deserialize; use stacks_common::codec::{read_next, write_next, StacksMessageCodec}; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, TrieHash}; use stacks_common::types::sqlite::NO_PARAMS; +use stacks_common::util::db::SqlEncoded; use stacks_common::util::hash::{hex_bytes, to_hex}; use crate::burnchains::bitcoin::address::LegacyBitcoinAddress; @@ -2610,7 +2611,7 @@ impl StacksChainState { index_block_hash: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT txids FROM burnchain_txids WHERE index_block_hash = ?1"; - let args = params![index_block_hash]; + let args = params![index_block_hash.sqlhex()]; let txids = conn .query_row(sql, args, |r| { @@ -2694,7 +2695,7 @@ impl StacksChainState { let txids_json = serde_json::to_string(&txids).expect("FATAL: could not serialize Vec"); let sql = "INSERT INTO burnchain_txids (index_block_hash, txids) VALUES (?1, ?2)"; - let args = params![index_block_hash, &txids_json]; + let args = params![index_block_hash.sqlhex(), &txids_json]; tx.execute(sql, args)?; Ok(()) } @@ -2822,7 +2823,7 @@ impl StacksChainState { if applied_epoch_transition { debug!("Block {} applied an epoch transition", &index_block_hash); let sql = "INSERT INTO epoch_transitions (block_id) VALUES (?)"; - let args = params![&index_block_hash]; + let args = params![&index_block_hash.sqlhex()]; headers_tx.deref_mut().execute(sql, args)?; } @@ -3184,15 +3185,15 @@ pub mod test { to_hex(&[0u8; 32]), 
to_hex(&[0u8; 32]), to_hex(&[0u8; 20]), - &sample_block_hash, - &sample_index_block_hash, + &sample_block_hash.sqlhex(), + &sample_index_block_hash.sqlhex(), 1, to_hex(&[0u8; 32]), - &sample_consensus_hash, - &sample_burn_header_hash, + &sample_consensus_hash.sqlhex(), + &sample_burn_header_hash.sqlhex(), 100, 1234567890, - &sample_parent_block_id, + &sample_parent_block_id.sqlhex(), serde_json::to_string(&ExecutionCost::ZERO).unwrap(), "1000", 10 @@ -3248,7 +3249,7 @@ pub mod test { .query_row( "SELECT block_hash, consensus_hash, block_size FROM block_headers WHERE index_block_hash = ?", - params![&sample_index_block_hash], + params![&sample_index_block_hash.sqlhex()], |row| { Ok(( row.get::<_, String>(0)?, diff --git a/stackslib/src/chainstate/stacks/index/mod.rs b/stackslib/src/chainstate/stacks/index/mod.rs index 5b920784560..2c8ae808bf6 100644 --- a/stackslib/src/chainstate/stacks/index/mod.rs +++ b/stackslib/src/chainstate/stacks/index/mod.rs @@ -88,7 +88,7 @@ pub struct TrieLeaf { pub trait MarfTrieId: ClarityMarfTrieId - + rusqlite::types::ToSql + + stacks_common::util::db::SqlEncoded + rusqlite::types::FromSql + stacks_common::codec::StacksMessageCodec + std::convert::From diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index 009f2b68886..7b95f3bba5b 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -160,7 +160,7 @@ pub fn migrate_tables_if_needed(conn: &mut Connection) -> Result< pub fn get_block_identifier(conn: &Connection, bhh: &T) -> Result { conn.query_row( "SELECT block_id FROM marf_data WHERE block_hash = ?", - &[bhh], + params![bhh.sqlhex()], |row| row.get("block_id"), ) .map_err(|e| e.into()) @@ -169,7 +169,7 @@ pub fn get_block_identifier(conn: &Connection, bhh: &T) -> Result pub fn get_mined_block_identifier(conn: &Connection, bhh: &T) -> Result { conn.query_row( "SELECT block_id FROM mined_blocks WHERE block_hash = ?", - &[bhh], + params![bhh.sqlhex()], |row| row.get("block_id"), ) .map_err(|e| e.into()) @@ -181,7 +181,7 @@ pub fn get_confirmed_block_identifier( ) -> Result, Error> { conn.query_row( "SELECT block_id FROM marf_data WHERE block_hash = ? AND unconfirmed = 0", - &[bhh], + params![bhh.sqlhex()], |row| row.get("block_id"), ) .optional() @@ -194,7 +194,7 @@ pub fn get_unconfirmed_block_identifier( ) -> Result, Error> { conn.query_row( "SELECT block_id FROM marf_data WHERE block_hash = ? AND unconfirmed = 1", - &[bhh], + params![bhh.sqlhex()], |row| row.get("block_id"), ) .optional() @@ -221,7 +221,7 @@ pub fn write_trie_blob( block_hash: &T, data: &[u8], ) -> Result { - let args = params![block_hash, data, 0, 0, 0,]; + let args = params![block_hash.sqlhex(), data, 0, 0, 0,]; let mut s = conn.prepare("INSERT INTO marf_data (block_hash, data, unconfirmed, external_offset, external_length) VALUES (?, ?, ?, ?, ?)")?; let block_id = s @@ -249,7 +249,7 @@ fn inner_write_external_trie_blob( // existing entry (i.e. 
diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs
index 009f2b68886..7b95f3bba5b 100644
--- a/stackslib/src/chainstate/stacks/index/trie_sql.rs
+++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs
@@ -160,7 +160,7 @@ pub fn migrate_tables_if_needed(conn: &mut Connection) -> Result<
 pub fn get_block_identifier<T: MarfTrieId>(conn: &Connection, bhh: &T) -> Result<u32, Error> {
     conn.query_row(
         "SELECT block_id FROM marf_data WHERE block_hash = ?",
-        &[bhh],
+        params![bhh.sqlhex()],
         |row| row.get("block_id"),
     )
     .map_err(|e| e.into())
@@ -169,7 +169,7 @@ pub fn get_block_identifier
 pub fn get_mined_block_identifier<T: MarfTrieId>(conn: &Connection, bhh: &T) -> Result<u32, Error> {
     conn.query_row(
         "SELECT block_id FROM mined_blocks WHERE block_hash = ?",
-        &[bhh],
+        params![bhh.sqlhex()],
         |row| row.get("block_id"),
     )
     .map_err(|e| e.into())
@@ -181,7 +181,7 @@ pub fn get_confirmed_block_identifier(
 ) -> Result<Option<u32>, Error> {
     conn.query_row(
         "SELECT block_id FROM marf_data WHERE block_hash = ? AND unconfirmed = 0",
-        &[bhh],
+        params![bhh.sqlhex()],
         |row| row.get("block_id"),
     )
     .optional()
@@ -194,7 +194,7 @@ pub fn get_unconfirmed_block_identifier(
 ) -> Result<Option<u32>, Error> {
     conn.query_row(
         "SELECT block_id FROM marf_data WHERE block_hash = ? AND unconfirmed = 1",
-        &[bhh],
+        params![bhh.sqlhex()],
         |row| row.get("block_id"),
     )
     .optional()
@@ -221,7 +221,7 @@ pub fn write_trie_blob(
     block_hash: &T,
     data: &[u8],
 ) -> Result<u32, Error> {
-    let args = params![block_hash, data, 0, 0, 0,];
+    let args = params![block_hash.sqlhex(), data, 0, 0, 0,];
     let mut s =
         conn.prepare("INSERT INTO marf_data (block_hash, data, unconfirmed, external_offset, external_length) VALUES (?, ?, ?, ?, ?)")?;
     let block_id = s
@@ -249,7 +249,7 @@ fn inner_write_external_trie_blob(
         // existing entry (i.e. a migration)
         let empty_blob: &[u8] = &[];
         let args = params![
-            block_hash,
+            block_hash.sqlhex(),
             empty_blob,
             0,
             u64_to_sql(offset)?,
@@ -269,7 +269,7 @@ fn inner_write_external_trie_blob(
         // new entry
         let empty_blob: &[u8] = &[];
         let args = params![
-            block_hash,
+            block_hash.sqlhex(),
             empty_blob,
             0,
             u64_to_sql(offset)?,
@@ -330,7 +330,7 @@ pub fn write_trie_blob_to_mined(
             .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks");
     } else {
         // doesn't exist yet; insert
-        let args = params![block_hash, data];
+        let args = params![block_hash.sqlhex(), data];
         let mut s = conn.prepare("INSERT INTO mined_blocks (block_hash, data) VALUES (?, ?)")?;
         s.execute(args)
             .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks");
@@ -363,7 +363,7 @@ pub fn write_trie_blob_to_unconfirmed(
             .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks");
     } else {
         // doesn't exist yet; insert
-        let args = params![block_hash, data, 1];
+        let args = params![block_hash.sqlhex(), data, 1];
         let mut s =
             conn.prepare("INSERT INTO marf_data (block_hash, data, unconfirmed, external_offset, external_length) VALUES (?, ?, ?, 0, 0)")?;
         s.execute(args)
@@ -451,7 +451,7 @@ pub fn read_node_hash_bytes_by_bhh(
 ) -> Result<(), Error> {
     let row_id: i64 = conn.query_row(
         "SELECT block_id FROM marf_data WHERE block_hash = ?",
-        &[bhh],
+        params![bhh.sqlhex()],
         |r| r.get("block_id"),
     )?;
     let mut blob = conn.blob_open(DatabaseName::Main, "marf_data", "data", row_id, true)?;
@@ -508,7 +508,7 @@ pub fn get_external_trie_offset_length_by_bhh(
     bhh: &T,
 ) -> Result<(u64, u64), Error> {
     let qry = "SELECT external_offset, external_length FROM marf_data WHERE block_hash = ?1";
-    let args = params![bhh];
+    let args = params![bhh.sqlhex()];
     let (offset, length) = query_row(conn, qry, args)?.ok_or(Error::NotFoundError)?;
     Ok((offset, length))
 }
@@ -577,7 +577,7 @@ pub fn get_node_hash_bytes_by_bhh(
 ) -> Result<TrieHash, Error> {
     let row_id: i64 = conn.query_row(
         "SELECT block_id FROM marf_data WHERE block_hash = ?",
-        &[bhh],
+        params![bhh.sqlhex()],
         |r| r.get("block_id"),
     )?;
     let mut blob = conn.blob_open(DatabaseName::Main, "marf_data", "data", row_id, true)?;
@@ -596,7 +596,7 @@ pub fn tx_lock_bhh_for_extension(
     let is_bhh_committed = tx
         .query_row(
             "SELECT 1 FROM marf_data WHERE block_hash = ? LIMIT 1",
-            &[bhh],
+            params![bhh.sqlhex()],
             |_row| Ok(()),
         )
         .optional()?
@@ -609,7 +609,7 @@ pub fn tx_lock_bhh_for_extension(
     let is_bhh_locked = tx
         .query_row(
             "SELECT 1 FROM block_extension_locks WHERE block_hash = ? LIMIT 1",
-            &[bhh],
+            params![bhh.sqlhex()],
             |_row| Ok(()),
         )
         .optional()?
@@ -620,7 +620,7 @@ pub fn tx_lock_bhh_for_extension(
     tx.execute(
         "INSERT INTO block_extension_locks (block_hash) VALUES (?)",
-        &[bhh],
+        params![bhh.sqlhex()],
     )?;
     Ok(true)
 }
@@ -646,7 +646,7 @@ pub fn count_blocks(conn: &Connection) -> Result<u32, Error> {
 pub fn is_unconfirmed_block(conn: &Connection, block_id: u32) -> Result<bool, Error> {
     let res: i64 = conn.query_row(
         "SELECT unconfirmed FROM marf_data WHERE block_id = ?1",
-        &[&block_id],
+        params![&block_id],
         |row| row.get("unconfirmed"),
     )?;
     Ok(res != 0)
@@ -655,7 +655,7 @@ pub fn is_unconfirmed_block
 pub fn drop_lock<T: MarfTrieId>(conn: &Connection, bhh: &T) -> Result<(), Error> {
     conn.execute(
         "DELETE FROM block_extension_locks WHERE block_hash = ?",
-        &[bhh],
+        params![bhh.sqlhex()],
     )?;
     Ok(())
 }
@@ -664,7 +664,7 @@ pub fn drop_unconfirmed_trie(conn: &Connection, bhh: &T) -> Result<(), Error> {
     debug!("Drop unconfirmed trie sqlite blob {}", bhh);
     conn.execute(
         "DELETE FROM marf_data WHERE block_hash = ? AND unconfirmed = 1",
-        &[bhh],
+        params![bhh.sqlhex()],
     )?;
     debug!("Dropped unconfirmed trie sqlite blob {}", bhh);
     Ok(())
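One reason these call sites move from `&[bhh]` to `params![bhh.sqlhex()]`: a plain slice needs one uniform element type, while `params!` erases each argument to `&dyn ToSql` and can therefore mix the hex `String` with blobs and integers, as `write_trie_blob` does above. A runnable sketch of that mixed binding (the schema and values are stand-ins):

use rusqlite::{params, Connection};

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute(
        "CREATE TABLE marf_data (
            block_hash TEXT, data BLOB, unconfirmed INTEGER,
            external_offset INTEGER, external_length INTEGER
        )",
        [],
    )?;
    let block_hash_hex = "ab".repeat(32); // stand-in for block_hash.sqlhex()
    let data: &[u8] = &[];
    // Mixed-type binding mirroring `params![block_hash.sqlhex(), data, 0, 0, 0]`:
    // hex text, a blob, and three integers in one parameter list.
    conn.execute(
        "INSERT INTO marf_data VALUES (?, ?, ?, ?, ?)",
        params![block_hash_hex, data, 0, 0, 0],
    )?;
    Ok(())
}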
AND unconfirmed = 1", - &[bhh], + params![bhh.sqlhex()], )?; debug!("Dropped unconfirmed trie sqlite blob {}", bhh); Ok(()) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index fb7434d78a4..91aa4424b73 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -32,6 +32,7 @@ use mempool::MemPoolWalkStrategy; use rand::{thread_rng, Rng}; use rusqlite::params; use stacks_common::address::*; +use stacks_common::util::db::SqlEncoded; use stacks_common::util::hash::MerkleTree; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::{get_epoch_time_ms, sleep_ms}; @@ -4839,7 +4840,7 @@ fn paramaterized_mempool_walk_test( mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - params![Some(123.0), &txid], + params![Some(123.0), &txid.sqlhex()], ) .unwrap(); } else { @@ -4847,7 +4848,7 @@ fn paramaterized_mempool_walk_test( mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - params![none, &txid], + params![none, &txid.sqlhex()], ) .unwrap(); } @@ -5065,7 +5066,7 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - params![Some(fee_rate), &txid], + params![Some(fee_rate), &txid.sqlhex()], ) .unwrap(); diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 53dfd481cc9..aaaae36189c 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -22,7 +22,7 @@ use std::{fs, io}; use clarity::vm::coverage::CoverageReporter; use lazy_static::lazy_static; use rand::Rng; -use rusqlite::{Connection, OpenFlags}; +use rusqlite::{params, Connection, OpenFlags}; use serde::Serialize; use serde_json::json; use stacks_common::address::c32::c32_address; @@ -31,6 +31,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, VRFSeed, }; use stacks_common::types::sqlite::NO_PARAMS; +use stacks_common::util::db::SqlEncoded; use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::{bytes_to_hex, Hash160, Sha512Trunc256Sum}; @@ -311,7 +312,10 @@ fn get_cli_block_height(conn: &Connection, block_id: &StacksBlockId) -> Option( where F: Fn(&Row) -> R, { - let args = params![id_bhh]; + let args = params![id_bhh.sqlhex()]; let table_name = if nakamoto { "nakamoto_block_headers" } else { @@ -777,7 +778,7 @@ fn get_miner_column( where F: FnOnce(&Row) -> R, { - let args = params![id_bhh.0]; + let args = params![id_bhh.0.sqlhex()]; conn.query_row( &format!( "SELECT {} FROM payments WHERE index_block_hash = ? 
AND miner = 1", @@ -809,7 +810,7 @@ fn get_matured_reward( .conn() .query_row( &format!("SELECT parent_block_id FROM {table_name} WHERE index_block_hash = ?"), - params![child_id_bhh.0], + params![child_id_bhh.0.sqlhex()], |x| { Ok(StacksBlockId::from_column(x, "parent_block_id") .expect("Bad parent_block_id in database")) diff --git a/stackslib/src/config/chain_data.rs b/stackslib/src/config/chain_data.rs index 097c3da5c88..4df73bbf701 100644 --- a/stackslib/src/config/chain_data.rs +++ b/stackslib/src/config/chain_data.rs @@ -282,7 +282,7 @@ impl MinerStats { parent_vtxindex: 1, key_block_ptr: 1, key_vtxindex: 1, - memo: vec![], + memo: vec![].into(), commit_outs: decoded_pox_addrs, burn_fee: unconfirmed_commit.burn, input: (input_txid, unconfirmed_commit.input_index), @@ -447,7 +447,7 @@ impl MinerStats { parent_vtxindex: 2, key_block_ptr: 2, key_vtxindex: 2, - memo: vec![], + memo: vec![].into(), commit_outs: expected_pox_addrs.to_vec(), burn_fee: last_commit.burn_fee, input: (last_commit.txid.clone(), expected_input_index), @@ -555,7 +555,7 @@ pub mod tests { parent_vtxindex: 456, key_block_ptr: 123, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 12345, input: (Txid([0; 32]), 0), @@ -588,7 +588,7 @@ pub mod tests { parent_vtxindex: 111, key_block_ptr: 122, key_vtxindex: 457, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 12345, input: (Txid([0; 32]), 0), @@ -624,7 +624,7 @@ pub mod tests { parent_vtxindex: 111, key_block_ptr: 121, key_vtxindex: 10, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 23456, input: (Txid([0; 32]), 0), @@ -908,7 +908,7 @@ echo ] parent_vtxindex: 456, key_block_ptr: 123, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 2, input: (Txid([0; 32]), 0), @@ -940,7 +940,7 @@ echo ] parent_vtxindex: 111, key_block_ptr: 122, key_vtxindex: 457, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 3, input: (Txid([0; 32]), 0), @@ -975,7 +975,7 @@ echo ] parent_vtxindex: 111, key_block_ptr: 121, key_vtxindex: 10, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 5, input: (Txid([0; 32]), 0), @@ -1010,7 +1010,7 @@ echo ] parent_vtxindex: 456, key_block_ptr: 123, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 2, input: (Txid([0; 32]), 0), @@ -1037,7 +1037,7 @@ echo ] parent_vtxindex: 444, key_block_ptr: 123, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 3, input: (Txid([0; 32]), 0), @@ -1064,7 +1064,7 @@ echo ] parent_vtxindex: 445, key_block_ptr: 123, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 10, input: (Txid([0; 32]), 0), @@ -1091,7 +1091,7 @@ echo ] parent_vtxindex: 445, key_block_ptr: 123, key_vtxindex: 456, - memo: vec![0x80], + memo: vec![0x80].into(), burn_fee: 10, input: (Txid([0; 32]), 0), diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 099e24b7713..8c98140b6d9 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -35,6 +35,7 @@ use stacks_common::codec::{ use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockId}; use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::MempoolCollectionBehavior; +use stacks_common::util::db::SqlEncoded; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; use stacks_common::util::retry::{BoundReader, RetryReader}; @@ -958,7 +959,7 @@ impl<'a> MemPoolTx<'a> { bloom_counter.remove_raw(dbtx, &txid.0)?; 
let sql = "INSERT OR REPLACE INTO removed_txids (txid) VALUES (?1)"; - let args = params![txid]; + let args = params![txid.sqlhex()]; dbtx.execute(sql, args).map_err(db_error::SqliteError)?; } // help the type inference out @@ -1020,7 +1021,7 @@ impl<'a> MemPoolTx<'a> { bloom_counter.remove_raw(dbtx, &evict_txid.0)?; let sql = "INSERT OR REPLACE INTO removed_txids (txid) VALUES (?1)"; - let args = params![evict_txid]; + let args = params![evict_txid.sqlhex()]; dbtx.execute(sql, args).map_err(db_error::SqliteError)?; Some(evict_txid) @@ -1050,7 +1051,7 @@ impl<'a> MemPoolTx<'a> { let hashed_txid = Txid(Sha512Trunc256Sum::from_data(&randomized_buff).0); let sql = "INSERT OR REPLACE INTO randomized_txids (txid,hashed_txid) VALUES (?1,?2)"; - let args = params![txid, hashed_txid]; + let args = params![txid.sqlhex(), hashed_txid.sqlhex()]; self.execute(sql, args).map_err(db_error::SqliteError)?; @@ -1522,7 +1523,7 @@ impl MemPoolDB { sql_tx.execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - params![fee_rate_f64, txid], + params![fee_rate_f64, txid.sqlhex()], )?; updated += 1; } @@ -1987,12 +1988,20 @@ impl MemPoolDB { } pub fn db_has_tx(conn: &DBConn, txid: &Txid) -> Result { - query_row(conn, "SELECT 1 FROM mempool WHERE txid = ?1", params![txid]) - .map(|row_opt: Option| row_opt.is_some()) + query_row( + conn, + "SELECT 1 FROM mempool WHERE txid = ?1", + params![txid.sqlhex()], + ) + .map(|row_opt: Option| row_opt.is_some()) } pub fn get_tx(conn: &DBConn, txid: &Txid) -> Result, db_error> { - query_row(conn, "SELECT * FROM mempool WHERE txid = ?1", params![txid]) + query_row( + conn, + "SELECT * FROM mempool WHERE txid = ?1", + params![txid.sqlhex()], + ) } /// Get all transactions across all tips @@ -2011,7 +2020,7 @@ impl MemPoolDB { block_header_hash: &BlockHeaderHash, ) -> Result { let sql = "SELECT * FROM mempool WHERE consensus_hash = ?1 AND block_header_hash = ?2"; - let args = params![consensus_hash, block_header_hash]; + let args = params![consensus_hash.sqlhex(), block_header_hash.sqlhex()]; let rows = query_rows::(conn, sql, args)?; Ok(rows.len()) } @@ -2028,8 +2037,8 @@ impl MemPoolDB { let sql = "SELECT * FROM mempool WHERE accept_time >= ?1 AND consensus_hash = ?2 AND block_header_hash = ?3 ORDER BY tx_fee DESC LIMIT ?4"; let args = params![ u64_to_sql(timestamp)?, - consensus_hash, - block_header_hash, + consensus_hash.sqlhex(), + block_header_hash.sqlhex(), u64_to_sql(count)?, ]; let rows = query_rows::(conn, sql, args)?; @@ -2228,15 +2237,15 @@ impl MemPoolDB { VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)"; let args = params![ - txid, + txid.sqlhex(), origin_address.to_string(), u64_to_sql(origin_nonce)?, sponsor_address.to_string(), u64_to_sql(sponsor_nonce)?, u64_to_sql(tx_fee)?, u64_to_sql(length)?, - consensus_hash, - block_header_hash, + consensus_hash.sqlhex(), + block_header_hash.sqlhex(), u64_to_sql(coinbase_height)?, u64_to_sql(get_epoch_time_secs())?, tx_bytes, @@ -2427,7 +2436,7 @@ impl MemPoolDB { mempool_tx .execute( "UPDATE mempool SET fee_rate = ? 
WHERE txid = ?", - params![fee_rate_estimate, txid], + params![fee_rate_estimate, txid.sqlhex()], ) .map_err(db_error::from)?; @@ -2599,7 +2608,7 @@ impl MemPoolDB { pub fn inner_blacklist_txs(tx: &DBTx<'_>, txids: &[Txid], now: u64) -> Result<(), db_error> { for txid in txids { let sql = "INSERT OR REPLACE INTO tx_blacklist (txid, arrival_time) VALUES (?1, ?2)"; - let args = params![txid, &u64_to_sql(now)?]; + let args = params![txid.sqlhex(), &u64_to_sql(now)?]; tx.execute(sql, args)?; } Ok(()) @@ -2628,7 +2637,10 @@ impl MemPoolDB { params![u64_to_sql(to_delete)?], )?; for txid in txids.into_iter() { - tx.execute("DELETE FROM tx_blacklist WHERE txid = ?1", params![txid])?; + tx.execute( + "DELETE FROM tx_blacklist WHERE txid = ?1", + params![txid.sqlhex()], + )?; } } Ok(()) @@ -2640,7 +2652,7 @@ impl MemPoolDB { txid: &Txid, ) -> Result, db_error> { let sql = "SELECT arrival_time FROM tx_blacklist WHERE txid = ?1"; - let args = params![txid]; + let args = params![txid.sqlhex()]; query_row(conn, sql, args) } @@ -2672,7 +2684,7 @@ impl MemPoolDB { fn inner_drop_txs(tx: &DBTx<'_>, txids: &[Txid]) -> Result<(), db_error> { let sql = "DELETE FROM mempool WHERE txid = ?"; for txid in txids.iter() { - tx.execute(sql, &[txid])?; + tx.execute(sql, params![txid.sqlhex()])?; } Ok(()) } @@ -2693,7 +2705,7 @@ impl MemPoolDB { for (txid, time_estimate_ms) in txs.iter() { mempool_tx .tx - .execute(sql, params![time_estimate_ms, txid])?; + .execute(sql, params![time_estimate_ms, txid.sqlhex()])?; } mempool_tx.commit()?; @@ -2826,7 +2838,7 @@ impl MemPoolDB { /// Get the hashed txid for a txid pub fn get_randomized_txid(&self, txid: &Txid) -> Result, db_error> { let sql = "SELECT hashed_txid FROM randomized_txids WHERE txid = ?1 LIMIT 1"; - let args = params![txid]; + let args = params![txid.sqlhex()]; query_row(self.conn(), sql, args) } @@ -2874,7 +2886,7 @@ impl MemPoolDB { ORDER BY randomized_txids.hashed_txid ASC LIMIT ?3"; let args = params![ - last_randomized_txid, + last_randomized_txid.sqlhex(), u64_to_sql(coinbase_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64))?, u64_to_sql(max_run)?, ]; @@ -2918,7 +2930,7 @@ impl MemPoolDB { continue; } - let tx_bytes: Vec = row.get_unwrap("tx"); + let tx_bytes: Vec = row.get("tx")?; let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]) .map_err(|_e| db_error::ParseError)?; @@ -2971,7 +2983,7 @@ pub fn try_flush_considered_txs( let db_tx = conn.transaction()?; for txid in considered_txs { - match db_tx.execute(sql, params![txid]) { + match db_tx.execute(sql, params![txid.sqlhex()]) { Ok(_) => {} Err(rusqlite::Error::SqliteFailure(err, _)) if err.code == rusqlite::ErrorCode::ConstraintViolation => diff --git a/stackslib/src/core/test_util.rs b/stackslib/src/core/test_util.rs index 43ac1357eae..675c37d9454 100644 --- a/stackslib/src/core/test_util.rs +++ b/stackslib/src/core/test_util.rs @@ -9,6 +9,7 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::tests::BurnStateDB; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; +use stacks_common::util::db::SqlEncoded; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; @@ -506,15 +507,15 @@ pub fn insert_tx_in_mempool( .txid() }; let args = rusqlite::params![ - txid, + txid.sqlhex(), origin_addr_str, origin_nonce, origin_addr_str, origin_nonce, fee, length, - consensus_hash, - block_header_hash, + consensus_hash.sqlhex(), + 
diff --git a/stackslib/src/core/test_util.rs b/stackslib/src/core/test_util.rs
index 43ac1357eae..675c37d9454 100644
--- a/stackslib/src/core/test_util.rs
+++ b/stackslib/src/core/test_util.rs
@@ -9,6 +9,7 @@ use clarity::vm::costs::ExecutionCost;
 use clarity::vm::tests::BurnStateDB;
 use clarity::vm::types::PrincipalData;
 use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value};
+use stacks_common::util::db::SqlEncoded;

 use crate::chainstate::stacks::db::StacksChainState;
 use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder};
@@ -506,15 +507,15 @@ pub fn insert_tx_in_mempool(
             .txid()
     };
     let args = rusqlite::params![
-        txid,
+        txid.sqlhex(),
         origin_addr_str,
         origin_nonce,
         origin_addr_str,
         origin_nonce,
         fee,
         length,
-        consensus_hash,
-        block_header_hash,
+        consensus_hash.sqlhex(),
+        block_header_hash.sqlhex(),
         height,
         Utc::now().timestamp(),
         tx_hex,
diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs
index 3e0a5224c30..a821a7451a4 100644
--- a/stackslib/src/core/tests/mod.rs
+++ b/stackslib/src/core/tests/mod.rs
@@ -59,6 +59,7 @@ use crate::core::mempool::{
 };
 use crate::core::test_util::{insert_tx_in_mempool, make_stacks_transfer_serialized, to_addr};
 use crate::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH};
+use crate::stacks_common::util::db::SqlEncoded;
 use crate::util_lib::bloom::test::setup_bloom_counter;
 use crate::util_lib::bloom::*;
 use crate::util_lib::db::tx_begin_immediate;
@@ -614,7 +615,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() {
                     mempool_tx
                         .execute(
                             "UPDATE mempool SET fee_rate = ? WHERE txid = ?",
-                            params![Some(123.0), txid],
+                            params![Some(123.0), txid.sqlhex()],
                         )
                         .unwrap();
                 } else {
@@ -622,7 +623,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() {
                     mempool_tx
                         .execute(
                             "UPDATE mempool SET fee_rate = ? WHERE txid = ?",
-                            params![none, txid],
+                            params![none, txid.sqlhex()],
                         )
                         .unwrap();
                 }
@@ -1159,7 +1160,7 @@ fn test_iterate_candidates_concurrent_write_lock() {
                     mempool_tx
                         .execute(
                             "UPDATE mempool SET fee_rate = ? WHERE txid = ?",
-                            params![Some(123.0), txid],
+                            params![Some(123.0), txid.sqlhex()],
                         )
                         .unwrap();
                 } else {
@@ -1167,7 +1168,7 @@ fn test_iterate_candidates_concurrent_write_lock() {
                     mempool_tx
                         .execute(
                             "UPDATE mempool SET fee_rate = ? WHERE txid = ?",
-                            params![none, txid],
+                            params![none, txid.sqlhex()],
                         )
                         .unwrap();
                 }
diff --git a/stackslib/src/monitoring/mod.rs b/stackslib/src/monitoring/mod.rs
index c2cf3e68f13..ceaff61c98e 100644
--- a/stackslib/src/monitoring/mod.rs
+++ b/stackslib/src/monitoring/mod.rs
@@ -19,8 +19,9 @@ use std::path::PathBuf;
 use std::{fmt, fs};

 use clarity::vm::costs::ExecutionCost;
-use rusqlite::{OpenFlags, OptionalExtension};
+use rusqlite::{params, OpenFlags, OptionalExtension};
 use stacks_common::types::sqlite::NO_PARAMS;
+use stacks_common::util::db::SqlEncoded;
 use stacks_common::util::uint::{Uint256, Uint512};

 use crate::burnchains::{BurnchainSigner, Txid};
@@ -217,7 +218,7 @@ fn txid_tracking_db_contains(conn: &DBConn, txid: &Txid) -> Result(&self.conn, qry, args)?;
     let mut bool_vector = vec![true; AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE as usize];
@@ -525,7 +526,7 @@ impl AtlasDB {
         let res = tx.execute(
             "INSERT OR REPLACE INTO attachments (hash, content, was_instantiated, created_at) VALUES (?, ?, 0, ?)",
             params![
-                attachment.hash(),
+                attachment.hash().sqlhex(),
                 attachment.content,
                 now,
             ],
@@ -581,11 +582,11 @@ impl AtlasDB {
         let tx = self.tx_begin()?;
         tx.execute(
             "INSERT OR REPLACE INTO attachments (hash, content, was_instantiated, created_at) VALUES (?, ?, 1, ?)",
-            params![attachment.hash(), attachment.content, now],
+            params![attachment.hash().sqlhex(), attachment.content, now],
         )?;
         tx.execute(
             "UPDATE attachment_instances SET is_available = 1 WHERE content_hash = ?1 AND status = ?2",
-            params![attachment.hash(), AttachmentInstanceStatus::Checked],
+            params![attachment.hash().sqlhex(), AttachmentInstanceStatus::Checked],
         )?;
         tx.commit()?;
         Ok(())
@@ -692,7 +693,7 @@ impl AtlasDB {
             params![
                 AttachmentInstanceStatus::Checked,
                 is_available,
-                attachment.index_block_hash,
+                attachment.index_block_hash.sqlhex(),
                 attachment.contract_id.to_string(),
                 attachment.attachment_index,
             ],
@@ -716,15 +717,15 @@ impl AtlasDB {
                 metadata, contract_id, tx_id, status)
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)",
             params![
-                attachment.content_hash,
+                attachment.content_hash.sqlhex(),
                 now,
-                attachment.index_block_hash,
+                attachment.index_block_hash.sqlhex(),
                 attachment.attachment_index,
                 u64_to_sql(attachment.stacks_block_height)?,
                 is_available,
                 attachment.metadata,
                 attachment.contract_id.to_string(),
-                attachment.tx_id,
+                attachment.tx_id.sqlhex(),
                 status
             ],
         )?;
diff --git a/stackslib/src/net/atlas/tests.rs b/stackslib/src/net/atlas/tests.rs
index 2c83c8584f1..186980ec7d7 100644
--- a/stackslib/src/net/atlas/tests.rs
+++ b/stackslib/src/net/atlas/tests.rs
@@ -21,6 +21,7 @@ use clarity::vm::types::QualifiedContractIdentifier;
 use rusqlite::params;
 use stacks_common::types::chainstate::StacksBlockId;
 use stacks_common::types::net::PeerHost;
+use stacks_common::util::db::SqlEncoded;
 use stacks_common::util::hash::Hash160;

 use super::download::{
@@ -818,15 +819,15 @@ fn schema_2_migration() {
                 metadata, contract_id, tx_id)
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
            params![
-                attachment.content_hash,
+                attachment.content_hash.sqlhex(),
                 0,
-                attachment.index_block_hash,
+                attachment.index_block_hash.sqlhex(),
                 attachment.attachment_index,
                 u64_to_sql(attachment.stacks_block_height).unwrap(),
                 true,
                 attachment.metadata,
                 attachment.contract_id.to_string(),
-                attachment.tx_id,
+                attachment.tx_id.sqlhex(),
             ],
         )
         .unwrap();
diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs
index da5129da371..041faa05306 100644
--- a/stackslib/src/net/stackerdb/db.rs
+++ b/stackslib/src/net/stackerdb/db.rs
@@ -21,6 +21,7 @@ use libstackerdb::{SlotMetadata, STACKERDB_MAX_CHUNK_SIZE};
 use rusqlite::{params, OpenFlags, OptionalExtension, Row};
 use stacks_common::types::chainstate::StacksAddress;
 use stacks_common::types::sqlite::NO_PARAMS;
+use stacks_common::util::db::SqlEncoded;
 use stacks_common::util::get_epoch_time_secs;
 use stacks_common::util::hash::Sha512Trunc256Sum;
 use stacks_common::util::secp256k1::MessageSignature;
@@ -259,8 +260,8 @@ impl StackerDBTx<'_> {
                 NO_VERSION,
                 0,
                 vec![],
-                Sha512Trunc256Sum([0u8; 32]),
-                MessageSignature::empty(),
+                Sha512Trunc256Sum([0u8; 32]).sqlhex(),
+                MessageSignature::empty().sqlhex(),
             ];
             stmt.execute(args)?;
@@ -338,8 +339,8 @@ impl StackerDBTx<'_> {
                 NO_VERSION,
                 0,
                 vec![],
-                Sha512Trunc256Sum([0u8; 32]),
-                MessageSignature::empty(),
+                Sha512Trunc256Sum([0u8; 32]).sqlhex(),
+                MessageSignature::empty().sqlhex(),
             ];
             stmt.execute(args)?;
@@ -383,8 +384,8 @@ impl StackerDBTx<'_> {
         let args = params![
             slot_desc.slot_version,
-            Sha512Trunc256Sum::from_data(chunk),
-            slot_desc.signature,
+            Sha512Trunc256Sum::from_data(chunk).sqlhex(),
+            slot_desc.signature.sqlhex(),
             chunk,
             u64_to_sql(get_epoch_time_secs())?,
             stackerdb_id,
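In the stackerdb rows, both the chunk's data hash and its signature columns now store `sqlhex()` text next to the raw chunk blob. A sketch of that row shape with placeholder helpers (the real types are `Sha512Trunc256Sum` and `MessageSignature`; the 32- and 65-byte sizes are assumptions about those types, not taken from these hunks):

use rusqlite::{params, Connection};

fn hex(bytes: &[u8]) -> String {
    bytes.iter().map(|b| format!("{b:02x}")).collect()
}

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute(
        "CREATE TABLE chunks (slot_id INTEGER, version INTEGER, data_hash TEXT, signature TEXT, data BLOB)",
        [],
    )?;
    let chunk: &[u8] = b"hello stackerdb";
    // Placeholder digest; the diff uses Sha512Trunc256Sum::from_data(chunk).sqlhex().
    let data_hash = hex(&[0u8; 32]);
    let signature = hex(&[0u8; 65]); // assumed recoverable-signature width
    conn.execute(
        "INSERT INTO chunks (slot_id, version, data_hash, signature, data) VALUES (?, ?, ?, ?, ?)",
        params![0, 1, data_hash, signature, chunk],
    )?;
    Ok(())
}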
diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs
index bb396e0e97d..2004ab657de 100644
--- a/stackslib/src/util_lib/db.rs
+++ b/stackslib/src/util_lib/db.rs
@@ -26,6 +26,7 @@ use rusqlite::{
     Transaction, TransactionBehavior,
 };
 use serde_json::Error as serde_error;
+use stacks_common::codec::Error as CodecError;
 use stacks_common::types::chainstate::{SortitionId, StacksAddress, StacksBlockId, TrieHash};
 use stacks_common::types::sqlite::NO_PARAMS;
 use stacks_common::types::Address;
@@ -80,6 +81,8 @@ pub enum Error {
     OldSchema(u64),
     /// Database is too old for epoch
     TooOldForEpoch,
+    /// Encoding error
+    CodecError(CodecError),
     /// Other error
     Other(String),
 }
@@ -105,6 +108,9 @@ impl fmt::Display for Error {
             Error::TooOldForEpoch => {
                 write!(f, "Database is not compatible with current system epoch")
             }
+            Error::CodecError(ref e) => {
+                write!(f, "Codec error: {e:?}")
+            }
             Error::Other(ref s) => fmt::Display::fmt(s, f),
         }
     }
@@ -129,6 +135,7 @@ impl error::Error for Error {
             Error::IndexError(ref e) => Some(e),
             Error::OldSchema(ref _s) => None,
             Error::TooOldForEpoch => None,
+            Error::CodecError(ref e) => Some(e),
             Error::Other(ref _s) => None,
         }
     }
@@ -155,6 +162,12 @@ impl From for Error {
     }
 }

+impl From<CodecError> for Error {
+    fn from(e: CodecError) -> Self {
+        Self::CodecError(e)
+    }
+}
+
 pub trait FromRow<T> {
     fn from_row(row: &Row) -> Result<T, Error>;
 }
@@ -309,39 +322,6 @@ macro_rules! impl_byte_array_from_column_only {
 impl_byte_array_from_column_only!(SortitionId);
 impl_byte_array_from_column_only!(StacksBlockId);

-macro_rules! impl_byte_array_from_column {
-    ($thing:ident) => {
-        impl rusqlite::types::FromSql for $thing {
-            fn column_result(
-                value: rusqlite::types::ValueRef,
-            ) -> rusqlite::types::FromSqlResult<Self> {
-                let hex_str = value.as_str()?;
-                let byte_str = stacks_common::util::hash::hex_bytes(hex_str)
-                    .map_err(|_e| rusqlite::types::FromSqlError::InvalidType)?;
-                let inst = $thing::from_bytes(&byte_str)
-                    .ok_or(rusqlite::types::FromSqlError::InvalidType)?;
-                Ok(inst)
-            }
-        }
-
-        impl crate::util_lib::db::FromColumn<$thing> for $thing {
-            fn from_column(
-                row: &rusqlite::Row,
-                column_name: &str,
-            ) -> Result<$thing, crate::util_lib::db::Error> {
-                Ok(row.get::<_, Self>(column_name)?)
-            }
-        }
-
-        impl rusqlite::types::ToSql for $thing {
-            fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
-                let hex_str = self.to_hex();
-                Ok(hex_str.into())
-            }
-        }
-    };
-}
-
 /// Load the path of the database from the connection
 #[cfg(test)]
 fn get_db_path(conn: &Connection) -> Result<String, Error> {