Skip to content

Commit ff57dc6

Browse files
authored
Merge pull request #5176 from stacks-network/release/2.5.0.0.7
merge 2.5.0.0.7 release branch to develop
2 parents 75d7e27 + 0029861 commit ff57dc6

File tree

4 files changed

+136
-1
lines changed

4 files changed

+136
-1
lines changed

CHANGELOG.md

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,28 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE
1717
- `get-tenure-info?` added
1818
- `get-block-info?` removed
1919

20+
## [2.5.0.0.7]
21+
22+
### Added
23+
24+
- Add warn logs for block validate rejections (#5079)
25+
- Neon mock miner replay (#5060)
26+
27+
### Changed
28+
29+
- Revert BurnchainHeaderHash serialization change (#5094)
30+
- boot_to_epoch_3 in SignerTest should wait for a new commit (#5087)
31+
- Fix block proposal rejection test (#5084)
32+
- Mock signing revamp (#5070)
33+
- Multi-miner fixes (#5040)
34+
- Remove spurious deadlock condition whenever the sortition DB is opened
35+
36+
## [2.5.0.0.6]
37+
38+
### Changed
39+
40+
- If there is a getchunk/putchunk that fails due to a stale (or future) version NACK, the StackerDB sync state machine should immediately retry sync (#5066)
41+
2042
## [2.5.0.0.5]
2143

2244
### Added

stackslib/src/net/stackerdb/db.rs

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -293,6 +293,15 @@ impl<'a> StackerDBTx<'a> {
293293
Ok(())
294294
}
295295

296+
/// Shrink a StackerDB. Remove all slots at and beyond a particular slot ID.
297+
fn shrink_stackerdb(&self, stackerdb_id: i64, first_slot_id: u32) -> Result<(), net_error> {
298+
let qry = "DELETE FROM chunks WHERE stackerdb_id = ?1 AND slot_id >= ?2";
299+
let args = params![&stackerdb_id, &first_slot_id];
300+
let mut stmt = self.sql_tx.prepare(&qry)?;
301+
stmt.execute(args)?;
302+
Ok(())
303+
}
304+
296305
/// Update a database's storage slots, e.g. from new configuration state in its smart contract.
297306
/// Chunk data for slots that no longer exist will be dropped.
298307
/// Newly-created slots will be instantiated with empty data.
@@ -343,6 +352,8 @@ impl<'a> StackerDBTx<'a> {
343352
stmt.execute(args)?;
344353
}
345354
}
355+
debug!("Shrink {} to {} slots", smart_contract, total_slots_read);
356+
self.shrink_stackerdb(stackerdb_id, total_slots_read)?;
346357
Ok(())
347358
}
348359

stackslib/src/net/stackerdb/mod.rs

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -341,8 +341,14 @@ impl StackerDBs {
341341
&e
342342
);
343343
}
344-
} else if new_config != stackerdb_config && new_config.signers.len() > 0 {
344+
} else if (new_config != stackerdb_config && new_config.signers.len() > 0)
345+
|| (new_config == stackerdb_config
346+
&& new_config.signers.len()
347+
!= self.get_slot_versions(&stackerdb_contract_id)?.len())
348+
{
345349
// only reconfigure if the config has changed
350+
// (that second check on the length is needed in case the node is a victim of
351+
// #5142, which was a bug whereby a stackerdb could never shrink)
346352
if let Err(e) = self.reconfigure_stackerdb(&stackerdb_contract_id, &new_config) {
347353
warn!(
348354
"Failed to create or reconfigure StackerDB {stackerdb_contract_id}: DB error {:?}",

stackslib/src/net/stackerdb/tests/db.rs

Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ use std::path::Path;
2020
use clarity::vm::types::QualifiedContractIdentifier;
2121
use clarity::vm::ContractName;
2222
use libstackerdb::SlotMetadata;
23+
use rusqlite::params;
2324
use stacks_common::address::{
2425
AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG,
2526
};
@@ -649,6 +650,16 @@ fn test_reconfigure_stackerdb() {
649650
initial_metadata.push((slot_metadata, chunk_data));
650651
}
651652

653+
tx.commit().unwrap();
654+
655+
let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap();
656+
assert_eq!(db_slot_metadata.len(), pks.len());
657+
for (i, slot_md) in db_slot_metadata.iter().enumerate() {
658+
let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap();
659+
assert_eq!(slot_metadata, *slot_md);
660+
}
661+
662+
let tx = db.tx_begin(StackerDBConfig::noop()).unwrap();
652663
let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect();
653664
let reconfigured_pks = vec![
654665
// first five slots are unchanged
@@ -722,6 +733,91 @@ fn test_reconfigure_stackerdb() {
722733
assert_eq!(chunk.len(), 0);
723734
}
724735
}
736+
737+
let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap();
738+
assert_eq!(db_slot_metadata.len(), reconfigured_pks.len());
739+
for (i, slot_md) in db_slot_metadata.iter().enumerate() {
740+
let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap();
741+
assert_eq!(slot_metadata, *slot_md);
742+
}
743+
744+
// reconfigure with fewer slots
745+
let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect();
746+
let reconfigured_pks = vec![
747+
// first five slots are unchanged
748+
pks[0], pks[1], pks[2], pks[3], pks[4],
749+
// next five slots are different, so their contents will be dropped and versions and write
750+
// timestamps reset
751+
new_pks[0], new_pks[1], new_pks[2], new_pks[3],
752+
new_pks[4],
753+
// slots 10-15 will disappear
754+
];
755+
let reconfigured_addrs: Vec<_> = reconfigured_pks
756+
.iter()
757+
.map(|pk| {
758+
StacksAddress::from_public_keys(
759+
C32_ADDRESS_VERSION_MAINNET_SINGLESIG,
760+
&AddressHashMode::SerializeP2PKH,
761+
1,
762+
&vec![StacksPublicKey::from_private(&pk)],
763+
)
764+
.unwrap()
765+
})
766+
.collect();
767+
768+
let tx = db.tx_begin(StackerDBConfig::noop()).unwrap();
769+
770+
// reconfigure
771+
tx.reconfigure_stackerdb(
772+
&sc,
773+
&reconfigured_addrs
774+
.clone()
775+
.into_iter()
776+
.map(|addr| (addr, 1))
777+
.collect::<Vec<_>>(),
778+
)
779+
.unwrap();
780+
781+
tx.commit().unwrap();
782+
783+
for (i, pk) in new_pks.iter().enumerate() {
784+
if i < 5 {
785+
// first five are unchanged
786+
let chunk_data = StackerDBChunkData {
787+
slot_id: i as u32,
788+
slot_version: 1,
789+
sig: MessageSignature::empty(),
790+
data: vec![i as u8; 128],
791+
};
792+
793+
let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap();
794+
let chunk = db.get_latest_chunk(&sc, i as u32).unwrap().unwrap();
795+
796+
assert_eq!(initial_metadata[i].0, slot_metadata);
797+
assert_eq!(initial_metadata[i].1.data, chunk);
798+
} else if i < 10 {
799+
// next five are wiped
800+
let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap();
801+
assert_eq!(slot_metadata.slot_id, i as u32);
802+
assert_eq!(slot_metadata.slot_version, 0);
803+
assert_eq!(slot_metadata.data_hash, Sha512Trunc256Sum([0x00; 32]));
804+
assert_eq!(slot_metadata.signature, MessageSignature::empty());
805+
806+
let chunk = db.get_latest_chunk(&sc, i as u32).unwrap().unwrap();
807+
assert_eq!(chunk.len(), 0);
808+
} else {
809+
// final five are gone
810+
let slot_metadata_opt = db.get_slot_metadata(&sc, i as u32).unwrap();
811+
assert!(slot_metadata_opt.is_none());
812+
}
813+
}
814+
815+
let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap();
816+
assert_eq!(db_slot_metadata.len(), reconfigured_pks.len());
817+
for (i, slot_md) in db_slot_metadata.iter().enumerate() {
818+
let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap();
819+
assert_eq!(slot_metadata, *slot_md);
820+
}
725821
}
726822

727823
// TODO: max chunk size

0 commit comments

Comments
 (0)