Commit 8cb3e0a

chore: address PR feedback
1 parent b831b1b commit 8cb3e0a

File tree

5 files changed: +26 -120 lines changed


stackslib/src/net/relay.rs

Lines changed: 11 additions & 23 deletions
@@ -1725,30 +1725,25 @@ impl Relayer {
             if let Some(config) = stackerdb_configs.get(sc) {
                 let tx = stackerdbs.tx_begin(config.clone())?;
                 for sync_result in sync_results {
-                    debug!(
-                        "Will store {} chunks for {}",
-                        &sync_result.chunks_to_store.len(),
-                        sc
-                    );
                     for chunk in sync_result.chunks_to_store.iter() {
                         let md = chunk.get_slot_metadata();
                         if let Err(e) = tx.try_replace_chunk(sc, &md, &chunk.data) {
                             warn!(
-                                "Failed to store chunk for slot {}.{} ({} bytes) for {}: {:?}",
-                                &md.slot_id,
-                                md.slot_version,
-                                chunk.data.len(),
-                                sc,
-                                &e
+                                "Failed to store chunk for StackerDB";
+                                "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id),
+                                "slot_id" => md.slot_id,
+                                "slot_version" => md.slot_version,
+                                "num_bytes" => chunk.data.len(),
+                                "error" => %e
                             );
                         } else {
-                            debug!("Stored chunk {}/{}.{}", sc, md.slot_id, md.slot_version);
+                            debug!("Stored chunk"; "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id), "slot_id" => md.slot_id, "slot_version" => md.slot_version);
                         }
                     }
                 }
                 tx.commit()?;
             } else {
-                info!("Got chunks for unconfigured smart contract {}", sc);
+                info!("Got chunks for unconfigured StackerDB replica"; "stackerdb_contract_id" => &format!("{}", &sc));
             }
         }
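The rewritten warn!/debug!/info! calls above switch from positional format strings to the structured "key" => value form. Below is a minimal sketch of that call shape, assuming the slog and slog-term crates as the logging backend (the project's own macros accept the same syntax, as the hunk shows, but their setup differs); the % sigil formats a value with Display, matching the "error" => %e usage above.

use slog::{info, o, Drain};

fn main() {
    // Plain synchronous terminal logger -- just enough to show the call shape.
    let decorator = slog_term::PlainSyncDecorator::new(std::io::stdout());
    let logger = slog::Logger::root(slog_term::FullFormat::new(decorator).build().fuse(), o!());

    let slot_id = 3u32;
    let slot_version = 7u32;
    let err = "no such slot";

    // Key/value pairs after the `;` become structured fields instead of being
    // interpolated into the message text.
    info!(logger, "Stored chunk"; "slot_id" => slot_id, "slot_version" => slot_version);
    info!(logger, "Failed to store chunk"; "error" => %err);
}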

@@ -1764,23 +1759,16 @@ impl Relayer {
     ) -> Result<(), Error> {
         // synthesize StackerDBSyncResults from each chunk
         let mut sync_results = vec![];
-        let nks: Vec<NeighborKey> = unhandled_messages.keys().map(|nk| nk.clone()).collect();
-        for nk in nks.into_iter() {
-            let mut msgs = unhandled_messages
-                .remove(&nk)
-                .expect("BUG: hashmap key not mapped");
-
+        for (_nk, msgs) in unhandled_messages.iter_mut() {
             msgs.retain(|msg| {
                 if let StacksMessageType::StackerDBPushChunk(data) = &msg.payload {
                     let sync_result = StackerDBSyncResult::from_pushed_chunk(data.clone());
                     sync_results.push(sync_result);
-                    true
-                } else {
                     false
+                } else {
+                    true
                 }
             });
-
-            unhandled_messages.insert(nk.clone(), msgs);
         }
 
         Relayer::process_stacker_db_chunks(stackerdbs, stackerdb_configs, &sync_results)
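The rewritten loop above drains pushed StackerDB chunks out of the per-neighbor message queues with iter_mut() plus retain(), instead of removing and re-inserting each map entry. A std-only sketch of the same pattern, with toy types and hypothetical names:

use std::collections::HashMap;

#[derive(Clone, Debug)]
enum Msg {
    PushChunk(String),
    Other(u32),
}

fn main() {
    let mut unhandled: HashMap<&'static str, Vec<Msg>> = HashMap::new();
    unhandled.insert("peer-1", vec![Msg::PushChunk("abc".into()), Msg::Other(1)]);
    unhandled.insert("peer-2", vec![Msg::Other(2)]);

    let mut extracted = vec![];
    for (_peer, msgs) in unhandled.iter_mut() {
        msgs.retain(|msg| {
            if let Msg::PushChunk(data) = msg {
                extracted.push(data.clone());
                false // consumed: drop it from the per-peer queue
            } else {
                true // keep everything else for later handling
            }
        });
    }

    println!("extracted: {:?}", extracted);
    println!("remaining: {:?}", unhandled);
}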

stackslib/src/net/stackerdb/db.rs

Lines changed: 0 additions & 5 deletions
@@ -359,11 +359,6 @@ impl<'a> StackerDBTx<'a> {
                 slot_validation.version,
             ));
         }
-        if slot_validation.write_time + self.config.write_freq >= get_epoch_time_secs() {
-            return Err(net_error::TooFrequentSlotWrites(
-                slot_validation.write_time + self.config.write_freq,
-            ));
-        }
         self.insert_chunk(smart_contract, slot_desc, chunk)
     }
 }

stackslib/src/net/stackerdb/mod.rs

Lines changed: 0 additions & 28 deletions
@@ -458,34 +458,6 @@ impl PeerNetwork {
             return Ok(false);
         }
 
-        // validate -- must not be too bursty
-        let slot_validation = match self
-            .stackerdbs
-            .get_slot_validation(&chunk_data.contract_id, chunk_data.chunk_data.slot_id)?
-        {
-            Some(validation) => validation,
-            None => {
-                info!(
-                    "StackerDBChunk for {} ID {} has no validation data",
-                    &chunk_data.contract_id, chunk_data.chunk_data.slot_id
-                );
-                return Ok(false);
-            }
-        };
-
-        if slot_validation.write_time + stackerdb_config.write_freq >= get_epoch_time_secs()
-        {
-            info!(
-                "Write frequency exceeded for StackerDBChunk for {} ID {} version {} (expect writes after {})",
-                &chunk_data.contract_id,
-                chunk_data.chunk_data.slot_id,
-                chunk_data.chunk_data.slot_version,
-                slot_validation.write_time + stackerdb_config.write_freq
-            );
-            return Ok(false);
-        }
-
-        // great, we can accept this.
         // patch inventory -- we'll accept this chunk
         data.slot_versions[chunk_data.chunk_data.slot_id as usize] =
             chunk_data.chunk_data.slot_version;

stackslib/src/net/stackerdb/sync.rs

Lines changed: 15 additions & 54 deletions
@@ -82,7 +82,8 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
         Ok(dbsync)
     }
 
-    /// Reset this state machine, and get the final result
+    /// Reset this state machine, and get the StackerDBSyncResult with newly-obtained chunk data
+    /// and newly-learned information about broken and dead peers.
     pub fn reset(
         &mut self,
         network: Option<&PeerNetwork>,
@@ -155,6 +156,8 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
     /// Given the downloaded set of chunk inventories, identify:
     /// * which chunks we need to fetch, because they're newer than ours.
     /// * what order to fetch chunks in, in rarest-first order
+    /// Returns a list of (chunk requests, list of neighbors that can service them), which is
+    /// ordered from rarest chunk to most-common chunk.
     pub fn make_chunk_request_schedule(
         &self,
         network: &PeerNetwork,
@@ -246,6 +249,8 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
             .collect();
 
         schedule.sort_by(|item_1, item_2| item_1.1.len().cmp(&item_2.1.len()));
+        schedule.reverse();
+
         test_debug!(
             "{:?}: Will request up to {} chunks for {}",
             network.get_local_peer(),
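The two lines added above order the request schedule by how widely each chunk is replicated: the sort is ascending by neighbor count, and the reverse() flips it. A toy, std-only illustration with hypothetical peers and slot IDs:

fn main() {
    // (slot_id, neighbors that can service the request)
    let mut schedule: Vec<(u32, Vec<&str>)> = vec![
        (0, vec!["peer-a", "peer-b", "peer-c"]),
        (1, vec!["peer-a"]),
        (2, vec!["peer-b", "peer-c"]),
    ];

    schedule.sort_by(|item_1, item_2| item_1.1.len().cmp(&item_2.1.len()));
    schedule.reverse();

    // Slot 0 (3 peers) now sits first and slot 1 (1 peer) sits last; whether the
    // caller walks the list from the front or pops from the back determines which
    // chunk is actually requested first.
    assert_eq!(
        schedule.iter().map(|(slot, _)| *slot).collect::<Vec<_>>(),
        vec![0, 2, 1]
    );
}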
@@ -268,7 +273,7 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
         let mut need_chunks: HashMap<usize, (StackerDBPushChunkData, Vec<NeighborAddress>)> =
             HashMap::new();
 
-        // who has data we need?
+        // who needs data we can serve?
         for (i, local_version) in local_slot_versions.iter().enumerate() {
             let mut local_chunk = None;
             for (naddr, chunk_inv) in self.chunk_invs.iter() {
@@ -301,7 +306,7 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
                 });
             }
 
-            let our_chunk = if let Some(chunk) = local_chunk.take() {
+            let our_chunk = if let Some(chunk) = local_chunk.as_ref() {
                 chunk
             } else {
                 // we don't have this chunk
@@ -316,7 +321,6 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
             };
 
             if !do_replicate {
-                local_chunk = Some(our_chunk);
                 continue;
             }
 
@@ -328,7 +332,6 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
                 // Add a record for it.
                 need_chunks.insert(i, (our_chunk.clone(), vec![naddr.clone()]));
             };
-            local_chunk = Some(our_chunk);
         }
     }
 
@@ -340,6 +343,7 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
             .collect();
 
         schedule.sort_by(|item_1, item_2| item_1.1.len().cmp(&item_2.1.len()));
+        schedule.reverse();
         test_debug!(
             "{:?}: Will push up to {} chunks for {}",
             network.get_local_peer(),
@@ -383,29 +387,8 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
             self.downloaded_chunks.insert(naddr.clone(), vec![data]);
         }
 
-        // yes, this is a linear scan. But because the number of chunks in the DB is a small O(1)
-        // enforced by the protocol, this isn't a big deal.
-        // This loop is only expected to run once, but is in place for defensive purposes.
-        loop {
-            let mut remove_idx = None;
-            for (i, (chunk, ..)) in self.chunk_fetch_priorities.iter().enumerate() {
-                if chunk.slot_id == slot_id {
-                    remove_idx = Some(i);
-                    break;
-                }
-            }
-            if let Some(remove_idx) = remove_idx {
-                test_debug!(
-                    "Downloaded chunk {}.{} from {:?}",
-                    slot_id,
-                    _slot_version,
-                    &naddr
-                );
-                self.chunk_fetch_priorities.remove(remove_idx);
-            } else {
-                break;
-            }
-        }
+        self.chunk_fetch_priorities.retain(|(chunk, ..)| chunk.slot_id != slot_id);
+
         if self.chunk_fetch_priorities.len() > 0 {
             let next_chunk_fetch_priority =
                 self.next_chunk_fetch_priority % self.chunk_fetch_priorities.len();
@@ -422,33 +405,11 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
         naddr: NeighborAddress,
         new_inv: StackerDBChunkInvData,
         slot_id: u32,
-        slot_version: u32,
     ) {
         self.chunk_invs.insert(naddr.clone(), new_inv);
 
-        // yes, this is a linear scan. But because the number of chunks in the DB is a small O(1)
-        // enforced by the protocol, this isn't a big deal.
-        // This loop is only expected to run once, but is in place for defensive purposes.
-        loop {
-            let mut remove_idx = None;
-            for (i, (chunk, ..)) in self.chunk_push_priorities.iter().enumerate() {
-                if chunk.chunk_data.slot_id == slot_id {
-                    remove_idx = Some(i);
-                    break;
-                }
-            }
-            if let Some(remove_idx) = remove_idx {
-                test_debug!(
-                    "Pushed chunk {}.{} from {:?}",
-                    slot_id,
-                    slot_version,
-                    &naddr
-                );
-                self.chunk_push_priorities.remove(remove_idx);
-            } else {
-                break;
-            }
-        }
+        self.chunk_push_priorities.retain(|(chunk, ..)| chunk.chunk_data.slot_id != slot_id);
+
         if self.chunk_push_priorities.len() > 0 {
             let next_chunk_push_priority =
                 self.next_chunk_push_priority % self.chunk_push_priorities.len();
@@ -996,8 +957,8 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
                 &naddr
             );
 
-            if let Some((slot_id, slot_version)) = self.chunk_push_receipts.get(&naddr) {
-                self.add_pushed_chunk(naddr, new_chunk_inv, *slot_id, *slot_version);
+            if let Some((slot_id, _)) = self.chunk_push_receipts.get(&naddr) {
+                self.add_pushed_chunk(naddr, new_chunk_inv, *slot_id);
             }
         }
 
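The two linear-scan loops removed earlier in this file collapse into single Vec::retain() calls that drop every entry whose slot_id matches. A std-only sketch with toy data:

fn main() {
    // (slot_id, neighbors associated with the queued chunk)
    let mut chunk_fetch_priorities: Vec<(u32, Vec<&str>)> = vec![
        (0, vec!["peer-a"]),
        (3, vec!["peer-b", "peer-c"]),
        (3, vec!["peer-d"]),
        (7, vec!["peer-a"]),
    ];

    let slot_id = 3u32;
    // One pass: removes every matching entry (not just the first) and preserves
    // the relative order of the rest.
    chunk_fetch_priorities.retain(|(id, ..)| *id != slot_id);

    assert_eq!(
        chunk_fetch_priorities,
        vec![(0, vec!["peer-a"]), (7, vec!["peer-a"])]
    );
}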

stackslib/src/net/stackerdb/tests/db.rs

Lines changed: 0 additions & 10 deletions
@@ -513,16 +513,6 @@ fn test_stackerdb_insert_query_chunks() {
             );
             panic!("Did not get BadSlotSigner");
         }
-
-        // should fail -- throttled
-        chunk_data.sign(&pk).unwrap();
-        if let Err(net_error::TooFrequentSlotWrites(..)) =
-            tx.try_replace_chunk(&sc, &chunk_data.get_slot_metadata(), &chunk_data.data)
-        {
-            chunk_data.slot_version -= 1;
-        } else {
-            panic!("Did not get TooFrequentSlotWrites");
-        }
     }
 
     tx.commit().unwrap();

0 commit comments
