-
Notifications
You must be signed in to change notification settings - Fork 172
Expand file tree
/
Copy pathcall_tests.rs
More file actions
126 lines (118 loc) · 5.62 KB
/
call_tests.rs
File metadata and controls
126 lines (118 loc) · 5.62 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
use sov_modules_api::capabilities::UniquenessData;
use sov_modules_api::macros::config_value;
use sov_modules_api::{CredentialId, TxEffect};
use sov_test_utils::{BatchType, SlotInput, TransactionTestCase, TxProcessingError};
use sov_uniqueness::Uniqueness;
use crate::runtime::S;
use crate::utils::{generate_value_setter_tx, setup};
/// Verifies that the `MAX_STORED_TX_HASHES_PER_CREDENTIAL` limit is respected and that
/// authentication succeeds as long as that limit has not been exceeded.
#[test]
fn test_max_stored_tx_hashes_per_credential_lite() {
    // Confirms that the "MAX_STORED_TX_HASHES_PER_CREDENTIAL" constant is honored and
    // behaves as expected. It cannot, however, detect whether the constants.toml
    // configuration makes it possible to exceed `MAX_SEQUENCER_EXEC_GAS_PER_TX` before
    // hitting `MAX_STORED_TX_HASHES_PER_CREDENTIAL`, which would put the account into
    // an unrecoverable state.
    let (key, value) = (
        "SOV_TEST_CONST_OVERRIDE_MAX_STORED_TX_HASHES_PER_CREDENTIAL",
        "11",
    );
    // Shrink the limit via env override so the "lite" run stays fast.
    std::env::set_var(key, value);
    do_max_stored_tx_hashes_per_credential_test()
}
/// Checks that an account hits `MAX_STORED_TX_HASHES_PER_CREDENTIAL` before maxing
/// out `MAX_SEQUENCER_EXEC_GAS_PER_TX` for the uniqueness check. This ensures that accounts
/// cannot be put into an unrecoverable state by failing to increment their generation number
/// in a timely manner under the real constants.toml configuration.
///
/// As a side effect, this test also checks that the `MAX_STORED_TX_HASHES_PER_CREDENTIAL`
/// limit behaves as expected.
#[test]
#[ignore = "This test is long-running, but can detect issues in constants.toml. Run before deploying."]
fn test_max_stored_tx_hashes_per_credential() {
    // Same scenario as the lite test, but with the real (unoverridden) constants.
    do_max_stored_tx_hashes_per_credential_test()
}
/// This function generates a number of transactions that will fill up the "bucket" of stored transaction hashes
/// for a given account. Then it tries to send one more transaction with a current generation number and verifies that
/// the tx is rejected because the bucket is full. Finally, it updates the generation number and verifies that the
/// next transaction is accepted.
fn do_max_stored_tx_hashes_per_credential_test() {
    let (admin, mut runner, _) = setup();
    let admin_credential_id: CredentialId = admin.credential_id();
    // Sanity check: a fresh account has never bumped its generation, so the
    // uniqueness module should report 0 as the next generation.
    runner.query_visible_state(|state| {
        assert_eq!(
            Uniqueness::<S>::default()
                .next_generation(&admin_credential_id, state)
                .unwrap(),
            0,
            "The next generation for a new account should start at 0"
        );
    });
    // Read the limits from the (possibly env-overridden) constants configuration.
    let max_stored_tx_hashes_per_credential = config_value!("MAX_STORED_TX_HASHES_PER_CREDENTIAL");
    let num_generations = config_value!("PAST_TRANSACTION_GENERATIONS");
    // Spread the stored-hash budget evenly across the tracked generations; the
    // remainder is handled separately below.
    let txs_per_generation = max_stored_tx_hashes_per_credential / num_generations;
    let extra_txs_in_first_generation = max_stored_tx_hashes_per_credential % num_generations;
    let mut txs = vec![];
    // Generate txs to fill up our "bucket" of stored transaction hashes.
    // NOTE(review): the inner loop varies the generation while reusing `i` as the
    // second argument for every generation — presumably uniqueness is keyed per
    // (generation, tx) so repeating `i` across generations is still unique; confirm
    // against `generate_value_setter_tx`.
    for i in 0..txs_per_generation {
        for generation in 0..num_generations {
            txs.push(generate_value_setter_tx(
                UniquenessData::Generation(generation),
                i as u32,
                &admin,
            ));
        }
    }
    // We divided txs evenly across generations - if there was a remainder, account for it by putting the
    // extra txs in the first bucket.
    for i in 0..extra_txs_in_first_generation {
        txs.push(generate_value_setter_tx(
            UniquenessData::Generation(0),
            // Offset past the values already used for generation 0 in the loop above.
            (i + txs_per_generation) as u32,
            &admin,
        ))
    }
    // Execute all the txs in one batch. This is much faster than executing them one by one.
    let batch = SlotInput::Batch(BatchType::from(txs));
    let (slot, _) = runner.execute(batch);
    // One receipt per generated tx: the bucket should now be exactly full.
    assert_eq!(
        slot.batch_receipts[0].tx_receipts.len(),
        max_stored_tx_hashes_per_credential as usize
    );
    for (i, tx_receipt) in slot.batch_receipts[0].tx_receipts.iter().enumerate() {
        assert!(
            tx_receipt.receipt.is_successful(),
            "Transaction {i} should be successful but failed"
        );
    }
    // Send one more transaction with a current generation number.
    // This transaction should be skipped because it would cause the bucket to overflow.
    runner.execute_transaction(TransactionTestCase {
        // u32::MAX guarantees this value collides with none of the txs generated above.
        input: generate_value_setter_tx(UniquenessData::Generation(0), u32::MAX, &admin),
        assert: Box::new(move |ctx, _| {
            let TxEffect::Skipped(skipped) = ctx.tx_receipt else {
                panic!("Transaction should be skipped");
            };
            // The skip reason must specifically be the uniqueness bucket overflowing,
            // not some unrelated processing error.
            match skipped.error {
                TxProcessingError::CheckUniquenessFailed(reason) => {
                    assert!(reason.contains("Too many transactions for credential_id"));
                }
                _ => {
                    panic!("Transaction should be rejected because it's not unique");
                }
            }
        }),
    });
    // Increment the generation number. Now the transaction should be accepted because it won't cause the bucket to overflow.
    // Note that we need to add 1 to the number of generations because we have a strict inequality comparison for buckets.
    runner.execute_transaction(TransactionTestCase {
        input: generate_value_setter_tx(
            // NOTE(review): generations 0..num_generations were used above, so
            // `num_generations + 1` is strictly beyond all of them — see the strict
            // inequality noted in the comment above.
            UniquenessData::Generation(num_generations + 1),
            txs_per_generation as u32,
            &admin,
        ),
        assert: Box::new(move |ctx, _| {
            assert!(
                ctx.tx_receipt.is_successful(),
                "Transaction should be successful"
            );
        }),
    });
}