|
16 | 16 | // You should have received a copy of the GNU General Public License |
17 | 17 | // along with this program. If not, see <https://www.gnu.org/licenses/>. |
18 | 18 |
|
19 | | -// Testsuite of fatp integration tests. |
| 19 | +//! Testsuite of transaction pool integration tests. |
| 20 | +
|
20 | 21 | pub mod zombienet; |
21 | 22 |
|
| 23 | +use std::time::Duration; |
| 24 | + |
22 | 25 | use crate::zombienet::{ |
23 | | - default_zn_scenario_builder, |
24 | | - relaychain_rococo_local_network_spec::{ |
25 | | - parachain_asset_hub_network_spec::HIGH_POOL_LIMIT_FATP as PARACHAIN_HIGH_POOL_LIMIT_FATP, |
26 | | - HIGH_POOL_LIMIT_FATP as RELAYCHAIN_HIGH_POOL_LIMIT_FATP, |
27 | | - }, |
28 | | - NetworkSpawner, |
| 26 | + default_zn_scenario_builder, relaychain_rococo_local_network_spec as relay, |
| 27 | + relaychain_rococo_local_network_spec::parachain_asset_hub_network_spec as para, NetworkSpawner, |
29 | 28 | }; |
30 | | -use txtesttool::execution_log::ExecutionLog; |
| 29 | +use futures::future::join_all; |
| 30 | +use tracing::info; |
| 31 | +use txtesttool::{execution_log::ExecutionLog, scenario::ScenarioExecutor}; |
31 | 32 | use zombienet::DEFAULT_SEND_FUTURE_AND_READY_TXS_TESTS_TIMEOUT_IN_SECS; |
32 | 33 |
|
33 | 34 | // Test which sends future and ready txs from many accounts |
34 | 35 | // to an unlimited pool of a parachain collator based on the asset-hub-rococo runtime. |
35 | 36 | #[tokio::test(flavor = "multi_thread")] |
36 | 37 | #[ignore] |
37 | 38 | async fn send_future_and_ready_from_many_accounts_to_parachain() { |
38 | | - let net = NetworkSpawner::from_toml_with_env_logger(PARACHAIN_HIGH_POOL_LIMIT_FATP) |
| 39 | + let net = NetworkSpawner::from_toml_with_env_logger(para::HIGH_POOL_LIMIT_FATP) |
39 | 40 | .await |
40 | 41 | .unwrap(); |
41 | 42 |
|
@@ -86,7 +87,7 @@ async fn send_future_and_ready_from_many_accounts_to_parachain() { |
86 | 87 | #[tokio::test(flavor = "multi_thread")] |
87 | 88 | #[ignore] |
88 | 89 | async fn send_future_and_ready_from_many_accounts_to_relaychain() { |
89 | | - let net = NetworkSpawner::from_toml_with_env_logger(RELAYCHAIN_HIGH_POOL_LIMIT_FATP) |
| 90 | + let net = NetworkSpawner::from_toml_with_env_logger(relay::HIGH_POOL_LIMIT_FATP) |
90 | 91 | .await |
91 | 92 | .unwrap(); |
92 | 93 |
|
@@ -133,3 +134,241 @@ async fn send_future_and_ready_from_many_accounts_to_relaychain() { |
133 | 134 | assert_eq!(finalized_future, 10_000); |
134 | 135 | assert_eq!(finalized_ready, 10_000); |
135 | 136 | } |
| 137 | + |
| 138 | +// Test which sends 5m transactions to a parachain. Long execution time expected. |
| 139 | +#[tokio::test(flavor = "multi_thread")] |
| 140 | +#[ignore] |
| 141 | +async fn send_5m_from_many_accounts_to_parachain() { |
| 142 | + let net = NetworkSpawner::from_toml_with_env_logger(para::HIGH_POOL_LIMIT_FATP) |
| 143 | + .await |
| 144 | + .unwrap(); |
| 145 | + |
| 146 | + // Wait for the parachain collator to start block production. |
| 147 | + net.wait_for_block_production("charlie").await.unwrap(); |
| 148 | + |
| 149 | + // Create txs executor. |
| 150 | + let ws = net.node_rpc_uri("charlie").unwrap(); |
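| | + // 1000 accounts (ids 0..=999) each sending 5000 txs: the 5 million transactions asserted below. |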
| 151 | + let executor = default_zn_scenario_builder(&net) |
| 152 | + .with_rpc_uri(ws) |
| 153 | + .with_start_id(0) |
| 154 | + .with_last_id(999) |
| 155 | + .with_txs_count(5_000) |
| 156 | + .with_executor_id("txs-executor".to_string()) |
| 157 | + .with_send_threshold(7500) |
| 158 | + .build() |
| 159 | + .await; |
| 160 | + |
| 161 | + // Execute transactions and fetch the execution logs. |
| 162 | + let execution_logs = executor.execute().await; |
| 163 | + let finalized_txs = execution_logs.values().filter_map(|tx_log| tx_log.finalized()).count(); |
| 164 | + |
| 165 | + assert_eq!(finalized_txs, 5_000_000); |
| 166 | +} |
| 167 | + |
| 168 | +// Test which sends 5m transactions to the relaychain. Long execution time expected. |
| 169 | +#[tokio::test(flavor = "multi_thread")] |
| 170 | +#[ignore] |
| 171 | +async fn send_5m_from_many_accounts_to_relaychain() { |
| 172 | + let net = NetworkSpawner::from_toml_with_env_logger(relay::HIGH_POOL_LIMIT_FATP) |
| 173 | + .await |
| 174 | + .unwrap(); |
| 175 | + |
| 176 | + // Wait for the relaychain node to start block production. |
| 177 | + net.wait_for_block_production("alice").await.unwrap(); |
| 178 | + |
| 179 | + // Create txs executor. |
| 180 | + let ws = net.node_rpc_uri("alice").unwrap(); |
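| | + // 1000 accounts (ids 0..=999) each sending 5000 txs: the 5 million transactions asserted below. |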
| 181 | + let executor = default_zn_scenario_builder(&net) |
| 182 | + .with_rpc_uri(ws.clone()) |
| 183 | + .with_start_id(0) |
| 184 | + .with_last_id(999) |
| 185 | + .with_txs_count(5_000) |
| 186 | + .with_executor_id("txs-executor".to_string()) |
| 187 | + .with_send_threshold(7500) |
| 188 | + .build() |
| 189 | + .await; |
| 190 | + |
| 191 | + // Execute transactions and fetch the execution logs. |
| 192 | + let execution_logs = executor.execute().await; |
| 193 | + let finalized_txs = execution_logs.values().filter_map(|tx_log| tx_log.finalized()).count(); |
| 194 | + |
| 195 | + assert_eq!(finalized_txs, 5_000_000); |
| 196 | +} |
| 197 | + |
| 198 | +/// Internal test that allows observing how transactions are gossiped in the network. Requires an |
| 199 | +/// external tool to track transaction presence at nodes. Was used to evaluate some metrics of the |
| 200 | +/// existing transaction protocol. |
| 201 | +#[tokio::test(flavor = "multi_thread")] |
| 202 | +#[ignore] |
| 203 | +async fn gossiping() { |
| 204 | + let net = NetworkSpawner::from_toml_with_env_logger(relay::HIGH_POOL_LIMIT_FATP_TRACE) |
| 205 | + .await |
| 206 | + .unwrap(); |
| 207 | + |
| 208 | + // Wait for the relaychain node to start block production. |
| 209 | + net.wait_for_block_production("a00").await.unwrap(); |
| 210 | + |
| 211 | + // Create the txs executor. |
| 212 | + let ws = net.node_rpc_uri("a00").unwrap(); |
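| | + // 1000 accounts (ids 0..=999); with the builder's default txs count this yields the 1000 finalized txs asserted below. |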
| 213 | + let executor = default_zn_scenario_builder(&net) |
| 214 | + .with_rpc_uri(ws) |
| 215 | + .with_start_id(0) |
| 216 | + .with_last_id(999) |
| 217 | + .with_executor_id("txs-executor".to_string()) |
| 218 | + .build() |
| 219 | + .await; |
| 220 | + |
| 221 | + // Execute transactions and fetch the execution logs. |
| 222 | + let execution_logs = executor.execute().await; |
| 223 | + let finalized_txs = execution_logs.values().filter_map(|tx_log| tx_log.finalized()).count(); |
| 224 | + |
| 225 | + assert_eq!(finalized_txs, 1000); |
| 226 | + |
| 227 | + tracing::info!("BASEDIR: {:?}", net.base_dir_path()); |
| 228 | +} |
| 229 | + |
| 230 | +/// Creates a new transaction scenario executor and sends the given batch of ready transactions to |
| 231 | +/// the specified node. A single transaction is sent from each account. |
| 232 | +async fn send_batch( |
| 233 | + net: &NetworkSpawner, |
| 234 | + node_name: &str, |
| 235 | + from: u32, |
| 236 | + to: u32, |
| 237 | + prio: u32, |
| 238 | +) -> ScenarioExecutor { |
| 239 | + let ws = net.node_rpc_uri(node_name).unwrap(); |
| 240 | + info!(from, to, prio, "send_batch"); |
| 241 | + default_zn_scenario_builder(net) |
| 242 | + .with_rpc_uri(ws) |
| 243 | + .with_start_id(from) |
| 244 | + .with_last_id(to) |
| 245 | + .with_txs_count(1) |
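| | + // The tip is used here to drive the transaction's priority: a higher tip means a higher priority. |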
| 246 | + .with_tip(prio.into()) |
| 247 | + .with_executor_id(format!("txs-executor_{}_{}_{}", from, to, prio)) |
| 248 | + .with_send_threshold(usize::MAX) |
| 249 | + .with_legacy_backend(true) |
| 250 | + .build() |
| 251 | + .await |
| 252 | +} |
| 253 | + |
| 254 | +/// Repeatedly sends batches of transactions to the specified node, with the priority provided by a |
| 255 | +/// closure. |
| 256 | +/// |
| 257 | +/// This function loops indefinitely, adjusting the priority of the transaction batch each time |
| 258 | +/// based on the provided closure. Each batch is executed by an executor that times out if it does |
| 259 | +/// not complete within the given period. |
| 260 | +/// |
| 261 | +/// The progress of transactions is intentionally not monitored; the utility is intended for |
| 262 | +/// transaction pool limits testing, where accurately tracking execution is impractical. |
| 263 | +async fn batch_loop<F>( |
| 264 | + net: &NetworkSpawner, |
| 265 | + node_name: &str, |
| 266 | + from: u32, |
| 267 | + to: u32, |
| 268 | + priority: F, |
| 269 | + period: std::time::Duration, |
| 270 | +) where |
| 271 | + F: Fn(u32) -> u32, |
| 272 | +{ |
| 273 | + let mut prio = 0; |
| 274 | + loop { |
| 275 | + prio = priority(prio); |
| 276 | + let executor = send_batch(&net, node_name, from, to, prio).await; |
| 277 | + let start = std::time::Instant::now(); |
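| | + // Give the executor at most `period` to submit the batch; unfinished work is simply dropped. |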
| 278 | + let _results = tokio::time::timeout(period, executor.execute()).await; |
| 279 | + let elapsed = start.elapsed(); |
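| | + // Pad the iteration so that a new batch starts roughly every `period`. |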
| 280 | + if elapsed < period { |
| 281 | + tokio::time::sleep(period - elapsed).await; |
| 282 | + } |
| 283 | + } |
| 284 | +} |
| 285 | + |
| 286 | +/// Tests the transaction pool limits by continuously sending transaction batches to a parachain |
| 287 | +/// network node. This test checks the pool's behavior under high load by simulating multiple |
| 288 | +/// senders with increasing priorities. |
| 289 | +#[tokio::test(flavor = "multi_thread")] |
| 290 | +#[ignore] |
| 291 | +async fn test_limits_increasing_prio_parachain() { |
| 292 | + let net = NetworkSpawner::from_toml_with_env_logger(para::LOW_POOL_LIMIT_FATP) |
| 293 | + .await |
| 294 | + .unwrap(); |
| 295 | + |
| 296 | + net.wait_for_block_production("charlie").await.unwrap(); |
| 297 | + |
| 298 | + let mut executors = vec![]; |
| 299 | + let senders_count = 25; |
| 300 | + let sender_batch = 2000; |
| 301 | + |
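| | + // 25 senders covering 2000 accounts each (50_000 accounts total), each re-submitting with an increasing tip. |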
| 302 | + for i in 0..senders_count { |
| 303 | + let from = 0 + i * sender_batch; |
| 304 | + let to = from + sender_batch - 1; |
| 305 | + executors.push(batch_loop( |
| 306 | + &net, |
| 307 | + "charlie", |
| 308 | + from, |
| 309 | + to, |
| 310 | + |prio| prio + 1, |
| 311 | + Duration::from_secs(60), |
| 312 | + )); |
| 313 | + } |
| 314 | + |
| 315 | + let _results = join_all(executors).await; |
| 316 | +} |
| 317 | + |
| 318 | +/// Tests the transaction pool limits by continuously sending transaction batches to a relaychain |
| 319 | +/// network node. This test checks the pool's behavior under high load by simulating multiple |
| 320 | +/// senders with increasing priorities. |
| 321 | +#[tokio::test(flavor = "multi_thread")] |
| 322 | +#[ignore] |
| 323 | +async fn test_limits_increasing_prio_relaychain() { |
| 324 | + let net = NetworkSpawner::from_toml_with_env_logger(relay::LOW_POOL_LIMIT_FATP) |
| 325 | + .await |
| 326 | + .unwrap(); |
| 327 | + |
| 328 | + net.wait_for_block_production("alice").await.unwrap(); |
| 329 | + |
| 330 | + let mut executors = vec![]; |
| 331 | + // This looks like the current limit of what we can handle. The chain chokes a bit, but there are almost no empty blocks. |
| 332 | + let senders_count = 50; |
| 333 | + let sender_batch = 2000; |
| 334 | + |
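| | + // 50 senders covering 2000 accounts each (100_000 accounts total), each re-submitting with an increasing tip. |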
| 335 | + for i in 0..senders_count { |
| 336 | + let from = 0 + i * sender_batch; |
| 337 | + let to = from + sender_batch - 1; |
| 338 | + executors.push(batch_loop( |
| 339 | + &net, |
| 340 | + "alice", |
| 341 | + from, |
| 342 | + to, |
| 343 | + |prio| prio + 1, |
| 344 | + Duration::from_secs(15), |
| 345 | + )); |
| 346 | + } |
| 347 | + |
| 348 | + let _results = join_all(executors).await; |
| 349 | +} |
| 350 | + |
| 351 | +/// Tests the transaction pool limits by continuously sending transaction batches to a relaychain |
| 352 | +/// network node. This test checks the pool's behavior under high load by simulating multiple |
| 353 | +/// senders that all use the same priority. |
| 354 | +#[tokio::test(flavor = "multi_thread")] |
| 355 | +#[ignore] |
| 356 | +async fn test_limits_same_prio_relaychain() { |
| 357 | + let net = NetworkSpawner::from_toml_with_env_logger(relay::LOW_POOL_LIMIT_FATP) |
| 358 | + .await |
| 359 | + .unwrap(); |
| 360 | + |
| 361 | + net.wait_for_block_production("alice").await.unwrap(); |
| 362 | + |
| 363 | + let mut executors = vec![]; |
| 364 | + let senders_count = 50; |
| 365 | + let sender_batch = 2000; |
| 366 | + |
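| | + // 50 senders covering 2000 accounts each (100_000 accounts total), all batches re-sent with the same (zero) tip. |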
| 367 | + for i in 0..senders_count { |
| 368 | + let from = 0 + i * sender_batch; |
| 369 | + let to = from + sender_batch - 1; |
| 370 | + executors.push(batch_loop(&net, "alice", from, to, |prio| prio, Duration::from_secs(15))); |
| 371 | + } |
| 372 | + |
| 373 | + let _results = join_all(executors).await; |
| 374 | +} |