90 | 90 | } |
91 | 91 |
92 | 92 | async fn _new(tick: Duration, ttl: Duration) -> Self { |
93 | | - let bucket_count = (ttl.as_millis() / tick.as_millis()) as usize; |
| 93 | + let bucket_count = (ttl.as_nanos() / tick.as_nanos()) as usize; // nanos: a sub-millisecond tick would make as_millis() return 0 |
94 | 94 | let mut buckets = Vec::with_capacity(bucket_count); |
95 | 95 | for _ in 0..bucket_count { |
96 | 96 | buckets.push(HashSet::new()); |
@@ -340,4 +340,70 @@ mod tests { |
340 | 340 | let final_time = ttl_map.time.load(Ordering::SeqCst); |
341 | 341 | assert!(final_time < 100); |
342 | 342 | } |
| 343 | + |
| 344 | + |
| 345 | + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] |
| 346 | + async fn bench_lock_contention() { |
| 347 | + use std::time::Instant; |
| 348 | + |
| 349 | + let ttl_map = TTLMap::<String, i32>::new(TTLMapParams { |
| 350 | + tick: Duration::from_micros(1), // sub-millisecond tick exercises the as_nanos() change above |
| 351 | + ttl: Duration::from_micros(2), |
| 352 | + }) |
| 353 | + .await |
| 354 | + .unwrap(); |
| 355 | + |
| 356 | + let ttl_map = Arc::new(ttl_map); |
| 357 | + |
| 358 | + let key_count = 10; |
| 359 | + let operations_per_task = 1_000_000; |
| 360 | + let task_count = 100; |
| 361 | + let start_time = Instant::now(); |
| 362 | + |
| 363 | + // Spawn task_count tasks that repeatedly read the same small key set |
| 364 | + let mut handles = Vec::new(); |
| 365 | + for task_id in 0..task_count { |
| 366 | + let map = Arc::clone(&ttl_map); |
| 367 | + let handle = tokio::spawn(async move { |
| 368 | + let mut local_ops = 0; |
| 369 | + for i in 0..operations_per_task { |
| 370 | + // All tasks fight for the same keys - maximum contention |
| 371 | + let key = format!("key{}", i % key_count); |
| 372 | + let _value = map.get_or_init(key, || task_id * 1000 + i).await; // closure runs only if the key is absent |
| 373 | + local_ops += 1; |
| 374 | + |
| 375 | + // Yield occasionally so the background expiry (GC) task gets scheduled |
| 376 | + if i % 10 == 0 { |
| 377 | + tokio::task::yield_now().await; |
| 378 | + } |
| 379 | + } |
| 380 | + local_ops |
| 381 | + }); |
| 382 | + handles.push(handle); |
| 383 | + } |
| 384 | + |
| 385 | + // Wait for all tasks and collect operation counts |
| 386 | + let mut total_operations = 0; |
| 387 | + for handle in handles { |
| 388 | + total_operations += handle.await.unwrap(); |
| 389 | + } |
| 390 | + |
| 391 | + let elapsed = start_time.elapsed(); |
| 392 | + let ops_per_second = total_operations as f64 / elapsed.as_secs_f64(); |
| 393 | + let wall_us_per_op = elapsed.as_micros() as f64 / total_operations as f64; |
| 394 | + |
| 395 | + println!("\n=== TTLMap Lock Contention Benchmark ==="); |
| 396 | + println!("Tasks: {}", task_count); |
| 397 | + println!("Operations per task: {}", operations_per_task); |
| 398 | + println!("Total operations: {}", total_operations); |
| 399 | + println!("Total time: {:.2?}", elapsed); |
| 400 | + println!("Throughput: {:.0} ops/sec", ops_per_second); |
| 401 | + println!("Average latency: {:.2} μs per operation", avg_latency_us); |
| 402 | + println!("Entries remaining: {}", ttl_map.data.len()); |
| 403 | + |
| 404 | + // The benchmark passes if it completes without deadlocks |
| 405 | + // Performance metrics are printed for analysis |
| 406 | + assert!(ops_per_second > 0.0); // Sanity check |
| 407 | + } |
| 408 | + |
343 | 409 | } |