|
| 1 | +use crate::BPlusTreeMap; |
| 2 | +use std::collections::BTreeMap; |
| 3 | +use std::time::Instant; |
| 4 | + |
| 5 | +/// Comprehensive performance benchmark comparing BPlusTreeMap vs BTreeMap |
| 6 | +/// Tests insert, delete, access, and iterate operations on large datasets |
| 7 | +pub fn run_comprehensive_benchmark() { |
| 8 | + println!("=== COMPREHENSIVE PERFORMANCE BENCHMARK ==="); |
| 9 | + println!("BPlusTreeMap vs BTreeMap - Large Tree & Large Capacity\n"); |
| 10 | + |
| 11 | + let tree_size = 1_000_000; |
| 12 | + let capacity = 2048; // Large capacity |
| 13 | + let sample_size = 10_000; // Operations to benchmark |
| 14 | + |
| 15 | + println!("Configuration:"); |
| 16 | + println!(" Tree size: {} items", tree_size); |
| 17 | + println!(" BPlusTreeMap capacity: {}", capacity); |
| 18 | + println!(" Sample operations: {}", sample_size); |
| 19 | + println!(); |
| 20 | + |
| 21 | + // Create and populate trees |
| 22 | + println!("🔧 Setting up trees..."); |
| 23 | + let (bplus, btree) = setup_trees(tree_size, capacity); |
| 24 | + |
| 25 | + println!("📊 Running benchmarks...\n"); |
| 26 | + |
| 27 | + // Test each operation |
| 28 | + benchmark_access(&bplus, &btree, tree_size, sample_size); |
| 29 | + benchmark_insert(&bplus, &btree, tree_size, sample_size); |
| 30 | + benchmark_delete(&bplus, &btree, tree_size, sample_size); |
| 31 | + benchmark_iterate(&bplus, &btree, sample_size); |
| 32 | + |
| 33 | + println!("\n=== BENCHMARK COMPLETE ==="); |
| 34 | +} |
| 35 | + |
| 36 | +fn setup_trees(size: usize, capacity: usize) -> (BPlusTreeMap<usize, usize>, BTreeMap<usize, usize>) { |
| 37 | + let mut bplus = BPlusTreeMap::new(capacity).unwrap(); |
| 38 | + let mut btree = BTreeMap::new(); |
| 39 | + |
| 40 | + // Populate with sequential data |
| 41 | + for i in 0..size { |
| 42 | + bplus.insert(i, i * 2); |
| 43 | + btree.insert(i, i * 2); |
| 44 | + } |
| 45 | + |
| 46 | + (bplus, btree) |
| 47 | +} |
| 48 | + |
| 49 | +fn benchmark_access(bplus: &BPlusTreeMap<usize, usize>, btree: &BTreeMap<usize, usize>, tree_size: usize, sample_size: usize) { |
| 50 | + println!("🔍 ACCESS Performance:"); |
| 51 | + |
| 52 | + // Generate random keys for access |
| 53 | + let keys: Vec<usize> = (0..sample_size) |
| 54 | + .map(|i| (i * 997) % tree_size) // Pseudo-random distribution |
| 55 | + .collect(); |
| 56 | + |
| 57 | + // Benchmark BPlusTreeMap access |
| 58 | + let start = Instant::now(); |
| 59 | + for &key in &keys { |
| 60 | + let _ = bplus.get(&key); |
| 61 | + } |
| 62 | + let bplus_time = start.elapsed(); |
| 63 | + |
| 64 | + // Benchmark BTreeMap access |
| 65 | + let start = Instant::now(); |
| 66 | + for &key in &keys { |
| 67 | + let _ = btree.get(&key); |
| 68 | + } |
| 69 | + let btree_time = start.elapsed(); |
| 70 | + |
| 71 | + let bplus_per_op = bplus_time.as_nanos() as f64 / sample_size as f64; |
| 72 | + let btree_per_op = btree_time.as_nanos() as f64 / sample_size as f64; |
| 73 | + let speedup = btree_per_op / bplus_per_op; |
| 74 | + |
| 75 | + println!(" BPlusTreeMap: {:.1}ns per access", bplus_per_op); |
| 76 | + println!(" BTreeMap: {:.1}ns per access", btree_per_op); |
| 77 | + println!(" Ratio: {:.2}x {}", speedup, if speedup > 1.0 { "(BPlusTreeMap faster)" } else { "(BTreeMap faster)" }); |
| 78 | + println!(); |
| 79 | +} |
| 80 | + |
| 81 | +fn benchmark_insert(bplus: &BPlusTreeMap<usize, usize>, btree: &BTreeMap<usize, usize>, tree_size: usize, sample_size: usize) { |
| 82 | + println!("➕ INSERT Performance:"); |
| 83 | + |
| 84 | + // Generate new keys for insertion (beyond existing range) |
| 85 | + let new_keys: Vec<usize> = (tree_size..tree_size + sample_size).collect(); |
| 86 | + |
| 87 | + // Create fresh trees for insertion testing |
| 88 | + let capacity = bplus.capacity; |
| 89 | + let mut bplus_copy = BPlusTreeMap::new(capacity).unwrap(); |
| 90 | + let mut btree_copy = BTreeMap::new(); |
| 91 | + |
| 92 | + // Pre-populate with original data |
| 93 | + for i in 0..tree_size { |
| 94 | + bplus_copy.insert(i, i * 2); |
| 95 | + btree_copy.insert(i, i * 2); |
| 96 | + } |
| 97 | + |
| 98 | + // Benchmark BPlusTreeMap insert |
| 99 | + let start = Instant::now(); |
| 100 | + for &key in &new_keys { |
| 101 | + bplus_copy.insert(key, key * 2); |
| 102 | + } |
| 103 | + let bplus_time = start.elapsed(); |
| 104 | + |
| 105 | + // Reset and benchmark BTreeMap insert |
| 106 | + btree_copy.clear(); |
| 107 | + for i in 0..tree_size { |
| 108 | + btree_copy.insert(i, i * 2); |
| 109 | + } |
| 110 | + |
| 111 | + let start = Instant::now(); |
| 112 | + for &key in &new_keys { |
| 113 | + btree_copy.insert(key, key * 2); |
| 114 | + } |
| 115 | + let btree_time = start.elapsed(); |
| 116 | + |
| 117 | + let bplus_per_op = bplus_time.as_nanos() as f64 / sample_size as f64; |
| 118 | + let btree_per_op = btree_time.as_nanos() as f64 / sample_size as f64; |
| 119 | + let speedup = btree_per_op / bplus_per_op; |
| 120 | + |
| 121 | + println!(" BPlusTreeMap: {:.1}ns per insert", bplus_per_op); |
| 122 | + println!(" BTreeMap: {:.1}ns per insert", btree_per_op); |
| 123 | + println!(" Ratio: {:.2}x {}", speedup, if speedup > 1.0 { "(BPlusTreeMap faster)" } else { "(BTreeMap faster)" }); |
| 124 | + println!(); |
| 125 | +} |
| 126 | + |
| 127 | +fn benchmark_delete(bplus: &BPlusTreeMap<usize, usize>, btree: &BTreeMap<usize, usize>, tree_size: usize, sample_size: usize) { |
| 128 | + println!("➖ DELETE Performance:"); |
| 129 | + |
| 130 | + // Generate keys to delete (from existing range) |
| 131 | + let delete_keys: Vec<usize> = (0..sample_size) |
| 132 | + .map(|i| (i * 991) % tree_size) // Pseudo-random distribution |
| 133 | + .collect(); |
| 134 | + |
| 135 | + // Create fresh trees for deletion testing |
| 136 | + let capacity = bplus.capacity; |
| 137 | + let mut bplus_copy = BPlusTreeMap::new(capacity).unwrap(); |
| 138 | + let mut btree_copy = BTreeMap::new(); |
| 139 | + |
| 140 | + // Pre-populate with original data |
| 141 | + for i in 0..tree_size { |
| 142 | + bplus_copy.insert(i, i * 2); |
| 143 | + btree_copy.insert(i, i * 2); |
| 144 | + } |
| 145 | + |
| 146 | + // Benchmark BPlusTreeMap delete |
| 147 | + let start = Instant::now(); |
| 148 | + for &key in &delete_keys { |
| 149 | + let _ = bplus_copy.remove(&key); |
| 150 | + } |
| 151 | + let bplus_time = start.elapsed(); |
| 152 | + |
| 153 | + // Reset and benchmark BTreeMap delete |
| 154 | + btree_copy.clear(); |
| 155 | + for i in 0..tree_size { |
| 156 | + btree_copy.insert(i, i * 2); |
| 157 | + } |
| 158 | + |
| 159 | + let start = Instant::now(); |
| 160 | + for &key in &delete_keys { |
| 161 | + let _ = btree_copy.remove(&key); |
| 162 | + } |
| 163 | + let btree_time = start.elapsed(); |
| 164 | + |
| 165 | + let bplus_per_op = bplus_time.as_nanos() as f64 / sample_size as f64; |
| 166 | + let btree_per_op = btree_time.as_nanos() as f64 / sample_size as f64; |
| 167 | + let speedup = btree_per_op / bplus_per_op; |
| 168 | + |
| 169 | + println!(" BPlusTreeMap: {:.1}ns per delete", bplus_per_op); |
| 170 | + println!(" BTreeMap: {:.1}ns per delete", btree_per_op); |
| 171 | + println!(" Ratio: {:.2}x {}", speedup, if speedup > 1.0 { "(BPlusTreeMap faster)" } else { "(BTreeMap faster)" }); |
| 172 | + println!(); |
| 173 | +} |
| 174 | + |
| 175 | +fn benchmark_iterate(bplus: &BPlusTreeMap<usize, usize>, btree: &BTreeMap<usize, usize>, sample_size: usize) { |
| 176 | + println!("🔄 ITERATE Performance:"); |
| 177 | + |
| 178 | + let iterations = 100; |
| 179 | + |
| 180 | + // Benchmark BPlusTreeMap iteration (range) |
| 181 | + let start_key = 100_000; |
| 182 | + let end_key = start_key + sample_size; |
| 183 | + |
| 184 | + let start = Instant::now(); |
| 185 | + for _ in 0..iterations { |
| 186 | + for (_k, _v) in bplus.items_range(Some(&start_key), Some(&end_key)) { |
| 187 | + // Consume iterator |
| 188 | + } |
| 189 | + } |
| 190 | + let bplus_time = start.elapsed(); |
| 191 | + |
| 192 | + // Benchmark BTreeMap iteration (range) |
| 193 | + let start = Instant::now(); |
| 194 | + for _ in 0..iterations { |
| 195 | + for (_k, _v) in btree.range(start_key..=end_key) { |
| 196 | + // Consume iterator |
| 197 | + } |
| 198 | + } |
| 199 | + let btree_time = start.elapsed(); |
| 200 | + |
| 201 | + let bplus_per_item = bplus_time.as_nanos() as f64 / (iterations * sample_size) as f64; |
| 202 | + let btree_per_item = btree_time.as_nanos() as f64 / (iterations * sample_size) as f64; |
| 203 | + let speedup = btree_per_item / bplus_per_item; |
| 204 | + |
| 205 | + println!(" BPlusTreeMap: {:.1}ns per item", bplus_per_item); |
| 206 | + println!(" BTreeMap: {:.1}ns per item", btree_per_item); |
| 207 | + println!(" Ratio: {:.2}x {}", speedup, if speedup > 1.0 { "(BPlusTreeMap faster)" } else { "(BTreeMap faster)" }); |
| 208 | + println!(); |
| 209 | +} |
| 210 | + |
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: runs the full benchmark end to end. Note this builds
    // 1,000,000-entry trees several times, so it is slow — it verifies the
    // benchmark completes without panicking, not any timing result.
    #[test]
    fn test_comprehensive_benchmark() {
        run_comprehensive_benchmark();
    }
}
0 commit comments