 use anyhow::Result;
 use code_guardian_core::{
     DetectorFactory, DetectorProfile, OptimizedScanner, Scanner, StreamingScanner,
+    BenchmarkConfigurations, BenchmarkSuite, PerformanceAnalyzer,
 };
 use std::path::Path;
 use std::time::Instant;
@@ -120,6 +121,180 @@ pub fn run_benchmark(path: &Path) -> Result<()> { |
     Ok(())
 }
 
+/// Run a comprehensive benchmark suite.
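+///
+/// `suite_type` selects a preset: "small", "medium", "large", "regression", or "all".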
+pub fn run_comprehensive_benchmark(path: &Path, suite_type: &str) -> Result<()> {
+    println!("🚀 Code-Guardian Comprehensive Benchmark");
+    println!("=========================================\n");
+
+    let mut suite = match suite_type {
+        "small" => BenchmarkConfigurations::small_project(),
+        "medium" => BenchmarkConfigurations::medium_project(),
+        "large" => BenchmarkConfigurations::large_project(),
+        "regression" => BenchmarkConfigurations::regression_detection(),
+        "all" => {
+            // Run all benchmark suites
+            run_all_benchmark_suites(path)?;
+            return Ok(());
+        }
+        _ => {
+            println!("❌ Unknown benchmark suite: {}", suite_type);
+            println!("Available suites: small, medium, large, regression, all");
+            return Ok(());
+        }
+    };
+
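+    // Execute the selected suite; its results and summary feed the report below.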
+    suite.run_benchmarks(path)?;
+
+    // Generate detailed report
+    generate_benchmark_report(&suite)?;
+
+    Ok(())
+}
+
+/// Run all benchmark suites.
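+///
+/// Runs every preset configuration in sequence and prints an aggregate pass/fail summary.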
+pub fn run_all_benchmark_suites(path: &Path) -> Result<()> {
+    println!("🏃‍♂️ Running All Benchmark Suites");
+    println!("==================================\n");
+
+    let suites = vec![
+        ("Small Project", BenchmarkConfigurations::small_project()),
+        ("Medium Project", BenchmarkConfigurations::medium_project()),
+        ("Large Project", BenchmarkConfigurations::large_project()),
+        ("Regression Detection", BenchmarkConfigurations::regression_detection()),
+    ];
+
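+    // Accumulate pass/fail totals across every suite for the overall summary.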
+    let mut all_results = Vec::new();
+    let mut total_passed = 0;
+    let mut total_tests = 0;
+
+    for (name, mut suite) in suites {
+        println!("🔄 Running {} Suite...", name);
+        suite.run_benchmarks(path)?;
+
+        total_passed += suite.summary.passed_tests;
+        total_tests += suite.summary.total_tests;
+        all_results.push((name, suite));
+    }
+
+    // Overall summary
+    println!("\n🏁 Overall Benchmark Results");
+    println!("=============================");
+    println!("Total Suites: {}", all_results.len());
+    println!("Total Tests: {}", total_tests);
+    println!("Total Passed: ✅ {}", total_passed);
+    println!("Total Failed: ❌ {}", total_tests - total_passed);
+
+    // Guard against division by zero when no tests ran.
+    let success_rate = if total_tests > 0 {
+        (total_passed as f64 / total_tests as f64) * 100.0
+    } else {
+        0.0
+    };
+    println!("Success Rate: {:.1}%", success_rate);
+
+    if success_rate >= 90.0 {
+        println!("🎉 Excellent performance across all benchmarks!");
+    } else if success_rate >= 70.0 {
+        println!("✅ Good performance with some areas for improvement");
+    } else {
+        println!("⚠️ Performance issues detected - optimization recommended");
+    }
+
+    Ok(())
+}
+
+/// Run performance analysis with detailed metrics.
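+///
+/// Profiles a single comprehensive scan and saves the collected metrics as JSON.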
+pub fn run_performance_analysis(path: &Path) -> Result<()> {
+    println!("🔍 Performance Analysis");
+    println!("======================\n");
+
+    let mut analyzer = PerformanceAnalyzer::new();
+
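+    // Everything inside this closure is the workload under measurement
+    // (assuming `analyze_performance` instruments the closure it is given).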
+    analyzer.analyze_performance(|| {
+        // Run a comprehensive scan for analysis
+        let scanner = OptimizedScanner::new(DetectorProfile::Comprehensive.get_detectors())
+            .with_cache_size(10_000);
+        let (_matches, _metrics) = scanner.scan_optimized(path)?;
+        Ok(())
+    })?;
+
+    // Generate and display performance report
+    let report = analyzer.generate_report();
+    println!("{}", report);
+
+    // Save analysis to file
+    save_performance_analysis(&analyzer)?;
+
+    Ok(())
+}
+
+/// Generate comprehensive benchmark report.
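+///
+/// Prints per-test metrics and any recommendations, then persists the suite as JSON.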
+fn generate_benchmark_report(suite: &BenchmarkSuite) -> Result<()> {
+    println!("\n📊 Detailed Benchmark Report");
+    println!("============================");
+
+    println!("Suite: {}", suite.name);
+    println!("Performance Score: {:.1}/100", suite.summary.performance_score);
+
+    if !suite.results.is_empty() {
+        println!("\n📈 Individual Test Results:");
+        for result in &suite.results {
+            println!("\n🔍 {}", result.name);
+            println!(" Duration: {:?}", result.duration);
+            println!(" Files/sec: {:.1}", result.throughput.files_per_second);
+            println!(" Lines/sec: {:.0}", result.throughput.lines_per_second);
+            println!(" Memory: {:.1} MB", result.resource_usage.memory_mb);
+            println!(" Score: {:.1}/100", result.performance_score);
+        }
+    }
+
+    if !suite.summary.improvement_areas.is_empty() {
+        println!("\n💡 Recommendations:");
+        for area in &suite.summary.improvement_areas {
+            println!(" • {}", area);
+        }
+    }
+
+    // Save report to file
+    save_benchmark_report(suite)?;
+
+    Ok(())
+}
+
+/// Save benchmark report to file.
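+///
+/// Writes pretty-printed JSON to `reports/benchmark_report_<suite>_<timestamp>.json`.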
+fn save_benchmark_report(suite: &BenchmarkSuite) -> Result<()> {
+    use std::fs;
+    use std::path::PathBuf;
+
+    let timestamp = chrono::Utc::now().format("%Y%m%d_%H%M%S");
+    let filename = format!(
+        "benchmark_report_{}_{}.json",
+        suite.name.replace(' ', "_").to_lowercase(),
+        timestamp
+    );
+
+    let reports_dir = PathBuf::from("reports");
+    fs::create_dir_all(&reports_dir)?;
+
+    let report_path = reports_dir.join(filename);
+    let json_report = serde_json::to_string_pretty(suite)?;
+    fs::write(&report_path, json_report)?;
+
+    println!("\n📄 Report saved to: {}", report_path.display());
+    Ok(())
+}
+
+/// Save performance analysis to file.
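+///
+/// Writes pretty-printed JSON to `reports/performance_analysis_<timestamp>.json`.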
+fn save_performance_analysis(analyzer: &PerformanceAnalyzer) -> Result<()> {
+    use std::fs;
+    use std::path::PathBuf;
+
+    let timestamp = chrono::Utc::now().format("%Y%m%d_%H%M%S");
+    let filename = format!("performance_analysis_{}.json", timestamp);
+
+    let reports_dir = PathBuf::from("reports");
+    fs::create_dir_all(&reports_dir)?;
+
+    let report_path = reports_dir.join(filename);
+    let json_analysis = serde_json::to_string_pretty(analyzer)?;
+    fs::write(&report_path, json_analysis)?;
+
+    println!("\n📄 Analysis saved to: {}", report_path.display());
+    Ok(())
+}
+
 /// Quick performance test
 pub fn quick_performance_test(path: &Path) -> Result<()> {
     println!("⚡ Quick Performance Test");