Skip to content

Commit bedfacb

Browse files
committed
Bump version to 0.1.3
1 parent 270f0fa commit bedfacb

23 files changed

+2984
-254
lines changed

.opencode/agent/agent-coordinator.md

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,8 +37,7 @@ tools:
3737
list: true
3838
webfetch: false
3939
todowrite: true
40-
todoread: true
41-
task: true
40+
todoread: true
4241
bash: false
4342
---
4443
## Overview

.opencode/agent/context7-mcp-agent.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@ tools:
1818
bash: false
1919
write: false
2020
edit: false
21+
batch: false
22+
grep: false
2123
context7_resolve_library_id: true
2224
context7_get_library_docs: true
2325
---

.opencode/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
{
22
"dependencies": {
3-
"@opencode-ai/plugin": "0.15.0"
3+
"@opencode-ai/plugin": "0.15.3"
44
}
55
}

Cargo.lock

Lines changed: 37 additions & 6 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

crates/cli/Cargo.toml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[package]
22
name = "code_guardian_cli"
3-
version = "0.1.2"
3+
version = "0.1.3"
44
edition = "2021"
55

66
[dependencies]
@@ -24,6 +24,7 @@ tokio = { workspace = true }
2424
tracing = { workspace = true }
2525
tracing-subscriber = { workspace = true }
2626
sysinfo = { workspace = true }
27+
rand = "0.8"
2728

2829
code-guardian-core = { path = "../core" }
2930
code-guardian-storage = { path = "../storage" }

crates/cli/src/benchmark.rs

Lines changed: 175 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
use anyhow::Result;
22
use code_guardian_core::{
33
DetectorFactory, DetectorProfile, OptimizedScanner, Scanner, StreamingScanner,
4+
BenchmarkSuite, BenchmarkConfigurations, PerformanceAnalyzer,
45
};
56
use std::path::Path;
67
use std::time::Instant;
@@ -120,6 +121,180 @@ pub fn run_benchmark(path: &Path) -> Result<()> {
120121
Ok(())
121122
}
122123

124+
/// Run comprehensive benchmark suite
125+
pub fn run_comprehensive_benchmark(path: &Path, suite_type: &str) -> Result<()> {
126+
println!("🚀 Code-Guardian Comprehensive Benchmark");
127+
println!("=========================================\n");
128+
129+
let mut suite = match suite_type {
130+
"small" => BenchmarkConfigurations::small_project(),
131+
"medium" => BenchmarkConfigurations::medium_project(),
132+
"large" => BenchmarkConfigurations::large_project(),
133+
"regression" => BenchmarkConfigurations::regression_detection(),
134+
"all" => {
135+
// Run all benchmark suites
136+
run_all_benchmark_suites(path)?;
137+
return Ok(());
138+
}
139+
_ => {
140+
println!("❌ Unknown benchmark suite: {}", suite_type);
141+
println!("Available suites: small, medium, large, regression, all");
142+
return Ok(());
143+
}
144+
};
145+
146+
suite.run_benchmarks(path)?;
147+
148+
// Generate detailed report
149+
generate_benchmark_report(&suite)?;
150+
151+
Ok(())
152+
}
153+
154+
/// Run all benchmark suites
155+
pub fn run_all_benchmark_suites(path: &Path) -> Result<()> {
156+
println!("🏃‍♂️ Running All Benchmark Suites");
157+
println!("==================================\n");
158+
159+
let suites = vec![
160+
("Small Project", BenchmarkConfigurations::small_project()),
161+
("Medium Project", BenchmarkConfigurations::medium_project()),
162+
("Large Project", BenchmarkConfigurations::large_project()),
163+
("Regression Detection", BenchmarkConfigurations::regression_detection()),
164+
];
165+
166+
let mut all_results = Vec::new();
167+
let mut total_passed = 0;
168+
let mut total_tests = 0;
169+
170+
for (name, mut suite) in suites {
171+
println!("🔄 Running {} Suite...", name);
172+
suite.run_benchmarks(path)?;
173+
174+
total_passed += suite.summary.passed_tests;
175+
total_tests += suite.summary.total_tests;
176+
all_results.push((name, suite));
177+
}
178+
179+
// Overall summary
180+
println!("\n🏁 Overall Benchmark Results");
181+
println!("=============================");
182+
println!("Total Suites: {}", all_results.len());
183+
println!("Total Tests: {}", total_tests);
184+
println!("Total Passed: ✅ {}", total_passed);
185+
println!("Total Failed: ❌ {}", total_tests - total_passed);
186+
187+
let success_rate = (total_passed as f64 / total_tests as f64) * 100.0;
188+
println!("Success Rate: {:.1}%", success_rate);
189+
190+
if success_rate >= 90.0 {
191+
println!("🎉 Excellent performance across all benchmarks!");
192+
} else if success_rate >= 70.0 {
193+
println!("✅ Good performance with some areas for improvement");
194+
} else {
195+
println!("⚠️ Performance issues detected - optimization recommended");
196+
}
197+
198+
Ok(())
199+
}
200+
201+
/// Run performance analysis with detailed metrics
202+
pub fn run_performance_analysis(path: &Path) -> Result<()> {
203+
println!("🔍 Performance Analysis");
204+
println!("======================\n");
205+
206+
let mut analyzer = PerformanceAnalyzer::new();
207+
208+
analyzer.analyze_performance(|| {
209+
// Run a comprehensive scan for analysis
210+
let scanner = OptimizedScanner::new(DetectorProfile::Comprehensive.get_detectors())
211+
.with_cache_size(10000);
212+
let (_matches, _metrics) = scanner.scan_optimized(path)?;
213+
Ok(())
214+
})?;
215+
216+
// Generate and display performance report
217+
let report = analyzer.generate_report();
218+
println!("{}", report);
219+
220+
// Save analysis to file
221+
save_performance_analysis(&analyzer, path)?;
222+
223+
Ok(())
224+
}
225+
226+
/// Generate comprehensive benchmark report
227+
fn generate_benchmark_report(suite: &BenchmarkSuite) -> Result<()> {
228+
println!("\n📊 Detailed Benchmark Report");
229+
println!("============================");
230+
231+
println!("Suite: {}", suite.name);
232+
println!("Performance Score: {:.1}/100", suite.summary.performance_score);
233+
234+
if !suite.results.is_empty() {
235+
println!("\n📈 Individual Test Results:");
236+
for result in &suite.results {
237+
println!("\n🔍 {}", result.name);
238+
println!(" Duration: {:?}", result.duration);
239+
println!(" Files/sec: {:.1}", result.throughput.files_per_second);
240+
println!(" Lines/sec: {:.0}", result.throughput.lines_per_second);
241+
println!(" Memory: {:.1} MB", result.resource_usage.memory_mb);
242+
println!(" Score: {:.1}/100", result.performance_score);
243+
}
244+
}
245+
246+
if !suite.summary.improvement_areas.is_empty() {
247+
println!("\n💡 Recommendations:");
248+
for area in &suite.summary.improvement_areas {
249+
println!(" • {}", area);
250+
}
251+
}
252+
253+
// Save report to file
254+
save_benchmark_report(suite)?;
255+
256+
Ok(())
257+
}
258+
259+
/// Save benchmark report to file
260+
fn save_benchmark_report(suite: &BenchmarkSuite) -> Result<()> {
261+
use std::fs;
262+
use std::path::PathBuf;
263+
264+
let timestamp = chrono::Utc::now().format("%Y%m%d_%H%M%S");
265+
let filename = format!("benchmark_report_{}_{}.json",
266+
suite.name.replace(" ", "_").to_lowercase(), timestamp);
267+
268+
let reports_dir = PathBuf::from("reports");
269+
fs::create_dir_all(&reports_dir)?;
270+
271+
let report_path = reports_dir.join(filename);
272+
let json_report = serde_json::to_string_pretty(suite)?;
273+
fs::write(&report_path, json_report)?;
274+
275+
println!("\n📄 Report saved to: {}", report_path.display());
276+
Ok(())
277+
}
278+
279+
/// Save performance analysis to file
280+
fn save_performance_analysis(analyzer: &PerformanceAnalyzer, path: &Path) -> Result<()> {
281+
use std::fs;
282+
use std::path::PathBuf;
283+
284+
let timestamp = chrono::Utc::now().format("%Y%m%d_%H%M%S");
285+
let filename = format!("performance_analysis_{}.json", timestamp);
286+
287+
let reports_dir = PathBuf::from("reports");
288+
fs::create_dir_all(&reports_dir)?;
289+
290+
let report_path = reports_dir.join(filename);
291+
let json_analysis = serde_json::to_string_pretty(analyzer)?;
292+
fs::write(&report_path, json_analysis)?;
293+
294+
println!("\n📄 Analysis saved to: {}", report_path.display());
295+
Ok(())
296+
}
297+
123298
/// Quick performance test
124299
pub fn quick_performance_test(path: &Path) -> Result<()> {
125300
println!("⚡ Quick Performance Test");

crates/cli/src/command_handlers.rs

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,18 @@ pub fn handle_benchmark(path: Option<PathBuf>, quick: bool) -> Result<()> {
5555
}
5656
}
5757

58+
/// Handle comprehensive benchmark command with suite selection
59+
pub fn handle_comprehensive_benchmark(path: Option<PathBuf>, suite: String) -> Result<()> {
60+
let benchmark_path = path.unwrap_or_else(|| std::env::current_dir().unwrap());
61+
benchmark::run_comprehensive_benchmark(&benchmark_path, &suite)
62+
}
63+
64+
/// Handle performance analysis command
65+
pub fn handle_performance_analysis(path: Option<PathBuf>) -> Result<()> {
66+
let analysis_path = path.unwrap_or_else(|| std::env::current_dir().unwrap());
67+
benchmark::run_performance_analysis(&analysis_path)
68+
}
69+
5870
// These functions are re-exported from advanced_handlers
5971
pub use crate::advanced_handlers::{
6072
handle_custom_detectors, handle_distributed, handle_incremental,

0 commit comments

Comments
 (0)