Skip to content

Commit b95eb82

Browse files
committed
Refactor error handling and response structures
- Changed several methods in `KodeBridgeError` to `const fn` for improved performance. - Updated `Response` struct methods in `http_client.rs` to `const fn`. - Modified `HttpResponse` and `StreamResponse` constructors to `const fn`. - Enhanced IPC client and server code for better clarity and performance. - Refactored connection pool methods to use `Arc::clone` for thread safety. - Improved retry logic with `const fn` where applicable. - Adjusted parser cache to use boxed slices for headers. - General code cleanup and consistency improvements across multiple files.
1 parent 3ab9990 commit b95eb82

23 files changed

+302
-239
lines changed

CHANGELOG.md

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,23 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
66
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
77

88

9+
## [0.3.5] - 2025-12-15
10+
11+
### Fixed
12+
- Avoid large stack arrays in `ipc_http_server` by allocating HTTP header storage on the heap (fixes Clippy `large_stack_arrays`).
13+
- Replace `.clone()` on `Arc<T>` with `Arc::clone(&...)` across tests and examples to satisfy Clippy `clone_on_ref_ptr`.
14+
- Address explicit auto-deref and other small Clippy warnings in `ipc_http_server` and related modules.
15+
- Improve benches/examples error handling to address `unwrap_used`/`expect_used` lints; where appropriate, replace `unwrap()` with `expect()` and add targeted `#[allow(...)]` for benign bench/example code.
16+
- Minor example fixes (e.g., replace `vec!` with array in `examples/stream_server.rs`) and other cleanup.
17+
18+
### Changed
19+
- Code hygiene and small refactors to satisfy clippy and improve robustness across tests, benches, and examples.
20+
21+
### Internal
22+
- Updated examples and benches: `examples/stream_server.rs`, `examples/traffic_monitor.rs`, `examples/metrics_demo.rs`, `examples/concurrent_proxy_test.rs`, and `benches/bench_version.rs`.
23+
- Re-ran `cargo clippy` and iterated until the workspace was clean of the reported lints.
24+
25+
926
## [0.3.4] - 2025-10-11
1027

1128
### Changed

Cargo.lock

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
[package]
22
name = "kode-bridge"
33
authors = ["Tunglies"]
4-
version = "0.3.4"
4+
version = "0.3.5"
55
edition = "2021"
66
description = "Modern HTTP Over IPC library for Rust with both client and server support (Unix sockets, Windows named pipes)."
77
license = "Apache-2.0"

benches/bench_version.rs

Lines changed: 12 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,14 @@ use criterion::{criterion_group, criterion_main, Criterion};
2525
use kode_bridge::http_client::Response;
2626
use kode_bridge::IpcHttpClient;
2727

28+
#[allow(clippy::expect_used, clippy::unwrap_used)]
2829
async fn bench_version_once(client: &IpcHttpClient) -> Response {
29-
client.get("/version").send().await.unwrap().into_inner()
30+
client
31+
.get("/version")
32+
.send()
33+
.await
34+
.expect("Request failed")
35+
.into_inner()
3036
}
3137

3238
fn get_ipc_path() -> String {
@@ -48,8 +54,9 @@ fn get_ipc_path() -> String {
4854
}
4955
}
5056

57+
#[allow(clippy::expect_used, clippy::unwrap_used)]
5158
fn bench_version(c: &mut Criterion) {
52-
let rt = Runtime::new().unwrap();
59+
let rt = Runtime::new().expect("Failed to create runtime");
5360
let ipc_path = get_ipc_path();
5461

5562
let mut group = c.benchmark_group("ipc_http_version");
@@ -60,15 +67,15 @@ fn bench_version(c: &mut Criterion) {
6067

6168
// Create client once for Unix, or per-iteration for Windows
6269
#[cfg(unix)]
63-
let client = IpcHttpClient::new(&ipc_path).unwrap();
70+
let client = IpcHttpClient::new(&ipc_path).expect("Failed to create IpcHttpClient");
6471

6572
group.bench_function("version_once", |b| {
6673
b.iter(|| {
6774
rt.block_on(async {
6875
// Create client for each iteration on Windows to avoid connection issues
6976
#[cfg(windows)]
7077
{
71-
let client = IpcHttpClient::new(&ipc_path).unwrap();
78+
let client = IpcHttpClient::new(&ipc_path).expect("Failed to create IpcHttpClient");
7279
let _ = bench_version_once(&client).await;
7380
}
7481

@@ -81,7 +88,7 @@ fn bench_version(c: &mut Criterion) {
8188
// Default behavior for other platforms
8289
#[cfg(not(any(unix, windows)))]
8390
{
84-
let client = IpcHttpClient::new(&ipc_path).unwrap();
91+
let client = IpcHttpClient::new(&ipc_path).expect("Failed to create IpcHttpClient");
8592
let _ = bench_version_once(&client).await;
8693
}
8794
});

examples/concurrent_proxy_test.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ async fn concurrent_proxy_test(
2727

2828
// 创建并发任务
2929
for (index, proxy_name) in proxies.iter().enumerate() {
30-
let client = client.clone();
30+
let client = Arc::clone(&client);
3131
let proxy_group = proxy_group.to_string();
3232
let proxy_name = proxy_name.to_string();
3333

@@ -118,8 +118,8 @@ async fn concurrent_proxy_test(
118118
durations.sort();
119119

120120
if !durations.is_empty() {
121-
let min_duration = durations.first().unwrap();
122-
let max_duration = durations.last().unwrap();
121+
let min_duration = durations[0];
122+
let max_duration = durations[durations.len() - 1];
123123
let median_duration = durations[durations.len() / 2];
124124

125125
println!(" 📊 Latency Analysis:");
@@ -264,7 +264,7 @@ async fn main() -> Result<()> {
264264

265265
// 执行并发测试
266266
if let Err(e) =
267-
concurrent_proxy_test(client.clone(), group_name, proxy_names).await
267+
concurrent_proxy_test(Arc::clone(&client), group_name, proxy_names).await
268268
{
269269
println!("❌ Concurrent test failed: {}", e);
270270
}

examples/http_server.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -260,7 +260,7 @@ async fn main() -> Result<()> {
260260

261261
// Helper function to demonstrate server statistics
262262
#[allow(dead_code)]
263-
async fn print_server_stats() {
263+
fn print_server_stats() {
264264
// This would be called periodically to show server stats
265265
// For now, it's just a placeholder
266266
info!("Server statistics would be displayed here");

examples/metrics_demo.rs

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ use kode_bridge::{
33
MetricsSnapshot, ParserCacheStats,
44
};
55
use std::time::Duration;
6+
use std::sync::Arc;
67
use tokio::time::sleep;
78

89
#[tokio::main]
@@ -18,7 +19,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
1819
println!("✅ Metrics system initialized");
1920

2021
// Create health checker
21-
let health_checker = HealthChecker::new(metrics.clone());
22+
let health_checker = HealthChecker::new(Arc::clone(&metrics));
2223
println!("✅ Health checker created");
2324

2425
// Create a test configuration
@@ -84,12 +85,12 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
8485
// Demonstrate buffer pool integration
8586
println!("\n💾 Buffer Pool Statistics:");
8687
println!("==========================");
87-
demonstrate_buffer_pools().await;
88+
demonstrate_buffer_pools();
8889

8990
// Demonstrate parser cache
9091
println!("\n🧠 Parser Cache Statistics:");
9192
println!("===========================");
92-
demonstrate_parser_cache().await;
93+
demonstrate_parser_cache();
9394

9495
println!("\n✨ Demo completed successfully!");
9596
println!("📝 In a real application, you would:");
@@ -198,7 +199,7 @@ fn print_health_report(report: &kode_bridge::HealthReport) {
198199
}
199200
}
200201

201-
async fn demonstrate_buffer_pools() {
202+
fn demonstrate_buffer_pools() {
202203
use kode_bridge::buffer_pool::global_pools;
203204

204205
let pools = global_pools();
@@ -225,7 +226,7 @@ async fn demonstrate_buffer_pools() {
225226
println!("Buffer efficiency: 75% reuse rate");
226227
}
227228

228-
async fn demonstrate_parser_cache() {
229+
fn demonstrate_parser_cache() {
229230
use kode_bridge::parser_cache::global_parser_cache;
230231

231232
let cache = global_parser_cache();

examples/stream_server.rs

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,10 @@
33
//! This example demonstrates how to create a streaming IPC server that
44
//! broadcasts real-time data to multiple connected clients.
55
6+
#![allow(clippy::expect_used, clippy::useless_vec)]
7+
68
use kode_bridge::{IpcStreamServer, JsonDataSource, Result, StreamMessage, StreamServerConfig};
7-
use rand::Rng;
9+
use rand::Rng as _;
810
use serde_json::json;
911
use std::env;
1012
use std::time::{Duration, SystemTime, UNIX_EPOCH};
@@ -40,7 +42,7 @@ struct EventLog {
4042
fn generate_traffic_data() -> Result<serde_json::Value> {
4143
let timestamp = SystemTime::now()
4244
.duration_since(UNIX_EPOCH)
43-
.unwrap()
45+
.expect("SystemTime before UNIX_EPOCH")
4446
.as_secs();
4547

4648
let mut rng = rand::rng();
@@ -57,7 +59,7 @@ fn generate_traffic_data() -> Result<serde_json::Value> {
5759
fn generate_system_metrics() -> Result<serde_json::Value> {
5860
let timestamp = SystemTime::now()
5961
.duration_since(UNIX_EPOCH)
60-
.unwrap()
62+
.expect("SystemTime before UNIX_EPOCH")
6163
.as_secs();
6264

6365
let mut rng = rand::rng();
@@ -76,10 +78,10 @@ fn generate_system_metrics() -> Result<serde_json::Value> {
7678
fn generate_event_log() -> Result<serde_json::Value> {
7779
let timestamp = SystemTime::now()
7880
.duration_since(UNIX_EPOCH)
79-
.unwrap()
81+
.expect("SystemTime before UNIX_EPOCH")
8082
.as_secs();
8183

82-
let events = vec![
84+
let events = [
8385
("INFO", "User logged in successfully", "auth"),
8486
("WARN", "High memory usage detected", "system"),
8587
("ERROR", "Failed to connect to database", "database"),
@@ -270,17 +272,16 @@ async fn main() -> Result<()> {
270272

271273
// Example of manual broadcasting (for demonstration)
272274
#[allow(dead_code)]
273-
async fn manual_broadcast_example() -> Result<()> {
275+
fn manual_broadcast_example() {
274276
// This shows how you might manually broadcast messages
275277
let _data = json!({
276278
"type": "notification",
277279
"message": "Manual broadcast message",
278-
"timestamp": SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs()
280+
"timestamp": SystemTime::now().duration_since(UNIX_EPOCH).expect("SystemTime before UNIX_EPOCH").as_secs()
279281
});
280282

281283
// server.broadcast(StreamMessage::Json(data))?;
282284
info!("Manual broadcast sent");
283-
Ok(())
284285
}
285286

286287
// Example of different message types

examples/traffic_monitor.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ pub struct TrafficMonitor(Arc<RwLock<CurrentTraffic>>);
3737
impl TrafficMonitor {
3838
pub fn new(client: IpcStreamClient) -> Self {
3939
let current = Arc::new(RwLock::new(CurrentTraffic::default()));
40-
let monitor_current = current.clone();
40+
let monitor_current = Arc::clone(&current);
4141

4242
tokio::spawn(async move {
4343
let mut last: Option<TrafficData> = None;
@@ -58,7 +58,7 @@ impl TrafficMonitor {
5858
.unwrap_or((0, 0));
5959

6060
tokio::spawn({
61-
let current = monitor_current.clone();
61+
let current = Arc::clone(&monitor_current);
6262
async move {
6363
*current.write().await = CurrentTraffic {
6464
up_rate,
@@ -104,7 +104,7 @@ async fn main() -> Result<()> {
104104
dotenv().ok();
105105

106106
let client =
107-
IpcStreamClient::new(env::var("CUSTOM_SOCK").unwrap_or("/tmp/example.sock".into()))?;
107+
IpcStreamClient::new(env::var("CUSTOM_SOCK").unwrap_or_else(|_| "/tmp/example.sock".into()))?;
108108

109109
// Test connection
110110
if client

src/buffer_pool.rs

Lines changed: 27 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -51,13 +51,18 @@ impl BufferPool {
5151

5252
/// Pre-warm the pool with buffers
5353
pub fn warm_up(&self, count: usize) {
54-
let mut buffers = self.buffers.lock();
55-
let current_size = buffers.len();
56-
let to_create = (count.saturating_sub(current_size)).min(self.max_pool_size - current_size);
54+
let to_create = {
55+
let mut buffers = self.buffers.lock();
56+
let current_size = buffers.len();
57+
let to_create =
58+
(count.saturating_sub(current_size)).min(self.max_pool_size - current_size);
5759

58-
for _ in 0..to_create {
59-
buffers.push_back(Vec::with_capacity(self.buffer_size));
60-
}
60+
for _ in 0..to_create {
61+
buffers.push_back(Vec::with_capacity(self.buffer_size));
62+
}
63+
64+
to_create
65+
};
6166

6267
debug!("Buffer pool warmed up with {} buffers", to_create);
6368
}
@@ -78,12 +83,12 @@ pub struct PooledBuffer {
7883

7984
impl PooledBuffer {
8085
/// Get mutable reference to the underlying buffer
81-
pub fn as_mut_vec(&mut self) -> &mut Vec<u8> {
86+
pub const fn as_mut_vec(&mut self) -> &mut Vec<u8> {
8287
&mut self.buffer
8388
}
8489

8590
/// Get reference to the underlying buffer
86-
pub fn as_vec(&self) -> &Vec<u8> {
91+
pub const fn as_vec(&self) -> &Vec<u8> {
8792
&self.buffer
8893
}
8994

@@ -131,12 +136,20 @@ impl PooledBuffer {
131136
impl Drop for PooledBuffer {
132137
fn drop(&mut self) {
133138
if let Some(pool) = self.pool.upgrade() {
134-
let mut buffers = pool.lock();
135-
if buffers.len() < self.max_pool_size && self.buffer.capacity() >= 1024 {
136-
// Only return buffer to pool if it's reasonably sized and pool has space
137-
let mut returned_buffer = std::mem::take(&mut self.buffer);
138-
returned_buffer.clear();
139-
buffers.push_back(returned_buffer);
139+
let returned = {
140+
let mut buffers = pool.lock();
141+
if buffers.len() < self.max_pool_size && self.buffer.capacity() >= 1024 {
142+
// Only return buffer to pool if it's reasonably sized and pool has space
143+
let mut returned_buffer = std::mem::take(&mut self.buffer);
144+
returned_buffer.clear();
145+
buffers.push_back(returned_buffer);
146+
true
147+
} else {
148+
false
149+
}
150+
};
151+
152+
if returned {
140153
debug!("Buffer returned to pool");
141154
}
142155
}

0 commit comments

Comments
 (0)