Skip to content

Commit d652c21

Browse files
authored
refactor: centralize progress tracking and optimize logging levels (#75)
- Add ProgressTracker utility for unified progress reporting across download, flash, verify, and decompress operations - Centralize configuration constants in config module (logging intervals, flash delays, paste limits) - Add helper functions: bytes_to_mb(), bytes_to_gb(), strip_compression_ext() - Optimize log levels: use debug for technical details, info for user events - Enable XZ multithreaded optimization (lzma-rust2 optimization feature) This reduces code duplication and improves maintainability by extracting common progress tracking patterns into a reusable component.
1 parent 28baeb0 commit d652c21

File tree

12 files changed

+409
-88
lines changed

12 files changed

+409
-88
lines changed

src-tauri/Cargo.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ tokio = { version = "1", features = ["full"] }
2222
reqwest = { version = "0.12", default-features = false, features = ["json", "stream", "rustls-tls"] }
2323
futures-util = "0.3"
2424
# Multi-threaded decompression libraries
25-
lzma-rust2 = { version = "0.15", features = ["xz", "std"] }
25+
lzma-rust2 = { version = "0.15", features = ["xz", "std", "optimization"] }
2626
bzip2 = "0.4"
2727
flate2 = "1.0"
2828
zstd = "0.13"

src-tauri/src/config/mod.rs

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,42 @@ pub mod flash {
5454

5555
/// Progress log interval (percentage points)
5656
pub const LOG_INTERVAL_PERCENT: u64 = 6;
57+
58+
/// Delay after unmount before writing (milliseconds)
59+
pub const UNMOUNT_DELAY_MS: u64 = 500;
60+
}
61+
62+
/// Progress logging intervals
63+
pub mod logging {
64+
/// SHA256 calculation buffer size
65+
pub const SHA_BUFFER_SIZE: usize = 8192;
66+
67+
/// SHA256 progress log interval (MB)
68+
pub const SHA_LOG_INTERVAL_MB: u64 = 50;
69+
70+
/// Download progress log interval (MB)
71+
pub const DOWNLOAD_LOG_INTERVAL_MB: u64 = 10;
72+
73+
/// Write progress log interval (MB)
74+
pub const WRITE_LOG_INTERVAL_MB: u64 = 512;
75+
76+
/// Decompression progress log interval (MB)
77+
pub const DECOMPRESS_LOG_INTERVAL_MB: u64 = 100;
78+
79+
/// Linux sync interval for flush operations
80+
pub const LINUX_SYNC_INTERVAL: u64 = 32 * 1024 * 1024;
81+
}
82+
83+
/// Log paste service settings
84+
pub mod paste {
85+
/// Maximum lines to show in truncated preview
86+
pub const TRUNCATE_LINES: usize = 500;
87+
88+
/// Maximum log file size to upload (5 MB)
89+
pub const MAX_LOG_SIZE: u64 = 5 * 1024 * 1024;
90+
91+
/// Maximum log lines to process
92+
pub const MAX_LOG_LINES: usize = 10_000;
5793
}
5894

5995
/// Device detection settings

src-tauri/src/decompress.rs

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ use zstd::stream::read::Decoder as ZstdDecoder;
1717
use crate::config;
1818
use crate::download::DownloadState;
1919
use crate::log_info;
20-
use crate::utils::get_recommended_threads;
20+
use crate::utils::{get_recommended_threads, strip_compression_ext, ProgressTracker};
2121

2222
const MODULE: &str = "decompress";
2323

@@ -104,6 +104,16 @@ fn decompress_with_reader_mt<R: Read>(
104104
BufWriter::with_capacity(config::download::DECOMPRESS_BUFFER_SIZE, output_file);
105105
let mut buffer = vec![0u8; config::download::CHUNK_SIZE];
106106

107+
// Progress tracking - we don't know the decompressed size (0), so track output bytes
108+
// Use config interval for consistent logging
109+
let operation_name = format!("Decompress ({})", format_name);
110+
let mut tracker = ProgressTracker::new(
111+
&operation_name,
112+
MODULE,
113+
0, // Unknown total size for decompression
114+
config::logging::DECOMPRESS_LOG_INTERVAL_MB,
115+
);
116+
107117
loop {
108118
if state.is_cancelled.load(Ordering::SeqCst) {
109119
drop(buf_writer);
@@ -122,12 +132,18 @@ fn decompress_with_reader_mt<R: Read>(
122132
buf_writer
123133
.write_all(&buffer[..bytes_read])
124134
.map_err(|e| format!("Failed to write decompressed data: {}", e))?;
135+
136+
// ProgressTracker handles logging automatically
137+
tracker.update(bytes_read as u64);
125138
}
126139

127140
buf_writer
128141
.flush()
129142
.map_err(|e| format!("Failed to flush output: {}", e))?;
130143

144+
// Log final summary
145+
tracker.finish();
146+
131147
Ok(())
132148
}
133149

@@ -143,11 +159,7 @@ pub fn decompress_local_file(
143159
.ok_or("Invalid filename")?;
144160

145161
// Extract base filename (remove compression extension)
146-
let base_filename = filename
147-
.trim_end_matches(".xz")
148-
.trim_end_matches(".gz")
149-
.trim_end_matches(".bz2")
150-
.trim_end_matches(".zst");
162+
let base_filename = strip_compression_ext(filename);
151163

152164
// Generate unique filename with timestamp to handle concurrent operations
153165
let timestamp = std::time::SystemTime::now()

src-tauri/src/download.rs

Lines changed: 16 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ use tokio::sync::Mutex;
1414

1515
use crate::config;
1616
use crate::decompress::decompress_with_rust_xz;
17+
use crate::utils::{bytes_to_mb, ProgressTracker};
1718
use crate::{log_debug, log_error, log_info, log_warn};
1819

1920
const MODULE: &str = "download";
@@ -72,7 +73,7 @@ fn extract_filename(url: &str) -> Result<&str, String> {
7273

7374
/// Fetch expected SHA256 from URL
7475
async fn fetch_expected_sha(client: &Client, sha_url: &str) -> Result<String, String> {
75-
log_info!(MODULE, "Fetching SHA256 from: {}", sha_url);
76+
log_debug!(MODULE, "Fetching SHA256 from: {}", sha_url);
7677

7778
let response = client
7879
.get(sha_url)
@@ -104,13 +105,13 @@ async fn fetch_expected_sha(client: &Client, sha_url: &str) -> Result<String, St
104105
return Err(format!("Invalid SHA256 hash format: {}", hash));
105106
}
106107

107-
log_info!(MODULE, "Expected SHA256: {}", hash);
108+
log_debug!(MODULE, "Expected SHA256: {}", hash);
108109
Ok(hash)
109110
}
110111

111112
/// Calculate SHA256 of a file
112113
fn calculate_file_sha256(path: &Path, state: &Arc<DownloadState>) -> Result<String, String> {
113-
log_info!(MODULE, "Calculating SHA256 of: {}", path.display());
114+
log_debug!(MODULE, "Calculating SHA256 of: {}", path.display());
114115
log_debug!(
115116
MODULE,
116117
"File size: {:?} bytes",
@@ -150,7 +151,7 @@ fn calculate_file_sha256(path: &Path, state: &Arc<DownloadState>) -> Result<Stri
150151

151152
let result = hasher.finalize();
152153
let hash = format!("{:x}", result);
153-
log_info!(MODULE, "Calculated SHA256: {}", hash);
154+
log_debug!(MODULE, "Calculated SHA256: {}", hash);
154155
Ok(hash)
155156
}
156157

@@ -209,7 +210,7 @@ pub async fn download_image(
209210
let output_path = output_dir.join(output_filename);
210211

211212
log_info!(MODULE, "Download requested: {}", url);
212-
log_info!(MODULE, "Output path: {}", output_path.display());
213+
log_debug!(MODULE, "Output path: {}", output_path.display());
213214

214215
// Check if image is already in cache (also updates mtime for LRU)
215216
if let Some(cached_path) = crate::cache::get_cached_image(output_filename) {
@@ -250,17 +251,23 @@ pub async fn download_image(
250251
MODULE,
251252
"Download size: {} bytes ({:.2} MB)",
252253
total_size,
253-
total_size as f64 / 1024.0 / 1024.0
254+
bytes_to_mb(total_size)
254255
);
255256

256257
// Create temp file for compressed data
257258
let temp_path = output_dir.join(format!("{}.downloading", filename));
258259
let mut temp_file =
259260
File::create(&temp_path).map_err(|e| format!("Failed to create temp file: {}", e))?;
260261

261-
// Download with progress
262+
// Download with progress tracking
262263
let mut stream = response.bytes_stream();
263264
let mut downloaded: u64 = 0;
265+
let mut tracker = ProgressTracker::new(
266+
"Download",
267+
MODULE,
268+
total_size,
269+
config::logging::DOWNLOAD_LOG_INTERVAL_MB,
270+
);
264271

265272
while let Some(chunk) = stream.next().await {
266273
if state.is_cancelled.load(Ordering::SeqCst) {
@@ -277,10 +284,11 @@ pub async fn download_image(
277284

278285
downloaded += chunk.len() as u64;
279286
state.downloaded_bytes.store(downloaded, Ordering::SeqCst);
287+
tracker.update(chunk.len() as u64);
280288
}
281289

282290
drop(temp_file);
283-
log_info!(MODULE, "Download complete: {} bytes", downloaded);
291+
tracker.finish();
284292

285293
// Verify SHA256 if URL provided
286294
if let Some(sha_url) = sha_url {

src-tauri/src/flash/linux/writer.rs

Lines changed: 29 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,8 @@ use std::sync::Arc;
1212

1313
use crate::config;
1414
use crate::flash::{sync_device, unmount_device, FlashState};
15-
use crate::{log_error, log_info};
15+
use crate::utils::{bytes_to_gb, ProgressTracker};
16+
use crate::{log_debug, log_error, log_info};
1617

1718
const MODULE: &str = "flash::linux::writer";
1819

@@ -21,7 +22,7 @@ const MODULE: &str = "flash::linux::writer";
2122
async fn open_device_udisks2(device_path: &str) -> Result<File, String> {
2223
use std::collections::HashMap;
2324

24-
log_info!(MODULE, "Opening device via UDisks2: {}", device_path);
25+
log_debug!(MODULE, "Opening device via UDisks2: {}", device_path);
2526

2627
// Create UDisks2 client
2728
let client = udisks2::Client::new()
@@ -35,7 +36,7 @@ async fn open_device_udisks2(device_path: &str) -> Result<File, String> {
3536

3637
let object_path = format!("/org/freedesktop/UDisks2/block_devices/{}", dev_name);
3738

38-
log_info!(MODULE, "UDisks2 object path: {}", object_path);
39+
log_debug!(MODULE, "UDisks2 object path: {}", object_path);
3940

4041
// Get the block device object
4142
let object = client
@@ -56,7 +57,7 @@ async fn open_device_udisks2(device_path: &str) -> Result<File, String> {
5657
.await
5758
.map_err(|e| format!("Failed to open device (polkit auth may have failed): {}", e))?;
5859

59-
log_info!(MODULE, "Device opened successfully via UDisks2");
60+
log_debug!(MODULE, "Device opened successfully via UDisks2");
6061

6162
// Convert the file descriptor to a File
6263
// OwnedFd implements AsRawFd, so we get the raw fd and create a File from it
@@ -72,7 +73,7 @@ async fn open_device_udisks2(device_path: &str) -> Result<File, String> {
7273
fn open_device_direct(device_path: &str) -> Result<File, String> {
7374
use std::fs::OpenOptions;
7475

75-
log_info!(MODULE, "Attempting direct device open: {}", device_path);
76+
log_debug!(MODULE, "Attempting direct device open: {}", device_path);
7677

7778
OpenOptions::new()
7879
.read(true)
@@ -108,23 +109,25 @@ pub async fn flash_image(
108109
MODULE,
109110
"Image size: {} bytes ({:.2} GB)",
110111
image_size,
111-
image_size as f64 / 1024.0 / 1024.0 / 1024.0
112+
bytes_to_gb(image_size)
112113
);
113114

114115
// Unmount the device first
115116
log_info!(MODULE, "Unmounting device partitions...");
116117
unmount_device(device_path)?;
117118

118119
// Small delay to ensure unmount completes
119-
std::thread::sleep(std::time::Duration::from_millis(500));
120+
std::thread::sleep(std::time::Duration::from_millis(
121+
config::flash::UNMOUNT_DELAY_MS,
122+
));
120123

121124
// Try to open device via UDisks2 first (handles polkit auth)
122125
// Fall back to direct open if UDisks2 fails (e.g., if running as root)
123-
log_info!(MODULE, "Opening device for writing...");
126+
log_debug!(MODULE, "Opening device for writing...");
124127
let mut device = match open_device_udisks2(device_path).await {
125128
Ok(file) => file,
126129
Err(e) => {
127-
log_info!(MODULE, "UDisks2 open failed ({}), trying direct open...", e);
130+
log_debug!(MODULE, "UDisks2 open failed ({}), trying direct open...", e);
128131
open_device_direct(device_path)?
129132
}
130133
};
@@ -143,11 +146,18 @@ pub async fn flash_image(
143146
let mut buffer = vec![0u8; chunk_size];
144147
let mut written: u64 = 0;
145148

149+
// Use ProgressTracker for automatic progress logging
150+
let mut tracker = ProgressTracker::new(
151+
"Write",
152+
MODULE,
153+
image_size,
154+
config::logging::WRITE_LOG_INTERVAL_MB,
155+
);
156+
146157
log_info!(MODULE, "Writing image...");
147158

148-
// Sync interval: sync every 32MB to show real progress (not just cache writes)
159+
// Sync interval to show real progress (not just cache writes)
149160
// This ensures the progress bar reflects actual disk writes, not just memory cache
150-
const SYNC_INTERVAL: u64 = 32 * 1024 * 1024;
151161
let mut bytes_since_sync: u64 = 0;
152162

153163
loop {
@@ -172,25 +182,21 @@ pub async fn flash_image(
172182
bytes_since_sync += bytes_read as u64;
173183

174184
// Periodic sync to flush data to disk and show real progress
175-
if bytes_since_sync >= SYNC_INTERVAL {
185+
if bytes_since_sync >= config::logging::LINUX_SYNC_INTERVAL {
176186
unsafe {
177187
libc::fdatasync(device_fd);
178188
}
179189
bytes_since_sync = 0;
180190
state.written_bytes.store(written, Ordering::SeqCst);
181191
}
182192

183-
// Log progress every 512MB
184-
if written % (512 * 1024 * 1024) == 0 {
185-
log_info!(
186-
MODULE,
187-
"Progress: {:.1}%",
188-
(written as f64 / image_size as f64) * 100.0
189-
);
190-
}
193+
// ProgressTracker handles logging automatically
194+
tracker.update(bytes_read as u64);
191195
}
192196

193-
log_info!(MODULE, "Write complete, syncing...");
197+
// Log final summary
198+
tracker.finish();
199+
log_debug!(MODULE, "Syncing...");
194200

195201
// Sync
196202
device.flush().ok();
@@ -228,7 +234,7 @@ fn quick_erase(device: &mut File) -> Result<(), String> {
228234
let erase_size = config::flash::QUICK_ERASE_SIZE;
229235
let chunk_size = config::flash::ERASE_CHUNK_SIZE;
230236

231-
log_info!(
237+
log_debug!(
232238
MODULE,
233239
"Quick erase: writing zeros to first {} MB",
234240
erase_size / (1024 * 1024)
@@ -258,7 +264,7 @@ fn quick_erase(device: &mut File) -> Result<(), String> {
258264
.seek(SeekFrom::Start(0))
259265
.map_err(|e| format!("Failed to seek to start: {}", e))?;
260266

261-
log_info!(MODULE, "Quick erase complete");
267+
log_debug!(MODULE, "Quick erase complete");
262268
Ok(())
263269
}
264270

0 commit comments

Comments (0)