1 change: 1 addition & 0 deletions .github/workflows/benchmarks.yml
@@ -39,6 +39,7 @@ jobs:
uu_fold,
uu_join,
uu_ls,
uu_chmod,
uu_mv,
uu_nl,
uu_numfmt,
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

9 changes: 9 additions & 0 deletions src/uu/chmod/Cargo.toml
@@ -30,3 +30,12 @@ uucore = { workspace = true, features = ["safe-traversal"] }
[[bin]]
name = "chmod"
path = "src/main.rs"

[dev-dependencies]
divan = { workspace = true }
tempfile = { workspace = true }
uucore = { workspace = true, features = ["benchmark"] }

[[bench]]
name = "chmod_bench"
harness = false
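
Note: with `harness = false`, Cargo skips the built-in libtest harness and the bench target supplies its own entry point (the `divan::main()` call at the bottom of the new bench file). Assuming the usual workspace layout, the benchmark would be invoked with something like `cargo bench --package uu_chmod --bench chmod_bench`.
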
175 changes: 175 additions & 0 deletions src/uu/chmod/benches/chmod_bench.rs
@@ -0,0 +1,175 @@
// This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.

use divan::{Bencher, black_box};
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};
use tempfile::TempDir;
use uu_chmod::uumain;
use uucore::benchmark::{fs_tree, run_util_function};

struct CountingAlloc;

#[global_allocator]
static ALLOC: CountingAlloc = CountingAlloc;

static CURRENT: AtomicUsize = AtomicUsize::new(0);
static PEAK: AtomicUsize = AtomicUsize::new(0);
static ALLOCS: AtomicUsize = AtomicUsize::new(0);
static DEALLOCS: AtomicUsize = AtomicUsize::new(0);

unsafe impl GlobalAlloc for CountingAlloc {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let ptr = unsafe { System.alloc(layout) };
if !ptr.is_null() {
ALLOCS.fetch_add(1, Ordering::Relaxed);
let size = layout.size();
let current = CURRENT.fetch_add(size, Ordering::Relaxed) + size;
update_peak(current);
}
ptr
}

unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
unsafe { System.dealloc(ptr, layout) };
if !ptr.is_null() {
DEALLOCS.fetch_add(1, Ordering::Relaxed);
CURRENT.fetch_sub(layout.size(), Ordering::Relaxed);
}
}

unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
let new_ptr = unsafe { System.realloc(ptr, layout, new_size) };
if !new_ptr.is_null() {
let old_size = layout.size();
if new_size > old_size {
let delta = new_size - old_size;
let current = CURRENT.fetch_add(delta, Ordering::Relaxed) + delta;
update_peak(current);
} else {
CURRENT.fetch_sub(old_size - new_size, Ordering::Relaxed);
}
}
new_ptr
}
}

#[inline]
fn update_peak(current: usize) {
let mut peak = PEAK.load(Ordering::Relaxed);
while current > peak {
match PEAK.compare_exchange_weak(peak, current, Ordering::Relaxed, Ordering::Relaxed) {
Ok(_) => break,
Err(actual) => peak = actual,
}
}
}

#[derive(Clone, Copy)]
struct AllocStats {
current: usize,
peak: usize,
allocs: usize,
deallocs: usize,
}

fn reset_stats_for_interval() -> usize {
let baseline = CURRENT.load(Ordering::Relaxed);
PEAK.store(baseline, Ordering::Relaxed);
ALLOCS.store(0, Ordering::Relaxed);
DEALLOCS.store(0, Ordering::Relaxed);
baseline
}

fn alloc_stats() -> AllocStats {
AllocStats {
current: CURRENT.load(Ordering::Relaxed),
peak: PEAK.load(Ordering::Relaxed),
allocs: ALLOCS.load(Ordering::Relaxed),
deallocs: DEALLOCS.load(Ordering::Relaxed),
}
}

fn mem_enabled() -> bool {
std::env::var_os("UU_CHMOD_MEM").is_some()
}

fn run_chmod(args: &[&str]) {
black_box(run_util_function(uumain, args));
}

fn maybe_report_allocs(label: &str, args: &[&str]) {
if !mem_enabled() {
return;
}

let baseline = reset_stats_for_interval();
run_chmod(args);
let stats = alloc_stats();
let peak_delta = stats.peak.saturating_sub(baseline);
let current_delta = stats.current.saturating_sub(baseline);

eprintln!(
"chmod mem {label}: peak={peak_delta}B current={current_delta}B allocs={allocs} deallocs={deallocs}",
allocs = stats.allocs,
deallocs = stats.deallocs
);
}

#[cfg(unix)]
fn cap_dirs_by_rlimit(total_dirs: usize) -> usize {
use uucore::libc::{RLIM_INFINITY, RLIMIT_NOFILE, getrlimit, rlimit};

let mut lim = rlimit {
rlim_cur: 0,
rlim_max: 0,
};
let lim_ptr: *mut rlimit = &raw mut lim;
let rc = unsafe { getrlimit(RLIMIT_NOFILE, lim_ptr) };
if rc != 0 || lim.rlim_cur == RLIM_INFINITY {
return total_dirs;
}

let headroom = 32;
let cap = lim.rlim_cur.saturating_sub(headroom).max(1) as usize;
total_dirs.min(cap)
}

#[cfg(not(unix))]
fn cap_dirs_by_rlimit(total_dirs: usize) -> usize {
total_dirs
}

fn bench_chmod_recursive(bencher: Bencher, temp_dir: &TempDir, label: &str) {
let temp_path = temp_dir.path().to_str().unwrap();
let args = ["-R", "755", temp_path];

maybe_report_allocs(label, &args);

bencher.bench(|| {
run_chmod(&args);
});
}

#[divan::bench(args = [(2000, 200)])]
fn chmod_recursive_wide_tree(bencher: Bencher, (total_files, total_dirs): (usize, usize)) {
let temp_dir = TempDir::new().unwrap();
let capped_dirs = cap_dirs_by_rlimit(total_dirs);
fs_tree::create_wide_tree(temp_dir.path(), total_files, capped_dirs);
let label = format!("wide files={total_files} dirs={capped_dirs}");
bench_chmod_recursive(bencher, &temp_dir, &label);
}

#[divan::bench(args = [(200, 2)])]
fn chmod_recursive_deep_tree(bencher: Bencher, (depth, files_per_level): (usize, usize)) {
let temp_dir = TempDir::new().unwrap();
fs_tree::create_deep_tree(temp_dir.path(), depth, files_per_level);
let label = format!("deep depth={depth} files_per_level={files_per_level}");
bench_chmod_recursive(bencher, &temp_dir, &label);
}

fn main() {
divan::main();
}
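
The bench file registers a counting `#[global_allocator]`, so every allocation in the process is tallied; `maybe_report_allocs` resets the counters, runs chmod once, and prints the peak/current deltas only when `UU_CHMOD_MEM` is set, keeping the allocation report out of the timed divan runs. A minimal, self-contained sketch of the same measurement idea (hypothetical names, not part of the PR; it uses `AtomicUsize::fetch_max` where the PR spells out a compare-exchange loop):

// Hypothetical sketch: a stripped-down counting allocator plus a helper that
// reports the peak-allocation delta around a closure.
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};

static CURRENT: AtomicUsize = AtomicUsize::new(0);
static PEAK: AtomicUsize = AtomicUsize::new(0);

struct CountingAlloc;

#[global_allocator]
static ALLOC: CountingAlloc = CountingAlloc;

unsafe impl GlobalAlloc for CountingAlloc {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let ptr = unsafe { System.alloc(layout) };
        if !ptr.is_null() {
            // Track live bytes and keep the high-water mark current.
            let now = CURRENT.fetch_add(layout.size(), Ordering::Relaxed) + layout.size();
            PEAK.fetch_max(now, Ordering::Relaxed);
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        unsafe { System.dealloc(ptr, layout) };
        CURRENT.fetch_sub(layout.size(), Ordering::Relaxed);
    }
}

/// Run `f` and return its result plus the peak-allocation delta (in bytes)
/// observed while it ran, relative to the live bytes at entry.
fn peak_delta<R>(f: impl FnOnce() -> R) -> (R, usize) {
    let baseline = CURRENT.load(Ordering::Relaxed);
    PEAK.store(baseline, Ordering::Relaxed);
    let out = f();
    (out, PEAK.load(Ordering::Relaxed).saturating_sub(baseline))
}

fn main() {
    let (_buf, peak) = peak_delta(|| vec![0u8; 1 << 20]);
    println!("peak delta: {peak} bytes"); // roughly 1 MiB plus allocator rounding
}

Either form of peak tracking works for a Relaxed high-water mark; the explicit compare-exchange loop in the PR and `fetch_max` here are equivalent.
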
92 changes: 51 additions & 41 deletions src/uu/chmod/src/chmod.rs
@@ -475,7 +475,7 @@ impl Chmoder {
if (!file_path.is_symlink() || should_follow_symlink) && file_path.is_dir() {
match DirFd::open(file_path) {
Ok(dir_fd) => {
r = self.safe_traverse_dir(&dir_fd, file_path).and(r);
r = self.safe_traverse_dir(dir_fd, file_path).and(r);
}
Err(err) => {
// Handle permission denied errors with proper file path context
@@ -491,57 +491,67 @@ }
}

#[cfg(all(unix, not(target_os = "redox")))]
fn safe_traverse_dir(&self, dir_fd: &DirFd, dir_path: &Path) -> UResult<()> {
fn safe_traverse_dir(&self, root_fd: DirFd, root_path: &Path) -> UResult<()> {
let mut r = Ok(());

let entries = dir_fd.read_dir()?;
// Depth-first traversal without recursive calls while keeping descriptor usage bounded.
let mut stack: Vec<(Vec<OsString>, PathBuf)> = vec![(Vec::new(), root_path.to_path_buf())];

// Determine if we should follow symlinks (doesn't depend on entry_name)
let should_follow_symlink = self.traverse_symlinks == TraverseSymlinks::All;

for entry_name in entries {
let entry_path = dir_path.join(&entry_name);
while let Some((relative_dir_components, dir_path)) = stack.pop() {
let dir_fd = match root_fd
.open_subdir_chain(relative_dir_components.iter().map(OsString::as_os_str))
{
Ok(fd) => fd,
Err(err) => {
let error = if err.kind() == std::io::ErrorKind::PermissionDenied {
ChmodError::PermissionDenied(dir_path).into()
} else {
err.into()
};
r = r.and(Err(error));
continue;
}
};

let dir_meta = dir_fd.metadata_at(&entry_name, should_follow_symlink);
let Ok(meta) = dir_meta else {
// Handle permission denied with proper file path context
let e = dir_meta.unwrap_err();
let error = if e.kind() == std::io::ErrorKind::PermissionDenied {
ChmodError::PermissionDenied(entry_path).into()
} else {
e.into()
let entries = dir_fd.read_dir()?;

for entry_name in entries {
let entry_path = dir_path.join(&entry_name);

let dir_meta = dir_fd.metadata_at(&entry_name, should_follow_symlink);
let Ok(meta) = dir_meta else {
// Handle permission denied with proper file path context
let e = dir_meta.unwrap_err();
let error = if e.kind() == std::io::ErrorKind::PermissionDenied {
ChmodError::PermissionDenied(entry_path).into()
} else {
e.into()
};
r = r.and(Err(error));
continue;
};
r = r.and(Err(error));
continue;
};

if entry_path.is_symlink() {
r = self
.handle_symlink_during_safe_recursion(&entry_path, dir_fd, &entry_name)
.and(r);
} else {
// For regular files and directories, chmod them
r = self
.safe_chmod_file(&entry_path, dir_fd, &entry_name, meta.mode() & 0o7777)
.and(r);

// Recurse into subdirectories using the existing directory fd
if meta.is_dir() {
match dir_fd.open_subdir(&entry_name) {
Ok(child_dir_fd) => {
r = self.safe_traverse_dir(&child_dir_fd, &entry_path).and(r);
}
Err(err) => {
let error = if err.kind() == std::io::ErrorKind::PermissionDenied {
ChmodError::PermissionDenied(entry_path).into()
} else {
err.into()
};
r = r.and(Err(error));
}
if entry_path.is_symlink() {
r = self
.handle_symlink_during_safe_recursion(&entry_path, &dir_fd, &entry_name)
.and(r);
} else {
// For regular files and directories, chmod them
r = self
.safe_chmod_file(&entry_path, &dir_fd, &entry_name, meta.mode() & 0o7777)
.and(r);

// Queue subdirectories; parent dir_fd can be dropped before processing children.
if meta.is_dir() {
let mut child_components = relative_dir_components.clone();
child_components.push(entry_name.clone());
stack.push((child_components, entry_path.clone()));
}
}
}
// dir_fd is dropped here, releasing its file descriptor before the next depth step.
}
r
}
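
The net effect of the chmod.rs change: the recursive `safe_traverse_dir(&self, dir_fd: &DirFd, ...)` becomes an iterative depth-first walk over an explicit stack of (path components relative to the root, full path) pairs. Each directory is reopened from the root descriptor via `open_subdir_chain` when it is popped, and its descriptor is dropped before the next directory is processed, so descriptor usage stays bounded and deep trees no longer grow the call stack. A rough `std::fs` sketch of the same traversal shape (hypothetical helper, not the PR's openat-based safe traversal; it also aborts on the first I/O error instead of accumulating errors the way the PR does):

// Illustrative only: an explicit-stack DFS over plain paths. The PR keeps the
// components relative to the root so each directory can be reopened from the
// root DirFd (openat-style); this simplified version just reuses full paths.
use std::io;
use std::path::{Path, PathBuf};

fn visit_all(root: &Path, mut on_entry: impl FnMut(&Path)) -> io::Result<()> {
    let mut stack: Vec<PathBuf> = vec![root.to_path_buf()];

    while let Some(dir_path) = stack.pop() {
        for entry in std::fs::read_dir(&dir_path)? {
            let entry = entry?;
            let entry_path = entry.path();
            on_entry(&entry_path);

            // DirEntry::file_type() does not follow symlinks, so symlinked
            // directories are not queued for descent.
            if entry.file_type()?.is_dir() {
                stack.push(entry_path);
            }
        }
        // The ReadDir handle is dropped here, before the next directory is
        // opened, so only one directory handle is live at a time.
    }
    Ok(())
}

fn main() -> io::Result<()> {
    visit_all(Path::new("."), |p| println!("{}", p.display()))
}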