Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 12 additions & 12 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -61,21 +61,21 @@ blake2b_simd = { version = "1", optional = true }
chrono = { version = "0.4", features = ["serde"] }
# Having dev dependencies as optional makes it possible to
# build verneuil's examples out of tree.
clap = { version = "3", optional = true }
clap = { version = "4", optional = true }
crossbeam-channel = "0.5"
derivative = "2.2.0"
governor = { version = "0.4", default-features = false, features = ["std", "jitter"] } # 0.4 doesn't build without "jitter"
itertools = "0.10"
derivative = "2"
governor = { version = "0.6", default-features = false, features = ["std"] }
itertools = "0.13"
kismet-cache = "0.2"
lazy_static = "1"
libc = "0.2"
lru = { version = "0.7", default-features = false } # Disable hashbrown
memmap2 = "0.5"
lru = { version = "0.12", default-features = false } # Disable hashbrown
memmap2 = "0.7"
percent-encoding = "2"
prost = "0.9"
prost = "0.13"
quinine = "0.2"
rand = "0.8"
rayon = "1.5"
rayon = "1"
regex = "1"
rust-s3 = { version = "0.28", default-features = false, features = ["tokio-rustls-tls", "blocking"] }
serde = { version = "1.0", features = ["derive"] }
Expand All @@ -85,15 +85,15 @@ tokio = { version = "1", features = ["rt", "time"], default-features = false }
tracing = "0.1"
tracing-subscriber = { version = "0.3", optional = true, features = ["tracing-log"] } # Also collect log! calls.
uluru = "3"
umash = "0.4"
uuid = { version = "0.8", features = ["v4"] }
zstd = "0.11"
umash = "0.6"
uuid = { version = "1", features = ["v4"] }
zstd = "0.13"

[dev-dependencies]
rusqlite = { version = "0.26" } # For the sample rusqlite_integration.
tracing-subscriber = { version = "0.3", features = ["env-filter", "tracing-log"] } # Examples set up tracing.

clap = { version = "3", features = ["derive"] }
clap = { version = "4", features = ["derive"] }
test_dir = "0.2"

[build-dependencies]
Expand Down
14 changes: 5 additions & 9 deletions examples/verneuilctl.rs
Original file line number Diff line number Diff line change
Expand Up @@ -123,13 +123,13 @@ struct Restore {

/// The path to the source file that was replicated by Verneuil,
/// when it ran on `--hostname`.
#[clap(short, long, parse(from_os_str))]
#[clap(short, long)]
source_path: Option<PathBuf>,

/// The path to the reconstructed output file.
///
/// Defaults to stdout.
#[clap(short, long, parse(from_os_str))]
#[clap(short, long)]
out: Option<PathBuf>,
}

Expand Down Expand Up @@ -181,7 +181,6 @@ fn restore(cmd: Restore, config: Options) -> Result<()> {
/// the corresponding manifest file to stdout.
struct ManifestName {
/// The path to the source file that was replicated by Verneuil.
#[clap(parse(from_os_str))]
source: PathBuf,

/// The hostname (/etc/hostname) of the machine that replicated
Expand All @@ -205,7 +204,6 @@ fn manifest_name(cmd: ManifestName) -> Result<()> {
/// of the corresponding manifest file to `--out`, or stdout by default.
struct Manifest {
/// The path to the source file that was replicated by Verneuil.
#[clap(parse(from_os_str))]
source: PathBuf,

/// The hostname (/etc/hostname) of the machine that replicated
Expand All @@ -216,7 +214,7 @@ struct Manifest {
/// The path to the output manifest file.
///
/// Defaults to stdout.
#[clap(short, long, parse(from_os_str))]
#[clap(short, long)]
out: Option<PathBuf>,
}

Expand All @@ -239,7 +237,6 @@ fn manifest(cmd: Manifest, config: Options) -> Result<()> {
/// attempts to upload all the files pending replication in that directory.
struct Flush {
/// The replication spooling directory prefix.
#[clap(parse(from_os_str))]
spooling: PathBuf,
}

Expand All @@ -254,7 +251,6 @@ fn flush(cmd: Flush) -> Result<()> {
/// On success, prints the manifest name to stdout.
struct Sync {
/// The source sqlite database file.
#[clap(parse(from_os_str))]
source: PathBuf,

/// Whether to optimize the database before uploading it.
Expand Down Expand Up @@ -314,7 +310,7 @@ fn sync(cmd: Sync, config: Options) -> Result<()> {
}

#[derive(Debug, Parser)]
#[clap(setting = clap::AppSettings::TrailingVarArg)]
#[clap(trailing_var_arg = true)]
/// The verneuilctl shell utility accepts the path to a verneuil
/// manifest file, and opens the snapshot it describes in the sqlite
/// shell.
Expand Down Expand Up @@ -346,7 +342,7 @@ struct Shell {

/// The path to the source file that was replicated by Verneuil,
/// when it ran on `--hostname`.
#[clap(short, long, parse(from_os_str))]
#[clap(short, long)]
source_path: Option<PathBuf>,

/// The path to the sqlite3 shell executable; defaults to
Expand Down
10 changes: 7 additions & 3 deletions src/copier.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,15 @@
//! asynchronously acquiring the current "ready" buffer in any number
//! of replication directories, and sending the ready snapshot to
//! object stores like S3.
use core::num::NonZeroU32;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::ffi::OsStr;
use std::fs::File;
use std::future;
use std::future::Future;
use std::io::ErrorKind;
use std::num::NonZeroU32;
use std::num::NonZeroUsize;
use std::path::Path;
use std::path::PathBuf;
use std::sync::atomic::AtomicBool;
Expand Down Expand Up @@ -110,8 +111,11 @@ const COPY_REQUEST_JITTER: Duration = Duration::from_secs(600);
/// we expect during `COPY_REQUEST_MIN_AGE` to avoid spuriously losing
/// useful entries to LRU... but we still want a bound on the capacity,
/// because unbounded data structures are a bad idea.
const COPY_REQUEST_MEMORY: usize =
(1.5 * (COPY_RATE.get() as f64) * (COPY_REQUEST_MIN_AGE.as_secs() as f64)) as usize;
// Sized as 1 + 1.5x the number of copy requests we expect to admit during
// `COPY_REQUEST_MIN_AGE`.  The leading `1 +` guarantees the value is
// non-zero, and matching on `NonZeroUsize::new` is const-evaluable, so the
// constant can be built without `unsafe` (`new_unchecked` would otherwise
// need a SAFETY justification): an impossible zero fails at compile time.
const COPY_REQUEST_MEMORY: NonZeroUsize = match NonZeroUsize::new(
1 + (1.5 * (COPY_RATE.get() as f64) * (COPY_REQUEST_MIN_AGE.as_secs() as f64)) as usize,
) {
Some(memory) => memory,
None => panic!("COPY_REQUEST_MEMORY must be non-zero"),
};

/// Perform background work for one spooling directory approximately
/// once per BACKGROUND_SCAN_PERIOD.
Expand Down
3 changes: 2 additions & 1 deletion src/loader.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
use std::collections::HashMap;
use std::collections::HashSet;
use std::io::ErrorKind;
use std::num::NonZeroUsize;
use std::path::Path;
use std::sync::Arc;
use std::sync::Mutex;
Expand Down Expand Up @@ -53,7 +54,7 @@ const LOAD_RETRY_MULTIPLIER: f64 = 10.0;

/// Keep up to this many cached `Chunk`s alive, regardless of whether
/// there is any external strong reference to them.
const LRU_CACHE_SIZE: usize = 128;
const LRU_CACHE_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(128) };

/// Load chunks in a dedicated thread pool of this size.
const LOADER_POOL_SIZE: usize = 10;
Expand Down
4 changes: 2 additions & 2 deletions src/manifest_schema.rs
Original file line number Diff line number Diff line change
Expand Up @@ -497,7 +497,7 @@ pub(crate) fn extract_version_id(
#[instrument(err)]
pub(crate) fn update_version_id(file: &File, cached_uuid: Option<Uuid>) -> Result<()> {
use std::os::unix::io::AsRawFd;
use uuid::adapter::Hyphenated;
use uuid::fmt::Hyphenated;

extern "C" {
fn verneuil__setxattr(fd: i32, name: *const i8, buf: *const u8, bufsz: usize) -> isize;
Expand All @@ -513,7 +513,7 @@ pub(crate) fn update_version_id(file: &File, cached_uuid: Option<Uuid>) -> Resul
let mut buf = [0u8; Hyphenated::LENGTH];
let tag = cached_uuid
.unwrap_or_else(Uuid::new_v4)
.to_hyphenated()
.hyphenated()
.encode_lower(&mut buf);

#[cfg(feature = "no_xattr")]
Expand Down
3 changes: 2 additions & 1 deletion src/recent_work_set.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
use std::num::NonZeroUsize;
use std::time::Duration;
use std::time::Instant;

Expand Down Expand Up @@ -61,7 +62,7 @@ impl RecentWorkSet {
/// to `offset_range` will be subtracted from the work unit's
/// start timestamp. This avoids thundering herds by ensuring
/// work units expire at different times.
pub fn new(capacity: usize, offset_range: Duration) -> RecentWorkSet {
pub fn new(capacity: NonZeroUsize, offset_range: Duration) -> RecentWorkSet {
RecentWorkSet {
recent: lru::LruCache::new(capacity),
offset_range,
Expand Down