Skip to content
428 changes: 187 additions & 241 deletions Cargo.lock

Large diffs are not rendered by default.

8 changes: 3 additions & 5 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -30,11 +30,9 @@ rusqlite_migration = { version = "2.3", features = ["from-directory"] }
include_dir = { version = "0.7" }

# Logging
tracing = { version = "0.1", git = "https://github.com/tokio-rs/tracing.git" }
tracing-subscriber = { version = "0.3.20", features = [
"env-filter",
], git = "https://github.com/tokio-rs/tracing.git" }
tracing-appender = { version = "0.2", git = "https://github.com/tokio-rs/tracing.git" }
tracing = { version = "0.1" }
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing-appender = { version = "0.2" }

# Misc
unicode-segmentation = { version = "1.12" } # Limit preview width by grapheme clusters
Expand Down
4 changes: 4 additions & 0 deletions benches/base.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,12 @@ use std::sync::LazyLock;
use clipvault::cli::{GetDelArgs, ListArgs, StoreArgs};
use clipvault::commands::{get, list, store};
use clipvault::defaults;
use divan::AllocProfiler;
use tempfile::NamedTempFile;

// Route every heap allocation through divan's profiler so benchmark results
// include allocation counts/bytes in addition to timing.
#[global_allocator]
static ALLOC: AllocProfiler = AllocProfiler::system();

/// Get temporary file for DB.
fn get_temp() -> NamedTempFile {
NamedTempFile::new().expect("couldn't create tempfile")
Expand Down
2 changes: 1 addition & 1 deletion src/cli.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use std::{path::PathBuf, str::FromStr};

use clap::{Parser, Subcommand, ValueHint, command};
use clap::{Parser, Subcommand, ValueHint};
use regex::Regex;

use crate::defaults;
Expand Down
34 changes: 20 additions & 14 deletions src/commands/list.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
use std::{
io::{Cursor, Write, stdout},
io::{BufWriter, Cursor, Write, stdout},
path::Path,
};

Expand Down Expand Up @@ -102,23 +102,29 @@ fn execute_inner(path_db: &Path, args: ListArgs, show_output: bool) -> Result<()
return Ok(());
}

// Combine previews into a single string so that all the output can be written to STDOUT at the same time
let output = entries
let stdout = stdout();
let stdout = stdout.lock();

// [`BufWriter`] for more efficient, buffered writes
let mut writer = BufWriter::with_capacity(8 * 1024, stdout);

for entry in entries
.into_iter()
.map(|entry| preview(entry.id, &entry.content, preview_width))
.collect::<Vec<_>>()
.join("\n");

// Used for benchmarks - don't actually write to stdout
if !show_output {
return Ok(());
{
if show_output {
writer
.write(&entry.into_bytes())
.into_diagnostic()
.context("failed to write to STDOUT")?;
writer
.write(b"\n")
.into_diagnostic()
.context("failed to write to STDOUT")?;
}
}

let mut stdout = stdout().lock();
ignore_broken_pipe(writeln!(&mut stdout, "{output}",))
.into_diagnostic()
.context("failed to write to STDOUT")?;
ignore_broken_pipe(stdout.flush())
ignore_broken_pipe(writer.flush())
.into_diagnostic()
.context("failed to flush STDOUT")?;

Expand Down
9 changes: 9 additions & 0 deletions src/database/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,15 @@ pub fn init_db(path_db: &Path) -> Result<Connection> {
conn.pragma_update(None, "journal_mode", "WAL")
.into_diagnostic()
.context("failed to apply PRAGMA: journal mode")?;
conn.pragma_update(None, "synchronous", "normal")
.into_diagnostic()
.context("failed to apply PRAGMA: synchronous")?;
conn.pragma_update(None, "journal_size_limit", "6144000")
.into_diagnostic()
.context("failed to apply PRAGMA: journal size limit")?;
conn.pragma_update(None, "cache_size", "10000")
.into_diagnostic()
.context("failed to apply PRAGMA: cache size")?;

tracing::trace!("applying migrations");
MIGRATIONS
Expand Down
2 changes: 2 additions & 0 deletions src/database/queries/estimated_free_space.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Estimate how many bytes a VACUUM could reclaim: number of free (unused)
-- pages on the freelist multiplied by the database page size, exposed via
-- SQLite's pragma table-valued functions.
SELECT (freelist_count * page_size) AS freelist_size
FROM pragma_freelist_count, pragma_page_size
25 changes: 24 additions & 1 deletion src/database/queries/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,14 +46,24 @@ pub fn delete_all_entries(conn: &Connection) -> Result<()> {
conn.execute(include_str!("./delete_all.sql"), params![])
.map(|_| ())
.into_diagnostic()
.context("failed to execute: wipe entries")
.context("failed to execute: wipe entries")?;

vacuum(conn)
}

/// Perform a `VACUUM` on the DB, reducing its size by clearing deleted entries and defragmenting.
#[tracing::instrument(skip(conn))]
fn vacuum(conn: &Connection) -> Result<()> {
tracing::debug!("vacuuming DB");

let estimated_free = get_estimated_free_space(conn).unwrap_or(1_000_000);
if estimated_free < 1_000_000 {
tracing::debug!(
"estimated freed space ({estimated_free}) under the threshold - skipping VACUUM"
);
return Ok(());
}

conn.execute("VACUUM;", params![])
.map(|_| ())
.into_diagnostic()
Expand Down Expand Up @@ -110,6 +120,19 @@ pub fn get_entry_by_id(conn: &Connection, id: u64) -> Result<ClipboardEntry> {
.context("couldn't get entry by ID")
}

/// Returns an estimate of the number of bytes a `VACUUM` could reclaim.
///
/// Computed as `pragma_freelist_count * pragma_page_size`, i.e. the total
/// size of the database's free-page list.
///
/// # Errors
///
/// Returns an error if the query fails or the `freelist_size` column cannot
/// be read as a `u64`.
#[tracing::instrument(skip(conn))]
pub fn get_estimated_free_space(conn: &Connection) -> Result<u64> {
    tracing::debug!("getting estimate of space that can be freed");

    conn.query_one(
        include_str!("./estimated_free_space.sql"),
        params![],
        |row| row.get("freelist_size"),
    )
    .into_diagnostic()
    // Fixed: previous context message ("couldn't get entry by ID") was
    // copy-pasted from `get_entry_by_id` and misattributed failures here.
    .context("couldn't get estimated free space")
}

#[tracing::instrument(skip(conn))]
pub fn delete_entry_by_id(conn: &Connection, id: u64) -> Result<()> {
tracing::debug!("deleting specific entry by ID");
Expand Down
3 changes: 3 additions & 0 deletions src/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ use std::{
use unicode_segmentation::UnicodeSegmentation;

/// Returns the given number of bytes as a human-readable string representation.
#[must_use]
pub fn human_bytes(mut bytes: usize) -> String {
let unit = if bytes < 1_000 {
"B"
Expand All @@ -24,6 +25,7 @@ pub fn human_bytes(mut bytes: usize) -> String {
}

/// Truncates a string to the given number of characters.
#[must_use]
pub fn truncate(s: &str, max_graphemes: usize) -> Cow<'_, str> {
let graphemes = s.graphemes(true).collect::<Vec<_>>();

Expand All @@ -40,6 +42,7 @@ pub fn truncate(s: &str, max_graphemes: usize) -> Cow<'_, str> {
}

/// Current Unix timestamp in seconds - based on system time.
#[must_use]
pub fn now() -> u64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
Expand Down
49 changes: 37 additions & 12 deletions tests/cmd.rs
Original file line number Diff line number Diff line change
Expand Up @@ -357,21 +357,36 @@ fn test_db_shrinks() {
.expect("can't read temp DB metadata")
.size()
};

// Initialise DB with a call that won't actually add any entries
get_cmd(db).arg("list").assert().success();
let initial_size = get_size();

// Grow DB with many small entries
let store_random = || {
for i in 0..500 {
get_cmd(db)
.arg("store")
// Store enough random data to make the DB grow past the initial size and exceed the VACUUM limit
.write_stdin("random_data".repeat(i))
.assert()
.success();
}
assert!(initial_size < get_size(), "DB size did not increase");
};
// Grow DB with one large entry
let store_large = || {
get_cmd(db)
.arg("store")
// Store enough random data to make the DB grow past the initial size
.write_stdin("random_data".repeat(200))
.args(["store"])
.write_stdin("E".repeat(1_000_000))
.assert()
.success();
assert!(initial_size < get_size(), "DB size did not increase");
};

// Initialise DB with a call that won't actually add any entries
get_cmd(db).arg("list").assert().success();
let initial_size = get_size();

// DELETE WITH ID
store_random();
store_large();
assert_ne!(initial_size, get_size());
assert!(initial_size < get_size(), "DB size did not increase");

get_cmd(db).args(["delete", "1"]).assert().success();
Expand All @@ -382,7 +397,7 @@ fn test_db_shrinks() {
);

// DELETE WITH RELATIVE INDEXING
store_random();
store_large();
assert!(initial_size < get_size(), "DB size did not increase");

get_cmd(db)
Expand All @@ -397,17 +412,27 @@ fn test_db_shrinks() {

// DELETE WITH EXCEEDED ENTRY COUNT
store_random();
let max_size = get_size();
assert!(initial_size < get_size(), "DB size did not increase");

get_cmd(db)
.args(["store", "--max-entries", "1"])
.write_stdin("test")
.assert()
.success();
assert!(
max_size > get_size(),
assert_eq!(
initial_size,
get_size(),
"DB size did not shrink after deleting entries by exceeding the max entry count"
);

// DELETE ALL
store_random();
get_cmd(db).arg("clear").assert().success();
assert_eq!(
initial_size,
get_size(),
"DB did not shrink after deleting all entries"
);
}

// PROP TESTS
Expand Down
Loading