2 changes: 1 addition & 1 deletion .github/workflows/coverage.yml
@@ -14,7 +14,7 @@ jobs:
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Generate code coverage
run: cargo llvm-cov --workspace --lcov --output-path lcov.info
run: cargo llvm-cov --workspace --lcov --output-path lcov.info --all-features
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
env:
54 changes: 7 additions & 47 deletions .github/workflows/test.yml
@@ -2,36 +2,10 @@ on: [push, pull_request]

name: Run tests

jobs:
check:
name: Check
strategy:
matrix:
rust:
- stable
- nightly
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v2

- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{ matrix.rust }}
override: true

- name: Run cargo check nightly features
if: ${{ matrix.rust == 'nightly' }}
uses: actions-rs/cargo@v1
with:
command: check
- name: Run cargo check
uses: actions-rs/cargo@v1
with:
command: check
env:
CARGO_TERM_COLOR: always

jobs:
test:
name: Test Suite
runs-on: ubuntu-latest
@@ -42,21 +16,7 @@ jobs:
- nightly
steps:
- name: Checkout sources
uses: actions/checkout@v2

- name: Install stable toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true

- name: Run cargo test with nightly features
if: ${{ matrix.rust == 'nightly' }}
uses: actions-rs/cargo@v1
with:
command: test
- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test
uses: actions/checkout@v4
- run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }}
- run: cargo build --verbose --all-features
- run: cargo test --verbose --all-features
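
To reproduce these CI steps locally, a minimal sketch assuming a standard rustup installation:

rustup update nightly && rustup default nightly
cargo build --verbose --all-features
cargo test --verbose --all-features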
30 changes: 26 additions & 4 deletions Cargo.toml
@@ -5,7 +5,7 @@ authors = [
"Adrian Seyboldt <[email protected]>",
"PyMC Developers <[email protected]>",
]
edition = "2021"
edition = "2024"
license = "MIT"
repository = "https://github.com/pymc-devs/nuts-rs"
keywords = ["statistics", "bayes"]
@@ -22,22 +22,44 @@ rand = { version = "0.9.0", features = ["small_rng"] }
rand_distr = "0.5.0"
itertools = "0.14.0"
thiserror = "2.0.3"
arrow = { version = "55.1.0", default-features = false, features = ["ffi"] }
rand_chacha = "0.9.0"
anyhow = "1.0.72"
faer = { version = "0.22.6", default-features = false, features = ["linalg"] }
pulp = "0.21.4"
rayon = "1.10.0"
zarrs = { version = "0.21.0", features = [
"filesystem",
"gzip",
"sharding",
"async",
], optional = true }
ndarray = { version = "0.16.1", optional = true }
nuts-derive = { path = "./nuts-derive" }
nuts-storable = { path = "./nuts-storable" }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1.0", features = ["rt"], optional = true }

[dev-dependencies]
proptest = "1.6.0"
pretty_assertions = "1.4.0"
criterion = "0.6.0"
criterion = "0.7.0"
nix = { version = "0.30.0", features = ["sched"] }
approx = "0.5.1"
ndarray = "0.16.1"
equator = "0.4.2"
serde_json = "1.0"
ndarray = "0.16.1"
tempfile = "3.0"
zarrs_object_store = "0.4.3"
object_store = "0.12.0"
tokio = { version = "1.0", features = ["rt", "rt-multi-thread"] }

[features]
zarr = ["dep:zarrs", "dep:tokio"]
ndarray = ["dep:ndarray"]

[[bench]]
name = "sample"
harness = false

[workspace]
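
The zarr and ndarray features declared above are optional and off by default. A minimal sketch of enabling them for a local test run (feature names taken from the [features] table above):

cargo test --features zarr,ndarray
cargo test --all-features   # matches the CI and coverage invocations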
27 changes: 24 additions & 3 deletions benches/sample.rs
@@ -1,9 +1,10 @@
use std::hint::black_box;

use criterion::{criterion_group, criterion_main, Criterion};
use nix::sched::{sched_setaffinity, CpuSet};
use criterion::{Criterion, criterion_group, criterion_main};
use nix::sched::{CpuSet, sched_setaffinity};
use nix::unistd::Pid;
use nuts_rs::{Chain, CpuLogpFunc, CpuMath, LogpError, Math, Settings};
use nuts_storable::HasDims;
use rand::SeedableRng;
use rayon::ThreadPoolBuilder;
use thiserror::Error;
@@ -22,11 +23,20 @@ impl LogpError for PosteriorLogpError {
}
}

impl HasDims for PosteriorDensity {
fn dim_sizes(&self) -> std::collections::HashMap<String, u64> {
vec![("unconstrained_parameter".to_string(), self.dim() as u64)]
.into_iter()
.collect()
}
}

impl CpuLogpFunc for PosteriorDensity {
type LogpError = PosteriorLogpError;
type ExpandedVector = Vec<f64>;

// Only used for transforming adaptation.
type TransformParams = ();
type FlowParameters = ();

// We define a 10 dimensional normal distribution
fn dim(&self) -> usize {
@@ -48,6 +58,17 @@ impl CpuLogpFunc for PosteriorDensity {
.sum();
return Ok(logp);
}

fn expand_vector<R>(
&mut self,
_rng: &mut R,
array: &[f64],
) -> Result<Self::ExpandedVector, nuts_rs::CpuMathError>
where
R: rand::Rng + ?Sized,
{
Ok(array.to_vec())
}
}

fn make_sampler(dim: usize) -> impl Chain<CpuMath<PosteriorDensity>> {
178 changes: 178 additions & 0 deletions examples/adam_adaptation.rs
@@ -0,0 +1,178 @@
//! Example demonstrating the Adam optimizer for step size adaptation.
//!
//! This example shows how to use the Adam optimizer instead of dual averaging
//! for adapting the step size in NUTS.

use nuts_rs::{
AdamOptions, Chain, CpuLogpFunc, CpuMath, DiagGradNutsSettings, LogpError, Settings,
StepSizeAdaptMethod,
};
use nuts_storable::HasDims;
use thiserror::Error;

// Define a function that computes the unnormalized posterior density
// and its gradient.
#[derive(Debug)]
struct PosteriorDensity {}

// The density might fail in a recoverable or non-recoverable manner...
#[derive(Debug, Error)]
enum PosteriorLogpError {}
impl LogpError for PosteriorLogpError {
fn is_recoverable(&self) -> bool {
false
}
}

impl HasDims for PosteriorDensity {
fn dim_sizes(&self) -> std::collections::HashMap<String, u64> {
vec![("unconstrained_parameter".to_string(), self.dim() as u64)]
.into_iter()
.collect()
}
}

impl CpuLogpFunc for PosteriorDensity {
type LogpError = PosteriorLogpError;
type ExpandedVector = Vec<f64>;

// Only used for transforming adaptation.
type FlowParameters = ();

// We define a 10 dimensional normal distribution
fn dim(&self) -> usize {
10
}

// The normal likelihood with mean 3 and its gradient.
fn logp(&mut self, position: &[f64], grad: &mut [f64]) -> Result<f64, Self::LogpError> {
let mu = 3f64;
let logp = position
.iter()
.copied()
.zip(grad.iter_mut())
.map(|(x, grad)| {
let diff = x - mu;
*grad = -diff;
-diff * diff / 2f64
})
.sum();
return Ok(logp);
}

fn expand_vector<R>(
&mut self,
_rng: &mut R,
array: &[f64],
) -> Result<Self::ExpandedVector, nuts_rs::CpuMathError>
where
R: rand::Rng + ?Sized,
{
Ok(array.to_vec())
}
}

fn main() {
println!("Running NUTS with Adam step size adaptation...");

// Create sampler settings with Adam optimizer
let mut settings = DiagGradNutsSettings::default();

// Configure for Adam adaptation
settings
.adapt_options
.step_size_settings
.adapt_options
.method = StepSizeAdaptMethod::Adam;

// Set Adam options
let adam_options = AdamOptions {
beta1: 0.9,
beta2: 0.999,
epsilon: 1e-8,
learning_rate: 0.05,
};

settings.adapt_options.step_size_settings.adapt_options.adam = adam_options;

// Standard MCMC settings
settings.num_tune = 1000;
settings.num_draws = 1000;
settings.maxdepth = 10;

// Create the posterior density function
let logp_func = PosteriorDensity {};
let math = CpuMath::new(logp_func);

// Initialize the sampler
let chain = 0;
let mut rng = rand::rng();
let mut sampler = settings.new_chain(chain, math, &mut rng);

// Set initial position
let initial_position = vec![0f64; 10];
sampler
.set_position(&initial_position)
.expect("Unrecoverable error during init");

// Collect samples
let mut trace = vec![];
let mut stats = vec![];

// Sampling with progress reporting
println!("Warmup phase:");
for i in 0..settings.num_tune {
if i % 100 == 0 {
println!("\rWarmup: {}/{}", i, settings.num_tune);
}

let (draw, info) = sampler.draw().expect("Unrecoverable error during sampling");
println!("{:?}", info.step_size);
trace.push(draw);
stats.push(info);
}
println!("\rWarmup: {}/{}", settings.num_tune, settings.num_tune);

println!("\nSampling phase:");
for i in 0..settings.num_draws {
if i % 100 == 0 {
print!("\rSampling: {}/{}", i, settings.num_draws);
}

let (draw, info) = sampler.draw().expect("Unrecoverable error during sampling");
trace.push(draw);
stats.push(info);
}
println!("\rSampling: {}/{}", settings.num_draws, settings.num_draws);

// Calculate mean of samples (post-warmup)
let warmup_samples = settings.num_tune as usize;
let mut means = vec![0.0; 10];

for i in warmup_samples..trace.len() {
for (j, mean) in means.iter_mut().enumerate() {
*mean += trace[i][j];
}
}

for mean in &mut means {
*mean /= settings.num_draws as f64;
}

// Print results
println!("\nResults after {} samples:", settings.num_draws);
println!("Target mean: 3.0 for all dimensions");
println!("Estimated means:");
for (i, mean) in means.iter().enumerate() {
println!("Dimension {}: {:.4}", i, mean);
}

// Print adaptation statistics
let last_stats = &stats[stats.len() - 1];
println!("\nFinal adaptation statistics:");
println!("Step size: {:.6}", last_stats.step_size);
// Note: the full acceptance stats are in the Progress struct, but we don't have direct access to mean_tree_accept
println!("Number of steps: {}", last_stats.num_steps);

println!("\nSampling completed successfully!");
}
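
Assuming the example builds with the default feature set (it only uses the nuts_rs and nuts_storable items shown above), it can be run with:

cargo run --example adam_adaptation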