diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml
index d156f35..4441bbf 100644
--- a/.github/workflows/audit.yml
+++ b/.github/workflows/audit.yml
@@ -1,20 +1,20 @@
 name: Security audit
-
 on:
   schedule:
-    - cron: '40 10 * * *'
+    - cron: "0 0 * * *"
   push:
     paths:
-      - '**/Cargo.toml'
-      - '**/Cargo.lock'
-      - '**/audit.yml'
+      - "**/Cargo.toml"
+      - "**/Cargo.lock"
+      - "**/audit.yml"
   pull_request:
-
 jobs:
   audit:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions-rs/audit-check@v1
+      - uses: actions/checkout@v4
+      - name: generate Cargo.lock
+        run: cargo generate-lockfile
+      - uses: rustsec/audit-check@v2
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 5156f8c..6d01189 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -5,18 +5,18 @@ on: [pull_request, push]
 jobs:
   coverage:
     runs-on: ubuntu-latest
+    env:
+      CARGO_TERM_COLOR: always
     steps:
-      - uses: actions/checkout@v3
-      - name: Set nightly
-        run: rustup override set nightly
-      - name: Install llvm-tools-preview
-        run: rustup component add llvm-tools-preview
+      - uses: actions/checkout@v4
+      - name: Install Rust
+        run: rustup update stable
       - name: Install cargo-llvm-cov
        uses: taiki-e/install-action@cargo-llvm-cov
       - name: Generate code coverage
-        run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
+        run: cargo llvm-cov --workspace --lcov --output-path lcov.info
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v1
+        uses: codecov/codecov-action@v3
         with:
           files: lcov.info
           fail_ci_if_error: true
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 37eaf65..2a53edc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,18 @@
 
 All notable changes to this project will be documented in this file.
 
+## [0.15.0] - 2025-02-12
+
+### Miscellaneous Tasks
+
+- Update dependencies (Adrian Seyboldt)
+
+
+### Ci
+
+- Update coverage and audit ci (Adrian Seyboldt)
+
+
 ## [0.14.0] - 2024-12-12
 
 ### Documentation
@@ -24,6 +36,8 @@ All notable changes to this project will be documented in this file.
 
 - Update multiversion (Adrian Seyboldt)
 
+- Update version and changelog (Adrian Seyboldt)
+
 ### Refactor
diff --git a/Cargo.toml b/Cargo.toml
index ddbf95b..29d00dd 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "nuts-rs"
-version = "0.14.0"
+version = "0.15.0"
 authors = [
     "Adrian Seyboldt ",
     "PyMC Developers ",
@@ -18,16 +18,20 @@ opt-level = 2
 codegen-units = 1
 
 [dependencies]
-rand = { version = "0.8.5", features = ["small_rng"] }
-rand_distr = "0.4.3"
+rand = { version = "0.9.0", features = ["small_rng"] }
+rand_distr = "0.5.0"
 multiversion = "0.8.0"
-itertools = "0.13.0"
+itertools = "0.14.0"
 thiserror = "2.0.3"
-arrow = { version = "53.1.0", default-features = false, features = ["ffi"] }
-rand_chacha = "0.3.1"
+arrow = { version = "54.1.0", default-features = false, features = ["ffi"] }
+rand_chacha = "0.9.0"
 anyhow = "1.0.72"
-faer = { version = "0.19.4", default-features = false, features = ["std"] }
-pulp = "0.20.1"
+faer = { version = "0.21.4", default-features = false, features = [
+    "std",
+    "npy",
+    "linalg",
+] }
+pulp = "0.21.4"
 rayon = "1.10.0"
 
 [dev-dependencies]
@@ -37,6 +41,7 @@ criterion = "0.5.1"
 nix = "0.29.0"
 approx = "0.5.1"
 ndarray = "0.16.1"
+equator = "0.4.2"
 
 [[bench]]
 name = "sample"
diff --git a/src/cpu_math.rs b/src/cpu_math.rs
index 7305c23..4877ea1 100644
--- a/src/cpu_math.rs
+++ b/src/cpu_math.rs
@@ -13,27 +13,12 @@ use crate::{
 
 pub struct CpuMath<F: CpuLogpFunc> {
     logp_func: F,
     arch: pulp::Arch,
-    _parallel: faer::Parallelism<'static>,
 }
 
 impl<F: CpuLogpFunc> CpuMath<F> {
     pub fn new(logp_func: F) -> Self {
         let arch = pulp::Arch::new();
-        let parallel = faer::Parallelism::None;
-        Self {
-            logp_func,
-            arch,
-            _parallel: parallel,
-        }
-    }
-
-    pub fn with_parallel(logp_func: F, parallel: faer::Parallelism<'static>) -> Self {
-        let arch = pulp::Arch::new();
-        Self {
-            logp_func,
-            arch,
-            _parallel: parallel,
-        }
+        Self { logp_func, arch }
     }
 }
@@ -65,8 +50,9 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
         let mut vectors: Mat<f64> = Mat::zeros(ndim, nvecs);
 
         vectors.col_iter_mut().zip_eq(vals).for_each(|(col, vals)| {
-            col.try_as_slice_mut()
+            col.try_as_col_major_mut()
                 .expect("Array is not contiguous")
+                .as_slice_mut()
                 .copy_from_slice(vals)
         });
@@ -75,7 +61,11 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
         vectors
     }
 
     fn new_eig_values(&mut self, vals: &[f64]) -> Self::EigValues {
         let mut values: Col<f64> = Col::zeros(vals.len());
-        values.as_slice_mut().copy_from_slice(vals);
+        values
+            .try_as_col_major_mut()
+            .expect("Array is not contiguous")
+            .as_slice_mut()
+            .copy_from_slice(vals);
         values
     }
@@ -84,8 +74,16 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
         position: &Self::Vector,
         gradient: &mut Self::Vector,
     ) -> Result<f64, Self::LogpErr> {
-        self.logp_func
-            .logp(position.as_slice(), gradient.as_slice_mut())
+        self.logp_func.logp(
+            position
+                .try_as_col_major()
+                .expect("Array is not contiguous")
+                .as_slice(),
+            gradient
+                .try_as_col_major_mut()
+                .expect("Array is not contiguous")
+                .as_slice_mut(),
+        )
     }
 
     fn logp(&mut self, position: &[f64], gradient: &mut [f64]) -> Result<f64, Self::LogpErr> {
@@ -105,11 +103,11 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
         y: &Self::Vector,
     ) -> (f64, f64) {
         scalar_prods3(
-            positive1.as_slice(),
-            negative1.as_slice(),
-            positive2.as_slice(),
-            x.as_slice(),
-            y.as_slice(),
+            positive1.try_as_col_major().unwrap().as_slice(),
+            negative1.try_as_col_major().unwrap().as_slice(),
+            positive2.try_as_col_major().unwrap().as_slice(),
+            x.try_as_col_major().unwrap().as_slice(),
+            y.try_as_col_major().unwrap().as_slice(),
         )
     }
@@ -121,27 +119,32 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
         y: &Self::Vector,
     ) -> (f64, f64) {
         scalar_prods2(
-            positive1.as_slice(),
-            positive2.as_slice(),
-            x.as_slice(),
-            y.as_slice(),
+            positive1.try_as_col_major().unwrap().as_slice(),
+            positive2.try_as_col_major().unwrap().as_slice(),
+            x.try_as_col_major().unwrap().as_slice(),
+            y.try_as_col_major().unwrap().as_slice(),
         )
     }
 
     fn sq_norm_sum(&mut self, x: &Self::Vector, y: &Self::Vector) -> f64 {
-        x.as_slice()
+        x.try_as_col_major()
+            .unwrap()
+            .as_slice()
             .iter()
-            .zip(y.as_slice())
+            .zip(y.try_as_col_major().unwrap().as_slice())
             .map(|(&x, &y)| (x + y) * (x + y))
             .sum()
     }
 
     fn read_from_slice(&mut self, dest: &mut Self::Vector, source: &[f64]) {
-        dest.as_slice_mut().copy_from_slice(source);
+        dest.try_as_col_major_mut()
+            .unwrap()
+            .as_slice_mut()
+            .copy_from_slice(source);
     }
 
     fn write_to_slice(&mut self, source: &Self::Vector, dest: &mut [f64]) {
-        dest.copy_from_slice(source.as_slice())
+        dest.copy_from_slice(source.try_as_col_major().unwrap().as_slice())
     }
 
     fn copy_into(&mut self, array: &Self::Vector, dest: &mut Self::Vector) {
@@ -149,24 +152,37 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
     }
 
     fn axpy_out(&mut self, x: &Self::Vector, y: &Self::Vector, a: f64, out: &mut Self::Vector) {
-        axpy_out(x.as_slice(), y.as_slice(), a, out.as_slice_mut());
+        axpy_out(
+            x.try_as_col_major().unwrap().as_slice(),
+            y.try_as_col_major().unwrap().as_slice(),
+            a,
+            out.try_as_col_major_mut().unwrap().as_slice_mut(),
+        );
     }
 
     fn axpy(&mut self, x: &Self::Vector, y: &mut Self::Vector, a: f64) {
-        axpy(x.as_slice(), y.as_slice_mut(), a);
+        axpy(
+            x.try_as_col_major().unwrap().as_slice(),
+            y.try_as_col_major_mut().unwrap().as_slice_mut(),
+            a,
+        );
     }
 
     fn fill_array(&mut self, array: &mut Self::Vector, val: f64) {
-        array.fill(val);
+        faer::zip!(array).for_each(|faer::unzip!(pos)| *pos = val);
     }
 
     fn array_all_finite(&mut self, array: &Self::Vector) -> bool {
-        array.is_all_finite()
+        let mut ok = true;
+        faer::zip!(array).for_each(|faer::unzip!(val)| ok = ok & val.is_finite());
+        ok
     }
 
     fn array_all_finite_and_nonzero(&mut self, array: &Self::Vector) -> bool {
         self.arch.dispatch(|| {
             array
+                .try_as_col_major()
+                .unwrap()
                 .as_slice()
                 .iter()
                 .all(|&x| x.is_finite() & (x != 0f64))
@@ -179,7 +195,11 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
         array2: &Self::Vector,
         dest: &mut Self::Vector,
     ) {
-        multiply(array1.as_slice(), array2.as_slice(), dest.as_slice_mut())
+        multiply(
+            array1.try_as_col_major().unwrap().as_slice(),
+            array2.try_as_col_major().unwrap().as_slice(),
+            dest.try_as_col_major_mut().unwrap().as_slice_mut(),
+        )
     }
 
     fn array_mult_eigs(
@@ -190,16 +210,19 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
         stds: &Self::Vector,
         rhs: &Self::Vector,
         dest: &mut Self::Vector,
         vecs: &Self::EigVectors,
         vals: &Self::EigValues,
     ) {
-        let rhs = stds.column_vector_as_diagonal() * rhs;
+        let rhs = stds.as_diagonal() * rhs;
         let trafo = vecs.transpose() * (&rhs);
-        let inner_prod = vecs * (vals.column_vector_as_diagonal() * (&trafo) - (&trafo)) + rhs;
-        let scaled = stds.column_vector_as_diagonal() * inner_prod;
+        let inner_prod = vecs * (vals.as_diagonal() * (&trafo) - (&trafo)) + rhs;
+        let scaled = stds.as_diagonal() * inner_prod;
         let _ = replace(dest, scaled);
     }
 
     fn array_vector_dot(&mut self, array1: &Self::Vector, array2: &Self::Vector) -> f64 {
-        vector_dot(array1.as_slice(), array2.as_slice())
+        vector_dot(
+            array1.try_as_col_major().unwrap().as_slice(),
+            array2.try_as_col_major().unwrap().as_slice(),
+        )
     }
 
     fn array_gaussian(
@@ -209,9 +232,11 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
         rng: &mut R,
         dest: &mut Self::Vector,
         stds: &Self::Vector,
     ) {
         let dist = rand_distr::StandardNormal;
-        dest.as_slice_mut()
+        dest.try_as_col_major_mut()
+            .unwrap()
+            .as_slice_mut()
             .iter_mut()
-            .zip(stds.as_slice().iter())
+            .zip(stds.try_as_col_major().unwrap().as_slice().iter())
             .for_each(|(p, &s)| {
                 let norm: f64 = rng.sample(dist);
                 *p = s * norm;
@@ -228,14 +253,18 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
     ) {
         let mut draw: Col<f64> = Col::zeros(self.dim());
         let dist = rand_distr::StandardNormal;
-        draw.as_slice_mut().iter_mut().for_each(|p| {
-            *p = rng.sample(dist);
-        });
+        draw.try_as_col_major_mut()
+            .unwrap()
+            .as_slice_mut()
+            .iter_mut()
+            .for_each(|p| {
+                *p = rng.sample(dist);
+            });
 
         let trafo = vecs.transpose() * (&draw);
-        let inner_prod = vecs * (vals.column_vector_as_diagonal() * (&trafo) - (&trafo)) + draw;
+        let inner_prod = vecs * (vals.as_diagonal() * (&trafo) - (&trafo)) + draw;
 
-        let scaled = scale.column_vector_as_diagonal() * inner_prod;
+        let scaled = scale.as_diagonal() * inner_prod;
 
         let _ = replace(dest, scaled);
     }
@@ -249,9 +278,16 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
     ) {
         self.arch.dispatch(|| {
             izip!(
-                mean.as_slice_mut().iter_mut(),
-                variance.as_slice_mut().iter_mut(),
-                value.as_slice()
+                mean.try_as_col_major_mut()
+                    .unwrap()
+                    .as_slice_mut()
+                    .iter_mut(),
+                variance
+                    .try_as_col_major_mut()
+                    .unwrap()
+                    .as_slice_mut()
+                    .iter_mut(),
+                value.try_as_col_major().unwrap().as_slice()
             )
             .for_each(|(mean, var, x)| {
                 let diff = x - *mean;
@@ -272,9 +308,17 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
     ) {
         self.arch.dispatch(|| {
             izip!(
-                variance_out.as_slice_mut().iter_mut(),
-                inv_std.as_slice_mut().iter_mut(),
-                draw_var.as_slice().iter(),
+                variance_out
+                    .try_as_col_major_mut()
+                    .unwrap()
+                    .as_slice_mut()
+                    .iter_mut(),
+                inv_std
+                    .try_as_col_major_mut()
+                    .unwrap()
+                    .as_slice_mut()
+                    .iter_mut(),
+                draw_var.try_as_col_major().unwrap().as_slice().iter(),
             )
             .for_each(|(var_out, inv_std_out, &draw_var)| {
                 let draw_var = draw_var * scale;
@@ -303,10 +347,18 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
     ) {
         self.arch.dispatch(|| {
             izip!(
-                variance_out.as_slice_mut().iter_mut(),
-                inv_std.as_slice_mut().iter_mut(),
-                draw_var.as_slice().iter(),
-                grad_var.as_slice().iter(),
+                variance_out
+                    .try_as_col_major_mut()
+                    .unwrap()
+                    .as_slice_mut()
+                    .iter_mut(),
+                inv_std
+                    .try_as_col_major_mut()
+                    .unwrap()
+                    .as_slice_mut()
+                    .iter_mut(),
+                draw_var.try_as_col_major().unwrap().as_slice().iter(),
+                grad_var.try_as_col_major().unwrap().as_slice().iter(),
             )
             .for_each(|(var_out, inv_std_out, &draw_var, &grad_var)| {
                 let val = (draw_var / grad_var).sqrt();
@@ -334,9 +386,17 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
     ) {
         self.arch.dispatch(|| {
             izip!(
-                variance_out.as_slice_mut().iter_mut(),
-                inv_std.as_slice_mut().iter_mut(),
-                gradient.as_slice().iter(),
+                variance_out
+                    .try_as_col_major_mut()
+                    .unwrap()
+                    .as_slice_mut()
+                    .iter_mut(),
+                inv_std
+                    .try_as_col_major_mut()
+                    .unwrap()
+                    .as_slice_mut()
+                    .iter_mut(),
+                gradient.try_as_col_major().unwrap().as_slice().iter(),
             )
             .for_each(|(var_out, inv_std_out, &grad_var)| {
                 let val = grad_var.abs().clamp(clamp.0, clamp.1).recip();
@@ -348,7 +408,12 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
     }
 
     fn eigs_as_array(&mut self, source: &Self::EigValues) -> Box<[f64]> {
-        source.as_slice().to_vec().into()
+        source
+            .try_as_col_major()
+            .unwrap()
+            .as_slice()
+            .to_vec()
+            .into()
     }
 
     fn inv_transform_normalize(
@@ -361,10 +426,22 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
     ) -> Result<f64, Self::LogpErr> {
         self.logp_func.inv_transform_normalize(
             params,
-            untransformed_position.as_slice(),
-            untransofrmed_gradient.as_slice(),
-            transformed_position.as_slice_mut(),
-            transformed_gradient.as_slice_mut(),
+            untransformed_position
+                .try_as_col_major()
+                .unwrap()
+                .as_slice(),
+            untransofrmed_gradient
+                .try_as_col_major()
+                .unwrap()
+                .as_slice(),
+            transformed_position
+                .try_as_col_major_mut()
+                .unwrap()
+                .as_slice_mut(),
+            transformed_gradient
+                .try_as_col_major_mut()
+                .unwrap()
+                .as_slice_mut(),
         )
     }
@@ -378,10 +455,22 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
     ) -> Result<(f64, f64), Self::LogpErr> {
         self.logp_func.init_from_untransformed_position(
             params,
-            untransformed_position.as_slice(),
-            untransformed_gradient.as_slice_mut(),
-            transformed_position.as_slice_mut(),
-            transformed_gradient.as_slice_mut(),
+            untransformed_position
+                .try_as_col_major()
+                .unwrap()
+                .as_slice(),
+            untransformed_gradient
+                .try_as_col_major_mut()
+                .unwrap()
+                .as_slice_mut(),
+            transformed_position
+                .try_as_col_major_mut()
+                .unwrap()
+                .as_slice_mut(),
+            transformed_gradient
+                .try_as_col_major_mut()
+                .unwrap()
+                .as_slice_mut(),
         )
     }
@@ -395,10 +484,19 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
     ) -> Result<(f64, f64), Self::LogpErr> {
         self.logp_func.init_from_transformed_position(
             params,
-            untransformed_position.as_slice_mut(),
-            untransformed_gradient.as_slice_mut(),
-            transformed_position.as_slice(),
-            transformed_gradient.as_slice_mut(),
+            untransformed_position
+                .try_as_col_major_mut()
+                .unwrap()
+                .as_slice_mut(),
+            untransformed_gradient
+                .try_as_col_major_mut()
+                .unwrap()
+                .as_slice_mut(),
+            transformed_position.try_as_col_major().unwrap().as_slice(),
+            transformed_gradient
+                .try_as_col_major_mut()
+                .unwrap()
+                .as_slice_mut(),
         )
     }
@@ -412,8 +510,8 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
     ) -> Result<(), Self::LogpErr> {
         self.logp_func.update_transformation(
             rng,
-            untransformed_positions.map(|x| x.as_slice()),
-            untransformed_gradients.map(|x| x.as_slice()),
+            untransformed_positions.map(|x| x.try_as_col_major().unwrap().as_slice()),
+            untransformed_gradients.map(|x| x.try_as_col_major().unwrap().as_slice()),
             untransformed_logp,
             params,
         )
@@ -428,8 +526,14 @@ impl<F: CpuLogpFunc> Math for CpuMath<F> {
     ) -> Result<Self::TransformParams, Self::LogpErr> {
         self.logp_func.new_transformation(
             rng,
-            untransformed_position.as_slice(),
-            untransfogmed_gradient.as_slice(),
+            untransformed_position
+                .try_as_col_major()
+                .unwrap()
+                .as_slice(),
+            untransfogmed_gradient
+                .try_as_col_major()
+                .unwrap()
+                .as_slice(),
             chain,
         )
     }
diff --git a/src/hamiltonian.rs b/src/hamiltonian.rs
index 8841ca5..1584904 100644
--- a/src/hamiltonian.rs
+++ b/src/hamiltonian.rs
@@ -1,5 +1,7 @@
 use std::sync::Arc;
 
+use rand_distr::{Distribution, StandardUniform};
+
 use crate::{
     nuts::Collector,
     sampler_stats::SamplerStats,
@@ -32,9 +34,9 @@ pub enum Direction {
     Backward,
 }
 
-impl rand::distributions::Distribution<Direction> for rand::distributions::Standard {
+impl Distribution<Direction> for StandardUniform {
     fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> Direction {
-        if rng.gen::<bool>() {
+        if rng.random::<bool>() {
             Direction::Forward
         } else {
             Direction::Backward
diff --git a/src/low_rank_mass_matrix.rs b/src/low_rank_mass_matrix.rs
index e2f2b6d..384c641 100644
--- a/src/low_rank_mass_matrix.rs
+++ b/src/low_rank_mass_matrix.rs
@@ -4,7 +4,7 @@ use arrow::{
     array::{ArrayBuilder, FixedSizeListBuilder, ListBuilder, PrimitiveBuilder, StructArray},
     datatypes::{Field, Float64Type, UInt64Type},
 };
-use faer::{Col, Mat, Scale};
+use faer::{Col, ColRef, Mat, MatRef, Scale};
 use itertools::Itertools;
 
 use crate::{
@@ -16,6 +16,18 @@ use crate::{
     Math, NutsError,
 };
 
+fn mat_all_finite(mat: &MatRef<f64>) -> bool {
+    let mut ok = true;
+    faer::zip!(mat).for_each(|faer::unzip!(val)| ok = ok & val.is_finite());
+    ok
+}
+
+fn col_all_finite(mat: &ColRef<f64>) -> bool {
+    let mut ok = true;
+    faer::zip!(mat).for_each(|faer::unzip!(val)| ok = ok & val.is_finite());
+    ok
+}
+
 #[derive(Debug)]
 struct InnerMatrix<M: Math> {
     vecs: M::EigVectors,
@@ -28,12 +40,12 @@ impl<M: Math> InnerMatrix<M> {
     fn new(math: &mut M, mut vals: Col<f64>, vecs: Mat<f64>) -> Self {
         let vecs = math.new_eig_vectors(
             vecs.col_iter()
-                .map(|col| col.try_as_slice().expect("Array not contiguous")),
+                .map(|col| col.try_as_col_major().unwrap().as_slice()),
         );
 
-        let vals_math = math.new_eig_values(vals.as_slice());
+        let vals_math = math.new_eig_values(vals.try_as_col_major().unwrap().as_slice());
         vals.iter_mut().for_each(|x| *x = x.sqrt().recip());
-        let vals_inv_math = math.new_eig_values(vals.as_slice());
+        let vals_inv_math = math.new_eig_values(vals.try_as_col_major().unwrap().as_slice());
 
         Self {
             vecs,
@@ -85,15 +97,21 @@ impl<M: Math> LowRankMassMatrix<M> {
     }
 
     fn update(&mut self, math: &mut M, mut stds: Col<f64>, vals: Col<f64>, vecs: Mat<f64>) {
-        math.read_from_slice(&mut self.stds, stds.as_slice());
+        math.read_from_slice(&mut self.stds, stds.try_as_col_major().unwrap().as_slice());
 
         stds.iter_mut().for_each(|x| *x = x.recip());
-        math.read_from_slice(&mut self.inv_stds, stds.as_slice());
+        math.read_from_slice(
+            &mut self.inv_stds,
+            stds.try_as_col_major().unwrap().as_slice(),
+        );
 
         stds.iter_mut().for_each(|x| *x = x.recip() * x.recip());
-        math.read_from_slice(&mut self.variance, stds.as_slice());
+        math.read_from_slice(
+            &mut self.variance,
+            stds.try_as_col_major().unwrap().as_slice(),
+        );
 
-        if vals.is_all_finite() & vecs.is_all_finite() {
+        if col_all_finite(&vals.as_ref()) & mat_all_finite(&vecs.as_ref()) {
             self.inner = Some(InnerMatrix::new(math, vals, vecs));
         } else {
             self.inner = None;
@@ -398,14 +416,14 @@ impl LowRankMassMatrixStrategy {
         let stds = rescale_points(&mut draws, &mut grads);
 
-        let svd_draws = draws.thin_svd();
-        let svd_grads = grads.thin_svd();
+        let svd_draws = draws.thin_svd().expect("Failed to compute SVD");
+        let svd_grads = grads.thin_svd().expect("Failed to compute SVD");
 
-        let subspace = faer::concat![[svd_draws.u(), svd_grads.u()]];
+        let subspace = faer::concat![[svd_draws.U(), svd_grads.U()]];
 
         let subspace_qr = subspace.col_piv_qr();
 
-        let subspace_basis = subspace_qr.compute_thin_q();
+        let subspace_basis = subspace_qr.compute_thin_Q();
 
         let draws_proj = subspace_basis.transpose() * (&draws);
         let grads_proj = subspace_basis.transpose() * (&grads);
@@ -421,7 +439,7 @@ impl LowRankMassMatrixStrategy {
             .collect_vec();
 
         let vals = filtered.iter().map(|x| *x.0).collect_vec();
-        let vals = faer::col::from_slice(&vals).to_owned();
+        let vals = ColRef::from_slice(&vals).to_owned();
 
         let vecs_vec = filtered.into_iter().map(|x| x.1).collect_vec();
         let mut vecs = Mat::zeros(subspace_basis.ncols(), vals.nrows());
@@ -493,38 +511,44 @@ fn estimate_mass_matrix(draws: Mat<f64>, grads: Mat<f64>, gamma: f64) -> (Col<f64>, Mat<f64>) {
 
 fn spd_mean(cov_draws: Mat<f64>, cov_grads: Mat<f64>) -> Mat<f64> {
-    let eigs_grads = cov_grads.selfadjoint_eigendecomposition(faer::Side::Lower);
+    let eigs_grads = cov_grads
+        .self_adjoint_eigen(faer::Side::Lower)
+        .expect("Failed to compute eigenvalue decomposition");
 
-    let u = eigs_grads.u();
-    let eigs = eigs_grads.s().column_vector().to_owned();
+    let u = eigs_grads.U();
+    let eigs = eigs_grads.S().column_vector().to_owned();
 
     let mut eigs_sqrt = eigs.clone();
     eigs_sqrt.iter_mut().for_each(|val| *val = val.sqrt());
 
-    let cov_grads_sqrt = u * eigs_sqrt.column_vector_into_diagonal() * u.transpose();
+    let cov_grads_sqrt = u * eigs_sqrt.into_diagonal() * u.transpose();
 
     let m = (&cov_grads_sqrt) * cov_draws * cov_grads_sqrt;
 
-    let m_eig = m.selfadjoint_eigendecomposition(faer::Side::Lower);
+    let m_eig = m
+        .self_adjoint_eigen(faer::Side::Lower)
decomposition"); - let m_u = m_eig.u(); - let mut m_s = m_eig.s().column_vector().to_owned(); + let m_u = m_eig.U(); + let mut m_s = m_eig.S().column_vector().to_owned(); m_s.iter_mut().for_each(|val| *val = val.sqrt()); - let m_sqrt = m_u * m_s.column_vector_into_diagonal() * m_u.transpose(); + let m_sqrt = m_u * m_s.into_diagonal() * m_u.transpose(); let mut eigs_grads_inv = eigs; eigs_grads_inv .iter_mut() .for_each(|val| *val = val.sqrt().recip()); - let grads_inv_sqrt = u * eigs_grads_inv.column_vector_into_diagonal() * u.transpose(); + let grads_inv_sqrt = u * eigs_grads_inv.into_diagonal() * u.transpose(); (&grads_inv_sqrt) * m_sqrt * grads_inv_sqrt } @@ -612,10 +636,13 @@ impl MassMatrixAdaptStrategy for LowRankMassMatrixStrategy { mod test { use std::ops::AddAssign; - use faer::{assert_matrix_eq, Col, Mat}; + use equator::Cmp; + use faer::{utils::approx::ApproxEq, Col, Mat}; use rand::{rngs::SmallRng, Rng, SeedableRng}; use rand_distr::StandardNormal; + use crate::low_rank_mass_matrix::mat_all_finite; + use super::{estimate_mass_matrix, spd_mean}; #[test] @@ -637,7 +664,14 @@ mod test { .column_vector_mut() .add_assign(expected_diag); - faer::assert_matrix_eq!(out, expected, comp = ulp, tol = 8); + let comp = ApproxEq { + abs_tol: 1e-10, + rel_tol: 1e-10, + }; + + faer::zip!(&out, &expected).for_each(|faer::unzip!(out, expected)| { + comp.test(out, expected).unwrap(); + }); } #[test] @@ -652,12 +686,17 @@ mod test { let (vals, vecs) = estimate_mass_matrix(draws, grads, 0.0001); assert!(vals.iter().cloned().all(|x| x > 0.)); - assert!(vecs.is_all_finite()); - assert_matrix_eq!( - vals.as_2d(), - Col::full(20, 1.).as_2d(), - comp = abs, - tol = 1e-4 - ); + assert!(mat_all_finite(&vecs.as_ref())); + + let comp = ApproxEq { + abs_tol: 1e-5, + rel_tol: 1e-5, + }; + + let expected = Col::full(20, 1.); + + faer::zip!(&vals, &expected).for_each(|faer::unzip!(out, expected)| { + comp.test(out, expected).unwrap(); + }); } } diff --git a/src/nuts.rs b/src/nuts.rs index 18e782e..4184967 100644 --- a/src/nuts.rs +++ b/src/nuts.rs @@ -199,7 +199,7 @@ impl, C: Collector> NutsTree { }; if (other.log_size >= self_log_size) - || (rng.gen_bool((other.log_size - self_log_size).exp())) + || (rng.random_bool((other.log_size - self_log_size).exp())) { self.draw = other.draw; } @@ -275,7 +275,7 @@ where let mut tree = NutsTree::new(init.clone()); while tree.depth < options.maxdepth { - let direction: Direction = rng.gen(); + let direction: Direction = rng.random(); tree = match tree.extend(math, rng, hamiltonian, direction, collector, options) { ExtendResult::Ok(tree) => tree, ExtendResult::Turning(tree) => { @@ -300,7 +300,7 @@ where #[cfg(test)] mod tests { - use rand::{rngs::ThreadRng, thread_rng}; + use rand::{rng, rngs::ThreadRng}; use crate::{ adapt_strategy::test_logps::NormalLogp, @@ -318,7 +318,7 @@ mod tests { let math = CpuMath::new(func); let settings = DiagGradNutsSettings::default(); - let mut rng = thread_rng(); + let mut rng = rng(); let mut chain = settings.new_chain(0, math, &mut rng); diff --git a/src/sampler.rs b/src/sampler.rs index 785af0e..9e3f00a 100644 --- a/src/sampler.rs +++ b/src/sampler.rs @@ -177,7 +177,7 @@ impl Settings for LowRankNutsSettings { &self, chain: u64, mut math: M, - rng: &mut R, + mut rng: &mut R, ) -> Self::Chain { let num_tune = self.num_tune; let strategy = GlobalStrategy::new(&mut math, self.adapt_options, num_tune, chain); @@ -193,7 +193,7 @@ impl Settings for LowRankNutsSettings { check_turning: self.check_turning, }; - let rng = 
-        let rng = rand::rngs::SmallRng::from_rng(rng).expect("Could not seed rng");
+        let rng = rand::rngs::SmallRng::try_from_rng(&mut rng).expect("Could not seed rng");
 
         NutsChain::new(math, potential, strategy, options, rng, chain)
     }
@@ -233,7 +233,7 @@ impl Settings for DiagGradNutsSettings {
         &self,
         chain: u64,
         mut math: M,
-        rng: &mut R,
+        mut rng: &mut R,
     ) -> Self::Chain {
         let num_tune = self.num_tune;
         let strategy = GlobalStrategy::new(&mut math, self.adapt_options, num_tune, chain);
@@ -252,7 +252,7 @@ impl Settings for DiagGradNutsSettings {
             check_turning: self.check_turning,
         };
 
-        let rng = rand::rngs::SmallRng::from_rng(rng).expect("Could not seed rng");
+        let rng = rand::rngs::SmallRng::try_from_rng(&mut rng).expect("Could not seed rng");
 
         NutsChain::new(math, potential, strategy, options, rng, chain)
     }
@@ -292,7 +292,7 @@ impl Settings for TransformedNutsSettings {
         &self,
         chain: u64,
         mut math: M,
-        rng: &mut R,
+        mut rng: &mut R,
     ) -> Self::Chain {
         let num_tune = self.num_tune;
         let max_energy_error = self.max_energy_error;
@@ -308,7 +308,7 @@ impl Settings for TransformedNutsSettings {
             check_turning: self.check_turning,
        };
 
-        let rng = rand::rngs::SmallRng::from_rng(rng).expect("Could not seed rng");
+        let rng = rand::rngs::SmallRng::try_from_rng(&mut rng).expect("Could not seed rng");
 
         NutsChain::new(math, hamiltonian, strategy, options, rng, chain)
     }
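The same two API migrations repeat through every hunk above. For reference, the sketch below condenses them into one place — a minimal, hypothetical `demo` function assuming the faer 0.21 and rand 0.9 versions pinned in Cargo.toml, built only from calls that appear in this diff; it is not part of nuts-rs.

```rust
// Minimal sketch of the faer 0.21 and rand 0.9 migrations applied in this
// diff. `demo` is a hypothetical helper for illustration only.
use faer::Col;
use rand::{rngs::SmallRng, Rng, SeedableRng};

fn demo() {
    // faer 0.21: the try_as_col_major()/try_as_col_major_mut() accessors
    // replace the direct as_slice()/as_slice_mut() calls used with faer 0.19;
    // they fail for non-contiguous storage, hence the expect/unwrap.
    let mut values: Col<f64> = Col::zeros(4);
    values
        .try_as_col_major_mut()
        .expect("Array is not contiguous")
        .as_slice_mut()
        .copy_from_slice(&[1.0, 2.0, 3.0, 4.0]);

    // faer 0.21: element-wise traversal goes through the zip!/unzip! macros,
    // as in fill_array and array_all_finite above.
    let mut all_finite = true;
    faer::zip!(&values).for_each(|faer::unzip!(v)| all_finite &= v.is_finite());
    assert!(all_finite);

    // rand 0.9 renames: thread_rng() -> rng(), gen() -> random(), and seeding
    // a SmallRng from another generator is the fallible try_from_rng.
    let mut rng = rand::rng();
    let _coin: bool = rng.random();
    let _small = SmallRng::try_from_rng(&mut rng).expect("Could not seed rng");
}
```

The finiteness reductions in the diff use plain `&` rather than short-circuiting `&&` inside the `zip!` loops, presumably to keep the loop body branch-free and easier for the compiler to vectorize.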