16 changes: 16 additions & 0 deletions MCintegration/__init__.py
@@ -1,3 +1,19 @@
# MCintegration/__init__.py
#
# Monte Carlo Integration Package
#
# This package provides tools for numerical integration using various Monte Carlo methods.
# It includes plain Monte Carlo, Markov Chain Monte Carlo (MCMC), and VEGAS algorithms
# for efficient multi-dimensional integration.
#
# The package provides:
# - Base distributions for sampling
# - Transformation maps for importance sampling
# - Various integration algorithms
# - Utilities for running averages and error estimation
# - Multi-CPU/GPU support through torch.distributed
#

from .integrators import MonteCarlo, MarkovChainMonteCarlo, setup
from .maps import Vegas
from .utils import RAvg, set_seed, get_device
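
# Illustrative usage sketch (hypothetical: the constructor and call signatures
# below are not shown in this diff and are assumed for illustration only):
#
#   import MCintegration as mci
#
#   mci.set_seed(42)            # assumed to take an integer seed
#   device = mci.get_device()   # picks an available CPU/GPU device
#   vegas_map = mci.Vegas(...)  # adaptive importance-sampling map
#   mc = mci.MonteCarlo(...)    # plain Monte Carlo integrator
#   result = mc(...)            # an RAvg carrying mean and error estimate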
122 changes: 109 additions & 13 deletions MCintegration/base.py
@@ -1,60 +1,126 @@
# base.py
# This file contains the base distribution classes for Monte Carlo integration methods
# It defines foundational classes for sampling distributions and transformations

import torch
from torch import nn
import numpy as np
import sys
from MCintegration.utils import get_device

# Constants for numerical stability
# Small but safe non-zero value
MINVAL = 10 ** (sys.float_info.min_10_exp + 50)
MAXVAL = 10 ** (sys.float_info.max_10_exp - 50)  # Large but safe value
EPSILON = 1e-16 # Small value to ensure numerical stability
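
# For reference, on a standard IEEE-754 double platform
# (sys.float_info.min_10_exp == -307, max_10_exp == 308) these evaluate to
# MINVAL == 1e-257 and MAXVAL == 1e258, leaving a ~50-decade safety margin
# before underflow or overflow.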


class BaseDistribution(nn.Module):
"""
    Base distribution class for flow-based models.
    This is an abstract base class that provides structure for probability distributions
    used in Monte Carlo integration. Parameters do not depend on target variables
    (unlike a VAE encoder).
"""

def __init__(self, dim, device="cpu", dtype=torch.float32):
"""
Initialize BaseDistribution.

Args:
dim (int): Dimensionality of the distribution
device (str or torch.device): Device to use for computation
dtype (torch.dtype): Data type for computations
"""
super().__init__()
self.dtype = dtype
self.dim = dim
self.device = device

def sample(self, batch_size=1, **kwargs):
"""Samples from base distribution
"""
Sample from the base distribution.

Args:
num_samples: Number of samples to draw from the distriubtion
batch_size (int): Number of samples to draw
**kwargs: Additional arguments

Returns:
Samples drawn from the distribution
tuple: (samples, log_det_jacobian)

Raises:
NotImplementedError: This is an abstract method
"""
raise NotImplementedError

def sample_with_detJ(self, batch_size=1, **kwargs):
"""
Sample from base distribution with Jacobian determinant (not log).

Args:
batch_size (int): Number of samples to draw
**kwargs: Additional arguments

Returns:
tuple: (samples, det_jacobian)
"""
u, detJ = self.sample(batch_size, **kwargs)
        detJ.exp_()  # Convert log_det to det
return u, detJ
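
    # For example, if a subclass's sample() fills log_detJ with log(10.0),
    # sample_with_detJ() returns detJ filled with 10.0.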


class Uniform(BaseDistribution):
"""
    Multivariate uniform distribution over [0,1]^dim.
    Samples are drawn uniformly from the hypercube [0,1]^dim.
"""

def __init__(self, dim, device="cpu", dtype=torch.float32):
"""
Initialize Uniform distribution.

Args:
dim (int): Dimensionality of the distribution
device (str or torch.device): Device to use for computation
dtype (torch.dtype): Data type for computations
"""
super().__init__(dim, device, dtype)

def sample(self, batch_size=1, **kwargs):
"""
Sample from uniform distribution over [0,1]^dim.

Args:
batch_size (int): Number of samples to draw
**kwargs: Additional arguments

Returns:
tuple: (uniform samples, log_det_jacobian=0)
"""
# torch.manual_seed(0) # test seed
        u = torch.rand((batch_size, self.dim),
                       device=self.device, dtype=self.dtype)
        log_detJ = torch.zeros(
            batch_size, device=self.device, dtype=self.dtype)
return u, log_detJ
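
# Example (illustrative sketch of the class above):
#
#   dist = Uniform(dim=3)
#   u, log_detJ = dist.sample(batch_size=5)
#   # u has shape (5, 3) with entries uniform on [0, 1);
#   # log_detJ is all zeros, since the identity map has unit Jacobian.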


class LinearMap(nn.Module):
"""
    Element-wise linear transformation of the form x = u * A + b,
    where A holds per-dimension scaling factors and b per-dimension offsets.
"""

def __init__(self, A, b, device=None, dtype=torch.float32):
"""
Initialize LinearMap with scaling A and offset b.

Args:
A (list, numpy.ndarray, torch.Tensor): Scaling factors
b (list, numpy.ndarray, torch.Tensor): Offset values
device (str or torch.device): Device to use for computation
dtype (torch.dtype): Data type for computations
"""
if device is None:
self.device = get_device()
else:
@@ -67,24 +133,54 @@
elif isinstance(A, torch.Tensor):
self.A = A.to(dtype=self.dtype, device=self.device)
else:
raise ValueError("'A' must be a list, numpy array, or torch tensor.")
raise ValueError(

Check warning on line 136 in MCintegration/base.py

View check run for this annotation

Codecov / codecov/patch

MCintegration/base.py#L136

Added line #L136 was not covered by tests
"'A' must be a list, numpy array, or torch tensor.")

if isinstance(b, (list, np.ndarray)):
self.b = torch.tensor(b, dtype=self.dtype, device=self.device)
elif isinstance(b, torch.Tensor):
self.b = b.to(dtype=self.dtype, device=self.device)
else:
raise ValueError("'b' must be a list, numpy array, or torch tensor.")
raise ValueError(

Check warning on line 144 in MCintegration/base.py

View check run for this annotation

Codecov / codecov/patch

MCintegration/base.py#L144

Added line #L144 was not covered by tests
"'b' must be a list, numpy array, or torch tensor.")

# Pre-compute determinant of Jacobian for efficiency
self._detJ = torch.prod(self.A)

def forward(self, u):
"""
Apply forward transformation: x = u * A + b.

Args:
u (torch.Tensor): Input points

Returns:
tuple: (transformed points, log_det_jacobian)
"""
return u * self.A + self.b, torch.log(self._detJ.repeat(u.shape[0]))

def forward_with_detJ(self, u):
"""
Apply forward transformation with Jacobian determinant (not log).

Args:
u (torch.Tensor): Input points

Returns:
tuple: (transformed points, det_jacobian)
"""
u, detJ = self.forward(u)
        detJ.exp_()  # Convert log_det to det
return u, detJ

def inverse(self, x):
"""
Apply inverse transformation: u = (x - b) / A.

Args:
x (torch.Tensor): Input points

Returns:
tuple: (transformed points, log_det_jacobian)
"""
return (x - self.b) / self.A, torch.log(self._detJ.repeat(x.shape[0]))
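

# Minimal, self-contained demo of the classes above (an illustrative sketch;
# run this module directly to execute it):
if __name__ == "__main__":
    # Draw uniform points in [0,1]^2, then map them onto the box
    # [1,3] x [2,7] via x = u * A + b with A = (2, 5) and b = (1, 2).
    dist = Uniform(dim=2, device="cpu")
    u, _ = dist.sample(batch_size=4)

    m = LinearMap(A=[2.0, 5.0], b=[1.0, 2.0], device="cpu")
    x, log_detJ = m.forward(u)

    # The Jacobian determinant of the map is prod(A) = 10, so every entry
    # of log_detJ equals log(10).
    assert torch.allclose(log_detJ, torch.log(torch.tensor(10.0)))

    # inverse() recovers the original points up to floating-point error.
    u_back, _ = m.inverse(x)
    assert torch.allclose(u, u_back, atol=1e-6)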