Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ include-package-data = true
where = ["src"]

[tool.setuptools.package-data]
resins = ["instrument_data/*.yaml"]
resins = ["instrument_data/*.yaml", "instrument_data/*.npz"]

[tool.ruff]
line-length = 88
Expand Down
2 changes: 1 addition & 1 deletion src/resins/instrument.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@

import numpy as np
import yaml
from typing import Iterator, Iterable, Optional, Union, TYPE_CHECKING
from typing import Iterator, Optional, Union, TYPE_CHECKING

from .models import MODELS

Expand Down
9 changes: 9 additions & 0 deletions src/resins/instrument_data/tosca.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -143,3 +143,12 @@ version:
<<: *tosca_configurations_forward
# TODO: Recompute below once average_secondary_flight_path for Forward has been recomputed
distance_sample_analyzer: 0.2312830382624369 # m (as above)
perrichon2022: "perrichon2022_v1"
perrichon2022_v1:
function: "scaled_tabulated"
citation: ["A. Perrichon, Nucl. Instrum. Methods Phys. Res., Sect. A, 2022, 167401. https://doi.org/10.1016/j.nima.2022.167401"]
parameters:
defaults: {}
restrictions: {}
npz: "tosca_perrichon2022_v1.npz"
configurations: {}
Binary file not shown.
2 changes: 2 additions & 0 deletions src/resins/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
GenericGaussian1DModel,
GenericLorentzian1DModel,
)
from .lookuptables import ScaledTabulatedModel
from .polynomial import PolynomialModel1D, DiscontinuousPolynomialModel1D
from .panther_abins import PantherAbINSModel
from .pychop import PyChopModelFermi, PyChopModelNonFermi, PyChopModelCNCS, PyChopModelLET
Expand All @@ -44,5 +45,6 @@
'pychop_fit_fermi': PyChopModelFermi,
'pychop_fit_cncs': PyChopModelCNCS,
'pychop_fit_let': PyChopModelLET,
'scaled_tabulated': ScaledTabulatedModel,
}
"""A dictionary mapping the unique name of a model to the corresponding class."""
192 changes: 192 additions & 0 deletions src/resins/models/lookuptables.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,192 @@
"""
A model based on interpolated lookup tables

All classes within are exposed for reference only and should not be instantiated directly. For
obtaining the :term:`resolution function` of an :term:`instrument`, please use the
`resins.instrument.Instrument.get_resolution_function` method.
"""

from __future__ import annotations

from dataclasses import dataclass
import importlib
import importlib.resources
from typing import ClassVar, TYPE_CHECKING

import numpy as np
from numpy.polynomial import Polynomial
from scipy.interpolate import RegularGridInterpolator

from .model_base import InstrumentModel, ModelData
from .mixins import SimpleBroaden1DMixin

if TYPE_CHECKING:
from jaxtyping import Float


@dataclass(frozen=True, slots=True, kw_only=True)
class ScaledTabulatedModelData(ModelData):
    """
    Data for the `ScaledTabulatedModel` :term:`model`.

    Attributes
    ----------
    function
        The name of the function, i.e. the alias for `ScaledTabulatedModel`.
    citation
        The citation for the model. Please use this to look up more details and cite the model.
    npz
        File name of the ``.npz`` lookup-table file, relative to the instrument yaml files.
    restrictions
        See the base `ModelData` class.
    defaults
        See the base `ModelData` class.
    """

    npz: str


class ScaledTabulatedModel(SimpleBroaden1DMixin, InstrumentModel):
    """
    Model using a lookup table to model a 1D :term:`instrument`.

    This allows non-Gaussian shapes to be produced. For smooth interpolation
    and data efficiency, the x-axis is scaled in proportion to an approximated
    standard deviation. This standard deviation is fitted to a polynomial
    function of energy.

    Parameters
    ----------
    model_data
        The data associated with the model for a given version of a given instrument.

    Attributes
    ----------
    input
        The names of the columns in the ``points`` array expected by all computation methods, i.e.
        the names of the independent variables that the model models.
    data_class
        Reference to the `ScaledTabulatedModelData` type.
    data
        Mapping of array name to array, loaded from the model's ``.npz`` lookup-table file.
    polynomial
        Polynomial mapping energy transfer (meV) to the kernel width (sigma) scale factor.
    citation
    """

    input = ("energy_transfer",)

    data_class: ClassVar[type[ScaledTabulatedModelData]] = ScaledTabulatedModelData

    def __init__(self, model_data: ScaledTabulatedModelData, **_):
        super().__init__(model_data)

        resource = importlib.resources.files("resins.instrument_data") / model_data.npz
        # ``as_file`` also works when the package is not installed as a plain
        # directory. The arrays are copied out eagerly because ``np.load`` on
        # an .npz is lazy and would otherwise keep the file handle open for
        # the lifetime of the model.
        with importlib.resources.as_file(resource) as path, np.load(path) as npz:
            self.data = {name: npz[name] for name in npz.files}

        # Approximated kernel standard deviation as a function of energy
        # transfer; used both for `get_characteristics` and for scaling the
        # tabulated kernel shape.
        self.polynomial = Polynomial(
            coef=self.data["coef"],
            domain=self.data["domain"],
            window=self.data["window"],
        )
        # 2D interpolator over (energy_transfer, scaled kernel energy).
        # Points outside the table evaluate to 0 (``fill_value``) instead of
        # raising, so out-of-range mesh values simply contribute nothing.
        self._interp = RegularGridInterpolator(
            (self.data["energy_transfer"], self.data["kernel_energies"]),
            self.data["table"],
            method="linear",
            bounds_error=False,
            fill_value=0.0,
        )

    def get_characteristics(
        self, points: Float[np.ndarray, "energy_transfer dimension=1"]
    ) -> dict[str, Float[np.ndarray, "sigma"]]:
        """
        Computes the broadening width at each value of energy transfer (`points`).

        The model approximates the broadening using the Gaussian distribution, so the returned
        widths are in the form of the standard deviation (sigma).

        Parameters
        ----------
        points
            The energy transfer in meV at which to compute the width in sigma of the kernel.
            This *must* be a ``sample`` x 1 2D array where ``sample`` is the number of energy
            transfers.

        Returns
        -------
        characteristics
            The characteristics of the broadening function, i.e. the Gaussian width as sigma.
        """
        return {"sigma": self.polynomial(points[:, 0])}

    def get_kernel(
        self,
        points: Float[np.ndarray, "sample dimension=1"],
        mesh: Float[np.ndarray, "mesh"],
    ) -> Float[np.ndarray, "sample mesh"]:
        """Computes a zero-centered broadening kernel at each value of energy transfer.

        Parameters
        ----------
        points
            The energy transfer or momentum scalar for which to compute the
            kernel. This *must* be a Nx1 2D array where N is the number of w/Q
            values.
        mesh
            The mesh on which to evaluate the kernel. A 1D array.

        Returns
        -------
        kernel
            The broadening kernel at each value of `points` as given by this
            model, computed on the `mesh` and centered on zero. This is a 2D N
            x M array where N is the number of w/Q values and M is the length
            of the `mesh` array.

        Raises
        ------
        ValueError
            If `points` is not a 2D array with exactly one column.
        """
        # Explicit validation rather than ``assert`` so the check survives
        # running Python with the -O flag.
        if points.ndim != 2 or points.shape[1] != 1:
            raise ValueError("points must be a 2D Nx1 array of energy transfers")

        energy = points  # (N, 1) column of energy transfers

        # The kernel shape is tabulated on a width-normalised energy axis:
        # divide the mesh by the width to look up the shape, then divide the
        # result by the same factor so the kernel area is preserved under
        # the change of variables.
        scale_factors = self.polynomial(energy)  # (N, 1)
        scaled_x_values = mesh / scale_factors  # (N, M) via broadcasting

        # Clip lookup energies to the tabulated maximum; the width scaling
        # should give a reasonable extrapolation from there.
        energy = np.minimum(energy, self.data["energy_transfer"].max())

        # Pair every (clipped) energy with every scaled mesh value so the 2D
        # interpolator can be evaluated in a single call.
        energy_expanded = np.broadcast_to(energy, scaled_x_values.shape)
        lookup_mesh = np.stack([energy_expanded, scaled_x_values], axis=-1)
        return self._interp(lookup_mesh) / scale_factors

    def get_peak(
        self,
        points: Float[np.ndarray, "sample dimension=1"],
        mesh: Float[np.ndarray, "mesh"],
    ) -> Float[np.ndarray, "sample mesh"]:
        """
        Computes the broadening kernel at each value of energy transfer.

        Parameters
        ----------
        points
            The energy transfer in meV for which to compute the kernel. This
            *must* be a Nx1 2D array where N is the number of energy transfers.
        mesh
            The mesh on which to evaluate the kernel. This is a 1D array which
            *must* span the `points` transfer space of interest.

        Returns
        -------
        kernel
            The broadening kernel at each value of `points` as given by this
            model, computed on the `mesh` and centered on the corresponding
            energy transfer. This is a 2D N x M array where N is the number of
            w/Q values and M is the length of the `mesh` array.
        """
        # Re-centre the common mesh on each energy transfer and evaluate the
        # zero-centred kernel on it, one point at a time (each point has its
        # own shifted mesh, so `get_kernel` cannot be called once for all).
        shifted_kernels = [
            self.get_kernel(energy[np.newaxis, :], mesh - energy[0])
            for energy in points
        ]
        return np.vstack(shifted_kernels)
Loading