Skip to content

Commit a6f14eb

Browse files
authored
Merge pull request #92 from scipp/orso-multi-dataset
docs: example writing multiple datasets to orso file
2 parents f8f6b39 + a36f6e0 commit a6f14eb

File tree

3 files changed

+199
-32
lines changed

3 files changed

+199
-32
lines changed

docs/user-guide/amor/amor-reduction.ipynb

Lines changed: 89 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -143,8 +143,32 @@
143143
"source": [
144144
"## Computing sample reflectivity\n",
145145
"\n",
146-
"We now compute the sample reflectivity from 3 runs that used different sample rotation angles.\n",
147-
"The different rotation angles cover different ranges in $Q$."
146+
"We now compute the sample reflectivity from 4 runs that used different sample rotation angles.\n",
147+
"The measurements at different rotation angles cover different ranges of $Q$."
148+
]
149+
},
150+
{
151+
"cell_type": "markdown",
152+
"metadata": {},
153+
"source": [
154+
"In this tutorial we use some Amor data files we have received.\n",
155+
"The file paths to the tutorial files are obtained by calling:"
156+
]
157+
},
158+
{
159+
"cell_type": "code",
160+
"execution_count": null,
161+
"metadata": {},
162+
"outputs": [],
163+
"source": [
164+
"amor.data.amor_sample_run('608')"
165+
]
166+
},
167+
{
168+
"cell_type": "markdown",
169+
"metadata": {},
170+
"source": [
171+
"When you encounter `amor.data.amor_sample_run` you should imagine replacing that with a path to your own dataset."
148172
]
149173
},
150174
{
@@ -154,17 +178,31 @@
154178
"outputs": [],
155179
"source": [
156180
"runs = {\n",
157-
" '608': sc.scalar(0.85, unit='deg'),\n",
158-
" '609': sc.scalar(2.25, unit='deg'),\n",
159-
" '610': sc.scalar(3.65, unit='deg'),\n",
160-
" '611': sc.scalar(5.05, unit='deg'),\n",
181+
" '608': {\n",
182+
" # The sample rotation values in the files are slightly off, so we replace\n",
183+
" # them with corrected values.\n",
184+
" SampleRotation[SampleRun]: sc.scalar(0.85, unit='deg'),\n",
185+
" Filename[SampleRun]: amor.data.amor_sample_run('608'),\n",
186+
" },\n",
187+
" '609': {\n",
188+
" SampleRotation[SampleRun]: sc.scalar(2.25, unit='deg'),\n",
189+
" Filename[SampleRun]: amor.data.amor_sample_run('609'),\n",
190+
" },\n",
191+
" '610': {\n",
192+
" SampleRotation[SampleRun]: sc.scalar(3.65, unit='deg'),\n",
193+
" Filename[SampleRun]: amor.data.amor_sample_run('610'),\n",
194+
" },\n",
195+
" '611': {\n",
196+
" SampleRotation[SampleRun]: sc.scalar(5.05, unit='deg'),\n",
197+
" Filename[SampleRun]: amor.data.amor_sample_run('611'),\n",
198+
" },\n",
161199
"}\n",
162200
"\n",
163201
"reflectivity = {}\n",
164-
"for file, angle in runs.items():\n",
165-
" workflow[Filename[SampleRun]] = amor.data.amor_sample_run(file)\n",
166-
" workflow[SampleRotation[SampleRun]] = angle\n",
167-
" reflectivity[file] = workflow.compute(ReflectivityOverQ).hist()\n",
202+
"for run_number, params in runs.items():\n",
203+
" workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n",
204+
" workflow[SampleRotation[SampleRun]] = params[SampleRotation[SampleRun]]\n",
205+
" reflectivity[run_number] = workflow.compute(ReflectivityOverQ).hist()\n",
168206
"\n",
169207
"sc.plot(reflectivity, norm='log', vmin=1e-4)"
170208
]
@@ -186,7 +224,12 @@
186224
"source": [
187225
"from ess.reflectometry.tools import scale_reflectivity_curves_to_overlap\n",
188226
"\n",
189-
"scaled_reflectivity_curves, scale_factors = scale_reflectivity_curves_to_overlap(reflectivity.values())\n",
227+
"scaled_reflectivity_curves, scale_factors = scale_reflectivity_curves_to_overlap(\n",
228+
" reflectivity.values(),\n",
229+
" # Optionally specify a Q-interval where the reflectivity is known to be 1.0\n",
230+
" critical_edge_interval=(sc.scalar(0.01, unit='1/angstrom'), sc.scalar(0.014, unit='1/angstrom'))\n",
231+
")\n",
232+
"\n",
190233
"sc.plot(dict(zip(reflectivity.keys(), scaled_reflectivity_curves, strict=True)), norm='log', vmin=1e-5)"
191234
]
192235
},
@@ -236,14 +279,14 @@
236279
"source": [
237280
"# Start by computing the `ReflectivityData` for each of the files\n",
238281
"diagnostics = {}\n",
239-
"for file, angle in runs.items():\n",
240-
" workflow[Filename[SampleRun]] = amor.data.amor_sample_run(file)\n",
241-
" workflow[SampleRotation[SampleRun]] = angle\n",
242-
" diagnostics[file] = workflow.compute((ReflectivityData, ThetaBins[SampleRun]))\n",
282+
"for run_number, params in runs.items():\n",
283+
" workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n",
284+
" workflow[SampleRotation[SampleRun]] = params[SampleRotation[SampleRun]]\n",
285+
" diagnostics[run_number] = workflow.compute((ReflectivityData, ThetaBins[SampleRun]))\n",
243286
"\n",
244287
"# Scale the results using the scale factors computed earlier\n",
245-
"for key, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n",
246-
" diagnostics[key][ReflectivityData] *= scale_factor"
288+
"for run_number, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n",
289+
" diagnostics[run_number][ReflectivityData] *= scale_factor"
247290
]
248291
},
249292
{
@@ -263,8 +306,8 @@
263306
"from ess.amor.figures import wavelength_theta_figure\n",
264307
"\n",
265308
"wavelength_theta_figure(\n",
266-
" [res[ReflectivityData] for res in diagnostics.values()],\n",
267-
" theta_bins=[res[ThetaBins[SampleRun]] for res in diagnostics.values()],\n",
309+
" [result[ReflectivityData] for result in diagnostics.values()],\n",
310+
" theta_bins=[result[ThetaBins[SampleRun]] for result in diagnostics.values()],\n",
268311
" q_edges_to_display=(sc.scalar(0.018, unit='1/angstrom'), sc.scalar(0.113, unit='1/angstrom'))\n",
269312
")"
270313
]
@@ -318,8 +361,8 @@
318361
"source": [
319362
"from ess.amor.figures import wavelength_z_figure\n",
320363
"\n",
321-
"workflow[Filename[SampleRun]] = amor.data.amor_sample_run('608')\n",
322-
"workflow[SampleRotation[SampleRun]] = runs['608']\n",
364+
"workflow[Filename[SampleRun]] = runs['608'][Filename[SampleRun]]\n",
365+
"workflow[SampleRotation[SampleRun]] = runs['608'][SampleRotation[SampleRun]]\n",
323366
"wavelength_z_figure(\n",
324367
" workflow.compute(FootprintCorrectedData[SampleRun]),\n",
325368
" wavelength_bins=workflow.compute(WavelengthBins),\n",
@@ -430,6 +473,30 @@
430473
")"
431474
]
432475
},
476+
{
477+
"cell_type": "markdown",
478+
"metadata": {},
479+
"source": [
480+
"Now let's repeat this for all the sample measurements!\n",
481+
"To do that we can use a utility in `ess.reflectometry.tools`:"
482+
]
483+
},
484+
{
485+
"cell_type": "code",
486+
"execution_count": null,
487+
"metadata": {},
488+
"outputs": [],
489+
"source": [
490+
"from ess.reflectometry.tools import orso_datasets_from_measurements\n",
491+
"\n",
492+
"datasets = orso_datasets_from_measurements(\n",
493+
" workflow,\n",
494+
" runs.values(),\n",
495+
" # Optionally scale the curves to overlap using `scale_reflectivity_curves_to_overlap`\n",
496+
" scale_to_overlap=True\n",
497+
")"
498+
]
499+
},
433500
{
434501
"cell_type": "markdown",
435502
"metadata": {},
@@ -444,7 +511,7 @@
444511
"metadata": {},
445512
"outputs": [],
446513
"source": [
447-
"iofq_dataset.save('amor_reduced_iofq.ort')"
514+
"fileio.orso.save_orso(datasets=datasets, fname='amor_reduced_iofq.ort')"
448515
]
449516
},
450517
{

src/ess/reflectometry/tools.py

Lines changed: 67 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,17 @@
11
# SPDX-License-Identifier: BSD-3-Clause
22
# Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
3-
from collections.abc import Sequence
3+
from collections.abc import Mapping, Sequence
44
from itertools import chain
5+
from typing import Any
56

67
import numpy as np
8+
import sciline
79
import scipp as sc
810
import scipy.optimize as opt
11+
from orsopy.fileio.orso import OrsoDataset
12+
13+
from ess.reflectometry import orso
14+
from ess.reflectometry.types import ReflectivityOverQ
915

1016
_STD_TO_FWHM = sc.scalar(2.0) * sc.sqrt(sc.scalar(2.0) * sc.log(sc.scalar(2.0)))
1117

@@ -286,3 +292,63 @@ def combine_curves(
286292
),
287293
coords={'Q': q_bin_edges},
288294
)
295+
296+
297+
def orso_datasets_from_measurements(
298+
workflow: sciline.Pipeline,
299+
runs: Sequence[Mapping[type, Any]],
300+
*,
301+
scale_to_overlap: bool = True,
302+
) -> list[OrsoDataset]:
303+
'''Produces a list of ORSO datasets containing one
304+
reflectivity curve for each of the provided runs.
305+
Each entry of :code:`runs` is a mapping of parameters and
306+
values needed to produce the dataset.
307+
308+
Optionally, the reflectivity curves can be scaled to overlap in
309+
the regions where they have the same Q-value.
310+
311+
Parameters
312+
-----------
313+
workflow:
314+
The sciline workflow used to compute `ReflectivityOverQ` for each of the runs.
315+
316+
runs:
317+
The sciline parameters to be used for each run
318+
319+
scale_to_overlap:
320+
If True the curves will be scaled to overlap.
321+
Note that the curve of the first run is unscaled and
322+
the rest are scaled to match it.
323+
324+
Returns
325+
---------
326+
list of the computed ORSO datasets, containing one reflectivity curve each
327+
'''
328+
reflectivity_curves = []
329+
for parameters in runs:
330+
wf = workflow.copy()
331+
for name, value in parameters.items():
332+
wf[name] = value
333+
reflectivity_curves.append(wf.compute(ReflectivityOverQ))
334+
335+
scale_factors = (
336+
scale_reflectivity_curves_to_overlap([r.hist() for r in reflectivity_curves])[1]
337+
if scale_to_overlap
338+
else (1,) * len(runs)
339+
)
340+
341+
datasets = []
342+
for parameters, curve, scale_factor in zip(
343+
runs, reflectivity_curves, scale_factors, strict=True
344+
):
345+
wf = workflow.copy()
346+
for name, value in parameters.items():
347+
wf[name] = value
348+
wf[ReflectivityOverQ] = scale_factor * curve
349+
dataset = wf.compute(orso.OrsoIofQDataset)
350+
dataset.info.reduction.corrections = orso.find_corrections(
351+
wf.get(orso.OrsoIofQDataset)
352+
)
353+
datasets.append(dataset)
354+
return datasets

tests/tools_test.py

Lines changed: 43 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,21 @@
11
# SPDX-License-Identifier: BSD-3-Clause
22
# Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
3+
import numpy as np
34
import pytest
5+
import sciline as sl
46
import scipp as sc
57
from numpy.testing import assert_allclose as np_assert_allclose
8+
from orsopy.fileio import Orso, OrsoDataset
69
from scipp.testing import assert_allclose
710

8-
from ess.reflectometry import tools
9-
from ess.reflectometry.tools import combine_curves, scale_reflectivity_curves_to_overlap
11+
from ess.reflectometry.orso import OrsoIofQDataset
12+
from ess.reflectometry.tools import (
13+
combine_curves,
14+
linlogspace,
15+
orso_datasets_from_measurements,
16+
scale_reflectivity_curves_to_overlap,
17+
)
18+
from ess.reflectometry.types import Filename, ReflectivityOverQ, SampleRun
1019

1120

1221
def curve(d, qmin, qmax):
@@ -138,31 +147,31 @@ def test_combined_curves():
138147

139148

140149
def test_linlogspace_linear():
141-
q_lin = tools.linlogspace(
150+
q_lin = linlogspace(
142151
dim='qz', edges=[0.008, 0.08], scale='linear', num=50, unit='1/angstrom'
143152
)
144153
expected = sc.linspace(dim='qz', start=0.008, stop=0.08, num=50, unit='1/angstrom')
145154
assert sc.allclose(q_lin, expected)
146155

147156

148157
def test_linlogspace_linear_list_input():
149-
q_lin = tools.linlogspace(
158+
q_lin = linlogspace(
150159
dim='qz', edges=[0.008, 0.08], unit='1/angstrom', scale=['linear'], num=[50]
151160
)
152161
expected = sc.linspace(dim='qz', start=0.008, stop=0.08, num=50, unit='1/angstrom')
153162
assert sc.allclose(q_lin, expected)
154163

155164

156165
def test_linlogspace_log():
157-
q_log = tools.linlogspace(
166+
q_log = linlogspace(
158167
dim='qz', edges=[0.008, 0.08], unit='1/angstrom', scale='log', num=50
159168
)
160169
expected = sc.geomspace(dim='qz', start=0.008, stop=0.08, num=50, unit='1/angstrom')
161170
assert sc.allclose(q_log, expected)
162171

163172

164173
def test_linlogspace_linear_log():
165-
q_linlog = tools.linlogspace(
174+
q_linlog = linlogspace(
166175
dim='qz',
167176
edges=[0.008, 0.03, 0.08],
168177
unit='1/angstrom',
@@ -176,7 +185,7 @@ def test_linlogspace_linear_log():
176185

177186

178187
def test_linlogspace_log_linear():
179-
q_loglin = tools.linlogspace(
188+
q_loglin = linlogspace(
180189
dim='qz',
181190
edges=[0.008, 0.03, 0.08],
182191
unit='1/angstrom',
@@ -190,7 +199,7 @@ def test_linlogspace_log_linear():
190199

191200

192201
def test_linlogspace_linear_log_linear():
193-
q_linloglin = tools.linlogspace(
202+
q_linloglin = linlogspace(
194203
dim='qz',
195204
edges=[0.008, 0.03, 0.08, 0.12],
196205
unit='1/angstrom',
@@ -206,10 +215,35 @@ def test_linlogspace_linear_log_linear():
206215

207216
def test_linlogspace_bad_input():
208217
with pytest.raises(ValueError, match="Sizes do not match"):
209-
_ = tools.linlogspace(
218+
_ = linlogspace(
210219
dim='qz',
211220
edges=[0.008, 0.03, 0.08, 0.12],
212221
unit='1/angstrom',
213222
scale=['linear', 'log'],
214223
num=[16, 20],
215224
)
225+
226+
227+
@pytest.mark.filterwarnings("ignore:No suitable")
228+
def test_orso_datasets_tool():
229+
def normalized_ioq(filename: Filename[SampleRun]) -> ReflectivityOverQ:
230+
return filename
231+
232+
def orso_dataset(filename: Filename[SampleRun]) -> OrsoIofQDataset:
233+
class Reduction:
234+
corrections = [] # noqa: RUF012
235+
236+
return OrsoDataset(
237+
            Orso({}, Reduction, [], name=f'{filename}.orso'), np.ones((0, 0))
238+
)
239+
240+
workflow = sl.Pipeline(
241+
[normalized_ioq, orso_dataset], params={Filename[SampleRun]: 'default'}
242+
)
243+
datasets = orso_datasets_from_measurements(
244+
workflow,
245+
[{}, {Filename[SampleRun]: 'special'}],
246+
scale_to_overlap=False,
247+
)
248+
assert len(datasets) == 2
249+
assert tuple(d.info.name for d in datasets) == ('default.orso', 'special.orso')

0 commit comments

Comments
 (0)