Commit ab520b5

docs: example writing multiple datasets to orso file

1 parent f8f6b39 commit ab520b5
3 files changed: +190 -30 lines


docs/user-guide/amor/amor-reduction.ipynb

Lines changed: 76 additions & 20 deletions
@@ -143,8 +143,32 @@
 "source": [
 "## Computing sample reflectivity\n",
 "\n",
-"We now compute the sample reflectivity from 3 runs that used different sample rotation angles.\n",
-"The different rotation angles cover different ranges in $Q$."
+"We now compute the sample reflectivity from 4 runs that used different sample rotation angles.\n",
+"The measurements at different rotation angles cover different ranges of $Q$."
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"In this tutorial we use some Amor data files that we have received.\n",
+"The file paths to the tutorial files are obtained by calling:"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"amor.data.amor_sample_run(608)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"When you encounter `amor.data.amor_sample_run`, imagine replacing it with a path to your own dataset."
 ]
 },
 {
@@ -154,17 +178,25 @@
 "outputs": [],
 "source": [
 "runs = {\n",
-"    '608': sc.scalar(0.85, unit='deg'),\n",
-"    '609': sc.scalar(2.25, unit='deg'),\n",
-"    '610': sc.scalar(3.65, unit='deg'),\n",
-"    '611': sc.scalar(5.05, unit='deg'),\n",
+"    run_number: {\n",
+"        # The sample rotation values in the files are slightly off, so we replace\n",
+"        # them with corrected values.\n",
+"        SampleRotation[SampleRun]: sc.scalar(sample_rotation_angle, unit='deg'),\n",
+"        Filename[SampleRun]: amor.data.amor_sample_run(run_number),\n",
+"    }\n",
+"    for run_number, sample_rotation_angle in (\n",
+"        ('608', 0.85),\n",
+"        ('609', 2.25),\n",
+"        ('610', 3.65),\n",
+"        ('611', 5.05),\n",
+"    )\n",
 "}\n",
 "\n",
 "reflectivity = {}\n",
-"for file, angle in runs.items():\n",
-"    workflow[Filename[SampleRun]] = amor.data.amor_sample_run(file)\n",
-"    workflow[SampleRotation[SampleRun]] = angle\n",
-"    reflectivity[file] = workflow.compute(ReflectivityOverQ).hist()\n",
+"for run_number, params in runs.items():\n",
+"    workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n",
+"    workflow[SampleRotation[SampleRun]] = params[SampleRotation[SampleRun]]\n",
+"    reflectivity[run_number] = workflow.compute(ReflectivityOverQ).hist()\n",
 "\n",
 "sc.plot(reflectivity, norm='log', vmin=1e-4)"
 ]
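Note: keying each run's parameters by sciline parameter type (rather than mapping run number to a bare angle) lets the loop assign them to the workflow generically, and it matches the `Sequence[Mapping[type, Any]]` shape that the new `orso_datasets_from_measurements` helper in `tools.py` (further down in this commit) expects for its `runs` argument.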
@@ -186,7 +218,7 @@
 "source": [
 "from ess.reflectometry.tools import scale_reflectivity_curves_to_overlap\n",
 "\n",
-"scaled_reflectivity_curves, scale_factors = scale_reflectivity_curves_to_overlap(reflectivity.values())\n",
+"scaled_reflectivity_curves, scale_factors = scale_reflectivity_curves_to_overlap(\n",
+"    reflectivity.values(),\n",
+"    critical_edge_interval=(sc.scalar(0.01, unit='1/angstrom'), sc.scalar(0.014, unit='1/angstrom')),\n",
+")\n",
 "sc.plot(dict(zip(reflectivity.keys(), scaled_reflectivity_curves, strict=True)), norm='log', vmin=1e-5)"
 ]
 },
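Note: `critical_edge_interval` appears to pin the absolute scale of the stitched curve: the given Q-range is taken to lie below the sample's critical edge, where total reflection makes the true reflectivity 1, so the curve covering that range can be normalized there instead of leaving the overall scale arbitrary. This reading follows standard reflectometry practice; check the tool's docstring for the exact semantics.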
@@ -236,14 +268,14 @@
 "source": [
 "# Start by computing the `ReflectivityData` for each of the files\n",
 "diagnostics = {}\n",
-"for file, angle in runs.items():\n",
-"    workflow[Filename[SampleRun]] = amor.data.amor_sample_run(file)\n",
-"    workflow[SampleRotation[SampleRun]] = angle\n",
-"    diagnostics[file] = workflow.compute((ReflectivityData, ThetaBins[SampleRun]))\n",
+"for run_number, params in runs.items():\n",
+"    workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n",
+"    workflow[SampleRotation[SampleRun]] = params[SampleRotation[SampleRun]]\n",
+"    diagnostics[run_number] = workflow.compute((ReflectivityData, ThetaBins[SampleRun]))\n",
 "\n",
 "# Scale the results using the scale factors computed earlier\n",
-"for key, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n",
-"    diagnostics[key][ReflectivityData] *= scale_factor"
+"for run_number, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n",
+"    diagnostics[run_number][ReflectivityData] *= scale_factor"
 ]
 },
 {
@@ -263,8 +295,8 @@
 "from ess.amor.figures import wavelength_theta_figure\n",
 "\n",
 "wavelength_theta_figure(\n",
-"    [res[ReflectivityData] for res in diagnostics.values()],\n",
-"    theta_bins=[res[ThetaBins[SampleRun]] for res in diagnostics.values()],\n",
+"    [result[ReflectivityData] for result in diagnostics.values()],\n",
+"    theta_bins=[result[ThetaBins[SampleRun]] for result in diagnostics.values()],\n",
 "    q_edges_to_display=(sc.scalar(0.018, unit='1/angstrom'), sc.scalar(0.113, unit='1/angstrom'))\n",
 ")"
 ]
@@ -430,6 +462,30 @@
 ")"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Now let's repeat this for all the sample measurements!\n",
+"To do that we can use a utility in `ess.reflectometry.tools`:"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from ess.reflectometry.tools import orso_datasets_from_measurements\n",
+"\n",
+"datasets = orso_datasets_from_measurements(\n",
+"    workflow,\n",
+"    runs.values(),\n",
+"    # Optionally scale the curves to overlap using `scale_reflectivity_curves_to_overlap`\n",
+"    scale_to_overlap=True\n",
+")"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -444,7 +500,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"iofq_dataset.save('amor_reduced_iofq.ort')"
+"fileio.orso.save_orso(datasets=datasets, fname='amor_reduced_iofq.ort')"
 ]
 },
 {
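For readers reconstructing the flow outside the notebook, a minimal sketch of how the new save cell fits together; it assumes `from orsopy import fileio` is available earlier in the notebook and reuses the `workflow` and `runs` objects defined above:

from orsopy import fileio  # assumed to be imported earlier in the notebook

from ess.reflectometry.tools import orso_datasets_from_measurements

# One ORSO dataset per run, rescaled to overlap, then written to a single .ort file.
datasets = orso_datasets_from_measurements(workflow, runs.values(), scale_to_overlap=True)
fileio.orso.save_orso(datasets=datasets, fname='amor_reduced_iofq.ort')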

src/ess/reflectometry/tools.py

Lines changed: 69 additions & 1 deletion
@@ -1,11 +1,17 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
-from collections.abc import Sequence
+from collections.abc import Mapping, Sequence
 from itertools import chain
+from typing import Any

 import numpy as np
+import sciline
 import scipp as sc
 import scipy.optimize as opt
+from orsopy.fileio.orso import OrsoDataset
+
+from ess.reflectometry import orso
+from ess.reflectometry.types import NormalizedIofQ

 _STD_TO_FWHM = sc.scalar(2.0) * sc.sqrt(sc.scalar(2.0) * sc.log(sc.scalar(2.0)))

@@ -286,3 +292,65 @@ def combine_curves(
         ),
         coords={'Q': q_bin_edges},
     )
+
+
+def orso_datasets_from_measurements(
+    workflow: sciline.Pipeline,
+    runs: Sequence[Mapping[type, Any]],
+    *,
+    scale_to_overlap: bool = True,
+) -> list[OrsoDataset]:
+    '''Produces a list of ORSO datasets containing one
+    reflectivity curve for each of the provided runs.
+    Each entry of :code:`runs` is a mapping of parameters and
+    values needed to produce the dataset.
+
+    Optionally, the reflectivity curves can be scaled to overlap in
+    the regions where they have the same Q-value.
+
+    Parameters
+    ----------
+    workflow:
+        The sciline workflow used to compute `NormalizedIofQ` for each of the runs.
+
+    runs:
+        The sciline parameters to be used for each run.
+
+    scale_to_overlap:
+        If True the curves will be scaled to overlap.
+        Note that the curve of the first run is unscaled and
+        the rest are scaled to match it.
+
+    Returns
+    -------
+    list of the computed ORSO datasets, containing one reflectivity curve each
+    '''
+    reflectivity_curves = []
+    for parameters in runs:
+        wf = workflow.copy()
+        for name, value in parameters.items():
+            wf[name] = value
+        reflectivity_curves.append(wf.compute(NormalizedIofQ))
+
+    scale_factors = (
+        scale_reflectivity_curves_to_overlap(
+            [r.hist() for r in reflectivity_curves], return_scaling_factors=True
+        )
+        if scale_to_overlap
+        else (1,) * len(runs)
+    )
+
+    datasets = []
+    for parameters, curve, scale_factor in zip(
+        runs, reflectivity_curves, scale_factors, strict=True
+    ):
+        wf = workflow.copy()
+        for name, value in parameters.items():
+            wf[name] = value
+        wf[NormalizedIofQ] = scale_factor * curve
+        dataset = wf.compute(orso.OrsoIofQDataset)
+        dataset.info.reduction.corrections = orso.find_corrections(
+            wf.get(orso.OrsoIofQDataset)
+        )
+        datasets.append(dataset)
+    return datasets
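A sketch of the intended call pattern for the new helper, mirroring the notebook change above. `amor.AmorWorkflow()` is assumed here as the workflow constructor for illustration; the parameter-type keys come straight from the docs diff:

import scipp as sc

from ess import amor
from ess.reflectometry.tools import orso_datasets_from_measurements
from ess.reflectometry.types import Filename, SampleRotation, SampleRun

workflow = amor.AmorWorkflow()  # assumed constructor; substitute your instrument workflow
runs = [
    {
        Filename[SampleRun]: amor.data.amor_sample_run('608'),
        SampleRotation[SampleRun]: sc.scalar(0.85, unit='deg'),
    },
    {
        Filename[SampleRun]: amor.data.amor_sample_run('609'),
        SampleRotation[SampleRun]: sc.scalar(2.25, unit='deg'),
    },
]
# Returns one OrsoDataset per run; curves are scaled to overlap unless disabled.
datasets = orso_datasets_from_measurements(workflow, runs, scale_to_overlap=True)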

tests/tools_test.py

Lines changed: 45 additions & 9 deletions
@@ -1,12 +1,22 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
+import numpy as np
 import pytest
+import sciline as sl
 import scipp as sc
+
 from numpy.testing import assert_allclose as np_assert_allclose
+from orsopy.fileio import Orso, OrsoDataset
 from scipp.testing import assert_allclose

-from ess.reflectometry import tools
-from ess.reflectometry.tools import combine_curves, scale_reflectivity_curves_to_overlap
+from ess.reflectometry.orso import OrsoIofQDataset
+from ess.reflectometry.tools import (
+    combine_curves,
+    linlogspace,
+    orso_datasets_from_measurements,
+    scale_reflectivity_curves_to_overlap,
+)
+from ess.reflectometry.types import Filename, ReflectivityOverQ, SampleRun


 def curve(d, qmin, qmax):
@@ -137,32 +147,33 @@ def test_combined_curves():
     )


 def test_linlogspace_linear():
-    q_lin = tools.linlogspace(
+    q_lin = linlogspace(
         dim='qz', edges=[0.008, 0.08], scale='linear', num=50, unit='1/angstrom'
     )
     expected = sc.linspace(dim='qz', start=0.008, stop=0.08, num=50, unit='1/angstrom')
     assert sc.allclose(q_lin, expected)


 def test_linlogspace_linear_list_input():
-    q_lin = tools.linlogspace(
+    q_lin = linlogspace(
         dim='qz', edges=[0.008, 0.08], unit='1/angstrom', scale=['linear'], num=[50]
     )
     expected = sc.linspace(dim='qz', start=0.008, stop=0.08, num=50, unit='1/angstrom')
     assert sc.allclose(q_lin, expected)


 def test_linlogspace_log():
-    q_log = tools.linlogspace(
+    q_log = linlogspace(
         dim='qz', edges=[0.008, 0.08], unit='1/angstrom', scale='log', num=50
     )
     expected = sc.geomspace(dim='qz', start=0.008, stop=0.08, num=50, unit='1/angstrom')
     assert sc.allclose(q_log, expected)


 def test_linlogspace_linear_log():
-    q_linlog = tools.linlogspace(
+    q_linlog = linlogspace(
         dim='qz',
         edges=[0.008, 0.03, 0.08],
         unit='1/angstrom',
@@ -176,7 +187,7 @@ def test_linlogspace_linear_log():


 def test_linlogspace_log_linear():
-    q_loglin = tools.linlogspace(
+    q_loglin = linlogspace(
         dim='qz',
         edges=[0.008, 0.03, 0.08],
         unit='1/angstrom',
@@ -190,7 +201,7 @@ def test_linlogspace_log_linear():


 def test_linlogspace_linear_log_linear():
-    q_linloglin = tools.linlogspace(
+    q_linloglin = linlogspace(
         dim='qz',
         edges=[0.008, 0.03, 0.08, 0.12],
         unit='1/angstrom',
@@ -206,10 +217,35 @@ def test_linlogspace_linear_log_linear():

 def test_linlogspace_bad_input():
     with pytest.raises(ValueError, match="Sizes do not match"):
-        _ = tools.linlogspace(
+        _ = linlogspace(
             dim='qz',
             edges=[0.008, 0.03, 0.08, 0.12],
             unit='1/angstrom',
             scale=['linear', 'log'],
             num=[16, 20],
         )
+
+
+@pytest.mark.filterwarnings("ignore:No suitable")
+def test_orso_datasets_tool():
+    def normalized_ioq(filename: Filename[SampleRun]) -> ReflectivityOverQ:
+        return filename
+
+    def orso_dataset(filename: Filename[SampleRun]) -> OrsoIofQDataset:
+        class Reduction:
+            corrections = []  # noqa: RUF012
+
+        return OrsoDataset(
+            Orso({}, Reduction, [], name=f'{filename}.orso'), np.ones((0, 0))
+        )
+
+    workflow = sl.Pipeline(
+        [normalized_ioq, orso_dataset], params={Filename[SampleRun]: 'default'}
+    )
+    datasets = orso_datasets_from_measurements(
+        workflow,
+        [{}, {Filename[SampleRun]: 'special'}],
+        scale_to_overlap=False,
+    )
+    assert len(datasets) == 2
+    assert tuple(d.info.name for d in datasets) == ('default.orso', 'special.orso')
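The test exercises the tool without real data: `normalized_ioq` passes the filename straight through and `orso_dataset` wraps it in a minimal `OrsoDataset`, so the assertions only probe the orchestration (copying the pipeline, applying per-run parameters, and returning one dataset per run in order).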
