|
44 | 44 | "## Create and configure the workflow\n", |
45 | 45 | "\n", |
46 | 46 | "We begin by creating the Amor workflow object, which is a skeleton for reducing Amor data,\n",
47 | | - "with pre-configured steps." |
48 | | - ] |
49 | | - }, |
50 | | - { |
51 | | - "cell_type": "code", |
52 | | - "execution_count": null, |
53 | | - "metadata": {}, |
54 | | - "outputs": [], |
55 | | - "source": [ |
56 | | - "workflow = amor.AmorWorkflow()" |
57 | | - ] |
58 | | - }, |
59 | | - { |
60 | | - "cell_type": "markdown", |
61 | | - "metadata": {}, |
62 | | - "source": [ |
63 | | - "We then need to set the missing parameters which are specific to each experiment:" |
| 47 | + "with pre-configured steps, and then set the missing parameters that are specific to each experiment:"
64 | 48 | ] |
65 | 49 | }, |
66 | 50 | { |
|
69 | 53 | "metadata": {}, |
70 | 54 | "outputs": [], |
71 | 55 | "source": [ |
| 56 | + "workflow = amor.AmorWorkflow()\n", |
72 | 57 | "workflow[SampleSize[SampleRun]] = sc.scalar(10.0, unit='mm')\n", |
73 | 58 | "workflow[SampleSize[ReferenceRun]] = sc.scalar(10.0, unit='mm')\n", |
74 | 59 | "\n", |
75 | 60 | "workflow[ChopperPhase[ReferenceRun]] = sc.scalar(-7.5, unit='deg')\n", |
76 | 61 | "workflow[ChopperPhase[SampleRun]] = sc.scalar(-7.5, unit='deg')\n", |
77 | 62 | "\n", |
78 | 63 | "workflow[QBins] = sc.geomspace(dim='Q', start=0.005, stop=0.3, num=391, unit='1/angstrom')\n", |
79 | | - "workflow[WavelengthBins] = sc.geomspace('wavelength', 2.8, 12, 301, unit='angstrom')\n", |
| 64 | + "workflow[WavelengthBins] = sc.geomspace('wavelength', 2.8, 12.5, 2001, unit='angstrom')\n", |
80 | 65 | "\n", |
81 | 66 | "# The YIndexLimits and ZIndexLimits define ranges on the detector where\n", |
82 | 67 | "# data is considered to be valid signal.\n", |
83 | 68 | "# They represent the lower and upper boundaries of a range of pixel indices.\n", |
84 | | - "workflow[YIndexLimits] = sc.scalar(11, unit=None), sc.scalar(41, unit=None)\n", |
85 | | - "workflow[ZIndexLimits] = sc.scalar(80, unit=None), sc.scalar(370, unit=None)" |
| 69 | + "workflow[YIndexLimits] = sc.scalar(11), sc.scalar(41)\n", |
| 70 | + "workflow[ZIndexLimits] = sc.scalar(80), sc.scalar(370)\n", |
| 71 | + "workflow[BeamDivergenceLimits] = sc.scalar(-0.75, unit='deg'), sc.scalar(0.75, unit='deg')" |
86 | 72 | ] |
87 | 73 | }, |
88 | 74 | { |
|
116 | 102 | "# The sample rotation value in the file is slightly off, so we set it manually\n", |
117 | 103 | "workflow[SampleRotation[ReferenceRun]] = sc.scalar(0.65, unit='deg')\n", |
118 | 104 | "\n", |
119 | | - "reference_result = workflow.compute(IdealReferenceIntensity)\n", |
| 105 | + "reference_result = workflow.compute(ReducedReference)\n", |
120 | 106 | "# Set the result back onto the pipeline to cache it\n", |
121 | | - "workflow[IdealReferenceIntensity] = reference_result" |
| 107 | + "workflow[ReducedReference] = reference_result" |
122 | 108 | ] |
123 | 109 | }, |
124 | 110 | { |
|
198 | 184 | " },\n", |
199 | 185 | "}\n", |
200 | 186 | "\n", |
| 187 | + "\n", |
201 | 188 | "reflectivity = {}\n", |
202 | 189 | "for run_number, params in runs.items():\n", |
203 | 190 | " workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n", |
|
282 | 269 | "for run_number, params in runs.items():\n", |
283 | 270 | " workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n", |
284 | 271 | " workflow[SampleRotation[SampleRun]] = params[SampleRotation[SampleRun]]\n", |
285 | | - " diagnostics[run_number] = workflow.compute((ReflectivityData, ThetaBins[SampleRun]))\n", |
| 272 | + " diagnostics[run_number] = workflow.compute((ReflectivityOverZW, ThetaBins[SampleRun]))\n", |
286 | 273 | "\n", |
287 | 274 | "# Scale the results using the scale factors computed earlier\n", |
288 | 275 | "for run_number, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n", |
289 | | - " diagnostics[run_number][ReflectivityData] *= scale_factor" |
| 276 | + " diagnostics[run_number][ReflectivityOverZW] *= scale_factor" |
| 277 | + ] |
| 278 | + }, |
| 279 | + { |
| 280 | + "cell_type": "code", |
| 281 | + "execution_count": null, |
| 282 | + "metadata": {}, |
| 283 | + "outputs": [], |
| 284 | + "source": [ |
| 285 | + "diagnostics['608'][ReflectivityOverZW].hist().flatten(('blade', 'wire'), to='z').plot(norm='log')" |
290 | 286 | ] |
291 | 287 | }, |
292 | 288 | { |
|
306 | 302 | "from ess.amor.figures import wavelength_theta_figure\n", |
307 | 303 | "\n", |
308 | 304 | "wavelength_theta_figure(\n", |
309 | | - " [result[ReflectivityData] for result in diagnostics.values()],\n", |
| 305 | + " [result[ReflectivityOverZW] for result in diagnostics.values()],\n", |
310 | 306 | " theta_bins=[result[ThetaBins[SampleRun]] for result in diagnostics.values()],\n", |
311 | 307 | " q_edges_to_display=(sc.scalar(0.018, unit='1/angstrom'), sc.scalar(0.113, unit='1/angstrom'))\n", |
312 | 308 | ")" |
|
336 | 332 | "from ess.amor.figures import q_theta_figure\n", |
337 | 333 | "\n", |
338 | 334 | "q_theta_figure(\n", |
339 | | - " [res[ReflectivityData] for res in diagnostics.values()],\n", |
| 335 | + " [res[ReflectivityOverZW] for res in diagnostics.values()],\n", |
340 | 336 | " theta_bins=[res[ThetaBins[SampleRun]] for res in diagnostics.values()],\n", |
341 | 337 | " q_bins=workflow.compute(QBins)\n", |
342 | 338 | ")" |
|
364 | 360 | "workflow[Filename[SampleRun]] = runs['608'][Filename[SampleRun]]\n", |
365 | 361 | "workflow[SampleRotation[SampleRun]] = runs['608'][SampleRotation[SampleRun]]\n", |
366 | 362 | "wavelength_z_figure(\n", |
367 | | - " workflow.compute(FootprintCorrectedData[SampleRun]),\n", |
| 363 | + " workflow.compute(Sample),\n", |
368 | 364 | " wavelength_bins=workflow.compute(WavelengthBins),\n", |
369 | 365 | " grid=False\n", |
370 | 366 | ") + wavelength_z_figure(\n", |
371 | | - " reference_result,\n", |
| 367 | + " workflow.compute(Reference),\n", |
372 | 368 | " grid=False\n", |
373 | 369 | ")" |
374 | 370 | ] |
|
455 | 451 | ")" |
456 | 452 | ] |
457 | 453 | }, |
458 | | - { |
459 | | - "cell_type": "markdown", |
460 | | - "metadata": {}, |
461 | | - "source": [ |
462 | | - "To support tracking provenance, we also list the corrections that were done by the workflow and store them in the dataset:" |
463 | | - ] |
464 | | - }, |
465 | | - { |
466 | | - "cell_type": "code", |
467 | | - "execution_count": null, |
468 | | - "metadata": {}, |
469 | | - "outputs": [], |
470 | | - "source": [ |
471 | | - "iofq_dataset.info.reduction.corrections = orso.find_corrections(\n", |
472 | | - " workflow.get(orso.OrsoIofQDataset)\n", |
473 | | - ")" |
474 | | - ] |
475 | | - }, |
476 | 454 | { |
477 | 455 | "cell_type": "markdown", |
478 | 456 | "metadata": {}, |
|
0 commit comments