|
143 | 143 | "source": [ |
144 | 144 | "## Computing sample reflectivity\n", |
145 | 145 | "\n", |
146 | | - "We now compute the sample reflectivity from 3 runs that used different sample rotation angles.\n", |
147 | | - "The different rotation angles cover different ranges in $Q$." |
| 146 | + "We now compute the sample reflectivity from 4 runs that used different sample rotation angles.\n", |
| 147 | + "The measurements at different rotation angles cover different ranges of $Q$." |
| 148 | + ] |
| 149 | + }, |
| 150 | + { |
| 151 | + "cell_type": "markdown", |
| 152 | + "metadata": {}, |
| 153 | + "source": [ |
| 154 | + "In this tutorial we use some Amor data files we have received.\n", |
| 155 | + "The file paths to the tutorial files are obtained by calling:" |
| 156 | + ] |
| 157 | + }, |
| 158 | + { |
| 159 | + "cell_type": "code", |
| 160 | + "execution_count": null, |
| 161 | + "metadata": {}, |
| 162 | + "outputs": [], |
| 163 | + "source": [ |
| 164 | + "amor.data.amor_sample_run(608)" |
| 165 | + ] |
| 166 | + }, |
| 167 | + { |
| 168 | + "cell_type": "markdown", |
| 169 | + "metadata": {}, |
| 170 | + "source": [ |
| 171 | + "When you encounter `amor.data.amor_sample_run` you should imagine replacing that with a path to your own dataset." |
148 | 172 | ] |
149 | 173 | }, |
150 | 174 | { |
|
154 | 178 | "outputs": [], |
155 | 179 | "source": [ |
156 | 180 | "runs = {\n", |
157 | | - " '608': sc.scalar(0.85, unit='deg'),\n", |
158 | | - " '609': sc.scalar(2.25, unit='deg'),\n", |
159 | | - " '610': sc.scalar(3.65, unit='deg'),\n", |
160 | | - " '611': sc.scalar(5.05, unit='deg'),\n", |
| 181 | + " run_number: {\n", |
| 182 | + " # The sample rotation values in the files are slightly off, so we replace\n", |
| 183 | + " # them with corrected values.\n", |
| 184 | + " SampleRotation[SampleRun]: sc.scalar(sample_rotation_angle, unit='deg'),\n", |
| 185 | + " Filename[SampleRun]: amor.data.amor_sample_run(run_number),\n", |
| 186 | + " }\n", |
| 187 | + " for run_number, sample_rotation_angle in (\n", |
| 188 | + " ('608', 0.85),\n", |
| 189 | + " ('609', 2.25),\n", |
| 190 | + " ('610', 3.65),\n", |
| 191 | + " ('611', 5.05),\n", |
| 192 | + " )\n", |
161 | 193 | "}\n", |
162 | 194 | "\n", |
163 | 195 | "reflectivity = {}\n", |
164 | | - "for file, angle in runs.items():\n", |
165 | | - " workflow[Filename[SampleRun]] = amor.data.amor_sample_run(file)\n", |
166 | | - " workflow[SampleRotation[SampleRun]] = angle\n", |
167 | | - " reflectivity[file] = workflow.compute(ReflectivityOverQ).hist()\n", |
| 196 | + "for run_number, params in runs.items():\n", |
| 197 | + " workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n", |
| 198 | + " workflow[SampleRotation[SampleRun]] = params[SampleRotation[SampleRun]]\n", |
| 199 | + " reflectivity[run_number] = workflow.compute(ReflectivityOverQ).hist()\n", |
168 | 200 | "\n", |
169 | 201 | "sc.plot(reflectivity, norm='log', vmin=1e-4)" |
170 | 202 | ] |
|
186 | 218 | "source": [ |
187 | 219 | "from ess.reflectometry.tools import scale_reflectivity_curves_to_overlap\n", |
188 | 220 | "\n", |
189 | | - "scaled_reflectivity_curves, scale_factors = scale_reflectivity_curves_to_overlap(reflectivity.values())\n", |
| 221 | + "scaled_reflectivity_curves, scale_factors = scale_reflectivity_curves_to_overlap(reflectivity.values(), critical_edge_interval=(sc.scalar(0.01, unit='1/angstrom'), sc.scalar(0.014, unit='1/angstrom')))\n", |
190 | 222 | "sc.plot(dict(zip(reflectivity.keys(), scaled_reflectivity_curves, strict=True)), norm='log', vmin=1e-5)" |
191 | 223 | ] |
192 | 224 | }, |
|
236 | 268 | "source": [ |
237 | 269 | "# Start by computing the `ReflectivityData` for each of the files\n", |
238 | 270 | "diagnostics = {}\n", |
239 | | - "for file, angle in runs.items():\n", |
240 | | - " workflow[Filename[SampleRun]] = amor.data.amor_sample_run(file)\n", |
241 | | - " workflow[SampleRotation[SampleRun]] = angle\n", |
242 | | - " diagnostics[file] = workflow.compute((ReflectivityData, ThetaBins[SampleRun]))\n", |
| 271 | + "for run_number, params in runs.items():\n", |
| 272 | + " workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n", |
| 273 | + " workflow[SampleRotation[SampleRun]] = params[SampleRotation[SampleRun]]\n", |
| 274 | + " diagnostics[run_number] = workflow.compute((ReflectivityData, ThetaBins[SampleRun]))\n", |
243 | 275 | "\n", |
244 | 276 | "# Scale the results using the scale factors computed earlier\n", |
245 | | - "for key, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n", |
246 | | - " diagnostics[key][ReflectivityData] *= scale_factor" |
| 277 | + "for run_number, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n", |
| 278 | + " diagnostics[run_number][ReflectivityData] *= scale_factor" |
247 | 279 | ] |
248 | 280 | }, |
249 | 281 | { |
|
263 | 295 | "from ess.amor.figures import wavelength_theta_figure\n", |
264 | 296 | "\n", |
265 | 297 | "wavelength_theta_figure(\n", |
266 | | - " [res[ReflectivityData] for res in diagnostics.values()],\n", |
267 | | - " theta_bins=[res[ThetaBins[SampleRun]] for res in diagnostics.values()],\n", |
| 298 | + " [result[ReflectivityData] for result in diagnostics.values()],\n", |
| 299 | + " theta_bins=[result[ThetaBins[SampleRun]] for result in diagnostics.values()],\n", |
268 | 300 | " q_edges_to_display=(sc.scalar(0.018, unit='1/angstrom'), sc.scalar(0.113, unit='1/angstrom'))\n", |
269 | 301 | ")" |
270 | 302 | ] |
|
430 | 462 | ")" |
431 | 463 | ] |
432 | 464 | }, |
| 465 | + { |
| 466 | + "cell_type": "markdown", |
| 467 | + "metadata": {}, |
| 468 | + "source": [ |
| 469 | + "Now let's repeat this for all the sample measurements!\n", |
| 470 | + "To do that we can use a utility in `ess.reflectometry.tools`:" |
| 471 | + ] |
| 472 | + }, |
| 473 | + { |
| 474 | + "cell_type": "code", |
| 475 | + "execution_count": null, |
| 476 | + "metadata": {}, |
| 477 | + "outputs": [], |
| 478 | + "source": [ |
| 479 | + "from ess.reflectometry.tools import orso_datasets_from_measurements\n", |
| 480 | + "\n", |
| 481 | + "datasets = orso_datasets_from_measurements(\n", |
| 482 | + " workflow,\n", |
| 483 | + " runs.values(),\n", |
| 484 | + " # Optionally scale the curves to overlap using `scale_reflectivity_curves_to_overlap`\n", |
| 485 | + " scale_to_overlap=True\n", |
| 486 | + ")" |
| 487 | + ] |
| 488 | + }, |
433 | 489 | { |
434 | 490 | "cell_type": "markdown", |
435 | 491 | "metadata": {}, |
|
444 | 500 | "metadata": {}, |
445 | 501 | "outputs": [], |
446 | 502 | "source": [ |
447 | | - "iofq_dataset.save('amor_reduced_iofq.ort')" |
| 503 | + "fileio.orso.save_orso(datasets=datasets, fname='amor_reduced_iofq.ort')" |
448 | 504 | ] |
449 | 505 | }, |
450 | 506 | { |
|
0 commit comments