|
143 | 143 | "source": [ |
144 | 144 | "## Computing sample reflectivity\n", |
145 | 145 | "\n", |
146 | | - "We now compute the sample reflectivity from 3 runs that used different sample rotation angles.\n", |
147 | | - "The different rotation angles cover different ranges in $Q$." |
| 146 | + "We now compute the sample reflectivity from 4 runs that used different sample rotation angles.\n", |
| 147 | + "The measurements at different rotation angles cover different ranges of $Q$." |
| 148 | + ] |
| 149 | + }, |
| 150 | + { |
| 151 | + "cell_type": "markdown", |
| 152 | + "metadata": {}, |
| 153 | + "source": [ |
| 154 | + "In this tutorial we use some Amor data files we have received.\n", |
| 155 | + "The file paths to the tutorial files are obtained by calling:" |
| 156 | + ] |
| 157 | + }, |
| 158 | + { |
| 159 | + "cell_type": "code", |
| 160 | + "execution_count": null, |
| 161 | + "metadata": {}, |
| 162 | + "outputs": [], |
| 163 | + "source": [ |
| 164 | + "amor.data.amor_sample_run('608')" |
| 165 | + ] |
| 166 | + }, |
| 167 | + { |
| 168 | + "cell_type": "markdown", |
| 169 | + "metadata": {}, |
| 170 | + "source": [ |
| 171 | + "When you encounter `amor.data.amor_sample_run` you should imagine replacing that with a path to your own dataset." |
148 | 172 | ] |
149 | 173 | }, |
150 | 174 | { |
|
154 | 178 | "outputs": [], |
155 | 179 | "source": [ |
156 | 180 | "runs = {\n", |
157 | | - " '608': sc.scalar(0.85, unit='deg'),\n", |
158 | | - " '609': sc.scalar(2.25, unit='deg'),\n", |
159 | | - " '610': sc.scalar(3.65, unit='deg'),\n", |
160 | | - " '611': sc.scalar(5.05, unit='deg'),\n", |
| 181 | + " '608': {\n", |
| 182 | + " # The sample rotation values in the files are slightly off, so we replace\n", |
| 183 | + " # them with corrected values.\n", |
| 184 | + " SampleRotation[SampleRun]: sc.scalar(0.85, unit='deg'),\n", |
| 185 | + " Filename[SampleRun]: amor.data.amor_sample_run('608'),\n", |
| 186 | + " },\n", |
| 187 | + " '609': {\n", |
| 188 | + " SampleRotation[SampleRun]: sc.scalar(2.25, unit='deg'),\n", |
| 189 | + " Filename[SampleRun]: amor.data.amor_sample_run('609'),\n", |
| 190 | + " },\n", |
| 191 | + " '610': {\n", |
| 192 | + " SampleRotation[SampleRun]: sc.scalar(3.65, unit='deg'),\n", |
| 193 | + " Filename[SampleRun]: amor.data.amor_sample_run('610'),\n", |
| 194 | + " },\n", |
| 195 | + " '611': {\n", |
| 196 | + " SampleRotation[SampleRun]: sc.scalar(5.05, unit='deg'),\n", |
| 197 | + " Filename[SampleRun]: amor.data.amor_sample_run('611'),\n", |
| 198 | + " },\n", |
161 | 199 | "}\n", |
162 | 200 | "\n", |
163 | 201 | "reflectivity = {}\n", |
164 | | - "for file, angle in runs.items():\n", |
165 | | - " workflow[Filename[SampleRun]] = amor.data.amor_sample_run(file)\n", |
166 | | - " workflow[SampleRotation[SampleRun]] = angle\n", |
167 | | - " reflectivity[file] = workflow.compute(ReflectivityOverQ).hist()\n", |
| 202 | + "for run_number, params in runs.items():\n", |
| 203 | + " workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n", |
| 204 | + " workflow[SampleRotation[SampleRun]] = params[SampleRotation[SampleRun]]\n", |
| 205 | + " reflectivity[run_number] = workflow.compute(ReflectivityOverQ).hist()\n", |
168 | 206 | "\n", |
169 | 207 | "sc.plot(reflectivity, norm='log', vmin=1e-4)" |
170 | 208 | ] |
|
186 | 224 | "source": [ |
187 | 225 | "from ess.reflectometry.tools import scale_reflectivity_curves_to_overlap\n", |
188 | 226 | "\n", |
189 | | - "scaled_reflectivity_curves, scale_factors = scale_reflectivity_curves_to_overlap(reflectivity.values())\n", |
| 227 | + "scaled_reflectivity_curves, scale_factors = scale_reflectivity_curves_to_overlap(\n", |
| 228 | + " reflectivity.values(),\n", |
| 229 | + " # Optionally specify a Q-interval where the reflectivity is known to be 1.0\n", |
| 230 | + " critical_edge_interval=(sc.scalar(0.01, unit='1/angstrom'), sc.scalar(0.014, unit='1/angstrom'))\n", |
| 231 | + ")\n", |
| 232 | + "\n", |
190 | 233 | "sc.plot(dict(zip(reflectivity.keys(), scaled_reflectivity_curves, strict=True)), norm='log', vmin=1e-5)" |
191 | 234 | ] |
192 | 235 | }, |
|
236 | 279 | "source": [ |
237 | 280 | "# Start by computing the `ReflectivityData` for each of the files\n", |
238 | 281 | "diagnostics = {}\n", |
239 | | - "for file, angle in runs.items():\n", |
240 | | - " workflow[Filename[SampleRun]] = amor.data.amor_sample_run(file)\n", |
241 | | - " workflow[SampleRotation[SampleRun]] = angle\n", |
242 | | - " diagnostics[file] = workflow.compute((ReflectivityData, ThetaBins[SampleRun]))\n", |
| 282 | + "for run_number, params in runs.items():\n", |
| 283 | + " workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n", |
| 284 | + " workflow[SampleRotation[SampleRun]] = params[SampleRotation[SampleRun]]\n", |
| 285 | + " diagnostics[run_number] = workflow.compute((ReflectivityData, ThetaBins[SampleRun]))\n", |
243 | 286 | "\n", |
244 | 287 | "# Scale the results using the scale factors computed earlier\n", |
245 | | - "for key, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n", |
246 | | - " diagnostics[key][ReflectivityData] *= scale_factor" |
| 288 | + "for run_number, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n", |
| 289 | + " diagnostics[run_number][ReflectivityData] *= scale_factor" |
247 | 290 | ] |
248 | 291 | }, |
249 | 292 | { |
|
263 | 306 | "from ess.amor.figures import wavelength_theta_figure\n", |
264 | 307 | "\n", |
265 | 308 | "wavelength_theta_figure(\n", |
266 | | - " [res[ReflectivityData] for res in diagnostics.values()],\n", |
267 | | - " theta_bins=[res[ThetaBins[SampleRun]] for res in diagnostics.values()],\n", |
| 309 | + " [result[ReflectivityData] for result in diagnostics.values()],\n", |
| 310 | + " theta_bins=[result[ThetaBins[SampleRun]] for result in diagnostics.values()],\n", |
268 | 311 | " q_edges_to_display=(sc.scalar(0.018, unit='1/angstrom'), sc.scalar(0.113, unit='1/angstrom'))\n", |
269 | 312 | ")" |
270 | 313 | ] |
|
318 | 361 | "source": [ |
319 | 362 | "from ess.amor.figures import wavelength_z_figure\n", |
320 | 363 | "\n", |
321 | | - "workflow[Filename[SampleRun]] = amor.data.amor_sample_run('608')\n", |
322 | | - "workflow[SampleRotation[SampleRun]] = runs['608']\n", |
| 364 | + "workflow[Filename[SampleRun]] = runs['608'][Filename[SampleRun]]\n", |
| 365 | + "workflow[SampleRotation[SampleRun]] = runs['608'][SampleRotation[SampleRun]]\n", |
323 | 366 | "wavelength_z_figure(\n", |
324 | 367 | " workflow.compute(FootprintCorrectedData[SampleRun]),\n", |
325 | 368 | " wavelength_bins=workflow.compute(WavelengthBins),\n", |
|
430 | 473 | ")" |
431 | 474 | ] |
432 | 475 | }, |
| 476 | + { |
| 477 | + "cell_type": "markdown", |
| 478 | + "metadata": {}, |
| 479 | + "source": [ |
| 480 | + "Now let's repeat this for all the sample measurements!\n", |
| 481 | + "To do that we can use a utility in `ess.reflectometry.tools`:" |
| 482 | + ] |
| 483 | + }, |
| 484 | + { |
| 485 | + "cell_type": "code", |
| 486 | + "execution_count": null, |
| 487 | + "metadata": {}, |
| 488 | + "outputs": [], |
| 489 | + "source": [ |
| 490 | + "from ess.reflectometry.tools import orso_datasets_from_measurements\n", |
| 491 | + "\n", |
| 492 | + "datasets = orso_datasets_from_measurements(\n", |
| 493 | + " workflow,\n", |
| 494 | + " runs.values(),\n", |
| 495 | + " # Optionally scale the curves to overlap using `scale_reflectivity_curves_to_overlap`\n", |
| 496 | + " scale_to_overlap=True\n", |
| 497 | + ")" |
| 498 | + ] |
| 499 | + }, |
433 | 500 | { |
434 | 501 | "cell_type": "markdown", |
435 | 502 | "metadata": {}, |
|
444 | 511 | "metadata": {}, |
445 | 512 | "outputs": [], |
446 | 513 | "source": [ |
447 | | - "iofq_dataset.save('amor_reduced_iofq.ort')" |
| 514 | + "fileio.orso.save_orso(datasets=datasets, fname='amor_reduced_iofq.ort')" |
448 | 515 | ] |
449 | 516 | }, |
450 | 517 | { |
|
0 commit comments