Commit 8dca8e5

style notebook
1 parent f959566 commit 8dca8e5
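
This commit mechanically restyles the code cells of example-notebook.ipynb: single quotes become double quotes, spaces around ** are dropped, long calls are wrapped inside parentheses, and top-level definitions gain the two blank lines PEP 8 asks for. These are the changes an autoformatter such as Black produces; the commit message does not record which tool was used, so the following is only a sketch, assuming Black and nbformat are installed, of how the same pass could be reproduced through Black's Python API:

    import black
    import nbformat

    # Read the notebook and run every code cell through Black.
    nb = nbformat.read("example-notebook.ipynb", as_version=4)
    for cell in nb.cells:
        if cell.cell_type == "code":
            try:
                # format_str returns the formatted source with a trailing newline
                cell.source = black.format_str(cell.source, mode=black.Mode()).rstrip("\n")
            except black.InvalidInput:
                pass  # cells that start with IPython magics are not plain Python
    nbformat.write(nb, "example-notebook.ipynb")

In practice the command-line entry point (black installed with its jupyter extra) handles magic cells itself and is the easier route; the API form above is shown only to make the per-cell mechanics explicit.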

File tree

1 file changed (+78, -41 lines)

example-notebook.ipynb

Lines changed: 78 additions & 41 deletions
@@ -24,6 +24,7 @@
 "outputs": [],
 "source": [
 "import adaptive\n",
+"\n",
 "adaptive.notebook_extension()\n",
 "\n",
 "# Import modules that are used in multiple cells\n",
@@ -57,12 +58,13 @@
 "source": [
 "offset = random.uniform(-0.5, 0.5)\n",
 "\n",
+"\n",
 "def peak(x, offset=offset, wait=True):\n",
 "    from time import sleep\n",
 "    from random import random\n",
 "\n",
 "    a = 0.01\n",
-"    if wait: \n",
+"    if wait:\n",
 "        # we pretend that this is a slow function\n",
 "        sleep(random())\n",
 "\n",
@@ -180,7 +182,7 @@
 "    sleep(random() / 10)\n",
 "    x, y = xy\n",
 "    a = 0.2\n",
-"    return x + np.exp(-(x ** 2 + y ** 2 - 0.75 ** 2) ** 2 / a ** 4)\n",
+"    return x + np.exp(-((x**2 + y**2 - 0.75**2)**2) / a**4)\n",
 "\n",
 "\n",
 "learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])"
@@ -206,6 +208,7 @@
 "    plot = learner.plot(tri_alpha=0.2)\n",
 "    return plot.Image + plot.EdgePaths.I + plot\n",
 "\n",
+"\n",
 "runner.live_plot(plotter=plot, update_interval=0.1)"
 ]
 },
@@ -219,7 +222,7 @@
 "\n",
 "# Create a learner and add data on homogeneous grid, so that we can plot it\n",
 "learner2 = adaptive.Learner2D(ring, bounds=learner.bounds)\n",
-"n = int(learner.npoints ** 0.5)\n",
+"n = int(learner.npoints**0.5)\n",
 "xs, ys = [np.linspace(*bounds, n) for bounds in learner.bounds]\n",
 "xys = list(itertools.product(xs, ys))\n",
 "zs = [ring(xy, wait=False) for xy in xys]\n",
@@ -259,6 +262,7 @@
 "def g(n):\n",
 "    import random\n",
 "    from time import sleep\n",
+"\n",
 "    sleep(random.random() / 1000)\n",
 "    # Properly save and restore the RNG state\n",
 "    state = random.getstate()\n",
@@ -312,7 +316,7 @@
 "source": [
 "def noisy_peak(seed_x, sigma=0, peak_width=0.05, offset=-0.5):\n",
 "    seed, x = seed_x\n",
-"    y = x ** 3 - x + 3 * peak_width ** 2 / (peak_width ** 2 + (x - offset) ** 2)\n",
+"    y = x**3 - x + 3 * peak_width**2 / (peak_width**2 + (x - offset)**2)\n",
 "    rng = np.random.RandomState(int(seed))\n",
 "    noise = rng.normal(scale=sigma)\n",
 "    return y + noise"
@@ -418,6 +422,7 @@
 "def f24(x):\n",
 "    return np.floor(np.exp(x))\n",
 "\n",
+"\n",
 "xs = np.linspace(0, 3, 200)\n",
 "hv.Scatter((xs, [f24(x) for x in xs]))"
 ]
@@ -436,6 +441,7 @@
 "outputs": [],
 "source": [
 "import scipy.integrate\n",
+"\n",
 "scipy.integrate.quad(f24, 0, 3)"
 ]
 },
@@ -479,7 +485,10 @@
 "if runner.status() != \"finished\":\n",
 "    print(\"WARINING: The runner hasn't reached it goal yet!\")\n",
 "\n",
-"print('The integral value is {} with the corresponding error of {}'.format(learner.igral, learner.err))\n",
+"print(\n",
+"    f\"The integral value is {learner.igral} \"\n",
+"    f\"with a corresponding error of {learner.err}\"\n",
+")\n",
 "learner.plot()"
 ]
 },
@@ -559,7 +568,8 @@
 "def sphere(xyz):\n",
 "    x, y, z = xyz\n",
 "    a = 0.4\n",
-"    return x + z**2 + np.exp(-(x**2 + y**2 + z**2 - 0.75**2)**2/a**4)\n",
+"    return x + z**2 + np.exp(-((x**2 + y**2 + z**2 - 0.75**2)**2) / a**4)\n",
+"\n",
 "\n",
 "learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])\n",
 "runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 2000)\n",
@@ -580,11 +590,12 @@
 "outputs": [],
 "source": [
 "def plot_cut(x, direction, learner=learner):\n",
-"    cut_mapping = {'xyz'.index(direction): x}\n",
+"    cut_mapping = {\"xyz\".index(direction): x}\n",
 "    return learner.plot_slice(cut_mapping, n=100)\n",
 "\n",
-"dm = hv.DynamicMap(plot_cut, kdims=['value', 'direction'])\n",
-"dm.redim.values(value=np.linspace(-1, 1), direction=list('xyz'))"
+"\n",
+"dm = hv.DynamicMap(plot_cut, kdims=[\"value\", \"direction\"])\n",
+"dm.redim.values(value=np.linspace(-1, 1), direction=list(\"xyz\"))"
 ]
 },
 {
@@ -662,10 +673,14 @@
 "    dx = xs[1] - xs[0]\n",
 "    return dx\n",
 "\n",
+"\n",
 "def f_divergent_1d(x):\n",
 "    return 1 / x**2\n",
 "\n",
-"learner = adaptive.Learner1D(f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d)\n",
+"\n",
+"learner = adaptive.Learner1D(\n",
+"    f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d\n",
+")\n",
 "runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)\n",
 "learner.plot().select(y=(0, 10000))"
 ]
@@ -688,7 +703,7 @@
 "\n",
 "def f_divergent_2d(xy):\n",
 "    x, y = xy\n",
-"    return 1 / (x ** 2 + y ** 2)\n",
+"    return 1 / (x**2 + y**2)\n",
 "\n",
 "\n",
 "def plot_logz(learner):\n",
@@ -749,18 +764,19 @@
 "    # It represents the deviation of the function value from a linear estimate\n",
 "    # over each triangular subdomain.\n",
 "    dev = deviations(ip)[0]\n",
-"    \n",
+"\n",
 "    # we add terms of the same dimension: dev == [distance], A == [distance**2]\n",
 "    loss = np.sqrt(A) * dev + A\n",
-"    \n",
+"\n",
 "    # Setting areas with a small area to zero such that they won't be chosen again\n",
-"    loss[A < min_distance**2] = 0 \n",
-"    \n",
+"    loss[A < min_distance**2] = 0\n",
+"\n",
 "    # Setting triangles that have a size larger than max_distance to infinite loss\n",
 "    loss[A > max_distance**2] = np.inf\n",
 "\n",
 "    return loss\n",
 "\n",
+"\n",
 "loss = partial(resolution_loss, min_distance=0.01)\n",
 "\n",
 "learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)\n",
@@ -803,8 +819,11 @@
 "    a = 0.01\n",
 "    return x + a**2 / (a**2 + (x - offset)**2)\n",
 "\n",
-"learners = [adaptive.Learner1D(partial(h, offset=random.uniform(-1, 1)),\n",
-"                               bounds=(-1, 1)) for i in range(10)]\n",
+"\n",
+"learners = [\n",
+"    adaptive.Learner1D(partial(h, offset=random.uniform(-1, 1)), bounds=(-1, 1))\n",
+"    for i in range(10)\n",
+"]\n",
 "\n",
 "bal_learner = adaptive.BalancingLearner(learners)\n",
 "runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01)\n",
@@ -836,23 +855,26 @@
 "source": [
 "from scipy.special import eval_jacobi\n",
 "\n",
+"\n",
 "def jacobi(x, n, alpha, beta):\n",
 "    return eval_jacobi(n, alpha, beta, x)\n",
 "\n",
+"\n",
 "combos = {\n",
-"    'n': [1, 2, 4, 8],\n",
-"    'alpha': np.linspace(0, 2, 3),\n",
-"    'beta': np.linspace(0, 1, 5),\n",
+"    \"n\": [1, 2, 4, 8],\n",
+"    \"alpha\": np.linspace(0, 2, 3),\n",
+"    \"beta\": np.linspace(0, 1, 5),\n",
 "}\n",
 "\n",
 "learner = adaptive.BalancingLearner.from_product(\n",
-"    jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos)\n",
+"    jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos\n",
+")\n",
 "\n",
 "runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)\n",
 "\n",
 "# The `cdims` will automatically be set when using `from_product`, so\n",
 "# `plot()` will return a HoloMap with correctly labeled sliders.\n",
-"learner.plot().overlay('beta').grid().select(y=(-1, 3))"
+"learner.plot().overlay(\"beta\").grid().select(y=(-1, 3))"
 ]
 },
 {
@@ -879,6 +901,7 @@
 "source": [
 "from operator import itemgetter\n",
 "\n",
+"\n",
 "def f_dict(x):\n",
 "    \"\"\"The function evaluation takes roughly the time we `sleep`.\"\"\"\n",
 "    import random\n",
@@ -888,14 +911,15 @@
 "    sleep(waiting_time)\n",
 "    a = 0.01\n",
 "    y = x + a**2 / (a**2 + x**2)\n",
-"    return {'y': y, 'waiting_time': waiting_time}\n",
+"    return {\"y\": y, \"waiting_time\": waiting_time}\n",
+"\n",
 "\n",
 "# Create the learner with the function that returns a 'dict'\n",
 "# This learner cannot be run directly, as Learner1D does not know what to do with the 'dict'\n",
 "_learner = adaptive.Learner1D(f_dict, bounds=(-1, 1))\n",
 "\n",
 "# Wrapping the learner with 'adaptive.DataSaver' and tell it which key it needs to learn\n",
-"learner = adaptive.DataSaver(_learner, arg_picker=itemgetter('y'))"
+"learner = adaptive.DataSaver(_learner, arg_picker=itemgetter(\"y\"))"
 ]
 },
 {
@@ -965,8 +989,7 @@
 "outputs": [],
 "source": [
 "def F(x, noise_level=0.1):\n",
-"    return (np.sin(5 * x) * (1 - np.tanh(x ** 2))\n",
-"            + np.random.randn() * noise_level)"
+"    return np.sin(5 * x) * (1 - np.tanh(x**2)) + np.random.randn() * noise_level"
 ]
 },
 {
@@ -975,11 +998,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"learner = adaptive.SKOptLearner(F, dimensions=[(-2., 2.)],\n",
-"                                base_estimator=\"GP\",\n",
-"                                acq_func=\"gp_hedge\",\n",
-"                                acq_optimizer=\"lbfgs\",\n",
-"                                )\n",
+"learner = adaptive.SKOptLearner(\n",
+"    F,\n",
+"    dimensions=[(-2.0, 2.0)],\n",
+"    base_estimator=\"GP\",\n",
+"    acq_func=\"gp_hedge\",\n",
+"    acq_optimizer=\"lbfgs\",\n",
+")\n",
 "runner = adaptive.Runner(learner, ntasks=1, goal=lambda l: l.npoints > 40)\n",
 "runner.live_info()"
 ]
@@ -992,9 +1017,9 @@
 "source": [
 "%%opts Overlay [legend_position='top']\n",
 "xs = np.linspace(*learner.space.bounds[0])\n",
-"to_learn = hv.Curve((xs, [F(x, 0) for x in xs]), label='to learn')\n",
+"to_learn = hv.Curve((xs, [F(x, 0) for x in xs]), label=\"to learn\")\n",
 "\n",
-"runner.live_plot().relabel('prediction', depth=2) * to_learn"
+"runner.live_plot().relabel(\"prediction\", depth=2) * to_learn"
 ]
 },
 {
@@ -1155,7 +1180,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"fname = 'data/example_file.p'\n",
+"fname = \"data/example_file.p\"\n",
 "learner.save(fname)"
 ]
 },
@@ -1173,7 +1198,7 @@
 "outputs": [],
 "source": [
 "control.load(fname)\n",
-"learner.plot().relabel('saved learner') + control.plot().relabel('loaded learner')"
+"learner.plot().relabel(\"saved learner\") + control.plot().relabel(\"loaded learner\")"
 ]
 },
 {
@@ -1208,13 +1233,17 @@
 "source": [
 "def slow_f(x):\n",
 "    from time import sleep\n",
+"\n",
 "    sleep(5)\n",
 "    return x\n",
 "\n",
+"\n",
 "learner = adaptive.Learner1D(slow_f, bounds=[0, 1])\n",
 "runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 100)\n",
 "\n",
-"runner.start_periodic_saving(save_kwargs=dict(fname='data/periodic_example.p'), interval=6)\n",
+"runner.start_periodic_saving(\n",
+"    save_kwargs=dict(fname=\"data/periodic_example.p\"), interval=6\n",
+")\n",
 "\n",
 "runner.live_info()"
 ]
@@ -1328,7 +1357,9 @@
 "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n",
 "\n",
 "# blocks until completion\n",
-"runner = adaptive.Runner(learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.002)\n",
+"runner = adaptive.Runner(\n",
+"    learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.002\n",
+")\n",
 "runner.live_info()\n",
 "runner.live_plot(update_interval=0.1)"
 ]
@@ -1406,14 +1437,17 @@
 "def will_raise(x):\n",
 "    from random import random\n",
 "    from time import sleep\n",
-"    \n",
+"\n",
 "    sleep(random())\n",
 "    if random() < 0.1:\n",
-"        raise RuntimeError('something went wrong!')\n",
+"        raise RuntimeError(\"something went wrong!\")\n",
 "    return x**2\n",
-"    \n",
+"\n",
+"\n",
 "learner = adaptive.Learner1D(will_raise, (-1, 1))\n",
-"runner = adaptive.Runner(learner)  # without 'goal' the runner will run forever unless cancelled\n",
+"runner = adaptive.Runner(\n",
+"    learner\n",
+")  # without 'goal' the runner will run forever unless cancelled\n",
 "runner.live_info()\n",
 "runner.live_plot()"
 ]
@@ -1546,12 +1580,15 @@
 "source": [
 "import asyncio\n",
 "\n",
+"\n",
 "async def time(runner):\n",
 "    from datetime import datetime\n",
+"\n",
 "    now = datetime.now()\n",
 "    await runner.task\n",
 "    return datetime.now() - now\n",
 "\n",
+"\n",
 "ioloop = asyncio.get_event_loop()\n",
 "\n",
 "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n",
