24 | 24 | "outputs": [],
25 | 25 | "source": [
26 | 26 | "import adaptive\n",
| 27 | + "\n",
27 | 28 | "adaptive.notebook_extension()\n",
28 | 29 | "\n",
29 | 30 | "# Import modules that are used in multiple cells\n",

57 | 58 | "source": [
58 | 59 | "offset = random.uniform(-0.5, 0.5)\n",
59 | 60 | "\n",
| 61 | + "\n",
60 | 62 | "def peak(x, offset=offset, wait=True):\n",
61 | 63 | "    from time import sleep\n",
62 | 64 | "    from random import random\n",
63 | 65 | "\n",
64 | 66 | "    a = 0.01\n",
65 | | - "    if wait: \n",
| 67 | + "    if wait:\n",
66 | 68 | "        # we pretend that this is a slow function\n",
67 | 69 | "        sleep(random())\n",
68 | 70 | "\n",

180 | 182 | "    sleep(random() / 10)\n",
181 | 183 | "    x, y = xy\n",
182 | 184 | "    a = 0.2\n",
183 | | - "    return x + np.exp(-(x ** 2 + y ** 2 - 0.75 ** 2) ** 2 / a ** 4)\n",
| 185 | + "    return x + np.exp(-((x**2 + y**2 - 0.75**2)**2) / a**4)\n",
184 | 186 | "\n",
185 | 187 | "\n",
186 | 188 | "learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])"

206 | 208 | "    plot = learner.plot(tri_alpha=0.2)\n",
207 | 209 | "    return plot.Image + plot.EdgePaths.I + plot\n",
208 | 210 | "\n",
| 211 | + "\n",
209 | 212 | "runner.live_plot(plotter=plot, update_interval=0.1)"
210 | 213 | ]
211 | 214 | },

219 | 222 | "\n",
220 | 223 | "# Create a learner and add data on a homogeneous grid, so that we can plot it\n",
221 | 224 | "learner2 = adaptive.Learner2D(ring, bounds=learner.bounds)\n",
222 | | - "n = int(learner.npoints ** 0.5)\n",
| 225 | + "n = int(learner.npoints**0.5)\n",
223 | 226 | "xs, ys = [np.linspace(*bounds, n) for bounds in learner.bounds]\n",
224 | 227 | "xys = list(itertools.product(xs, ys))\n",
225 | 228 | "zs = [ring(xy, wait=False) for xy in xys]\n",

259 | 262 | "def g(n):\n",
260 | 263 | "    import random\n",
261 | 264 | "    from time import sleep\n",
| 265 | + "\n",
262 | 266 | "    sleep(random.random() / 1000)\n",
263 | 267 | "    # Properly save and restore the RNG state\n",
264 | 268 | "    state = random.getstate()\n",

312 | 316 | "source": [
313 | 317 | "def noisy_peak(seed_x, sigma=0, peak_width=0.05, offset=-0.5):\n",
314 | 318 | "    seed, x = seed_x\n",
315 | | - "    y = x ** 3 - x + 3 * peak_width ** 2 / (peak_width ** 2 + (x - offset) ** 2)\n",
| 319 | + "    y = x**3 - x + 3 * peak_width**2 / (peak_width**2 + (x - offset)**2)\n",
316 | 320 | "    rng = np.random.RandomState(int(seed))\n",
317 | 321 | "    noise = rng.normal(scale=sigma)\n",
318 | 322 | "    return y + noise"

418 | 422 | "def f24(x):\n",
419 | 423 | "    return np.floor(np.exp(x))\n",
420 | 424 | "\n",
| 425 | + "\n",
421 | 426 | "xs = np.linspace(0, 3, 200)\n",
422 | 427 | "hv.Scatter((xs, [f24(x) for x in xs]))"
423 | 428 | ]

436 | 441 | "outputs": [],
437 | 442 | "source": [
438 | 443 | "import scipy.integrate\n",
| 444 | + "\n",
439 | 445 | "scipy.integrate.quad(f24, 0, 3)"
440 | 446 | ]
441 | 447 | },

479 | 485 | "if runner.status() != \"finished\":\n",
480 | 486 | "    print(\"WARNING: The runner hasn't reached its goal yet!\")\n",
481 | 487 | "\n",
482 | | - "print('The integral value is {} with the corresponding error of {}'.format(learner.igral, learner.err))\n",
| 488 | + "print(\n",
| 489 | + "    f\"The integral value is {learner.igral} \"\n",
| 490 | + "    f\"with a corresponding error of {learner.err}\"\n",
| 491 | + ")\n",
483 | 492 | "learner.plot()"
484 | 493 | ]
485 | 494 | },

559 | 568 | "def sphere(xyz):\n",
560 | 569 | "    x, y, z = xyz\n",
561 | 570 | "    a = 0.4\n",
562 | | - "    return x + z**2 + np.exp(-(x**2 + y**2 + z**2 - 0.75**2)**2/a**4)\n",
| 571 | + "    return x + z**2 + np.exp(-((x**2 + y**2 + z**2 - 0.75**2)**2) / a**4)\n",
| 572 | + "\n",
563 | 573 | "\n",
564 | 574 | "learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])\n",
565 | 575 | "runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 2000)\n",

580 | 590 | "outputs": [],
581 | 591 | "source": [
582 | 592 | "def plot_cut(x, direction, learner=learner):\n",
583 | | - "    cut_mapping = {'xyz'.index(direction): x}\n",
| 593 | + "    cut_mapping = {\"xyz\".index(direction): x}\n",
584 | 594 | "    return learner.plot_slice(cut_mapping, n=100)\n",
585 | 595 | "\n",
586 | | - "dm = hv.DynamicMap(plot_cut, kdims=['value', 'direction'])\n",
587 | | - "dm.redim.values(value=np.linspace(-1, 1), direction=list('xyz'))"
| 596 | + "\n",
| 597 | + "dm = hv.DynamicMap(plot_cut, kdims=[\"value\", \"direction\"])\n",
| 598 | + "dm.redim.values(value=np.linspace(-1, 1), direction=list(\"xyz\"))"
588 | 599 | ]
589 | 600 | },
590 | 601 | {

662 | 673 | "    dx = xs[1] - xs[0]\n",
663 | 674 | "    return dx\n",
664 | 675 | "\n",
| 676 | + "\n",
665 | 677 | "def f_divergent_1d(x):\n",
666 | 678 | "    return 1 / x**2\n",
667 | 679 | "\n",
668 | | - "learner = adaptive.Learner1D(f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d)\n",
| 680 | + "\n",
| 681 | + "learner = adaptive.Learner1D(\n",
| 682 | + "    f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d\n",
| 683 | + ")\n",
669 | 684 | "runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)\n",
670 | 685 | "learner.plot().select(y=(0, 10000))"
671 | 686 | ]

688 | 703 | "\n",
689 | 704 | "def f_divergent_2d(xy):\n",
690 | 705 | "    x, y = xy\n",
691 | | - "    return 1 / (x ** 2 + y ** 2)\n",
| 706 | + "    return 1 / (x**2 + y**2)\n",
692 | 707 | "\n",
693 | 708 | "\n",
694 | 709 | "def plot_logz(learner):\n",

749 | 764 | "    # It represents the deviation of the function value from a linear estimate\n",
750 | 765 | "    # over each triangular subdomain.\n",
751 | 766 | "    dev = deviations(ip)[0]\n",
752 | | - "    \n",
| 767 | + "\n",
753 | 768 | "    # we add terms of the same dimension: dev == [distance], A == [distance**2]\n",
754 | 769 | "    loss = np.sqrt(A) * dev + A\n",
755 | | - "    \n",
| 770 | + "\n",
756 | 771 | "    # Setting the loss of small triangles to zero such that they won't be chosen again\n",
757 | | - "    loss[A < min_distance**2] = 0 \n",
758 | | - "    \n",
| 772 | + "    loss[A < min_distance**2] = 0\n",
| 773 | + "\n",
759 | 774 | "    # Setting triangles that have a size larger than max_distance to infinite loss\n",
760 | 775 | "    loss[A > max_distance**2] = np.inf\n",
761 | 776 | "\n",
762 | 777 | "    return loss\n",
763 | 778 | "\n",
| 779 | + "\n",
764 | 780 | "loss = partial(resolution_loss, min_distance=0.01)\n",
765 | 781 | "\n",
766 | 782 | "learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)\n",

803 | 819 | "    a = 0.01\n",
804 | 820 | "    return x + a**2 / (a**2 + (x - offset)**2)\n",
805 | 821 | "\n",
806 | | - "learners = [adaptive.Learner1D(partial(h, offset=random.uniform(-1, 1)),\n",
807 | | - "                               bounds=(-1, 1)) for i in range(10)]\n",
| 822 | + "\n",
| 823 | + "learners = [\n",
| 824 | + "    adaptive.Learner1D(partial(h, offset=random.uniform(-1, 1)), bounds=(-1, 1))\n",
| 825 | + "    for i in range(10)\n",
| 826 | + "]\n",
808 | 827 | "\n",
809 | 828 | "bal_learner = adaptive.BalancingLearner(learners)\n",
810 | 829 | "runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01)\n",

836 | 855 | "source": [
837 | 856 | "from scipy.special import eval_jacobi\n",
838 | 857 | "\n",
| 858 | + "\n",
839 | 859 | "def jacobi(x, n, alpha, beta):\n",
840 | 860 | "    return eval_jacobi(n, alpha, beta, x)\n",
841 | 861 | "\n",
| 862 | + "\n",
842 | 863 | "combos = {\n",
843 | | - "    'n': [1, 2, 4, 8],\n",
844 | | - "    'alpha': np.linspace(0, 2, 3),\n",
845 | | - "    'beta': np.linspace(0, 1, 5),\n",
| 864 | + "    \"n\": [1, 2, 4, 8],\n",
| 865 | + "    \"alpha\": np.linspace(0, 2, 3),\n",
| 866 | + "    \"beta\": np.linspace(0, 1, 5),\n",
846 | 867 | "}\n",
847 | 868 | "\n",
848 | 869 | "learner = adaptive.BalancingLearner.from_product(\n",
849 | | - "    jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos)\n",
| 870 | + "    jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos\n",
| 871 | + ")\n",
850 | 872 | "\n",
851 | 873 | "runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)\n",
852 | 874 | "\n",
853 | 875 | "# The `cdims` will automatically be set when using `from_product`, so\n",
854 | 876 | "# `plot()` will return a HoloMap with correctly labeled sliders.\n",
855 | | - "learner.plot().overlay('beta').grid().select(y=(-1, 3))"
| 877 | + "learner.plot().overlay(\"beta\").grid().select(y=(-1, 3))"
856 | 878 | ]
857 | 879 | },
858 | 880 | {

879 | 901 | "source": [
880 | 902 | "from operator import itemgetter\n",
881 | 903 | "\n",
| 904 | + "\n",
882 | 905 | "def f_dict(x):\n",
883 | 906 | "    \"\"\"The function evaluation takes roughly the time we `sleep`.\"\"\"\n",
884 | 907 | "    import random\n",

888 | 911 | "    sleep(waiting_time)\n",
889 | 912 | "    a = 0.01\n",
890 | 913 | "    y = x + a**2 / (a**2 + x**2)\n",
891 | | - "    return {'y': y, 'waiting_time': waiting_time}\n",
| 914 | + "    return {\"y\": y, \"waiting_time\": waiting_time}\n",
| 915 | + "\n",
892 | 916 | "\n",
893 | 917 | "# Create the learner with the function that returns a 'dict'\n",
894 | 918 | "# This learner cannot be run directly, as Learner1D does not know what to do with the 'dict'\n",
895 | 919 | "_learner = adaptive.Learner1D(f_dict, bounds=(-1, 1))\n",
896 | 920 | "\n",
897 | 921 | "# Wrap the learner in 'adaptive.DataSaver' and tell it which key it needs to learn\n",
898 | | - "learner = adaptive.DataSaver(_learner, arg_picker=itemgetter('y'))"
| 922 | + "learner = adaptive.DataSaver(_learner, arg_picker=itemgetter(\"y\"))"
899 | 923 | ]
900 | 924 | },
901 | 925 | {

965 | 989 | "outputs": [],
966 | 990 | "source": [
967 | 991 | "def F(x, noise_level=0.1):\n",
968 | | - "    return (np.sin(5 * x) * (1 - np.tanh(x ** 2))\n",
969 | | - "            + np.random.randn() * noise_level)"
| 992 | + "    return np.sin(5 * x) * (1 - np.tanh(x**2)) + np.random.randn() * noise_level"
970 | 993 | ]
971 | 994 | },
972 | 995 | {

975 | 998 | "metadata": {},
976 | 999 | "outputs": [],
977 | 1000 | "source": [
978 | | - "learner = adaptive.SKOptLearner(F, dimensions=[(-2., 2.)],\n",
979 | | - "                                base_estimator=\"GP\",\n",
980 | | - "                                acq_func=\"gp_hedge\",\n",
981 | | - "                                acq_optimizer=\"lbfgs\",\n",
982 | | - "                                )\n",
| 1001 | + "learner = adaptive.SKOptLearner(\n",
| 1002 | + "    F,\n",
| 1003 | + "    dimensions=[(-2.0, 2.0)],\n",
| 1004 | + "    base_estimator=\"GP\",\n",
| 1005 | + "    acq_func=\"gp_hedge\",\n",
| 1006 | + "    acq_optimizer=\"lbfgs\",\n",
| 1007 | + ")\n",
983 | 1008 | "runner = adaptive.Runner(learner, ntasks=1, goal=lambda l: l.npoints > 40)\n",
984 | 1009 | "runner.live_info()"
985 | 1010 | ]

992 | 1017 | "source": [
993 | 1018 | "%%opts Overlay [legend_position='top']\n",
994 | 1019 | "xs = np.linspace(*learner.space.bounds[0])\n",
995 | | - "to_learn = hv.Curve((xs, [F(x, 0) for x in xs]), label='to learn')\n",
| 1020 | + "to_learn = hv.Curve((xs, [F(x, 0) for x in xs]), label=\"to learn\")\n",
996 | 1021 | "\n",
997 | | - "runner.live_plot().relabel('prediction', depth=2) * to_learn"
| 1022 | + "runner.live_plot().relabel(\"prediction\", depth=2) * to_learn"
998 | 1023 | ]
999 | 1024 | },
1000 | 1025 | {

1155 | 1180 | "metadata": {},
1156 | 1181 | "outputs": [],
1157 | 1182 | "source": [
1158 | | - "fname = 'data/example_file.p'\n",
| 1183 | + "fname = \"data/example_file.p\"\n",
1159 | 1184 | "learner.save(fname)"
1160 | 1185 | ]
1161 | 1186 | },

1173 | 1198 | "outputs": [],
1174 | 1199 | "source": [
1175 | 1200 | "control.load(fname)\n",
1176 | | - "learner.plot().relabel('saved learner') + control.plot().relabel('loaded learner')"
| 1201 | + "learner.plot().relabel(\"saved learner\") + control.plot().relabel(\"loaded learner\")"
1177 | 1202 | ]
1178 | 1203 | },
1179 | 1204 | {

1208 | 1233 | "source": [
1209 | 1234 | "def slow_f(x):\n",
1210 | 1235 | "    from time import sleep\n",
| 1236 | + "\n",
1211 | 1237 | "    sleep(5)\n",
1212 | 1238 | "    return x\n",
1213 | 1239 | "\n",
| 1240 | + "\n",
1214 | 1241 | "learner = adaptive.Learner1D(slow_f, bounds=[0, 1])\n",
1215 | 1242 | "runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 100)\n",
1216 | 1243 | "\n",
1217 | | - "runner.start_periodic_saving(save_kwargs=dict(fname='data/periodic_example.p'), interval=6)\n",
| 1244 | + "runner.start_periodic_saving(\n",
| 1245 | + "    save_kwargs=dict(fname=\"data/periodic_example.p\"), interval=6\n",
| 1246 | + ")\n",
1218 | 1247 | "\n",
1219 | 1248 | "runner.live_info()"
1220 | 1249 | ]

1328 | 1357 | "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n",
1329 | 1358 | "\n",
1330 | 1359 | "# blocks until completion\n",
1331 | | - "runner = adaptive.Runner(learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.002)\n",
| 1360 | + "runner = adaptive.Runner(\n",
| 1361 | + "    learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.002\n",
| 1362 | + ")\n",
1332 | 1363 | "runner.live_info()\n",
1333 | 1364 | "runner.live_plot(update_interval=0.1)"

1406 | 1437 | "def will_raise(x):\n",
1407 | 1438 | "    from random import random\n",
1408 | 1439 | "    from time import sleep\n",
1409 | | - "    \n",
| 1440 | + "\n",
1410 | 1441 | "    sleep(random())\n",
1411 | 1442 | "    if random() < 0.1:\n",
1412 | | - "        raise RuntimeError('something went wrong!')\n",
| 1443 | + "        raise RuntimeError(\"something went wrong!\")\n",
1413 | 1444 | "    return x**2\n",
1414 | | - "    \n",
| 1445 | + "\n",
| 1446 | + "\n",
1415 | 1447 | "learner = adaptive.Learner1D(will_raise, (-1, 1))\n",
1416 | | - "runner = adaptive.Runner(learner)  # without 'goal' the runner will run forever unless cancelled\n",
| 1448 | + "runner = adaptive.Runner(\n",
| 1449 | + "    learner\n",
| 1450 | + ")  # without 'goal' the runner will run forever unless cancelled\n",
1417 | 1451 | "runner.live_info()\n",
1418 | 1452 | "runner.live_plot()"
1419 | 1453 | ]

1546 | 1580 | "source": [
1547 | 1581 | "import asyncio\n",
1548 | 1582 | "\n",
| 1583 | + "\n",
1549 | 1584 | "async def time(runner):\n",
1550 | 1585 | "    from datetime import datetime\n",
| 1586 | + "\n",
1551 | 1587 | "    now = datetime.now()\n",
1552 | 1588 | "    await runner.task\n",
1553 | 1589 | "    return datetime.now() - now\n",
1554 | 1590 | "\n",
| 1591 | + "\n",
1555 | 1592 | "ioloop = asyncio.get_event_loop()\n",
1556 | 1593 | "\n",
1557 | 1594 | "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n",