style notebook
basnijholt committed Sep 23, 2021
1 parent f959566 commit 8dca8e5
Showing 1 changed file with 78 additions and 41 deletions.
example-notebook.ipynb (119 changes: 78 additions & 41 deletions)
@@ -24,6 +24,7 @@
"outputs": [],
"source": [
"import adaptive\n",
"\n",
"adaptive.notebook_extension()\n",
"\n",
"# Import modules that are used in multiple cells\n",
@@ -57,12 +58,13 @@
"source": [
"offset = random.uniform(-0.5, 0.5)\n",
"\n",
"\n",
"def peak(x, offset=offset, wait=True):\n",
" from time import sleep\n",
" from random import random\n",
"\n",
" a = 0.01\n",
" if wait: \n",
" if wait:\n",
" # we pretend that this is a slow function\n",
" sleep(random())\n",
"\n",
@@ -180,7 +182,7 @@
" sleep(random() / 10)\n",
" x, y = xy\n",
" a = 0.2\n",
" return x + np.exp(-(x ** 2 + y ** 2 - 0.75 ** 2) ** 2 / a ** 4)\n",
" return x + np.exp(-((x**2 + y**2 - 0.75**2)**2) / a**4)\n",
"\n",
"\n",
"learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])"
@@ -206,6 +208,7 @@
" plot = learner.plot(tri_alpha=0.2)\n",
" return plot.Image + plot.EdgePaths.I + plot\n",
"\n",
"\n",
"runner.live_plot(plotter=plot, update_interval=0.1)"
]
},
@@ -219,7 +222,7 @@
"\n",
"# Create a learner and add data on homogeneous grid, so that we can plot it\n",
"learner2 = adaptive.Learner2D(ring, bounds=learner.bounds)\n",
"n = int(learner.npoints ** 0.5)\n",
"n = int(learner.npoints**0.5)\n",
"xs, ys = [np.linspace(*bounds, n) for bounds in learner.bounds]\n",
"xys = list(itertools.product(xs, ys))\n",
"zs = [ring(xy, wait=False) for xy in xys]\n",
@@ -259,6 +262,7 @@
"def g(n):\n",
" import random\n",
" from time import sleep\n",
"\n",
" sleep(random.random() / 1000)\n",
" # Properly save and restore the RNG state\n",
" state = random.getstate()\n",
@@ -312,7 +316,7 @@
"source": [
"def noisy_peak(seed_x, sigma=0, peak_width=0.05, offset=-0.5):\n",
" seed, x = seed_x\n",
" y = x ** 3 - x + 3 * peak_width ** 2 / (peak_width ** 2 + (x - offset) ** 2)\n",
" y = x**3 - x + 3 * peak_width**2 / (peak_width**2 + (x - offset)**2)\n",
" rng = np.random.RandomState(int(seed))\n",
" noise = rng.normal(scale=sigma)\n",
" return y + noise"
@@ -418,6 +422,7 @@
"def f24(x):\n",
" return np.floor(np.exp(x))\n",
"\n",
"\n",
"xs = np.linspace(0, 3, 200)\n",
"hv.Scatter((xs, [f24(x) for x in xs]))"
]
@@ -436,6 +441,7 @@
"outputs": [],
"source": [
"import scipy.integrate\n",
"\n",
"scipy.integrate.quad(f24, 0, 3)"
]
},
@@ -479,7 +485,10 @@
"if runner.status() != \"finished\":\n",
" print(\"WARINING: The runner hasn't reached it goal yet!\")\n",
"\n",
"print('The integral value is {} with the corresponding error of {}'.format(learner.igral, learner.err))\n",
"print(\n",
" f\"The integral value is {learner.igral} \"\n",
" f\"with a corresponding error of {learner.err}\"\n",
")\n",
"learner.plot()"
]
},
@@ -559,7 +568,8 @@
"def sphere(xyz):\n",
" x, y, z = xyz\n",
" a = 0.4\n",
" return x + z**2 + np.exp(-(x**2 + y**2 + z**2 - 0.75**2)**2/a**4)\n",
" return x + z**2 + np.exp(-((x**2 + y**2 + z**2 - 0.75**2)**2) / a**4)\n",
"\n",
"\n",
"learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])\n",
"runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 2000)\n",
@@ -580,11 +590,12 @@
"outputs": [],
"source": [
"def plot_cut(x, direction, learner=learner):\n",
" cut_mapping = {'xyz'.index(direction): x}\n",
" cut_mapping = {\"xyz\".index(direction): x}\n",
" return learner.plot_slice(cut_mapping, n=100)\n",
"\n",
"dm = hv.DynamicMap(plot_cut, kdims=['value', 'direction'])\n",
"dm.redim.values(value=np.linspace(-1, 1), direction=list('xyz'))"
"\n",
"dm = hv.DynamicMap(plot_cut, kdims=[\"value\", \"direction\"])\n",
"dm.redim.values(value=np.linspace(-1, 1), direction=list(\"xyz\"))"
]
},
{
@@ -662,10 +673,14 @@
" dx = xs[1] - xs[0]\n",
" return dx\n",
"\n",
"\n",
"def f_divergent_1d(x):\n",
" return 1 / x**2\n",
"\n",
"learner = adaptive.Learner1D(f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d)\n",
"\n",
"learner = adaptive.Learner1D(\n",
" f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d\n",
")\n",
"runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)\n",
"learner.plot().select(y=(0, 10000))"
]
@@ -688,7 +703,7 @@
"\n",
"def f_divergent_2d(xy):\n",
" x, y = xy\n",
" return 1 / (x ** 2 + y ** 2)\n",
" return 1 / (x**2 + y**2)\n",
"\n",
"\n",
"def plot_logz(learner):\n",
@@ -749,18 +764,19 @@
" # It represents the deviation of the function value from a linear estimate\n",
" # over each triangular subdomain.\n",
" dev = deviations(ip)[0]\n",
" \n",
"\n",
" # we add terms of the same dimension: dev == [distance], A == [distance**2]\n",
" loss = np.sqrt(A) * dev + A\n",
" \n",
"\n",
" # Setting areas with a small area to zero such that they won't be chosen again\n",
" loss[A < min_distance**2] = 0 \n",
" \n",
" loss[A < min_distance**2] = 0\n",
"\n",
" # Setting triangles that have a size larger than max_distance to infinite loss\n",
" loss[A > max_distance**2] = np.inf\n",
"\n",
" return loss\n",
"\n",
"\n",
"loss = partial(resolution_loss, min_distance=0.01)\n",
"\n",
"learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)\n",
@@ -803,8 +819,11 @@
" a = 0.01\n",
" return x + a**2 / (a**2 + (x - offset)**2)\n",
"\n",
"learners = [adaptive.Learner1D(partial(h, offset=random.uniform(-1, 1)),\n",
" bounds=(-1, 1)) for i in range(10)]\n",
"\n",
"learners = [\n",
" adaptive.Learner1D(partial(h, offset=random.uniform(-1, 1)), bounds=(-1, 1))\n",
" for i in range(10)\n",
"]\n",
"\n",
"bal_learner = adaptive.BalancingLearner(learners)\n",
"runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01)\n",
@@ -836,23 +855,26 @@
"source": [
"from scipy.special import eval_jacobi\n",
"\n",
"\n",
"def jacobi(x, n, alpha, beta):\n",
" return eval_jacobi(n, alpha, beta, x)\n",
"\n",
"\n",
"combos = {\n",
" 'n': [1, 2, 4, 8],\n",
" 'alpha': np.linspace(0, 2, 3),\n",
" 'beta': np.linspace(0, 1, 5),\n",
" \"n\": [1, 2, 4, 8],\n",
" \"alpha\": np.linspace(0, 2, 3),\n",
" \"beta\": np.linspace(0, 1, 5),\n",
"}\n",
"\n",
"learner = adaptive.BalancingLearner.from_product(\n",
" jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos)\n",
" jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos\n",
")\n",
"\n",
"runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)\n",
"\n",
"# The `cdims` will automatically be set when using `from_product`, so\n",
"# `plot()` will return a HoloMap with correctly labeled sliders.\n",
"learner.plot().overlay('beta').grid().select(y=(-1, 3))"
"learner.plot().overlay(\"beta\").grid().select(y=(-1, 3))"
]
},
{
Expand All @@ -879,6 +901,7 @@
"source": [
"from operator import itemgetter\n",
"\n",
"\n",
"def f_dict(x):\n",
" \"\"\"The function evaluation takes roughly the time we `sleep`.\"\"\"\n",
" import random\n",
@@ -888,14 +911,15 @@
" sleep(waiting_time)\n",
" a = 0.01\n",
" y = x + a**2 / (a**2 + x**2)\n",
" return {'y': y, 'waiting_time': waiting_time}\n",
" return {\"y\": y, \"waiting_time\": waiting_time}\n",
"\n",
"\n",
"# Create the learner with the function that returns a 'dict'\n",
"# This learner cannot be run directly, as Learner1D does not know what to do with the 'dict'\n",
"_learner = adaptive.Learner1D(f_dict, bounds=(-1, 1))\n",
"\n",
"# Wrapping the learner with 'adaptive.DataSaver' and tell it which key it needs to learn\n",
"learner = adaptive.DataSaver(_learner, arg_picker=itemgetter('y'))"
"learner = adaptive.DataSaver(_learner, arg_picker=itemgetter(\"y\"))"
]
},
{
@@ -965,8 +989,7 @@
"outputs": [],
"source": [
"def F(x, noise_level=0.1):\n",
" return (np.sin(5 * x) * (1 - np.tanh(x ** 2))\n",
" + np.random.randn() * noise_level)"
" return np.sin(5 * x) * (1 - np.tanh(x**2)) + np.random.randn() * noise_level"
]
},
{
@@ -975,11 +998,13 @@
"metadata": {},
"outputs": [],
"source": [
"learner = adaptive.SKOptLearner(F, dimensions=[(-2., 2.)],\n",
" base_estimator=\"GP\",\n",
" acq_func=\"gp_hedge\",\n",
" acq_optimizer=\"lbfgs\",\n",
" )\n",
"learner = adaptive.SKOptLearner(\n",
" F,\n",
" dimensions=[(-2.0, 2.0)],\n",
" base_estimator=\"GP\",\n",
" acq_func=\"gp_hedge\",\n",
" acq_optimizer=\"lbfgs\",\n",
")\n",
"runner = adaptive.Runner(learner, ntasks=1, goal=lambda l: l.npoints > 40)\n",
"runner.live_info()"
]
@@ -992,9 +1017,9 @@
"source": [
"%%opts Overlay [legend_position='top']\n",
"xs = np.linspace(*learner.space.bounds[0])\n",
"to_learn = hv.Curve((xs, [F(x, 0) for x in xs]), label='to learn')\n",
"to_learn = hv.Curve((xs, [F(x, 0) for x in xs]), label=\"to learn\")\n",
"\n",
"runner.live_plot().relabel('prediction', depth=2) * to_learn"
"runner.live_plot().relabel(\"prediction\", depth=2) * to_learn"
]
},
{
@@ -1155,7 +1180,7 @@
"metadata": {},
"outputs": [],
"source": [
"fname = 'data/example_file.p'\n",
"fname = \"data/example_file.p\"\n",
"learner.save(fname)"
]
},
@@ -1173,7 +1198,7 @@
"outputs": [],
"source": [
"control.load(fname)\n",
"learner.plot().relabel('saved learner') + control.plot().relabel('loaded learner')"
"learner.plot().relabel(\"saved learner\") + control.plot().relabel(\"loaded learner\")"
]
},
{
@@ -1208,13 +1233,17 @@
"source": [
"def slow_f(x):\n",
" from time import sleep\n",
"\n",
" sleep(5)\n",
" return x\n",
"\n",
"\n",
"learner = adaptive.Learner1D(slow_f, bounds=[0, 1])\n",
"runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 100)\n",
"\n",
"runner.start_periodic_saving(save_kwargs=dict(fname='data/periodic_example.p'), interval=6)\n",
"runner.start_periodic_saving(\n",
" save_kwargs=dict(fname=\"data/periodic_example.p\"), interval=6\n",
")\n",
"\n",
"runner.live_info()"
]
@@ -1328,7 +1357,9 @@
"learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n",
"\n",
"# blocks until completion\n",
"runner = adaptive.Runner(learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.002)\n",
"runner = adaptive.Runner(\n",
" learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.002\n",
")\n",
"runner.live_info()\n",
"runner.live_plot(update_interval=0.1)"
]
@@ -1406,14 +1437,17 @@
"def will_raise(x):\n",
" from random import random\n",
" from time import sleep\n",
" \n",
"\n",
" sleep(random())\n",
" if random() < 0.1:\n",
" raise RuntimeError('something went wrong!')\n",
" raise RuntimeError(\"something went wrong!\")\n",
" return x**2\n",
" \n",
"\n",
"\n",
"learner = adaptive.Learner1D(will_raise, (-1, 1))\n",
"runner = adaptive.Runner(learner) # without 'goal' the runner will run forever unless cancelled\n",
"runner = adaptive.Runner(\n",
" learner\n",
") # without 'goal' the runner will run forever unless cancelled\n",
"runner.live_info()\n",
"runner.live_plot()"
]
@@ -1546,12 +1580,15 @@
"source": [
"import asyncio\n",
"\n",
"\n",
"async def time(runner):\n",
" from datetime import datetime\n",
"\n",
" now = datetime.now()\n",
" await runner.task\n",
" return datetime.now() - now\n",
"\n",
"\n",
"ioloop = asyncio.get_event_loop()\n",
"\n",
"learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n",
