Skip to content

Commit

Permalink
Use new auto goal functionality
Browse files Browse the repository at this point in the history
  • Loading branch information
basnijholt committed Nov 14, 2022
1 parent e7f2179 commit 30bccb7
Show file tree
Hide file tree
Showing 15 changed files with 60 additions and 59 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ def peak(x, a=0.01):


learner = Learner1D(peak, bounds=(-1, 1))
runner = Runner(learner, goal=lambda l: l.loss() < 0.01)
runner = Runner(learner, goal=0.01)
runner.live_info()
runner.live_plot()
```
Expand Down
2 changes: 1 addition & 1 deletion adaptive/tests/test_average_learner.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ def constant_function(seed):
learner = AverageLearner(
constant_function, atol=0.01, rtol=0.01, min_npoints=min_npoints
)
simple(learner, lambda l: l.loss() < 1)
simple(learner, 1.0)
assert learner.npoints >= max(2, min_npoints)


Expand Down
6 changes: 3 additions & 3 deletions adaptive/tests/test_balancing_learner.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,10 +52,10 @@ def test_ask_0(strategy):
@pytest.mark.parametrize(
"strategy, goal",
[
("loss", lambda l: l.loss() < 0.1),
("loss_improvements", lambda l: l.loss() < 0.1),
("loss", 0.1),
("loss_improvements", 0.1),
("npoints", lambda bl: all(l.npoints > 10 for l in bl.learners)),
("cycle", lambda l: l.loss() < 0.1),
("cycle", 0.1),
],
)
def test_strategies(strategy, goal):
Expand Down
6 changes: 3 additions & 3 deletions adaptive/tests/test_learnernd.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,8 @@ def test_interior_vs_bbox_gives_same_result():
hull = scipy.spatial.ConvexHull(control._bounds_points)
learner = LearnerND(f, bounds=hull)

simple(control, goal=lambda l: l.loss() < 0.1)
simple(learner, goal=lambda l: l.loss() < 0.1)
simple(control, goal=0.1)
simple(learner, goal=0.1)

assert learner.data == control.data

Expand All @@ -47,4 +47,4 @@ def test_vector_return_with_a_flat_layer():
h3 = lambda xy: np.array([0 * f(xy), g(xy)]) # noqa: E731
for function in [h1, h2, h3]:
learner = LearnerND(function, bounds=[(-1, 1), (-1, 1)])
simple(learner, goal=lambda l: l.loss() < 0.1)
simple(learner, goal=0.1)
2 changes: 1 addition & 1 deletion docs/logo.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ def ring(xy):
return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4)

learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01)
adaptive.runner.simple(learner, goal=0.01)
return learner


Expand Down
2 changes: 1 addition & 1 deletion docs/source/logo.md
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ def create_and_run_learner():
return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4)
learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.005)
adaptive.runner.simple(learner, goal=0.005)
return learner
Expand Down
4 changes: 2 additions & 2 deletions docs/source/tutorial/tutorial.AverageLearner.md
Original file line number Diff line number Diff line change
Expand Up @@ -45,8 +45,8 @@ def g(n):

```{code-cell} ipython3
learner = adaptive.AverageLearner(g, atol=None, rtol=0.01)
# `loss < 1` means that we reached the `rtol` or `atol`
runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 1)
# `loss < 1.0` means that we reached the `rtol` or `atol`
runner = adaptive.Runner(learner, goal=1.0)
```

```{code-cell} ipython3
Expand Down
4 changes: 2 additions & 2 deletions docs/source/tutorial/tutorial.BalancingLearner.md
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ learners = [
]
bal_learner = adaptive.BalancingLearner(learners)
runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01)
runner = adaptive.Runner(bal_learner, goal=0.01)
```

```{code-cell} ipython3
Expand Down Expand Up @@ -86,7 +86,7 @@ learner = adaptive.BalancingLearner.from_product(
jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos
)
runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)
runner = adaptive.BlockingRunner(learner, goal=0.01)
# The `cdims` will automatically be set when using `from_product`, so
# `plot()` will return a HoloMap with correctly labeled sliders.
Expand Down
6 changes: 3 additions & 3 deletions docs/source/tutorial/tutorial.Learner1D.md
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ A {class}`~concurrent.futures.ProcessPoolExecutor` cannot be used on Windows for
```{code-cell} ipython3
# The end condition is when the "loss" is less than 0.01. In the context of the
# 1D learner this means that we will resolve features in 'func' with width 0.01 or wider.
runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
runner = adaptive.Runner(learner, goal=0.01)
```

```{code-cell} ipython3
Expand Down Expand Up @@ -124,7 +124,7 @@ The `Learner1D` can be used for such functions:

```{code-cell} ipython3
learner = adaptive.Learner1D(f_levels, bounds=(-1, 1))
runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
runner = adaptive.Runner(learner, goal=0.01)
```

```{code-cell} ipython3
Expand Down Expand Up @@ -156,7 +156,7 @@ from adaptive.learner.learner1D import (
curvature_loss = curvature_loss_function()
learner = adaptive.Learner1D(f, bounds=(-1, 1), loss_per_interval=curvature_loss)
runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
runner = adaptive.Runner(learner, goal=0.01)
```

```{code-cell} ipython3
Expand Down
2 changes: 1 addition & 1 deletion docs/source/tutorial/tutorial.Learner2D.md
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
```

```{code-cell} ipython3
runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
runner = adaptive.Runner(learner, goal=0.01)
```

```{code-cell} ipython3
Expand Down
2 changes: 1 addition & 1 deletion docs/source/tutorial/tutorial.LearnerND.md
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ def sphere(xyz):
learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])
runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 1e-3)
runner = adaptive.Runner(learner, goal=1e-3)
```

```{code-cell} ipython3
Expand Down
14 changes: 7 additions & 7 deletions docs/source/tutorial/tutorial.advanced-topics.md
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ learner = adaptive.Learner1D(f, bounds=(-1, 1))
control = adaptive.Learner1D(f, bounds=(-1, 1))
# Let's only run the learner
runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
runner = adaptive.Runner(learner, goal=0.01)
```

```{code-cell} ipython3
Expand Down Expand Up @@ -134,7 +134,7 @@ The simplest way to accomplish this is to use {class}`adaptive.BlockingRunner`:

```{code-cell} ipython3
learner = adaptive.Learner1D(f, bounds=(-1, 1))
adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)
adaptive.BlockingRunner(learner, goal=0.01)
# This will only get run after the runner has finished
learner.plot()
```
Expand All @@ -155,7 +155,7 @@ The simplest way is to use {class}`adaptive.runner.simple` to run your learner:
learner = adaptive.Learner1D(f, bounds=(-1, 1))
# blocks until completion
adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01)
adaptive.runner.simple(learner, goal=0.01)
learner.plot()
```
Expand All @@ -169,7 +169,7 @@ from adaptive.runner import SequentialExecutor
learner = adaptive.Learner1D(f, bounds=(-1, 1))
runner = adaptive.Runner(
learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.01
learner, executor=SequentialExecutor(), goal=0.01
)
```

Expand Down Expand Up @@ -292,7 +292,7 @@ One way to inspect runners is to instantiate one with `log=True`:

```{code-cell} ipython3
learner = adaptive.Learner1D(f, bounds=(-1, 1))
runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01, log=True)
runner = adaptive.Runner(learner, goal=0.01, log=True)
```

```{code-cell} ipython3
Expand Down Expand Up @@ -351,7 +351,7 @@ async def time(runner):
ioloop = asyncio.get_event_loop()
learner = adaptive.Learner1D(f, bounds=(-1, 1))
runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
runner = adaptive.Runner(learner, goal=0.01)
timer = ioloop.create_task(time(runner))
```
Expand Down Expand Up @@ -462,7 +462,7 @@ def f(x):

learner = adaptive.Learner1D(f, (-1, 1))

adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.1)
adaptive.BlockingRunner(learner, goal=0.1)
```

If you use `asyncio` already in your script and want to integrate `adaptive` into it, then you can use the default {class}`~adaptive.Runner` as you would from a notebook.
Expand Down
6 changes: 3 additions & 3 deletions docs/source/tutorial/tutorial.custom_loss.md
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ def f_divergent_1d(x):
learner = adaptive.Learner1D(
f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d
)
runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)
runner = adaptive.BlockingRunner(learner, goal=0.01)
learner.plot().select(y=(0, 10000))
```

Expand All @@ -99,7 +99,7 @@ learner = adaptive.Learner2D(
)
# this takes a while, so use the async Runner so we know *something* is happening
runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.03 or l.npoints > 1000)
runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.03 or l.npoints > 1000)
```

```{code-cell} ipython3
Expand Down Expand Up @@ -154,7 +154,7 @@ def resolution_loss_function(min_distance=0, max_distance=1):
loss = resolution_loss_function(min_distance=0.01)
learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)
runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.02)
runner = adaptive.BlockingRunner(learner, goal=0.02)
learner.plot(tri_alpha=0.3).relabel("1 / (x^2 + y^2) in log scale").opts(
hv.opts.EdgePaths(color="w"), hv.opts.Image(logz=True, colorbar=True)
)
Expand Down
10 changes: 5 additions & 5 deletions docs/source/tutorial/tutorial.parallelism.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ from concurrent.futures import ProcessPoolExecutor
executor = ProcessPoolExecutor(max_workers=4)

learner = adaptive.Learner1D(f, bounds=(-1, 1))
runner = adaptive.Runner(learner, executor=executor, goal=lambda l: l.loss() < 0.05)
runner = adaptive.Runner(learner, executor=executor, goal=0.05)
runner.live_info()
runner.live_plot(update_interval=0.1)
```
Expand All @@ -37,7 +37,7 @@ import ipyparallel
client = ipyparallel.Client() # You will need to start an `ipcluster` to make this work

learner = adaptive.Learner1D(f, bounds=(-1, 1))
runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01)
runner = adaptive.Runner(learner, executor=client, goal=0.01)
runner.live_info()
runner.live_plot()
```
Expand All @@ -52,7 +52,7 @@ import distributed
client = distributed.Client()

learner = adaptive.Learner1D(f, bounds=(-1, 1))
runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01)
runner = adaptive.Runner(learner, executor=client, goal=0.01)
runner.live_info()
runner.live_plot(update_interval=0.1)
```
Expand Down Expand Up @@ -80,7 +80,7 @@ if __name__ == "__main__":
learner,
executor=MPIPoolExecutor(),
shutdown_executor=True,
goal=lambda l: l.loss() < 0.01,
goal=0.01,
)

# periodically save the data (in case the job dies)
Expand Down Expand Up @@ -132,6 +132,6 @@ ex = get_reusable_executor()
f = lambda x: x
learner = adaptive.Learner1D(f, bounds=(-1, 1))

runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01, executor=ex)
runner = adaptive.Runner(learner, goal=0.01, executor=ex)
runner.live_info()
```
Loading

0 comments on commit 30bccb7

Please sign in to comment.