diff --git a/.github/workflows/auto-changelog-generator.yml b/.github/workflows/auto-changelog-generator.yml index 5cb0f7d12..6f68c3f83 100644 --- a/.github/workflows/auto-changelog-generator.yml +++ b/.github/workflows/auto-changelog-generator.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index f39544953..4c177878f 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -7,14 +7,14 @@ jobs: coverage: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: 3.11 - - name: Install dependencies - run: pip install nox + python-version: 3.13 + - name: Install uv + uses: astral-sh/setup-uv@v6 - name: Test with nox - run: nox -e coverage + run: uv run --group nox nox -e coverage - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 diff --git a/.github/workflows/matchers/pytest.json b/.github/workflows/matchers/pytest.json deleted file mode 100644 index 3e5d8d5b8..000000000 --- a/.github/workflows/matchers/pytest.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "problemMatcher": [ - { - "owner": "python", - "pattern": [ - { - "regexp": "^\\s*File\\s\\\"(.*)\\\",\\sline\\s(\\d+),\\sin\\s(.*)$", - "file": 1, - "line": 2 - }, - { - "regexp": "^\\s*raise\\s(.*)\\(\\'(.*)\\'\\)$", - "message": 2 - } - ] - } - ] -} diff --git a/.github/workflows/nox.yml b/.github/workflows/nox.yml index 42326d4b4..a03b2d7b2 100644 --- a/.github/workflows/nox.yml +++ b/.github/workflows/nox.yml @@ -12,19 +12,17 @@ jobs: fail-fast: false matrix: platform: [ubuntu-latest, macos-latest, windows-latest] - python-version: ["3.9", "3.10", "3.11"] + python-version: ["3.11", "3.12", "3.13"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Register Python problem matcher - run: echo "::add-matcher::.github/workflows/matchers/pytest.json" - - name: Install dependencies - run: pip install nox pytest-github-actions-annotate-failures + - name: Install uv + uses: astral-sh/setup-uv@v6 - name: Test with nox using minimal dependencies - run: nox -e "pytest-${{ matrix.python-version }}(all_deps=False)" + run: uv run --group nox nox -e "pytest_min_deps-${{ matrix.python-version }}" - name: Test with nox with all dependencies - run: nox -e "pytest-${{ matrix.python-version }}(all_deps=True)" + run: uv run --group nox nox -e "pytest_all_deps-${{ matrix.python-version }}" diff --git a/.github/workflows/pythonpublish.yml b/.github/workflows/pythonpublish.yml index 6ccfc7898..48251f61c 100644 --- a/.github/workflows/pythonpublish.yml +++ b/.github/workflows/pythonpublish.yml @@ -13,9 +13,9 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.x' - name: Install dependencies diff --git a/.github/workflows/typeguard.yml b/.github/workflows/typeguard.yml index d3c1da10e..c89a44425 100644 --- a/.github/workflows/typeguard.yml +++ b/.github/workflows/typeguard.yml @@ -1,19 
+1,20 @@
 name: typeguard

-# TODO: enable this once typeguard=4 is released and issues are fixed.
-# on:
-#   - push
+on:
+  pull_request:
+  push:
+    branches: [main]

 jobs:
   typeguard:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
-          python-version: "3.11"
-      - name: Install dependencies
-        run: pip install nox
+          python-version: "3.13"
+      - name: Install uv
+        uses: astral-sh/setup-uv@v6
       - name: Test with nox
-        run: nox -e pytest_typeguard
+        run: uv run --group nox nox -e pytest_typeguard
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2f94c9f1c..2715666ef 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
+    rev: v5.0.0
     hooks:
       - id: trailing-whitespace
       - id: end-of-file-fixer
@@ -8,17 +8,14 @@ repos:
       - id: check-yaml
       - id: debug-statements
       - id: check-ast
-  - repo: https://github.com/psf/black
-    rev: 23.3.0
-    hooks:
-      - id: black-jupyter
-  - repo: https://github.com/charliermarsh/ruff-pre-commit
-    rev: "v0.0.265"
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: "v0.11.9"
     hooks:
       - id: ruff
         args: ["--fix"]
+      - id: ruff-format
   - repo: https://github.com/nbQA-dev/nbQA
-    rev: 1.7.0
+    rev: 1.9.1
     hooks:
       - id: nbqa-black
         additional_dependencies: [jupytext, black]
@@ -26,7 +23,7 @@ repos:
         args: ["ruff", "--fix", "--ignore=E402,B018,F704"]
         additional_dependencies: [jupytext, ruff]
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: "v1.2.0"
+    rev: "v1.15.0"
     hooks:
       - id: mypy
         exclude: ipynb_filter.py|docs/source/conf.py
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 03f7b4811..339c9db0a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,90 @@
 # 🗞️ Changelog

-## [v1.0.0](https://github.com/python-adaptive/adaptive/tree/v1.0.0)
+## [v1.4.0](https://github.com/python-adaptive/adaptive/tree/v1.4.0) (2025-05-13)
+
+[Full Changelog](https://github.com/python-adaptive/adaptive/compare/v1.3.2...v1.4.0)
+
+**Merged pull requests:**
+
+- Bump `mypy` and `ruff` in `pre-commit` [\#479](https://github.com/python-adaptive/adaptive/pull/479) ([basnijholt](https://github.com/basnijholt))
+- Enable runtime type checking in tests with typeguard [\#478](https://github.com/python-adaptive/adaptive/pull/478) ([basnijholt](https://github.com/basnijholt))
+- Follow SPEC 0 and drop support for Python 3.9 and 3.10 [\#477](https://github.com/python-adaptive/adaptive/pull/477) ([basnijholt](https://github.com/basnijholt))
+- Use `uv` as Nox backend and several related improvements [\#476](https://github.com/python-adaptive/adaptive/pull/476) ([basnijholt](https://github.com/basnijholt))
+- Prevent SciPy deprecation warning for `estimate_gradients_2d_global` [\#475](https://github.com/python-adaptive/adaptive/pull/475) ([basnijholt](https://github.com/basnijholt))
+
+## [v1.3.2](https://github.com/python-adaptive/adaptive/tree/v1.3.2) (2025-03-03)
+
+[Full Changelog](https://github.com/python-adaptive/adaptive/compare/v1.3.1...v1.3.2)
+
+**Closed issues:**
+
+- Runner slows down visual studio code on linux [\#471](https://github.com/python-adaptive/adaptive/issues/471)
+
+**Merged pull requests:**
+
+- Fix readthedocs.yml [\#474](https://github.com/python-adaptive/adaptive/pull/474) ([basnijholt](https://github.com/basnijholt))
+- Remove async activation magic in notebook\_integration.py 
[\#473](https://github.com/python-adaptive/adaptive/pull/473) ([basnijholt](https://github.com/basnijholt)) + +## [v1.3.1](https://github.com/python-adaptive/adaptive/tree/v1.3.1) (2025-01-07) + +[Full Changelog](https://github.com/python-adaptive/adaptive/compare/v1.3.0...v1.3.1) + +**Merged pull requests:** + +- Fix scipy deprecation warning for LinearNDInterpolator [\#465](https://github.com/python-adaptive/adaptive/pull/465) ([eendebakpt](https://github.com/eendebakpt)) +- Remove Azure Pipelines badge in README.md [\#462](https://github.com/python-adaptive/adaptive/pull/462) ([basnijholt](https://github.com/basnijholt)) + +## [v1.3.0](https://github.com/python-adaptive/adaptive/tree/v1.3.0) (2024-05-31) + +[Full Changelog](https://github.com/python-adaptive/adaptive/compare/v1.2.0...v1.3.0) + +**Merged pull requests:** + +- Release v1.3.0 [\#459](https://github.com/python-adaptive/adaptive/pull/459) ([basnijholt](https://github.com/basnijholt)) +- Replace deprecated numpy aliases [\#458](https://github.com/python-adaptive/adaptive/pull/458) ([eendebakpt](https://github.com/eendebakpt)) +- Remove `SKOptLearner` because `scikit-optimize` is unmaintained [\#404](https://github.com/python-adaptive/adaptive/pull/404) ([basnijholt](https://github.com/basnijholt)) + +## [v1.2.0](https://github.com/python-adaptive/adaptive/tree/v1.2.0) (2024-04-10) + +[Full Changelog](https://github.com/python-adaptive/adaptive/compare/v1.1.0...v1.2.0) + +**Closed issues:** + +- Issues with Multiprocess and AsyncRunner in adaptive for Phase Diagram Illustration [\#449](https://github.com/python-adaptive/adaptive/issues/449) +- Create API for just signle process \(No pickle\) [\#442](https://github.com/python-adaptive/adaptive/issues/442) +- Handling with regions unreachable inside the `ConvexHull` in `LearnerND` [\#438](https://github.com/python-adaptive/adaptive/issues/438) +- Use in script with BlockingRunner: get log and/or feedback on progress [\#436](https://github.com/python-adaptive/adaptive/issues/436) + +**Merged pull requests:** + +- Update CHANGELOG.md for v1.2.0 [\#454](https://github.com/python-adaptive/adaptive/pull/454) ([basnijholt](https://github.com/basnijholt)) +- Test Python 3.12 and fix its installation [\#453](https://github.com/python-adaptive/adaptive/pull/453) ([basnijholt](https://github.com/basnijholt)) +- \[pre-commit.ci\] pre-commit autoupdate [\#447](https://github.com/python-adaptive/adaptive/pull/447) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci)) +- Use ruff-format instead of black [\#446](https://github.com/python-adaptive/adaptive/pull/446) ([basnijholt](https://github.com/basnijholt)) +- Bump versions to compatible packages in `docs/environment.yml` [\#445](https://github.com/python-adaptive/adaptive/pull/445) ([basnijholt](https://github.com/basnijholt)) +- Add `AsyncRunner.block_until_done` [\#444](https://github.com/python-adaptive/adaptive/pull/444) ([basnijholt](https://github.com/basnijholt)) +- Add `live_info_terminal`, closes \#436 [\#441](https://github.com/python-adaptive/adaptive/pull/441) ([basnijholt](https://github.com/basnijholt)) +- \[pre-commit.ci\] pre-commit autoupdate [\#434](https://github.com/python-adaptive/adaptive/pull/434) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci)) +- Add benchmarks page for Learner1D and Learner2D functions [\#405](https://github.com/python-adaptive/adaptive/pull/405) ([basnijholt](https://github.com/basnijholt)) + +## [v1.1.0](https://github.com/python-adaptive/adaptive/tree/v1.1.0) (2023-08-14) + 
+[Full Changelog](https://github.com/python-adaptive/adaptive/compare/v1.0.0...v1.1.0)
+
+**Closed issues:**
+
+- large delay when using start\_periodic\_saving [\#439](https://github.com/python-adaptive/adaptive/issues/439)
+- Target function returns NaN [\#435](https://github.com/python-adaptive/adaptive/issues/435)
+
+**Merged pull requests:**
+
+- Ensure periodic saving fires immediately after runner task is finished [\#440](https://github.com/python-adaptive/adaptive/pull/440) ([jbweston](https://github.com/jbweston))
+- Add Learner2D loss function 'thresholded\_loss\_factory' [\#437](https://github.com/python-adaptive/adaptive/pull/437) ([basnijholt](https://github.com/basnijholt))
+- \[pre-commit.ci\] pre-commit autoupdate [\#433](https://github.com/python-adaptive/adaptive/pull/433) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
+- \[pre-commit.ci\] pre-commit autoupdate [\#431](https://github.com/python-adaptive/adaptive/pull/431) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
+- Add adaptive.utils.daskify [\#422](https://github.com/python-adaptive/adaptive/pull/422) ([basnijholt](https://github.com/basnijholt))
+
+## [v1.0.0](https://github.com/python-adaptive/adaptive/tree/v1.0.0) (2023-05-15)

 [Full Changelog](https://github.com/python-adaptive/adaptive/compare/v0.15.0...v1.0.0)
@@ -11,6 +95,7 @@

 **Merged pull requests:**

+- Update CHANGELOG for v1.0.0 [\#429](https://github.com/python-adaptive/adaptive/pull/429) ([basnijholt](https://github.com/basnijholt))
 - \[pre-commit.ci\] pre-commit autoupdate [\#426](https://github.com/python-adaptive/adaptive/pull/426) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
 - Allow storing the full sequence in SequenceLearner.to\_dataframe [\#425](https://github.com/python-adaptive/adaptive/pull/425) ([basnijholt](https://github.com/basnijholt))
 - \[pre-commit.ci\] pre-commit autoupdate [\#423](https://github.com/python-adaptive/adaptive/pull/423) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
@@ -40,9 +125,13 @@
 - Add nbQA for notebook and docs linting [\#361](https://github.com/python-adaptive/adaptive/pull/361) ([basnijholt](https://github.com/basnijholt))
 - Fix HoloViews opts deprecation warnings [\#357](https://github.com/python-adaptive/adaptive/pull/357) ([basnijholt](https://github.com/basnijholt))

-## [v0.15.0](https://github.com/python-adaptive/adaptive/tree/v0.15.0) (2022-11-30)
+## [v0.15.1](https://github.com/python-adaptive/adaptive/tree/v0.15.1) (2022-12-02)
+
+[Full Changelog](https://github.com/python-adaptive/adaptive/compare/v0.15.0...v0.15.1)

-[Full Changelog](https://github.com/python-adaptive/adaptive/compare/v0.14.2...v0.15.0)
+## [v0.15.0](https://github.com/python-adaptive/adaptive/tree/v0.15.0) (2022-12-02)
+
+[Full Changelog](https://github.com/python-adaptive/adaptive/compare/v0.14.2...v0.15.0)

 **Closed issues:**

@@ -50,10 +139,8 @@

 **Merged pull requests:**

-- Add support for Python 3.11 and test on it [\#387](https://github.com/python-adaptive/adaptive/pull/387) ([basnijholt](https://github.com/basnijholt))
 - Rename master -\> main [\#384](https://github.com/python-adaptive/adaptive/pull/384) ([basnijholt](https://github.com/basnijholt))
 - Add loss\_goal, npoints\_goal, and an auto\_goal function and use it in the runners [\#382](https://github.com/python-adaptive/adaptive/pull/382) ([basnijholt](https://github.com/basnijholt))
-- Add type-hints to Runner [\#370](https://github.com/python-adaptive/adaptive/pull/370) 
([basnijholt](https://github.com/basnijholt)) - Add docs section about executing coroutines [\#364](https://github.com/python-adaptive/adaptive/pull/364) ([juandaanieel](https://github.com/juandaanieel)) ## [v0.14.2](https://github.com/python-adaptive/adaptive/tree/v0.14.2) (2022-10-14) @@ -238,7 +325,7 @@ - bump pre-commit filter dependencies [\#293](https://github.com/python-adaptive/adaptive/pull/293) ([basnijholt](https://github.com/basnijholt)) - fix docs [\#291](https://github.com/python-adaptive/adaptive/pull/291) ([basnijholt](https://github.com/basnijholt)) - update to miniver 0.7.0 [\#290](https://github.com/python-adaptive/adaptive/pull/290) ([basnijholt](https://github.com/basnijholt)) -- add `runner.live\_plot\(\)` in README example [\#288](https://github.com/python-adaptive/adaptive/pull/288) ([basnijholt](https://github.com/basnijholt)) +- add `runner.live_plot()` in README example [\#288](https://github.com/python-adaptive/adaptive/pull/288) ([basnijholt](https://github.com/basnijholt)) - Update pre commit [\#287](https://github.com/python-adaptive/adaptive/pull/287) ([basnijholt](https://github.com/basnijholt)) - Use m2r2 [\#286](https://github.com/python-adaptive/adaptive/pull/286) ([basnijholt](https://github.com/basnijholt)) - temporarily pin scikit-learn\<=0.23.1 [\#285](https://github.com/python-adaptive/adaptive/pull/285) ([basnijholt](https://github.com/basnijholt)) @@ -283,7 +370,6 @@ **Closed issues:** - - add minimum number of points parameter to AverageLearner [\#273](https://github.com/python-adaptive/adaptive/issues/273) - Release v0.10 [\#258](https://github.com/python-adaptive/adaptive/issues/258) @@ -473,7 +559,7 @@ - Gracefully handle exceptions when evaluating the function to be learned [\#125](https://github.com/python-adaptive/adaptive/issues/125) - Allow BalancingLearner to return arbitrary number of points from 'choose\_points' [\#124](https://github.com/python-adaptive/adaptive/issues/124) - Increase the default refresh rate for 'live\_plot' [\#120](https://github.com/python-adaptive/adaptive/issues/120) -- remove default number of points to choose in `choose\_points` [\#118](https://github.com/python-adaptive/adaptive/issues/118) +- remove default number of points to choose in `choose_points` [\#118](https://github.com/python-adaptive/adaptive/issues/118) - Consider using Gaussian process optimization as a learner [\#115](https://github.com/python-adaptive/adaptive/issues/115) - Make `distributed.Client` work with automatic scaling of the cluster [\#104](https://github.com/python-adaptive/adaptive/issues/104) - Improve plotting for learners [\#83](https://github.com/python-adaptive/adaptive/issues/83) @@ -560,7 +646,7 @@ - Remove public 'fname' learner attribute [\#17](https://github.com/python-adaptive/adaptive/issues/17) - Release v0.7.0 [\#14](https://github.com/python-adaptive/adaptive/issues/14) - \(Learner1D\) improve time complexity [\#13](https://github.com/python-adaptive/adaptive/issues/13) -- Typo in documentation for` adaptive.learner.learner2D.uniform\_loss\(ip\)` [\#12](https://github.com/python-adaptive/adaptive/issues/12) +- Typo in documentation for` adaptive.learner.learner2D.uniform_loss(ip)` [\#12](https://github.com/python-adaptive/adaptive/issues/12) - \(LearnerND\) fix plotting of scaled domains [\#11](https://github.com/python-adaptive/adaptive/issues/11) - suggested points lie outside of domain [\#7](https://github.com/python-adaptive/adaptive/issues/7) - DEVELOPMENT IS ON GITLAB: https://gitlab.kwant-project.org/qt/adaptive 
[\#5](https://github.com/python-adaptive/adaptive/issues/5) diff --git a/README.md b/README.md index 8510b7ca1..e3a83a688 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,6 @@ [![Downloads](https://img.shields.io/conda/dn/conda-forge/adaptive.svg)](https://anaconda.org/conda-forge/adaptive) [![GitHub](https://img.shields.io/github/stars/python-adaptive/adaptive.svg?style=social)](https://github.com/python-adaptive/adaptive/stargazers) [![Gitter](https://img.shields.io/gitter/room/nwjs/nw.js.svg)](https://gitter.im/python-adaptive/adaptive) -[![Pipeline-status](https://dev.azure.com/python-adaptive/adaptive/_apis/build/status/python-adaptive.adaptive?branchName=main)](https://dev.azure.com/python-adaptive/adaptive/_build/latest?definitionId=6?branchName=main) [![PyPI](https://img.shields.io/pypi/v/adaptive.svg)](https://pypi.python.org/pypi/adaptive) @@ -23,7 +22,7 @@ With minimal code, you can perform evaluations on a computing cluster, display l Adaptive is most efficient for computations where each function evaluation takes at least ≈50ms due to the overhead of selecting potentially interesting points. -To see Adaptive in action, try the [example notebook on Binder](https://mybinder.org/v2/gh/python-adaptive/adaptive/main?filepath=example-notebook.ipynb) or explore the [tutorial on Read the Docs](https://adaptive.readthedocs.io/en/latest/tutorial/tutorial.html). +To see Adaptive in action, try the [example notebook on Binder](https://mybinder.org/v2/gh/python-adaptive/adaptive/main?filepath=example-notebook.ipynb) or explore the [tutorial on Read the Docs](https://adaptive.readthedocs.io/en/latest/tutorial/tutorial). @@ -161,12 +160,12 @@ jupyter labextension install @pyviz/jupyterlab_pyviz ## :wrench: Development -Clone the repository and run `pip install -e ".[notebook,testing,other]"` to add a link to the cloned repo into your Python path: +Clone the repository and run `pip install -e ".[notebook,test,other]"` to add a link to the cloned repo into your Python path: ```bash git clone git@github.com:python-adaptive/adaptive.git cd adaptive -pip install -e ".[notebook,testing,other]" +pip install -e ".[notebook,test,other]" ``` We recommend using a Conda environment or a virtualenv for package management during Adaptive development. 
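The README hunks above drop the dead Azure Pipelines badge and point the development install at the renamed `test` extra; the surrounding text keeps the key claim that Adaptive pays off once a single evaluation costs roughly 50 ms or more. A minimal sketch of that basic workflow, using only the public API touched elsewhere in this diff (the `peak` function, the sleep, and the loss goal are illustrative, not part of this PR):

```python
import time

import adaptive


def peak(x, a=0.01):
    """Illustrative 1D target with a narrow feature near x = 0."""
    time.sleep(0.05)  # emulate an evaluation of >= ~50 ms, where adaptive pays off
    return x + a**2 / (a**2 + x**2)


learner = adaptive.Learner1D(peak, bounds=(-1, 1))
adaptive.runner.simple(learner, loss_goal=0.01)  # blocks until the loss goal is met
```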
diff --git a/adaptive/__init__.py b/adaptive/__init__.py index 4f99d67e7..34f75d9ac 100644 --- a/adaptive/__init__.py +++ b/adaptive/__init__.py @@ -1,5 +1,3 @@ -from contextlib import suppress - from adaptive._version import __version__ from adaptive.learner import ( AverageLearner, @@ -47,11 +45,5 @@ "Runner", ] -with suppress(ImportError): - # Only available if 'scikit-optimize' is installed - from adaptive.learner import SKOptLearner # noqa: F401 - - __all__.append("SKOptLearner") - # to avoid confusion with `notebook_extension` del notebook_integration # type: ignore[name-defined] # noqa: F821 diff --git a/adaptive/_types.py b/adaptive/_types.py new file mode 100644 index 000000000..3f56bf59b --- /dev/null +++ b/adaptive/_types.py @@ -0,0 +1,24 @@ +# Only used for static type checkers, should only be imported in `if TYPE_CHECKING` block +# Workaround described in https://github.com/agronholm/typeguard/issues/456 + +import concurrent.futures as concurrent +from typing import TypeAlias + +import distributed +import ipyparallel +import loky +import mpi4py.futures + +from adaptive.utils import SequentialExecutor + +ExecutorTypes: TypeAlias = ( + concurrent.ProcessPoolExecutor + | concurrent.ThreadPoolExecutor + | SequentialExecutor + | loky.reusable_executor._ReusablePoolExecutor + | distributed.Client + | distributed.cfexecutor.ClientExecutor + | mpi4py.futures.MPIPoolExecutor + | ipyparallel.Client + | ipyparallel.client.view.ViewExecutor +) diff --git a/adaptive/learner/__init__.py b/adaptive/learner/__init__.py index 74564773a..2b2586731 100644 --- a/adaptive/learner/__init__.py +++ b/adaptive/learner/__init__.py @@ -1,5 +1,3 @@ -from contextlib import suppress - from adaptive.learner.average_learner import AverageLearner from adaptive.learner.average_learner1D import AverageLearner1D from adaptive.learner.balancing_learner import BalancingLearner @@ -24,9 +22,3 @@ "AverageLearner1D", "SequenceLearner", ] - -with suppress(ImportError): - # Only available if 'scikit-optimize' is installed - from adaptive.learner.skopt_learner import SKOptLearner # noqa: F401 - - __all__.append("SKOptLearner") diff --git a/adaptive/learner/average_learner.py b/adaptive/learner/average_learner.py index c3d4892b4..3252530b4 100644 --- a/adaptive/learner/average_learner.py +++ b/adaptive/learner/average_learner.py @@ -1,7 +1,7 @@ from __future__ import annotations +from collections.abc import Callable from math import sqrt -from typing import Callable import cloudpickle import numpy as np diff --git a/adaptive/learner/average_learner1D.py b/adaptive/learner/average_learner1D.py index 12c7c9a6c..82c76f5d9 100644 --- a/adaptive/learner/average_learner1D.py +++ b/adaptive/learner/average_learner1D.py @@ -3,10 +3,9 @@ import math import sys from collections import defaultdict -from collections.abc import Iterable, Sequence +from collections.abc import Callable, Iterable, Sequence from copy import deepcopy from math import hypot -from typing import Callable import numpy as np import scipy.stats @@ -310,7 +309,7 @@ def _ask_for_new_point(self, n: int) -> tuple[Points, list[float]]: new point, since in general n << min_samples and this point will need to be resampled many more times""" points, (loss_improvement,) = self._ask_points_without_adding(1) - seed_points = [(seed, x) for seed, x in zip(range(n), n * points)] + seed_points = list(zip(range(n), n * points)) loss_improvements = [loss_improvement / n] * n return seed_points, loss_improvements # type: ignore[return-value] @@ -500,8 +499,7 @@ def tell_many( # 
type: ignore[override] # but ignore it going forward. if not np.prod([x >= self.bounds[0] and x <= self.bounds[1] for _, x in xs]): raise ValueError( - "x value out of bounds, " - "remove x or enlarge the bounds of the learner" + "x value out of bounds, remove x or enlarge the bounds of the learner" ) # Create a mapping of points to a list of samples @@ -534,8 +532,7 @@ def tell_many_at_point(self, x: Real, seed_y_mapping: dict[int, Real]) -> None: # Check x is within the bounds if not np.prod(x >= self.bounds[0] and x <= self.bounds[1]): raise ValueError( - "x value out of bounds, " - "remove x or enlarge the bounds of the learner" + "x value out of bounds, remove x or enlarge the bounds of the learner" ) # If x is a new point: diff --git a/adaptive/learner/balancing_learner.py b/adaptive/learner/balancing_learner.py index e9a4a661e..43f7dc1f3 100644 --- a/adaptive/learner/balancing_learner.py +++ b/adaptive/learner/balancing_learner.py @@ -1,13 +1,12 @@ from __future__ import annotations import itertools -import sys from collections import defaultdict -from collections.abc import Iterable, Sequence +from collections.abc import Callable, Iterable, Sequence from contextlib import suppress from functools import partial from operator import itemgetter -from typing import Any, Callable, Union, cast +from typing import Any, Literal, TypeAlias, cast import numpy as np @@ -16,13 +15,6 @@ from adaptive.types import Int, Real from adaptive.utils import cache_latest, named_product, restore -if sys.version_info >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias - -from typing import Literal - try: import pandas @@ -38,11 +30,9 @@ def dispatch(child_functions: list[Callable], arg: Any) -> Any: STRATEGY_TYPE: TypeAlias = Literal["loss_improvements", "loss", "npoints", "cycle"] -CDIMS_TYPE: TypeAlias = Union[ - Sequence[dict[str, Any]], - tuple[Sequence[str], Sequence[tuple[Any, ...]]], - None, -] +CDIMS_TYPE: TypeAlias = ( + Sequence[dict[str, Any]] | tuple[Sequence[str], Sequence[tuple[Any, ...]]] | None +) class BalancingLearner(BaseLearner): @@ -116,9 +106,7 @@ def __init__( self._cdims_default = cdims if len({learner.__class__ for learner in self.learners}) > 1: - raise TypeError( - "A BalacingLearner can handle only one type" " of learners." 
- ) + raise TypeError("A BalacingLearner can handle only one type of learners.") self.strategy: STRATEGY_TYPE = strategy @@ -279,17 +267,17 @@ def ask( return self._ask_and_tell(n) def tell(self, x: tuple[Int, Any], y: Any) -> None: - index, x = x + index, x_ = x self._ask_cache.pop(index, None) self._loss.pop(index, None) self._pending_loss.pop(index, None) - self.learners[index].tell(x, y) + self.learners[index].tell(x_, y) def tell_pending(self, x: tuple[Int, Any]) -> None: - index, x = x + index, x_ = x self._ask_cache.pop(index, None) self._loss.pop(index, None) - self.learners[index].tell_pending(x) + self.learners[index].tell_pending(x_) def _losses(self, real: bool = True) -> list[float]: losses = [] diff --git a/adaptive/learner/base_learner.py b/adaptive/learner/base_learner.py index 73720dd52..f5ef73eca 100644 --- a/adaptive/learner/base_learner.py +++ b/adaptive/learner/base_learner.py @@ -1,8 +1,9 @@ from __future__ import annotations import abc +from collections.abc import Callable from contextlib import suppress -from typing import TYPE_CHECKING, Any, Callable, TypeVar +from typing import TYPE_CHECKING, Any, TypeVar import cloudpickle diff --git a/adaptive/learner/data_saver.py b/adaptive/learner/data_saver.py index a69807389..2deafe2cb 100644 --- a/adaptive/learner/data_saver.py +++ b/adaptive/learner/data_saver.py @@ -2,7 +2,8 @@ import functools from collections import OrderedDict -from typing import Any, Callable +from collections.abc import Callable +from typing import Any from adaptive.learner.base_learner import BaseLearner, LearnerType from adaptive.utils import copy_docstring_from @@ -161,7 +162,7 @@ def _set_data( self.learner._set_data(learner_data) def __getstate__(self) -> tuple[LearnerType, Callable, OrderedDict]: - return ( + return ( # type: ignore[return-value] self.learner, self.arg_picker, self.extra_data, diff --git a/adaptive/learner/integrator_learner.py b/adaptive/learner/integrator_learner.py index 74aebe1ca..d6ee9ef1d 100644 --- a/adaptive/learner/integrator_learner.py +++ b/adaptive/learner/integrator_learner.py @@ -3,9 +3,10 @@ import sys from collections import defaultdict +from collections.abc import Callable from math import sqrt from operator import attrgetter -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING import cloudpickle import numpy as np @@ -71,7 +72,6 @@ class DivergentIntegralError(ValueError): class _Interval: - """ Attributes ---------- diff --git a/adaptive/learner/learner1D.py b/adaptive/learner/learner1D.py index 8385e67b8..685834222 100644 --- a/adaptive/learner/learner1D.py +++ b/adaptive/learner/learner1D.py @@ -3,10 +3,9 @@ import collections.abc import itertools import math -import sys -from collections.abc import Sequence +from collections.abc import Callable, Sequence from copy import copy, deepcopy -from typing import TYPE_CHECKING, Any, Callable, Optional, Union +from typing import Any, TypeAlias import cloudpickle import numpy as np @@ -24,12 +23,6 @@ partial_function_from_dataframe, ) -if sys.version_info >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias - - try: import pandas @@ -38,32 +31,21 @@ except ModuleNotFoundError: with_pandas = False -if TYPE_CHECKING: - # -- types -- - - # Commonly used types - Interval: TypeAlias = Union[tuple[float, float], tuple[float, float, int]] - NeighborsType: TypeAlias = SortedDict[float, list[Optional[float]]] - - # Types for loss_per_interval functions - XsType0: TypeAlias = tuple[float, float] - YsType0: 
TypeAlias = Union[tuple[float, float], tuple[np.ndarray, np.ndarray]]
-    XsType1: TypeAlias = tuple[
-        Optional[float], Optional[float], Optional[float], Optional[float]
-    ]
-    YsType1: TypeAlias = Union[
-        tuple[Optional[float], Optional[float], Optional[float], Optional[float]],
-        tuple[
-            Optional[np.ndarray],
-            Optional[np.ndarray],
-            Optional[np.ndarray],
-            Optional[np.ndarray],
-        ],
-    ]
-    XsTypeN: TypeAlias = tuple[Optional[float], ...]
-    YsTypeN: TypeAlias = Union[
-        tuple[Optional[float], ...], tuple[Optional[np.ndarray], ...]
-    ]
+
+# Commonly used types
+Interval: TypeAlias = tuple[float, float] | tuple[float, float, int]
+NeighborsType: TypeAlias = SortedDict[float, list[float | None]]
+
+# Types for loss_per_interval functions
+XsType0: TypeAlias = tuple[float, float]
+YsType0: TypeAlias = tuple[float, float] | tuple[np.ndarray, np.ndarray]
+XsType1: TypeAlias = tuple[float | None, float | None, float | None, float | None]
+YsType1: TypeAlias = (
+    tuple[float | None, float | None, float | None, float | None]
+    | tuple[np.ndarray | None, np.ndarray | None, np.ndarray | None, np.ndarray | None]
+)
+XsTypeN: TypeAlias = tuple[float | None, ...]
+YsTypeN: TypeAlias = tuple[float | None, ...] | tuple[np.ndarray | None, ...]


 __all__ = [
@@ -124,18 +106,18 @@ def abs_min_log_loss(xs: XsType0, ys: YsType0) -> Float:

 @uses_nth_neighbors(1)
 def triangle_loss(xs: XsType1, ys: YsType1) -> Float:
     assert len(xs) == 4
-    xs = [x for x in xs if x is not None]  # type: ignore[assignment]
-    ys = [y for y in ys if y is not None]  # type: ignore[assignment]
+    xs_ = [x for x in xs if x is not None]
+    ys_ = [y for y in ys if y is not None]

-    if len(xs) == 2:  # we do not have enough points for a triangle
-        return xs[1] - xs[0]  # type: ignore[operator]
+    if len(xs_) == 2:  # we do not have enough points for a triangle
+        return xs_[1] - xs_[0]  # type: ignore[operator]

-    N = len(xs) - 2  # number of constructed triangles
-    if isinstance(ys[0], collections.abc.Iterable):
-        pts = [(x, *y) for x, y in zip(xs, ys)]  # type: ignore[misc]
+    N = len(xs_) - 2  # number of constructed triangles
+    if isinstance(ys_[0], collections.abc.Iterable):
+        pts = [(x, *y) for x, y in zip(xs_, ys_)]  # type: ignore[misc]
         vol = simplex_volume_in_embedding
     else:
-        pts = [(x, y) for x, y in zip(xs, ys)]
+        pts = list(zip(xs_, ys_))
         vol = volume
     return sum(vol(pts[i : i + 3]) for i in range(N)) / N
@@ -598,7 +580,7 @@ def tell(self, x: float, y: Float | Sequence[Float] | np.ndarray) -> None:
         )

         # either it is a float/int, if not, try casting to a np.array
-        if not isinstance(y, (float, int)):
+        if not isinstance(y, float | int):
             y = np.asarray(y, dtype=float)

         # Add point to the real data dict
@@ -633,10 +615,12 @@ def tell_pending(self, x: float) -> None:
     def tell_many(
         self,
         xs: Sequence[Float] | np.ndarray,
-        ys: Sequence[Float]
-        | Sequence[Sequence[Float]]
-        | Sequence[np.ndarray]
-        | np.ndarray,
+        ys: (
+            Sequence[Float]
+            | Sequence[Sequence[Float]]
+            | Sequence[np.ndarray]
+            | np.ndarray
+        ),
         *,
         force: bool = False,
     ) -> None:
@@ -777,8 +761,9 @@ def _ask_points_without_adding(self, n: int) -> tuple[list[float], list[float]]:
             ival is not None
             and self._loss(self.losses_combined, ival)
             >= self._loss(quals, qual)
         ):
+            assert ival is not None
             i += 1
-            quals[(*ival, 2)] = loss_ival / 2
+            quals[(ival[0], ival[1], 2)] = loss_ival / 2
         else:
             quals.pop(qual, None)
         *xs, n = qual
diff --git a/adaptive/learner/learner2D.py b/adaptive/learner/learner2D.py
index a2aec2069..125fc055f 100644
--- a/adaptive/learner/learner2D.py
+++ b/adaptive/learner/learner2D.py
@@ 
-3,15 +3,14 @@ import itertools import warnings from collections import OrderedDict -from collections.abc import Iterable +from collections.abc import Callable, Iterable from copy import copy from math import sqrt -from typing import Callable import cloudpickle import numpy as np from scipy import interpolate -from scipy.interpolate.interpnd import LinearNDInterpolator +from scipy.interpolate import CloughTocher2DInterpolator, LinearNDInterpolator from adaptive.learner.base_learner import BaseLearner from adaptive.learner.triangulation import simplex_volume_in_embedding @@ -34,7 +33,7 @@ # Learner2D and helper functions. -def deviations(ip: LinearNDInterpolator) -> list[np.ndarray]: +def deviations(ip: LinearNDInterpolator) -> np.ndarray: """Returns the deviation of the linear estimate. Is useful when defining custom loss functions. @@ -45,31 +44,25 @@ def deviations(ip: LinearNDInterpolator) -> list[np.ndarray]: Returns ------- - deviations : list + deviations : numpy.ndarray The deviation per triangle. """ - values = ip.values / (ip.values.ptp(axis=0).max() or 1) - gradients = interpolate.interpnd.estimate_gradients_2d_global( - ip.tri, values, tol=1e-6 - ) + values = ip.values / (np.ptp(ip.values, axis=0).max() or 1) + gradients = CloughTocher2DInterpolator(ip.tri, values, tol=1e-6).grad simplices = ip.tri.simplices p = ip.tri.points[simplices] vs = values[simplices] gs = gradients[simplices] - def deviation(p, v, g): - dev = 0 - for j in range(3): - vest = v[:, j, None] + ( - (p[:, :, :] - p[:, j, None, :]) * g[:, j, None, :] - ).sum(axis=-1) - dev += abs(vest - v).max(axis=1) - return dev + p = np.expand_dims(p, axis=2) + + p_diff = p[:, None] - p[:, :, None] + p_diff_scaled = p_diff * gs[:, :, None] + vest = vs[:, :, None] + p_diff_scaled.sum(axis=-1) + devs = np.sum(np.max(np.abs(vest - vs[:, None]), axis=2), axis=1) - n_levels = vs.shape[2] - devs = [deviation(p, vs[:, :, i], gs[:, :, i]) for i in range(n_levels)] - return devs + return np.swapaxes(devs, 0, 1) def areas(ip: LinearNDInterpolator) -> np.ndarray: @@ -195,7 +188,7 @@ def minimize_triangle_surface_loss(ip: LinearNDInterpolator) -> np.ndarray: tri = ip.tri points = tri.points[tri.simplices] values = ip.values[tri.simplices] - values = values / (ip.values.ptp(axis=0).max() or 1) + values = values / (np.ptp(ip.values, axis=0).max() or 1) def _get_vectors(points): delta = points - points[:, -1, :][:, None, :] @@ -231,6 +224,69 @@ def default_loss(ip: LinearNDInterpolator) -> np.ndarray: return losses +def thresholded_loss_function( + lower_threshold: float | None = None, + upper_threshold: float | None = None, + priority_factor: float = 0.1, +) -> Callable[[LinearNDInterpolator], np.ndarray]: + """ + Factory function to create a custom loss function that deprioritizes + values above an upper threshold and below a lower threshold. + + Parameters + ---------- + lower_threshold : float, optional + The lower threshold for deprioritizing values. If None (default), + there is no lower threshold. + upper_threshold : float, optional + The upper threshold for deprioritizing values. If None (default), + there is no upper threshold. + priority_factor : float, default: 0.1 + The factor by which the loss is multiplied for values outside + the specified thresholds. + + Returns + ------- + custom_loss : Callable[[LinearNDInterpolator], np.ndarray] + A custom loss function that can be used with Learner2D. 
+ """ + + def custom_loss(ip: LinearNDInterpolator) -> np.ndarray: + """Loss function that deprioritizes values outside an upper and lower threshold. + + Parameters + ---------- + ip : `scipy.interpolate.LinearNDInterpolator` instance + + Returns + ------- + losses : numpy.ndarray + Loss per triangle in ``ip.tri``. + """ + losses = default_loss(ip) + + if lower_threshold is not None or upper_threshold is not None: + simplices = ip.tri.simplices + values = ip.values[simplices] + if lower_threshold is not None: + mask_lower = (values < lower_threshold).all(axis=(1, -1)) + if mask_lower.any(): + losses[mask_lower] *= priority_factor + + if upper_threshold is not None: + mask_upper = (values > upper_threshold).all(axis=(1, -1)) + if mask_upper.any(): + losses[mask_upper] *= priority_factor + + return losses + + return custom_loss + + +def _cross_2d(x, y): + return x[..., 0] * y[..., 1] - x[..., 1] * y[..., 0] + + def choose_point_in_triangle(triangle: np.ndarray, max_badness: int) -> np.ndarray: """Choose a new point in inside a triangle. @@ -254,7 +310,7 @@ def choose_point_in_triangle(triangle: np.ndarray, max_badness: int) -> np.ndarr The x and y coordinate of the suggested new point. """ a, b, c = triangle - area = 0.5 * np.cross(b - a, c - a) + area = 0.5 * _cross_2d(b - a, c - a) triangle_roll = np.roll(triangle, 1, axis=0) edge_lengths = np.linalg.norm(triangle - triangle_roll, axis=1) i = edge_lengths.argmax() @@ -392,7 +448,7 @@ def __init__( self.aspect_ratio = 1 self._bounds_points = list(itertools.product(*bounds)) - self._stack.update({p: np.inf for p in self._bounds_points}) + self._stack.update(dict.fromkeys(self._bounds_points, np.inf)) self.function = function # type: ignore self._ip = self._ip_combined = None @@ -761,7 +817,7 @@ def remove_unfinished(self) -> None: if p not in self.data: self._stack[p] = np.inf - def plot(self, n=None, tri_alpha=0): + def plot(self, n=None, tri_alpha=0.0): r"""Plot the Learner2D's current state. This plot function interpolates the data on a regular grid. 
diff --git a/adaptive/learner/learnerND.py b/adaptive/learner/learnerND.py index edc839d8d..33bbbbb07 100644 --- a/adaptive/learner/learnerND.py +++ b/adaptive/learner/learnerND.py @@ -2,6 +2,7 @@ import functools import itertools +import math import random from collections import OrderedDict from collections.abc import Iterable @@ -50,7 +51,7 @@ def volume(simplex, ys=None): # See https://www.jstor.org/stable/2315353 dim = len(simplex) - 1 - vol = np.abs(fast_det(matrix)) / np.math.factorial(dim) + vol = np.abs(fast_det(matrix)) / math.factorial(dim) return vol @@ -725,9 +726,10 @@ def _compute_loss(self, simplex): if self.nth_neighbors == 0: # compute the loss on the scaled simplex - return float( - self.loss_per_simplex(vertices, values, self._output_multiplier) - ) + loss = self.loss_per_simplex(vertices, values, self._output_multiplier) + if isinstance(loss, np.ndarray): + return float(loss.item()) + return float(loss) # We do need the neighbors neighbors = self.tri.get_opposing_vertices(simplex) @@ -987,9 +989,11 @@ def plot_slice(self, cut_mapping, n=None): xs = ys = np.linspace(0, 1, n) xys = [xs[:, None], ys[None, :]] values = [ - cut_mapping[i] - if i in cut_mapping - else xys.pop(0) * (b[1] - b[0]) + b[0] + ( + cut_mapping[i] + if i in cut_mapping + else xys.pop(0) * (b[1] - b[0]) + b[0] + ) for i, b in enumerate(self._bbox) ] @@ -1095,8 +1099,7 @@ def _get_iso(self, level=0.0, which="surface"): if which == "surface": if self.ndim != 3 or self.vdim != 1: raise Exception( - "Isosurface plotting is only supported" - " for a 3D input and 1D output" + "Isosurface plotting is only supported for a 3D input and 1D output" ) get_surface = True get_line = False diff --git a/adaptive/learner/sequence_learner.py b/adaptive/learner/sequence_learner.py index c307744fd..2209ff875 100644 --- a/adaptive/learner/sequence_learner.py +++ b/adaptive/learner/sequence_learner.py @@ -1,8 +1,7 @@ from __future__ import annotations -import sys from copy import copy -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, TypeAlias import cloudpickle from sortedcontainers import SortedDict, SortedSet @@ -16,8 +15,7 @@ ) if TYPE_CHECKING: - from collections.abc import Sequence - from typing import Callable + from collections.abc import Callable, Sequence try: import pandas @@ -27,10 +25,6 @@ except ModuleNotFoundError: with_pandas = False -if sys.version_info >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias PointType: TypeAlias = tuple[Int, Any] @@ -140,13 +134,13 @@ def remove_unfinished(self) -> None: self.pending_points = set() def tell(self, point: PointType, value: Any) -> None: - index, point = point + index, _ = point self.data[index] = value self.pending_points.discard(index) self._to_do_indices.discard(index) def tell_pending(self, point: PointType) -> None: - index, point = point + index, _ = point self.pending_points.add(index) self._to_do_indices.discard(index) diff --git a/adaptive/learner/skopt_learner.py b/adaptive/learner/skopt_learner.py deleted file mode 100644 index b1cb18840..000000000 --- a/adaptive/learner/skopt_learner.py +++ /dev/null @@ -1,185 +0,0 @@ -from __future__ import annotations - -import collections -from typing import TYPE_CHECKING - -import numpy as np -from skopt import Optimizer - -from adaptive.learner.base_learner import BaseLearner -from adaptive.notebook_integration import ensure_holoviews -from adaptive.utils import cache_latest - -if TYPE_CHECKING: - import pandas - - -class SKOptLearner(Optimizer, 
BaseLearner): - """Learn a function minimum using ``skopt.Optimizer``. - - This is an ``Optimizer`` from ``scikit-optimize``, - with the necessary methods added to make it conform - to the ``adaptive`` learner interface. - - Parameters - ---------- - function : callable - The function to learn. - **kwargs : - Arguments to pass to ``skopt.Optimizer``. - """ - - def __init__(self, function, **kwargs): - self.function = function - self.pending_points = set() - self.data = collections.OrderedDict() - self._kwargs = kwargs - super().__init__(**kwargs) - - def new(self) -> SKOptLearner: - """Return a new `~adaptive.SKOptLearner` without the data.""" - return SKOptLearner(self.function, **self._kwargs) - - def tell(self, x, y, fit=True): - if isinstance(x, collections.abc.Iterable): - self.pending_points.discard(tuple(x)) - self.data[tuple(x)] = y - super().tell(x, y, fit) - else: - self.pending_points.discard(x) - self.data[x] = y - super().tell([x], y, fit) - - def tell_pending(self, x): - # 'skopt.Optimizer' takes care of points we - # have not got results for. - self.pending_points.add(tuple(x)) - - def remove_unfinished(self): - pass - - @cache_latest - def loss(self, real=True): - if not self.models: - return np.inf - else: - model = self.models[-1] - # Return the in-sample error (i.e. test the model - # with the training data). This is not the best - # estimator of loss, but it is the cheapest. - return 1 - model.score(self.Xi, self.yi) - - def ask(self, n, tell_pending=True): - if not tell_pending: - raise NotImplementedError( - "Asking points is an irreversible " - "action, so use `ask(n, tell_pending=True`." - ) - points = super().ask(n) - # TODO: Choose a better estimate for the loss improvement. - if self.space.n_dims > 1: - return points, [self.loss() / n] * n - else: - return [p[0] for p in points], [self.loss() / n] * n - - @property - def npoints(self): - """Number of evaluated points.""" - return len(self.Xi) - - def plot(self, nsamples=200): - hv = ensure_holoviews() - if self.space.n_dims > 1: - raise ValueError("Can only plot 1D functions") - bounds = self.space.bounds[0] - if not self.Xi: - p = hv.Scatter([]) * hv.Curve([]) * hv.Area([]) - else: - scatter = hv.Scatter(([p[0] for p in self.Xi], self.yi)) - if self.models: - model = self.models[-1] - xs = np.linspace(*bounds, nsamples) - xsp = self.space.transform(xs.reshape(-1, 1).tolist()) - y_pred, sigma = model.predict(xsp, return_std=True) - # Plot model prediction for function - curve = hv.Curve((xs, y_pred)).opts(line_dash="dashed") - # Plot 95% confidence interval as colored area around points - area = hv.Area( - (xs, y_pred - 1.96 * sigma, y_pred + 1.96 * sigma), - vdims=["y", "y2"], - ).opts(alpha=0.5, line_alpha=0) - - else: - area = hv.Area([]) - curve = hv.Curve([]) - p = scatter * curve * area - - # Plot with 5% empty margins such that the boundary points are visible - margin = 0.05 * (bounds[1] - bounds[0]) - plot_bounds = (bounds[0] - margin, bounds[1] + margin) - - return p.redim(x={"range": plot_bounds}) - - def _get_data(self): - return [x[0] for x in self.Xi], self.yi - - def _set_data(self, data): - xs, ys = data - self.tell_many(xs, ys) - - def to_dataframe( # type: ignore[override] - self, - with_default_function_args: bool = True, - function_prefix: str = "function.", - seed_name: str = "seed", - y_name: str = "y", - ) -> pandas.DataFrame: - """Return the data as a `pandas.DataFrame`. 
- - Parameters - ---------- - with_default_function_args : bool, optional - Include the ``learner.function``'s default arguments as a - column, by default True - function_prefix : str, optional - Prefix to the ``learner.function``'s default arguments' names, - by default "function." - TODO - - Returns - ------- - pandas.DataFrame - - Raises - ------ - ImportError - If `pandas` is not installed. - """ - raise NotImplementedError - - def load_dataframe( # type: ignore[override] - self, - df: pandas.DataFrame, - with_default_function_args: bool = True, - function_prefix: str = "function.", - seed_name: str = "seed", - y_name: str = "y", - ): - """Load data from a `pandas.DataFrame`. - - If ``with_default_function_args`` is True, then ``learner.function``'s - default arguments are set (using `functools.partial`) from the values - in the `pandas.DataFrame`. - - Parameters - ---------- - df : pandas.DataFrame - The data to load. - with_default_function_args : bool, optional - The ``with_default_function_args`` used in ``to_dataframe()``, - by default True - function_prefix : str, optional - The ``function_prefix`` used in ``to_dataframe``, by default "function." - TODO - """ - raise NotImplementedError diff --git a/adaptive/learner/triangulation.py b/adaptive/learner/triangulation.py index 03455e3b7..26a5ebc2a 100644 --- a/adaptive/learner/triangulation.py +++ b/adaptive/learner/triangulation.py @@ -336,8 +336,7 @@ def __init__(self, coords): vectors = subtract(coords[1:], coords[0]) if matrix_rank(vectors) < dim: raise ValueError( - "Initial simplex has zero volumes " - "(the points are linearly dependent)" + "Initial simplex has zero volumes (the points are linearly dependent)" ) self.vertices = list(coords) diff --git a/adaptive/notebook_integration.py b/adaptive/notebook_integration.py index 165a84d82..5eb5c6d25 100644 --- a/adaptive/notebook_integration.py +++ b/adaptive/notebook_integration.py @@ -7,7 +7,6 @@ import warnings from contextlib import suppress -_async_enabled = False _holoviews_enabled = False _ipywidgets_enabled = False _plotly_enabled = False @@ -17,11 +16,10 @@ def notebook_extension(*, _inline_js=True): """Enable ipywidgets, holoviews, and asyncio notebook integration.""" if not in_ipynb(): raise RuntimeError( - '"adaptive.notebook_extension()" may only be run ' - "from a Jupyter notebook." + '"adaptive.notebook_extension()" may only be run from a Jupyter notebook.' ) - global _async_enabled, _holoviews_enabled, _ipywidgets_enabled + global _holoviews_enabled, _ipywidgets_enabled # Load holoviews try: @@ -51,11 +49,6 @@ def notebook_extension(*, _inline_js=True): stacklevel=2, ) - # Enable asyncio integration - if not _async_enabled: - get_ipython().magic("gui asyncio") # noqa: F821 - _async_enabled = True - def ensure_holoviews(): try: @@ -122,8 +115,7 @@ def live_plot(runner, *, plotter=None, update_interval=2, name=None, normalize=T """ if not _holoviews_enabled: raise RuntimeError( - "Live plotting is not enabled; did you run " - "'adaptive.notebook_extension()'?" + "Live plotting is not enabled; did you run 'adaptive.notebook_extension()'?" ) import holoviews as hv @@ -208,8 +200,7 @@ def live_info(runner, *, update_interval=0.5): """ if not _holoviews_enabled: raise RuntimeError( - "Live plotting is not enabled; did you run " - "'adaptive.notebook_extension()'?" + "Live plotting is not enabled; did you run 'adaptive.notebook_extension()'?" 
) import ipywidgets @@ -274,7 +265,7 @@ def _info_html(runner): info.append(("# of samples", runner.learner.nsamples)) with suppress(Exception): - info.append(("latest loss", f'{runner.learner._cache["loss"]:.3f}')) + info.append(("latest loss", f"{runner.learner._cache['loss']:.3f}")) table = "\n".join(_table_row(i, k, v) for i, (k, v) in enumerate(info)) diff --git a/adaptive/runner.py b/adaptive/runner.py index d1f39e1e2..b2e5ec9a0 100644 --- a/adaptive/runner.py +++ b/adaptive/runner.py @@ -8,77 +8,34 @@ import itertools import pickle import platform -import sys import time import traceback import warnings +from collections.abc import Callable from contextlib import suppress from datetime import datetime, timedelta from importlib.util import find_spec -from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Union +from typing import TYPE_CHECKING, Any, Literal, TypeAlias import loky -from adaptive import ( - BalancingLearner, - DataSaver, - IntegratorLearner, - SequenceLearner, -) +from adaptive import BalancingLearner, DataSaver, IntegratorLearner, SequenceLearner from adaptive.learner.base_learner import LearnerType from adaptive.notebook_integration import in_ipynb, live_info, live_plot from adaptive.utils import SequentialExecutor -ExecutorTypes: TypeAlias = Union[ - concurrent.ProcessPoolExecutor, - concurrent.ThreadPoolExecutor, - SequentialExecutor, - loky.reusable_executor._ReusablePoolExecutor, -] -FutureTypes: TypeAlias = Union[concurrent.Future, asyncio.Future, asyncio.Task] +FutureTypes: TypeAlias = concurrent.Future | asyncio.Future if TYPE_CHECKING: import holoviews - -if sys.version_info >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias + from ._types import ExecutorTypes with_ipyparallel = find_spec("ipyparallel") is not None with_distributed = find_spec("distributed") is not None with_mpi4py = find_spec("mpi4py") is not None -if TYPE_CHECKING: - ExecutorTypes = Optional[()] - FutureTypes = Optional[()] - - if with_distributed: - import distributed - - ExecutorTypes = Optional[ - Union[ - ExecutorTypes, distributed.Client, distributed.cfexecutor.ClientExecutor - ] - ] - - if with_mpi4py: - import mpi4py.futures - - ExecutorTypes = Optional[Union[ExecutorTypes, mpi4py.futures.MPIPoolExecutor]] - - if with_ipyparallel: - import ipyparallel - from ipyparallel.client.asyncresult import AsyncResult - - ExecutorTypes = Optional[ - Union[ - ExecutorTypes, ipyparallel.Client, ipyparallel.client.view.ViewExecutor - ] - ] - FutureTypes = Optional[Union[FutureTypes, AsyncResult]] with suppress(ModuleNotFoundError): import uvloop @@ -203,7 +160,7 @@ def __init__( self._max_tasks = ntasks - self._pending_tasks: dict[concurrent.Future, int] = {} + self._pending_tasks: dict[FutureTypes, int] = {} # if we instantiate our own executor, then we are also responsible # for calling 'shutdown' @@ -292,7 +249,8 @@ def _process_futures( pid = self._pending_tasks.pop(fut) try: y = fut.result() - t = time.time() - fut.start_time # total execution time + # total execution time + t = time.time() - fut.start_time # type: ignore[union-attr] except Exception as e: self._tracebacks[pid] = traceback.format_exc() self._to_retry[pid] = self._to_retry.get(pid, 0) + 1 @@ -470,7 +428,7 @@ def __init__( npoints_goal: int | None = None, end_time_goal: datetime | None = None, duration_goal: timedelta | int | float | None = None, - executor: (ExecutorTypes | None) = None, + executor: ExecutorTypes | None = None, ntasks: int | None = None, log: bool = False, 
shutdown_executor: bool = False, @@ -508,12 +466,12 @@ def _run(self) -> None: try: while not self.goal(self.learner): futures = self._get_futures() - done, _ = concurrent.wait(futures, return_when=first_completed) - self._process_futures(done) + done, _ = concurrent.wait(futures, return_when=first_completed) # type: ignore[arg-type] + self._process_futures(done) # type: ignore[arg-type] finally: remaining = self._remove_unfinished() if remaining: - concurrent.wait(remaining) + concurrent.wait(remaining) # type: ignore[arg-type] # Some futures get their result set, despite being cancelled. # see https://github.com/python-adaptive/adaptive/issues/319 with_result = {f for f in remaining if not f.cancelled() and f.done()} @@ -629,7 +587,7 @@ def __init__( npoints_goal: int | None = None, end_time_goal: datetime | None = None, duration_goal: timedelta | int | float | None = None, - executor: (ExecutorTypes | None) = None, + executor: ExecutorTypes | None = None, ntasks: int | None = None, log: bool = False, shutdown_executor: bool = False, @@ -722,6 +680,14 @@ def cancel(self) -> None: """ self.task.cancel() + def block_until_done(self) -> None: + if in_ipynb(): + raise RuntimeError( + "Cannot block the event loop when running in a Jupyter notebook." + " Use `await runner.task` instead." + ) + self.ioloop.run_until_complete(self.task) + def live_plot( self, *, @@ -768,6 +734,56 @@ def live_info(self, *, update_interval: float = 0.1) -> None: """ return live_info(self, update_interval=update_interval) + def live_info_terminal( + self, *, update_interval: float = 0.5, overwrite_previous: bool = True + ) -> asyncio.Task: + """ + Display live information about the runner in the terminal. + + This function provides a live update of the runner's status in the terminal. + The update can either overwrite the previous status or be printed on a new line. + + Parameters + ---------- + update_interval : float, optional + The time interval (in seconds) at which the runner's status is updated + in the terminal. Default is 0.5 seconds. + overwrite_previous : bool, optional + If True, each update will overwrite the previous status in the terminal. + If False, each update will be printed on a new line. + Default is True. + + Returns + ------- + asyncio.Task + The asynchronous task responsible for updating the runner's status in + the terminal. + + Examples + -------- + >>> runner = AsyncRunner(...) + >>> runner.live_info_terminal(update_interval=1.0, overwrite_previous=False) + + Notes + ----- + This function uses ANSI escape sequences to control the terminal's cursor + position. It might not work as expected on all terminal emulators. 
+ """ + + async def _update(runner: AsyncRunner) -> None: + try: + while not runner.task.done(): + if overwrite_previous: + # Clear the terminal + print("\033[H\033[J", end="") + print(_info_text(runner, separator="\t")) + await asyncio.sleep(update_interval) + + except asyncio.CancelledError: + print("Live info display cancelled.") + + return self.ioloop.create_task(_update(self)) + async def _run(self) -> None: first_completed = asyncio.FIRST_COMPLETED @@ -777,13 +793,12 @@ async def _run(self) -> None: try: while not self.goal(self.learner): futures = self._get_futures() - kw = {"loop": self.ioloop} if sys.version_info[:2] < (3, 10) else {} - done, _ = await asyncio.wait(futures, return_when=first_completed, **kw) # type: ignore[arg-type] + done, _ = await asyncio.wait(futures, return_when=first_completed) # type: ignore[arg-type,type-var] self._process_futures(done) finally: remaining = self._remove_unfinished() if remaining: - await asyncio.wait(remaining) + await asyncio.wait(remaining) # type: ignore[type-var] self._cleanup() def elapsed_time(self) -> float: @@ -839,13 +854,51 @@ def default_save(learner): async def _saver(): while self.status() == "running": method(self.learner) - await asyncio.sleep(interval) + # No asyncio.shield needed, as 'wait' does not cancel any tasks. + await asyncio.wait([self.task], timeout=interval) method(self.learner) # one last time self.saving_task = self.ioloop.create_task(_saver()) return self.saving_task +def _info_text(runner, separator: str = "\n"): + status = runner.status() + + color_map = { + "cancelled": "\033[33m", # Yellow + "failed": "\033[31m", # Red + "running": "\033[34m", # Blue + "finished": "\033[32m", # Green + } + + overhead = runner.overhead() + if overhead < 50: + overhead_color = "\033[32m" # Green + else: + overhead_color = "\033[31m" # Red + + info = [ + ("time", str(datetime.now())), + ("status", f"{color_map[status]}{status}\033[0m"), + ("elapsed time", str(timedelta(seconds=runner.elapsed_time()))), + ("overhead", f"{overhead_color}{overhead:.2f}%\033[0m"), + ] + + with suppress(Exception): + info.append(("# of points", runner.learner.npoints)) + + with suppress(Exception): + info.append(("# of samples", runner.learner.nsamples)) + + with suppress(Exception): + info.append(("latest loss", f"{runner.learner._cache['loss']:.3f}")) + + width = 30 + formatted_info = [f"{k}: {v}".ljust(width) for i, (k, v) in enumerate(info)] + return separator.join(formatted_info) + + # Default runner Runner = AsyncRunner @@ -858,11 +911,12 @@ def simple( npoints_goal: int | None = None, end_time_goal: datetime | None = None, duration_goal: timedelta | int | float | None = None, + points_per_ask: int = 1, ): """Run the learner until the goal is reached. - Requests a single point from the learner, evaluates - the function to be learned, and adds the point to the + Requests points from the learner, evaluates + the function to be learned, and adds the points to the learner, until the goal is reached, blocking the current thread. @@ -893,6 +947,9 @@ def simple( calculation. Stop when the current time is larger or equal than ``start_time + duration_goal``. ``duration_goal`` can be a number indicating the number of seconds. + points_per_ask : int, optional + The number of points to ask for between every interpolation rerun. Defaults + to 1, which can introduce significant overhead on long runs. 
""" goal = _goal( learner, @@ -905,7 +962,7 @@ def simple( ) assert goal is not None while not goal(learner): - xs, _ = learner.ask(1) + xs, _ = learner.ask(points_per_ask) for x in xs: y = learner.function(x) learner.tell(x, y) @@ -955,7 +1012,7 @@ def _ensure_executor(executor: ExecutorTypes | None) -> concurrent.Executor: def _get_ncores( - ex: (ExecutorTypes), + ex: ExecutorTypes, ) -> int: """Return the maximum number of cores that an executor can use.""" if with_ipyparallel: @@ -966,9 +1023,7 @@ def _get_ncores( import mpi4py.futures if with_ipyparallel and isinstance(ex, ipyparallel.client.view.ViewExecutor): return len(ex.view) - elif isinstance( - ex, (concurrent.ProcessPoolExecutor, concurrent.ThreadPoolExecutor) - ): + elif isinstance(ex, concurrent.ProcessPoolExecutor | concurrent.ThreadPoolExecutor): return ex._max_workers # type: ignore[union-attr] elif isinstance(ex, loky.reusable_executor._ReusablePoolExecutor): return ex._max_workers # type: ignore[union-attr] @@ -1023,7 +1078,7 @@ def stop_after(*, seconds=0, minutes=0, hours=0) -> Callable[[LearnerType], bool class _TimeGoal: def __init__(self, dt: timedelta | datetime | int | float): - self.dt = dt if isinstance(dt, (timedelta, datetime)) else timedelta(seconds=dt) + self.dt = dt if isinstance(dt, timedelta | datetime) else timedelta(seconds=dt) self.start_time = None def __call__(self, _): @@ -1094,7 +1149,8 @@ def auto_goal( for lrn in learner.learners ] return lambda learner: all( - goal(lrn) for lrn, goal in zip(learner.learners, goals) # type: ignore[attr-defined] + goal(lrn) + for lrn, goal in zip(learner.learners, goals) # type: ignore[attr-defined] ) if npoints is not None: return lambda learner: learner.npoints >= npoints # type: ignore[operator] @@ -1105,7 +1161,7 @@ def auto_goal( if isinstance(learner, DataSaver): assert learner is not None return auto_goal( - learner=learner.learner, + learner=learner.learner, # type: ignore[arg-type] loss=loss, npoints=npoints, end_time=end_time, diff --git a/adaptive/tests/algorithm_4.py b/adaptive/tests/algorithm_4.py index b010b667a..8741fdc53 100644 --- a/adaptive/tests/algorithm_4.py +++ b/adaptive/tests/algorithm_4.py @@ -3,8 +3,8 @@ from __future__ import annotations from collections import defaultdict +from collections.abc import Callable from fractions import Fraction -from typing import Callable import numpy as np from numpy.testing import assert_allclose @@ -319,7 +319,11 @@ def refine(self, f: Callable) -> tuple[np.ndarray, bool, int]: def algorithm_4( - f: Callable, a: int, b: int, tol: float, N_loops: int = int(1e9) # noqa: B008 + f: Callable, + a: int, + b: int, + tol: float, + N_loops: int = int(1e9), # noqa: B008 ) -> tuple[float, float, int, list[_Interval]]: """ALGORITHM_4 evaluates an integral using adaptive quadrature. 
The algorithm uses Clenshaw-Curtis quadrature rules of increasing diff --git a/adaptive/tests/test_average_learner.py b/adaptive/tests/test_average_learner.py index d94933397..d0176858e 100644 --- a/adaptive/tests/test_average_learner.py +++ b/adaptive/tests/test_average_learner.py @@ -1,5 +1,4 @@ import random -from typing import TYPE_CHECKING import flaky import numpy as np @@ -7,9 +6,6 @@ from adaptive.learner import AverageLearner from adaptive.runner import simple -if TYPE_CHECKING: - pass - def f_unused(seed): raise NotImplementedError("This function shouldn't be used.") diff --git a/adaptive/tests/test_average_learner1d.py b/adaptive/tests/test_average_learner1d.py index c0148c5e9..619358e8c 100644 --- a/adaptive/tests/test_average_learner1d.py +++ b/adaptive/tests/test_average_learner1d.py @@ -1,5 +1,4 @@ from itertools import chain -from typing import TYPE_CHECKING import numpy as np @@ -10,9 +9,6 @@ simple_run, ) -if TYPE_CHECKING: - pass - def almost_equal_dicts(a, b): assert a.keys() == b.keys() @@ -21,7 +17,7 @@ def almost_equal_dicts(a, b): if ( v1 is None or v2 is None - or isinstance(v1, (tuple, list)) + or isinstance(v1, tuple | list) and any(x is None for x in chain(v1, v2)) ): assert v1 == v2 diff --git a/adaptive/tests/test_balancing_learner.py b/adaptive/tests/test_balancing_learner.py index 905a55e0c..c50b2105d 100644 --- a/adaptive/tests/test_balancing_learner.py +++ b/adaptive/tests/test_balancing_learner.py @@ -35,7 +35,7 @@ def test_distribute_first_points_over_learners(strategy): learner = BalancingLearner(learners, strategy=strategy) points = learner.ask(initial_points)[0] - learner.tell_many(points, points) + learner.tell_many(points, [x for i, x in points]) points, _ = learner.ask(100) i_learner, xs = zip(*points) diff --git a/adaptive/tests/test_learner1d.py b/adaptive/tests/test_learner1d.py index 7dafbd3ab..e83629f35 100644 --- a/adaptive/tests/test_learner1d.py +++ b/adaptive/tests/test_learner1d.py @@ -2,7 +2,6 @@ import random import time -from typing import TYPE_CHECKING import flaky import numpy as np @@ -11,9 +10,6 @@ from adaptive.learner.learner1D import curvature_loss_function from adaptive.runner import BlockingRunner, simple -if TYPE_CHECKING: - pass - def flat_middle(x): x *= 1e7 @@ -399,7 +395,7 @@ def test_NaN_loss(): def f(x): a = 0.01 if random.random() < 0.2: - return np.NaN + return np.nan return x + a**2 / (a**2 + x**2) learner = Learner1D(f, bounds=(-1, 1)) diff --git a/adaptive/tests/test_learners.py b/adaptive/tests/test_learners.py index 17af7f9b8..d8cb2eaf7 100644 --- a/adaptive/tests/test_learners.py +++ b/adaptive/tests/test_learners.py @@ -30,13 +30,6 @@ from adaptive.learner.learner1D import with_pandas from adaptive.runner import simple -try: - from adaptive.learner.skopt_learner import SKOptLearner -except (ModuleNotFoundError, ImportError): - # XXX: catch the ImportError because of https://github.com/scikit-optimize/scikit-optimize/issues/902 - SKOptLearner = None # type: ignore[assignment,misc] - - LOSS_FUNCTIONS = { Learner1D: ( "loss_per_interval", @@ -53,6 +46,7 @@ adaptive.learner.learner2D.uniform_loss, adaptive.learner.learner2D.minimize_triangle_surface_loss, adaptive.learner.learner2D.resolution_loss_function(), + adaptive.learner.learner2D.thresholded_loss_function(upper_threshold=0.5), ), ), LearnerND: ( @@ -285,6 +279,67 @@ def f(x): simple_run(learner, 10) +def test_learner2d_vector_valued_function(): + """Test that Learner2D handles vector-valued functions correctly. 
+ + This test verifies that the deviations function works properly when + the function returns a vector (array/list) of values instead of a scalar. + """ + + def vector_function(xy): + """A 2D function that returns a 3-element vector.""" + x, y = xy + return [x + y, x * y, x - y] # Returns 3-element vector + + # Create learner with vector-valued function + learner = Learner2D(vector_function, bounds=((-1, 1), (-1, 1))) + + # Add some initial points + points = [ + (0.0, 0.0), + (1.0, 0.0), + (0.0, 1.0), + (1.0, 1.0), + (0.5, 0.5), + (-0.5, 0.5), + (0.5, -0.5), + (-1.0, -1.0), + ] + + for point in points: + value = vector_function(point) + learner.tell(point, value) + + # Run the learner to trigger deviations calculation + # This should not raise any errors + learner.ask(10) + + # Verify that the interpolator is created (ip is a property that may return a function) + assert hasattr(learner, "ip") + + # Check the internal interpolator if it exists + if hasattr(learner, "_ip") and learner._ip is not None: + # Check that values have the correct shape + assert learner._ip.values.shape[1] == 3 # 3 output dimensions + + # Test that we can evaluate the interpolated function + test_point = (0.25, 0.25) + ip_func = learner.interpolator(scaled=True) # Get the interpolator function + if ip_func is not None: + interpolated_value = ip_func(test_point) + assert len(interpolated_value) == 3 + + # Run more iterations to ensure deviations are computed correctly + simple_run(learner, 20) + + # Final verification + assert len(learner.data) > len(points) # Learner added more points + + # Check that all values in data are vectors + for _point, value in learner.data.items(): + assert len(value) == 3, f"Expected 3-element vector, got {value}" + + @run_with(Learner1D, Learner2D, LearnerND, SequenceLearner, AverageLearner1D) def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs): """Adding already existing data is an idempotent operation. 
@@ -573,7 +628,6 @@ def test_balancing_learner(learner_type, f, learner_kwargs): LearnerND, AverageLearner, AverageLearner1D, - maybe_skip(SKOptLearner), IntegratorLearner, SequenceLearner, with_all_loss_functions=False, @@ -606,7 +660,6 @@ def test_saving(learner_type, f, learner_kwargs): LearnerND, AverageLearner, AverageLearner1D, - maybe_skip(SKOptLearner), IntegratorLearner, SequenceLearner, with_all_loss_functions=False, @@ -645,7 +698,6 @@ def fname(learner): LearnerND, AverageLearner, AverageLearner1D, - maybe_skip(SKOptLearner), IntegratorLearner, with_all_loss_functions=False, ) diff --git a/adaptive/tests/test_notebook_integration.py b/adaptive/tests/test_notebook_integration.py index 3e4ddb298..45ee1c8c9 100644 --- a/adaptive/tests/test_notebook_integration.py +++ b/adaptive/tests/test_notebook_integration.py @@ -1,13 +1,7 @@ from __future__ import annotations -import os -import sys -from typing import TYPE_CHECKING - import pytest -if TYPE_CHECKING: - pass try: import ipykernel.iostream import zmq @@ -16,13 +10,9 @@ except ImportError: with_notebook_dependencies = False -# XXX: remove when is fixed https://github.com/ipython/ipykernel/issues/468 -skip_because_of_bug = os.name == "nt" and sys.version_info[:2] == (3, 8) - @pytest.mark.skipif( - not with_notebook_dependencies or skip_because_of_bug, - reason="notebook dependencies are not installed", + not with_notebook_dependencies, reason="notebook dependencies are not installed" ) def test_private_api_used_in_live_info(): """We are catching all errors in diff --git a/adaptive/tests/test_runner.py b/adaptive/tests/test_runner.py index e36abcbe1..0bb68c59d 100644 --- a/adaptive/tests/test_runner.py +++ b/adaptive/tests/test_runner.py @@ -1,4 +1,3 @@ -import asyncio import platform import sys import time @@ -34,7 +33,7 @@ def blocking_runner(learner, **kw): def async_runner(learner, **kw): runner = AsyncRunner(learner, executor=SequentialExecutor(), **kw) - asyncio.get_event_loop().run_until_complete(runner.task) + runner.block_until_done() runners = [simple, blocking_runner, async_runner] @@ -71,7 +70,7 @@ async def f(x): learner = Learner1D(f, (-1, 1)) runner = AsyncRunner(learner, npoints_goal=10) - asyncio.get_event_loop().run_until_complete(runner.task) + runner.block_until_done() # --- Test with different executors @@ -158,7 +157,7 @@ def test_loky_executor(loky_executor): def test_default_executor(): learner = Learner1D(linear, (-1, 1)) runner = AsyncRunner(learner, npoints_goal=10) - asyncio.get_event_loop().run_until_complete(runner.task) + runner.block_until_done() def test_auto_goal(): @@ -202,3 +201,65 @@ def test_auto_goal(): simple(learner, auto_goal(duration=1e-2, learner=learner)) t_end = time.time() assert t_end - t_start >= 1e-2 + + +def test_simple_points_per_ask(): + """Test that the simple runner respects the points_per_ask parameter (PR #484).""" + + def f(x): + return x**2 + + # Test with 1D learner asking for multiple points at once + learner1 = Learner1D(f, (-1, 1)) + simple(learner1, npoints_goal=20, points_per_ask=5) + assert learner1.npoints >= 20 + + # Test with 2D learner + def f2d(xy): + x, y = xy + return x**2 + y**2 + + learner2 = Learner2D(f2d, ((-1, 1), (-1, 1))) + simple(learner2, npoints_goal=32, points_per_ask=8) + assert learner2.npoints >= 32 + + # Test that default behavior (points_per_ask=1) is preserved + learner3 = Learner1D(f, (-1, 1)) + simple(learner3, npoints_goal=15) + assert learner3.npoints >= 15 + + # Test performance improvement: more points per ask = fewer ask calls + 
ask_count = 0 + original_ask = Learner1D.ask + + def counting_ask(self, n, tell_pending=True): + nonlocal ask_count + ask_count += 1 + return original_ask(self, n, tell_pending) + + # Monkey patch to count ask calls + Learner1D.ask = counting_ask + + try: + # Test with points_per_ask=1 (default) + learner4 = Learner1D(f, (-1, 1)) + ask_count = 0 + simple(learner4, npoints_goal=10, points_per_ask=1) + ask_count_single = ask_count + + # Test with points_per_ask=5 + learner5 = Learner1D(f, (-1, 1)) + ask_count = 0 + simple(learner5, npoints_goal=10, points_per_ask=5) + ask_count_batch = ask_count + + # When asking for 5 points at a time, we should have fewer ask calls + assert ask_count_batch < ask_count_single + + # Both learners should have reached their goal + assert learner4.npoints >= 10 + assert learner5.npoints >= 10 + + finally: + # Restore original method + Learner1D.ask = original_ask diff --git a/adaptive/tests/test_skopt_learner.py b/adaptive/tests/test_skopt_learner.py deleted file mode 100644 index babb617c7..000000000 --- a/adaptive/tests/test_skopt_learner.py +++ /dev/null @@ -1,48 +0,0 @@ -import numpy as np -import pytest - -try: - from adaptive.learner.skopt_learner import SKOptLearner - - with_scikit_optimize = True -except ModuleNotFoundError: - with_scikit_optimize = False - - -@pytest.mark.skipif(not with_scikit_optimize, reason="scikit-optimize is not installed") -def test_skopt_learner_runs(): - """The SKOptLearner provides very few guarantees about its - behaviour, so we only test the most basic usage - """ - - def g(x, noise_level=0.1): - return np.sin(5 * x) * (1 - np.tanh(x**2)) + np.random.randn() * noise_level - - learner = SKOptLearner(g, dimensions=[(-2.0, 2.0)]) - - for _ in range(11): - (x,), _ = learner.ask(1) - learner.tell(x, learner.function(x)) - - -@pytest.mark.skipif(not with_scikit_optimize, reason="scikit-optimize is not installed") -def test_skopt_learner_4D_runs(): - """The SKOptLearner provides very few guarantees about its - behaviour, so we only test the most basic usage - In this case we test also for 4D domain - """ - - def g(x, noise_level=0.1): - return ( - np.sin(5 * (x[0] + x[1] + x[2] + x[3])) - * (1 - np.tanh(x[0] ** 2 + x[1] ** 2 + x[2] ** 2 + x[3] ** 2)) - + np.random.randn() * noise_level - ) - - learner = SKOptLearner( - g, dimensions=[(-2.0, 2.0), (-2.0, 2.0), (-2.0, 2.0), (-2.0, 2.0)] - ) - - for _ in range(11): - (x,), _ = learner.ask(1) - learner.tell(x, learner.function(x)) diff --git a/adaptive/types.py b/adaptive/types.py index 8f908e087..e2869d46e 100644 --- a/adaptive/types.py +++ b/adaptive/types.py @@ -1,17 +1,11 @@ -import sys -from typing import Union +from typing import TypeAlias import numpy as np -if sys.version_info >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias - -Float: TypeAlias = Union[float, np.float_] -Bool: TypeAlias = Union[bool, np.bool_] -Int: TypeAlias = Union[int, np.int_] -Real: TypeAlias = Union[Float, Int] +Float: TypeAlias = float | np.float64 +Bool: TypeAlias = bool | np.bool_ +Int: TypeAlias = int | np.int_ +Real: TypeAlias = Float | Int __all__ = ["Float", "Bool", "Int", "Real"] diff --git a/adaptive/utils.py b/adaptive/utils.py index 7b2826a38..2a1680cac 100644 --- a/adaptive/utils.py +++ b/adaptive/utils.py @@ -7,13 +7,17 @@ import os import pickle import warnings -from collections.abc import Iterator, Sequence +from collections.abc import Awaitable, Callable, Iterator, Sequence from contextlib import contextmanager +from functools import wraps from 
itertools import product -from typing import Any, Callable +from typing import TYPE_CHECKING, Any, TypeVar import cloudpickle +if TYPE_CHECKING: + from dask.distributed import Client as AsyncDaskClient + def named_product(**items: Sequence[Any]): names = items.keys() @@ -161,3 +165,43 @@ def map(self, fn, *iterable, timeout=None, chunksize=1): def shutdown(self, wait=True): pass + + +def _cache_key(args: tuple[Any], kwargs: dict[str, Any]) -> str: + arg_strings = [str(a) for a in args] + kwarg_strings = [f"{k}={v}" for k, v in sorted(kwargs.items())] + return "_".join(arg_strings + kwarg_strings) + + +T = TypeVar("T") + + +def daskify( + client: AsyncDaskClient, cache: bool = False +) -> Callable[[Callable[..., T]], Callable[..., Awaitable[T]]]: + from dask import delayed + + def _daskify(func: Callable[..., T]) -> Callable[..., Awaitable[T]]: + if cache: + func.cache = {} # type: ignore[attr-defined] + + delayed_func = delayed(func) + + @wraps(func) + async def wrapper(*args: Any, **kwargs: Any) -> T: + if cache: + key = _cache_key(args, kwargs) # type: ignore[arg-type] + future = func.cache.get(key) # type: ignore[attr-defined] + + if future is None: + future = client.compute(delayed_func(*args, **kwargs)) + func.cache[key] = future # type: ignore[attr-defined] + else: + future = client.compute(delayed_func(*args, **kwargs)) + + result = await future + return result + + return wrapper + + return _daskify diff --git a/docs/environment.yml b/docs/environment.yml index 67298496c..caa8badcf 100644 --- a/docs/environment.yml +++ b/docs/environment.yml @@ -4,14 +4,12 @@ channels: - conda-forge dependencies: - - python=3.10 + - python=3.11 - sortedcollections=2.1.0 - - scikit-optimize=0.9.0 - - scikit-learn=1.2.2 - scipy=1.10.1 - - holoviews=1.15.4 - - bokeh=2.4.3 - - panel=0.14.4 + - holoviews=1.18.3 + - bokeh=3.3.4 + - panel=1.3.8 - pandas=2.0.0 - plotly=5.14.1 - ipywidgets=8.0.6 @@ -23,6 +21,8 @@ dependencies: - loky=3.3.0 - furo=2023.3.27 - myst-parser=0.18.1 - - dask=2023.3.2 + - dask=2024.2.0 - emoji=2.2.0 - versioningit=2.2.0 + - distributed=2024.2.0 + - param=2.0.2 diff --git a/docs/source/CHANGELOG.md b/docs/source/CHANGELOG.md index 699cc9e7b..03cb73106 120000 --- a/docs/source/CHANGELOG.md +++ b/docs/source/CHANGELOG.md @@ -1 +1 @@ -../../CHANGELOG.md \ No newline at end of file +../../CHANGELOG.md diff --git a/docs/source/algorithms_and_examples.md b/docs/source/algorithms_and_examples.md index eda3c2fff..0aff9f1fa 100644 --- a/docs/source/algorithms_and_examples.md +++ b/docs/source/algorithms_and_examples.md @@ -4,7 +4,7 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.14.5 + jupytext_version: 1.17.1 kernelspec: display_name: python3 name: python3 diff --git a/docs/source/benchmarks.md b/docs/source/benchmarks.md new file mode 100644 index 000000000..1e8cc4441 --- /dev/null +++ b/docs/source/benchmarks.md @@ -0,0 +1,515 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.14.5 +kernelspec: + display_name: adaptive + language: python + name: python3 +--- + +# Benchmarks + +```{tip} +This page is a Jupyter notebook that can be downloaded and run locally. [^download] +``` + +Adaptive sampling is a powerful technique for approximating functions with varying degrees of complexity across their domain. +This approach is particularly useful for functions with sharp features or rapid changes, as it focuses on calculating more points around those areas. 
+By concentrating points where they are needed most, adaptive sampling can provide an accurate representation of the function with fewer points compared to uniform sampling.
+This results in both faster convergence and a more accurate representation of the function.
+
+In this benchmark showcase, we will explore the effectiveness of adaptive sampling for various 1D and 2D functions, including sharp peaks, Gaussian, sinusoidal, exponential decay, and Lorentzian functions.
+We will also present benchmarking results to highlight the advantages (and disadvantages) of adaptive sampling over uniform sampling in terms of an error ratio, which is the ratio of uniform error to learner error (see the note about the error below).
+
+Below, we demonstrate the use of the adaptive package to perform adaptive sampling and visualize the results.
+By the end of this benchmarking showcase, you should have a better understanding of the benefits of adaptive sampling and of the cases in which you can apply this technique to your own simulations or functions.
+
+```{note}
+> Note on error estimates
+
+The error is estimated from the difference between the true function values and the interpolated values. Here's a step-by-step explanation of how the error is calculated:
+
+1. For each benchmark function, two learners are created: the adaptive learner and a homogeneous learner. The adaptive learner uses adaptive sampling, while the homogeneous learner uses a uniform grid of points.
+
+2. After the adaptive learning is complete, the error is calculated by comparing the interpolated values obtained from the adaptive learner to the true function values evaluated at the points used by the homogeneous learner.
+
+3. The error is the square root of the mean of the squared differences between the true function values and the interpolated values, i.e. a root-mean-square error (the code below refers to this quantity as the "L1 norm" error).
+
+Note that the choice of this error norm is somewhat arbitrary.
+**Please judge the results for yourself by looking at the plots** and observe the significantly better function approximation obtained by the adaptive learner.
+```
+
+```{warning}
+> Note on benchmark functions
+
+The benchmark functions used in this tutorial are analytical and cheap to evaluate.
+In real-world applications ([see the gallery](gallery)), adaptive sampling is often more beneficial for expensive simulations where function evaluations are computationally demanding or time-consuming.
+```
+
+## Benchmarks 1D
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+
+from __future__ import annotations
+
+import itertools
+
+import holoviews as hv
+import numpy as np
+import pandas as pd
+from scipy.interpolate import interp1d
+
+import adaptive
+
+adaptive.notebook_extension()
+
+benchmarks = {}
+benchmarks_2d = {}
+
+
+def homogeneous_learner(learner):
+    if isinstance(learner, adaptive.Learner1D):
+        xs = np.linspace(*learner.bounds, learner.npoints)
+        homo_learner = adaptive.Learner1D(learner.function, learner.bounds)
+        homo_learner.tell_many(xs, learner.function(xs))
+    else:
+        homo_learner = adaptive.Learner2D(learner.function, bounds=learner.bounds)
+        n = int(learner.npoints**0.5)
+        xs, ys = (np.linspace(*bounds, n) for bounds in learner.bounds)
+        xys = list(itertools.product(xs, ys))
+        zs = map(homo_learner.function, xys)
+        homo_learner.tell_many(xys, zs)
+    return homo_learner
+
+
+def plot(learner, other_learner):
+    if isinstance(learner, adaptive.Learner1D):
+        return learner.plot() + other_learner.plot()
+    else:
+        n = int(learner.npoints**0.5)
+        return (
+            (
+                other_learner.plot(n).relabel("Homogeneous grid")
+                + learner.plot().relabel("With adaptive")
+                + other_learner.plot(n, tri_alpha=0.4)
+                + learner.plot(tri_alpha=0.4)
+            )
+            .cols(2)
+            .options(hv.opts.EdgePaths(color="w"))
+        )
+
+
+def err(ys, ys_other):
+    # Root-mean-square difference; the note above and the function below
+    # refer to this quantity as the "L1 norm" error.
+    abserr = np.abs(ys - ys_other)
+    return np.average(abserr**2) ** 0.5
+
+
+def l1_norm_error(learner, other_learner):
+    if isinstance(learner, adaptive.Learner1D):
+        ys_interp = interp1d(*learner.to_numpy().T)
+        xs, _ = other_learner.to_numpy().T
+        ys = ys_interp(xs)  # interpolate the other learner's points
+        _, ys_other = other_learner.to_numpy().T
+        return err(ys, ys_other)
+    else:
+        xys = other_learner.to_numpy()[:, :2]
+        zs = learner.function(xys.T)
+        interpolator = learner.interpolator()
+        zs_interp = interpolator(xys)
+        # Error between the true function values and the interpolated values
+        return err(zs_interp, zs)
+
+
+def run_and_plot(learner, **goal):
+    adaptive.runner.simple(learner, **goal)
+    homo_learner = homogeneous_learner(learner)
+    bms = benchmarks if isinstance(learner, adaptive.Learner1D) else benchmarks_2d
+    bm = {
+        "npoints": learner.npoints,
+        "error": l1_norm_error(learner, homo_learner),
+        "uniform_error": l1_norm_error(homo_learner, learner),
+    }
+    bm["error_ratio"] = bm["uniform_error"] / bm["error"]
+    bms[learner.function.__name__] = bm
+    display(pd.DataFrame([bm]))  # noqa: F821
+    return plot(learner, homo_learner).relabel(
+        f"{learner.function.__name__} function with {learner.npoints} points"
+    )
+
+
+def to_df(benchmarks):
+    df = pd.DataFrame(benchmarks).T
+    df.sort_values("error_ratio", ascending=False, inplace=True)
+    return df
+
+
+def plot_benchmarks(df, max_ratio: float = 1000, *, log_scale: bool = True):
+    import matplotlib.pyplot as plt
+    import numpy as np
+
+    df_hist = df.copy()
+
+    # Replace infinite values with max_ratio
+    df_hist.loc[np.isinf(df_hist.error_ratio), "error_ratio"] = max_ratio
+
+    # Convert the DataFrame index (function names) into a column
+    df_hist.reset_index(inplace=True)
+    df_hist.rename(columns={"index": "function_name"}, inplace=True)
+
+    # Create a list of colors based on the error_ratio values
+    bar_colors = ["green" if x > 1 else "red" for x in df_hist["error_ratio"]]
+
+    # Create the bar chart
+    plt.figure(figsize=(12, 6))
+    plt.bar(df_hist["function_name"], df_hist["error_ratio"], color=bar_colors)
+
+    # Add a dashed horizontal line at 1
+    plt.axhline(y=1, linestyle="--", color="gray", linewidth=1)
+
+    if log_scale:
+        # Set the y-axis to log scale
+        plt.yscale("log")
+
+    # Customize the plot
+    plt.xlabel("Function Name")
+    plt.ylabel("Error Ratio (Uniform Error / Learner Error)")
+    plt.title("Error Ratio Comparison for Different Functions")
+    plt.xticks(rotation=45)
+
+    # Show the plot
+    plt.show()
+```
+
+1. **Sharp peak function**:
+
+In the case of the sharp peak function, adaptive sampling performs very well because it can capture the peak by calculating more points around it, while still accurately representing the smoother regions of the function with fewer points.
+
+```{code-cell} ipython3
+def peak(x, offset=0.123):
+    a = 0.01
+    return x + a**2 / (a**2 + (x - offset) ** 2)
+
+
+learner = adaptive.Learner1D(peak, bounds=(-1, 1))
+run_and_plot(learner, loss_goal=0.1)
+```
+
+2. **Gaussian function**:
+
+For smoother functions, like the Gaussian function, adaptive sampling may not provide a significant advantage over uniform sampling.
+The algorithm still focuses on the areas of the function with more rapid changes, but the improvement over uniform sampling is less noticeable.
+
+```{code-cell} ipython3
+def gaussian(x, mu=0, sigma=0.5):
+    return (1 / np.sqrt(2 * np.pi * sigma**2)) * np.exp(
+        -((x - mu) ** 2) / (2 * sigma**2)
+    )
+
+
+learner = adaptive.Learner1D(gaussian, bounds=(-5, 5))
+run_and_plot(learner, loss_goal=0.1)
+```
+
+3. **Sinusoidal function**:
+
+The sinusoidal function is another example of a smoother function where adaptive sampling doesn't provide a substantial advantage over uniform sampling.
+
+```{code-cell} ipython3
+def sinusoidal(x, amplitude=1, frequency=1, phase=0):
+    return amplitude * np.sin(frequency * x + phase)
+
+
+learner = adaptive.Learner1D(sinusoidal, bounds=(-2 * np.pi, 2 * np.pi))
+run_and_plot(learner, loss_goal=0.1)
+```
+
+4. **Exponential decay function**:
+
+Adaptive sampling can be useful for the exponential decay function, as it focuses on the steeper part of the curve and allocates fewer points to the flatter region.
+
+```{code-cell} ipython3
+def exponential_decay(x, tau=1):
+    return np.exp(-x / tau)
+
+
+learner = adaptive.Learner1D(exponential_decay, bounds=(0, 5))
+run_and_plot(learner, loss_goal=0.1)
+```
+
+5. **Lorentzian function**:
+
+The Lorentzian function is another example of a function with a sharp peak.
+Adaptive sampling performs well in this case, as it concentrates points around the peak while allocating fewer points to the smoother regions of the function.
+
+```{code-cell} ipython3
+def lorentzian(x, x0=0, gamma=0.3):
+    return (1 / np.pi) * (gamma / 2) / ((x - x0) ** 2 + (gamma / 2) ** 2)
+
+
+learner = adaptive.Learner1D(lorentzian, bounds=(-5, 5))
+run_and_plot(learner, loss_goal=0.1)
+```
+
+6. **Sinc function**:
+
+The sinc function has oscillatory behavior with varying amplitude.
+Adaptive sampling is helpful in this case, as it can allocate more points around the oscillations, effectively capturing the shape of the function.
+
+```{code-cell} ipython3
+def sinc(x):
+    return np.sinc(x / np.pi)
+
+
+learner = adaptive.Learner1D(sinc, bounds=(-10, 10))
+run_and_plot(learner, loss_goal=0.1)
+```
+
+7. **Step function (Heaviside)**:
+
+In the case of the step function, adaptive sampling efficiently allocates more points around the discontinuity, providing an accurate representation of the function.
+
+```{code-cell} ipython3
+def step(x, x0=0):
+    return np.heaviside(x - x0, 0.5)
+
+
+learner = adaptive.Learner1D(step, bounds=(-5, 5))
+run_and_plot(learner, npoints_goal=20)
+```
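+
+As a rough, self-contained illustration of the same point (separate from the benchmark helpers above; only `adaptive` and `numpy` are assumed), one can compare the smallest interval the adaptive learner produces with the fixed uniform spacing:
+
+```python
+import numpy as np
+
+import adaptive
+
+learner = adaptive.Learner1D(lambda x: np.heaviside(x, 0.5), bounds=(-5, 5))
+adaptive.runner.simple(learner, npoints_goal=20)
+
+xs = np.sort(learner.to_numpy()[:, 0])
+print("smallest adaptive interval:", np.diff(xs).min())  # should bracket the jump at 0
+print("uniform spacing with 20 points:", 10 / 19)
+```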
+
+8. **Damped oscillation function**:
+
+The damped oscillation function has both oscillatory behavior and a decay component.
+Adaptive sampling can effectively capture the behavior of this function, calculating more points around the oscillations while using fewer points in the smoother regions.
+
+```{code-cell} ipython3
+def damped_oscillation(x, a=1, omega=1, gamma=0.1):
+    return a * np.exp(-gamma * x) * np.sin(omega * x)
+
+
+learner = adaptive.Learner1D(damped_oscillation, bounds=(-10, 10))
+run_and_plot(learner, loss_goal=0.1)
+```
+
+9. **Bump function (smooth function with compact support)**:
+
+For the bump function, adaptive sampling concentrates points around the region of the bump, efficiently capturing its shape and calculating fewer points in the flatter regions.
+
+```{code-cell} ipython3
+def bump(x, a=1, x0=0, s=0.5):
+    z = (x - x0) / s
+    return np.where(np.abs(z) < 1, a * np.exp(-1 / (1 - z**2)), 0)
+
+
+learner = adaptive.Learner1D(bump, bounds=(-5, 5))
+run_and_plot(learner, loss_goal=0.1)
+```
+
+### Results
+
++++
+
+In summary, adaptive sampling is a powerful approach for approximating functions with sharp features or varying degrees of complexity across their domain.
+It can efficiently allocate points where they are needed most, providing an accurate representation of the function while reducing the total number of points required.
+For smoother functions, adaptive sampling still focuses on areas with more rapid changes but may not provide significant advantages over uniform sampling.
+
+```{code-cell} ipython3
+df = to_df(benchmarks)
+df
+```
+
+```{code-cell} ipython3
+plot_benchmarks(df)
+```
+
+## Benchmarks 2D
+
++++
+
+1. **Sharp ring**:
+
+This function has a ring structure in 2D.
+
+```{code-cell} ipython3
+def ring(xy, a=0.2):
+    x, y = xy
+    return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4)
+
+
+learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
+run_and_plot(learner, npoints_goal=1000)
+```
+
+2. **Gaussian surface**:
+
+The Gaussian surface is a smooth, bell-shaped function in 2D.
+It has a peak at the mean (mu) and spreads out with increasing standard deviation (sigma).
+Adaptive sampling works well in this case because it can focus on the region around the peak where the function changes rapidly, while using fewer points in the flatter regions where the function changes slowly.
+
+```{code-cell} ipython3
+def gaussian_surface(xy, mu=(0, 0), sigma=(1, 1)):
+    x, y = xy
+    mu_x, mu_y = mu
+    sigma_x, sigma_y = sigma
+    return (1 / (2 * np.pi * sigma_x * sigma_y)) * np.exp(
+        -((x - mu_x) ** 2 / (2 * sigma_x**2) + (y - mu_y) ** 2 / (2 * sigma_y**2))
+    )
+
+
+learner = adaptive.Learner2D(gaussian_surface, bounds=[(-5, 5), (-5, 5)])
+run_and_plot(learner, loss_goal=0.01)
+```
+
+3. **Sinusoidal surface**:
+
+The sinusoidal surface is a product of two sinusoidal functions in the x and y directions.
+The surface has a regular pattern of peaks and valleys.
+Adaptive sampling works well in this case because it can adapt to the frequency of the sinusoidal pattern and allocate more points to areas with higher curvature, ensuring an accurate representation of the function.
+
+```{code-cell} ipython3
+def sinusoidal_surface(xy, amplitude=1, frequency=(0.3, 3)):
+    x, y = xy
+    freq_x, freq_y = frequency
+    return amplitude * np.sin(freq_x * x) * np.sin(freq_y * y)
+
+
+learner = adaptive.Learner2D(
+    sinusoidal_surface, bounds=[(-2 * np.pi, 2 * np.pi), (-2 * np.pi, 2 * np.pi)]
+)
+run_and_plot(learner, loss_goal=0.01)
+```
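+
+A side note before continuing: every 2D benchmark here runs `Learner2D` with its default loss. `adaptive.learner.learner2D` also ships alternative losses (for example `resolution_loss_function` and `thresholded_loss_function`, both used in `adaptive`'s test suite); a sketch of passing one explicitly, reusing `gaussian_surface` from above:
+
+```python
+from adaptive import Learner2D
+from adaptive.learner.learner2D import thresholded_loss_function
+
+# Deprioritizes simplices based on the function values relative to the
+# threshold; see its docstring for the exact behavior.
+loss = thresholded_loss_function(upper_threshold=0.5)
+learner = Learner2D(
+    gaussian_surface, bounds=[(-5, 5), (-5, 5)], loss_per_triangle=loss
+)
+```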
+
+4. **Circular peak**:
+
+A radial analogue of the 1D sharp-peak function: a narrow peak at the origin sitting on a cone-shaped background.
+
+```{code-cell} ipython3
+def circular_peak(xy, x0=0, y0=0, a=0.01):
+    x, y = xy
+    r = np.sqrt((x - x0) ** 2 + (y - y0) ** 2)
+    return r + a**2 / (a**2 + r**2)
+
+
+learner = adaptive.Learner2D(circular_peak, bounds=[(-1, 1), (-1, 1)])
+run_and_plot(learner, loss_goal=0.01)
+```
+
+5. **Paraboloid**:
+
+The paraboloid is a smooth, curved surface defined by a quadratic function in the x and y directions.
+Adaptive sampling is less beneficial for this function compared to functions with sharp features, as the curvature is relatively constant across the entire surface.
+However, the adaptive algorithm can still provide a good representation of the paraboloid with fewer points than a uniform grid.
+
+```{code-cell} ipython3
+def paraboloid(xy, a=1, b=1):
+    x, y = xy
+    return a * x**2 + b * y**2
+
+
+learner = adaptive.Learner2D(paraboloid, bounds=[(-5, 5), (-5, 5)])
+run_and_plot(learner, loss_goal=0.01)
+```
+
+6. **Cross-shaped function**:
+
+This function has a cross-shaped structure in 2D.
+
+```{code-cell} ipython3
+def cross(xy, a=0.2):
+    x, y = xy
+    return np.exp(-(x**2 + y**2) / a**2) * (
+        np.cos(4 * np.pi * x) + np.cos(4 * np.pi * y)
+    )
+
+
+learner = adaptive.Learner2D(cross, bounds=[(-1, 1), (-1, 1)])
+run_and_plot(learner, npoints_goal=1000)
+```
+
+7. **Mexican hat function (Ricker wavelet)**:
+
+This function has a central peak surrounded by a circular trough.
+
+```{code-cell} ipython3
+def mexican_hat(xy, a=1):
+    x, y = xy
+    r2 = x**2 + y**2
+    return a * (1 - r2) * np.exp(-r2 / 2)
+
+
+learner = adaptive.Learner2D(mexican_hat, bounds=[(-2, 2), (-2, 2)])
+run_and_plot(learner, npoints_goal=1000)
+```
+
+8. **Saddle surface**:
+
+This function has a saddle shape with increasing curvature along the diagonal.
+
+```{code-cell} ipython3
+def saddle(xy, a=1, b=1):
+    x, y = xy
+    return a * x**2 - b * y**2
+
+
+learner = adaptive.Learner2D(saddle, bounds=[(-2, 2), (-2, 2)])
+run_and_plot(learner, npoints_goal=1000)
+```
+
+9. **Steep linear ramp**:
+
+This function has a steep linear ramp in a narrow region.
+
+```{code-cell} ipython3
+def steep_ramp(xy, width=0.1):
+    x, y = xy
+    return np.where((-width / 2 < x) & (x < width / 2), 10 * x + y, y)
+
+
+learner = adaptive.Learner2D(steep_ramp, bounds=[(-1, 1), (-1, 1)])
+run_and_plot(learner, loss_goal=0.005)
+```
+
+10. **Localized sharp peak**:
+
+This function has a sharp peak in a small localized area.
+
+```{code-cell} ipython3
+def localized_sharp_peak(xy, x0=0, y0=0, a=0.01):
+    x, y = xy
+    r = np.sqrt((x - x0) ** 2 + (y - y0) ** 2)
+    return r + a**4 / (a**4 + r**4)
+
+
+learner = adaptive.Learner2D(localized_sharp_peak, bounds=[(-1, 1), (-1, 1)])
+run_and_plot(learner, loss_goal=0.01)
+```
+
+11. **Ridge function**:
+
+A function with a narrow ridge along the x-axis, which can be controlled by a parameter `b`.
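+
+For the default `b=100`, the Gaussian factor gives the ridge a characteristic half-width of `1 / sqrt(b) = 0.1`, so a uniform grid needs a spacing in `y` well below 0.1 to resolve it. A quick sanity check (independent of the cell below):
+
+```python
+import numpy as np
+
+b = 100
+half_width = 1 / np.sqrt(b)
+print(np.exp(-b * half_width**2))  # ~0.37: decayed to 1/e at y = 0.1
+```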
+ +```{code-cell} ipython3 +def ridge_function(xy, b=100): + x, y = xy + return np.exp(-b * y**2) * np.sin(x) + + +learner = adaptive.Learner2D(ridge_function, bounds=[(-2, 2), (-1, 1)]) +run_and_plot(learner, loss_goal=0.01) +``` + +### Results + +```{code-cell} ipython3 +df = to_df(benchmarks_2d) +df[["npoints", "error_ratio"]] +``` + +```{code-cell} ipython3 +plot_benchmarks(df) +``` + +[^download]: This notebook can be downloaded as **{nb-download}`benchmarks.ipynb`** and {download}`benchmarks.md`. diff --git a/docs/source/index.md b/docs/source/index.md index aff03a56a..b1bfe8977 100644 --- a/docs/source/index.md +++ b/docs/source/index.md @@ -41,6 +41,7 @@ self algorithms_and_examples docs tutorial/tutorial +benchmarks gallery reference/adaptive CHANGELOG diff --git a/docs/source/reference/adaptive.learner.skopt_learner.md b/docs/source/reference/adaptive.learner.skopt_learner.md deleted file mode 100644 index d02da3dbe..000000000 --- a/docs/source/reference/adaptive.learner.skopt_learner.md +++ /dev/null @@ -1,8 +0,0 @@ -# adaptive.SKOptLearner - -```{eval-rst} -.. autoclass:: adaptive.SKOptLearner - :members: - :undoc-members: - :show-inheritance: -``` diff --git a/docs/source/tutorial/tutorial.IntegratorLearner.md b/docs/source/tutorial/tutorial.IntegratorLearner.md index 50aaf2e5b..12b86e090 100644 --- a/docs/source/tutorial/tutorial.IntegratorLearner.md +++ b/docs/source/tutorial/tutorial.IntegratorLearner.md @@ -4,7 +4,7 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.14.5 + jupytext_version: 1.14.7 kernelspec: display_name: python3 name: python3 @@ -86,9 +86,7 @@ if not runner.task.done(): ```{code-cell} ipython3 print( - "The integral value is {} with the corresponding error of {}".format( - learner.igral, learner.err - ) + f"The integral value is {learner.igral} with the corresponding error of {learner.err}" ) learner.plot() ``` diff --git a/docs/source/tutorial/tutorial.LearnerND.md b/docs/source/tutorial/tutorial.LearnerND.md index 46f948708..37705b79d 100644 --- a/docs/source/tutorial/tutorial.LearnerND.md +++ b/docs/source/tutorial/tutorial.LearnerND.md @@ -4,7 +4,7 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.14.5 + jupytext_version: 1.17.1 kernelspec: display_name: python3 name: python3 diff --git a/docs/source/tutorial/tutorial.SKOptLearner.md b/docs/source/tutorial/tutorial.SKOptLearner.md deleted file mode 100644 index 49a5340a8..000000000 --- a/docs/source/tutorial/tutorial.SKOptLearner.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -kernelspec: - name: python3 - display_name: python3 -jupytext: - text_representation: - extension: .md - format_name: myst - format_version: '0.13' - jupytext_version: 1.13.8 ---- -# Tutorial {class}`~adaptive.SKOptLearner` - -```{note} -Because this documentation consists of static html, the `live_plot` and `live_info` widget is not live. -Download the notebook in order to see the real behaviour. [^download] -``` - -```{code-cell} ipython3 -:tags: [hide-cell] - -import adaptive - -adaptive.notebook_extension() - -import holoviews as hv -import numpy as np -``` - -We have wrapped the `Optimizer` class from [scikit-optimize](https://github.com/scikit-optimize/scikit-optimize), to show how existing libraries can be integrated with `adaptive`. - -The {class}`~adaptive.SKOptLearner` attempts to “optimize” the given function `g` (i.e. find the global minimum of `g` in the window of interest). 
- -Here we use the same example as in the `scikit-optimize` [tutorial](https://github.com/scikit-optimize/scikit-optimize/blob/master/examples/ask-and-tell.ipynb). -Although `SKOptLearner` can optimize functions of arbitrary dimensionality, we can only plot the learner if a 1D function is being learned. - -```{code-cell} ipython3 -def F(x, noise_level=0.1): - return np.sin(5 * x) * (1 - np.tanh(x**2)) + np.random.randn() * noise_level -``` - -```{code-cell} ipython3 -learner = adaptive.SKOptLearner( - F, - dimensions=[(-2.0, 2.0)], - base_estimator="GP", - acq_func="gp_hedge", - acq_optimizer="lbfgs", -) -runner = adaptive.Runner(learner, ntasks=1, npoints_goal=40) -``` - -```{code-cell} ipython3 -:tags: [hide-cell] - -await runner.task # This is not needed in a notebook environment! -``` - -```{code-cell} ipython3 -runner.live_info() -``` - -```{code-cell} ipython3 -xs = np.linspace(*learner.space.bounds[0]) -to_learn = hv.Curve((xs, [F(x, 0) for x in xs]), label="to learn") - -plot = runner.live_plot().relabel("prediction", depth=2) * to_learn -plot.opts(legend_position="top") -``` - -[^download]: This notebook can be downloaded as **{nb-download}`tutorial.SKOptLearner.ipynb`** and {download}`tutorial.SKOptLearner.md`. diff --git a/docs/source/tutorial/tutorial.advanced-topics.md b/docs/source/tutorial/tutorial.advanced-topics.md index 2dfc6cf29..8ba0caf40 100644 --- a/docs/source/tutorial/tutorial.advanced-topics.md +++ b/docs/source/tutorial/tutorial.advanced-topics.md @@ -9,7 +9,7 @@ kernelspec: display_name: python3 name: python3 --- - +(TutorialAdvancedTopics)= # Advanced Topics ```{note} @@ -365,22 +365,19 @@ await runner.task # This is not needed in a notebook environment! # The result will only be set when the runner is done. timer.result() ``` - +(CustomParallelization)= ## Custom parallelization using coroutines Adaptive by itself does not implement a way of sharing partial results between function executions. Instead its implementation of parallel computation using executors is minimal by design. The appropriate way to implement custom parallelization is by using coroutines (asynchronous functions). + We illustrate this approach by using `dask.distributed` for parallel computations in part because it supports asynchronous operation out-of-the-box. -Let us consider a function `f(x)` which is composed by two parts: -a slow part `g` which can be reused by multiple inputs and shared across function evaluations and a fast part `h` that will be computed for every `x`. +We will focus on a function `f(x)` that consists of two distinct components: a slow part `g` that can be reused across multiple inputs and shared among various function evaluations, and a fast part `h` that is calculated for each `x` value. ```{code-cell} ipython3 -import time - - -def f(x): +def f(x): # example function without caching """ Integer part of `x` repeats and should be reused Decimal part requires a new computation @@ -390,7 +387,9 @@ def f(x): def g(x): """Slow but reusable function""" - time.sleep(random.randrange(5)) + from time import sleep + + sleep(random.randrange(5)) return x**2 @@ -399,12 +398,59 @@ def h(x): return x**3 ``` +### Using `adaptive.utils.daskify` + +To simplify the process of using coroutines and caching with dask and Adaptive, we provide the {func}`adaptive.utils.daskify` decorator. This decorator can be used to parallelize functions with caching as well as functions without caching, making it a powerful tool for custom parallelization in Adaptive. 
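+
+Internally, results are memoized under a plain string key built from the call arguments (the private helper `_cache_key` in `adaptive/utils.py`), so arguments whose string representations coincide share a cache entry. A small illustration (it uses a private helper, so this is subject to change):
+
+```python
+from adaptive.utils import _cache_key
+
+print(_cache_key((1, 2), {"tau": 0.5}))  # -> "1_2_tau=0.5"
+```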
+
+```{code-cell} ipython3
+from dask.distributed import Client
+
+import adaptive
+
+client = await Client(asynchronous=True)
+
+
+# The g function has caching enabled
+g_dask = adaptive.utils.daskify(client, cache=True)(g)
+
+# Can be used like a decorator too:
+# >>> @adaptive.utils.daskify(client, cache=True)
+# ... def g(x): ...
+
+# The h function does not use caching
+h_dask = adaptive.utils.daskify(client)(h)
+
+# Now we need to rewrite `f(x)` to use `g` and `h` as coroutines
+
+
+async def f_parallel(x):
+    g_result = await g_dask(int(x))
+    h_result = await h_dask(x % 1)
+    return (g_result + h_result) ** 2
+
+
+learner = adaptive.Learner1D(f_parallel, bounds=(-3.5, 3.5))
+runner = adaptive.AsyncRunner(learner, loss_goal=0.01, ntasks=20)
+runner.live_info()
+```
+
+Finally, we wait for the runner to finish, and then plot the result.
+
+```{code-cell} ipython3
+await runner.task
+learner.plot()
+```
+
+### Step-by-step explanation of custom parallelization
+
+Now let's go through the process step by step to understand how the {func}`adaptive.utils.daskify` decorator works.
+
 In order to combine reuse of values of `g` with adaptive, we need to convert `f` into a dask graph by using `dask.delayed`.
 
 ```{code-cell} ipython3
 from dask import delayed
 
-# Convert g and h to dask.Delayed objects
+# Convert g and h to dask.Delayed objects, such that they run in the Client
 g, h = delayed(g), delayed(h)
 
@@ -441,7 +487,7 @@ learner = adaptive.Learner1D(f_parallel, bounds=(-3.5, 3.5))
 runner = adaptive.AsyncRunner(learner, loss_goal=0.01, ntasks=20)
 ```
 
-Finally we await for the runner to finish, and then plot the result.
+Finally we wait for the runner to finish, and then plot the result.
 
 ```{code-cell} ipython3
 await runner.task
 learner.plot()
diff --git a/docs/source/tutorial/tutorial.md b/docs/source/tutorial/tutorial.md
index 9813c25d8..a3f79970d 100644
--- a/docs/source/tutorial/tutorial.md
+++ b/docs/source/tutorial/tutorial.md
@@ -37,7 +37,6 @@ tutorial.IntegratorLearner
 tutorial.LearnerND
 tutorial.AverageLearner1D
 tutorial.SequenceLearner
-tutorial.SKOptLearner
 tutorial.parallelism
 tutorial.advanced-topics
 ```
diff --git a/docs/source/tutorial/tutorial.parallelism.md b/docs/source/tutorial/tutorial.parallelism.md
index ef0963a3a..5decc61d5 100644
--- a/docs/source/tutorial/tutorial.parallelism.md
+++ b/docs/source/tutorial/tutorial.parallelism.md
@@ -57,6 +57,8 @@ runner.live_info()
 runner.live_plot(update_interval=0.1)
 ```
 
+Also check out the {ref}`Custom parallelization <CustomParallelization>` section in the {ref}`advanced topics tutorial <TutorialAdvancedTopics>` for more control over caching and parallelization.
+
 ## `mpi4py.futures.MPIPoolExecutor`
 
 This makes sense if you want to run a `Learner` on a cluster non-interactively using a job script.
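+
+A minimal sketch of such a job script (the `mpiexec` invocation and the executor arguments are assumptions that depend on your MPI setup; the tutorial's full example continues below):
+
+```python
+# run_learner.py, launched with e.g.: mpiexec -n 1 python run_learner.py
+from mpi4py.futures import MPIPoolExecutor
+
+import adaptive
+
+
+def f(x):
+    return x**2
+
+
+if __name__ == "__main__":
+    learner = adaptive.Learner1D(f, bounds=(-1, 1))
+    runner = adaptive.Runner(
+        learner,
+        executor=MPIPoolExecutor(),
+        npoints_goal=100,
+    )
+    runner.block_until_done()
+    learner.save("data.pickle")
+```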
@@ -87,7 +89,7 @@ if __name__ == "__main__": runner.start_periodic_saving(dict(fname=fname), interval=600) # block until runner goal reached - runner.ioloop.run_until_complete(runner.task) + runner.block_until_done() # save one final time before exiting learner.save(fname) diff --git a/environment.yml b/environment.yml index 3ae91c491..1dfb8c86d 100644 --- a/environment.yml +++ b/environment.yml @@ -4,7 +4,7 @@ channels: - conda-forge dependencies: - - python=3.9 + - python=3.13 - sortedcontainers - sortedcollections - scipy @@ -16,6 +16,4 @@ dependencies: - loky - jupyter_client>=5.2.2 - ipywidgets - - scikit-optimize>=0.8.1 - - scikit-learn<=0.24.2 # https://github.com/scikit-optimize/scikit-optimize/issues/1059 - plotly diff --git a/example-notebook.ipynb b/example-notebook.ipynb index af6fa1888..429239c6a 100644 --- a/example-notebook.ipynb +++ b/example-notebook.ipynb @@ -23,16 +23,15 @@ "metadata": {}, "outputs": [], "source": [ - "import adaptive\n", - "\n", - "adaptive.notebook_extension()\n", - "\n", "import random\n", "from functools import partial\n", "\n", - "# Import modules that are used in multiple cells\n", "import holoviews as hv\n", - "import numpy as np" + "import numpy as np\n", + "\n", + "import adaptive\n", + "\n", + "adaptive.notebook_extension()" ] }, { @@ -489,8 +488,7 @@ " print(\"WARINING: The runner hasn't reached it goal yet!\")\n", "\n", "print(\n", - " f\"The integral value is {learner.igral} \"\n", - " f\"with a corresponding error of {learner.err}\"\n", + " f\"The integral value is {learner.igral} with a corresponding error of {learner.err}\"\n", ")\n", "learner.plot()" ] @@ -971,64 +969,6 @@ "learner.extra_data" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# `Scikit-Optimize`" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We have wrapped the `Optimizer` class from [`scikit-optimize`](https://github.com/scikit-optimize/scikit-optimize), to show how existing libraries can be integrated with `adaptive`.\n", - "\n", - "The `SKOptLearner` attempts to \"optimize\" the given function `g` (i.e. find the global minimum of `g` in the window of interest).\n", - "\n", - "Here we use the same example as in the `scikit-optimize` [tutorial](https://github.com/scikit-optimize/scikit-optimize/blob/master/examples/ask-and-tell.ipynb). Although `SKOptLearner` can optimize functions of arbitrary dimensionality, we can only plot the learner if a 1D function is being learned." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def F(x, noise_level=0.1):\n", - " return np.sin(5 * x) * (1 - np.tanh(x**2)) + np.random.randn() * noise_level" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "learner = adaptive.SKOptLearner(\n", - " F,\n", - " dimensions=[(-2.0, 2.0)],\n", - " base_estimator=\"GP\",\n", - " acq_func=\"gp_hedge\",\n", - " acq_optimizer=\"lbfgs\",\n", - ")\n", - "runner = adaptive.Runner(learner, ntasks=1, npoints_goal=40)\n", - "runner.live_info()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "xs = np.linspace(*learner.space.bounds[0])\n", - "to_learn = hv.Curve((xs, [F(x, 0) for x in xs]), label=\"to learn\")\n", - "\n", - "plot = runner.live_plot().relabel(\"prediction\", depth=2) * to_learn\n", - "plot.opts(legend_position=\"top\")" - ] - }, { "cell_type": "markdown", "metadata": { diff --git a/noxfile.py b/noxfile.py index 1866a94a6..71a2217a4 100644 --- a/noxfile.py +++ b/noxfile.py @@ -1,26 +1,45 @@ +"""Nox configuration file.""" + +import os + import nox +nox.options.default_venv_backend = "uv" + +python = ["3.11", "3.12", "3.13"] +num_cpus = os.cpu_count() or 1 +xdist = ("-n", "auto") if num_cpus > 2 else () + + +@nox.session(python=python) +def pytest_min_deps(session: nox.Session) -> None: + """Run pytest with no optional dependencies.""" + session.install(".[test]") + session.run("coverage", "erase") + session.run("pytest", *xdist) + -@nox.session(python=["3.9", "3.10", "3.11"]) -@nox.parametrize("all_deps", [True, False]) -def pytest(session, all_deps): - session.install(".[testing,other]" if all_deps else ".[testing]") +@nox.session(python=python) +def pytest_all_deps(session: nox.Session) -> None: + """Run pytest with "other" optional dependencies.""" + session.install(".[test,other]") session.run("coverage", "erase") - session.run("pytest") + session.run("pytest", *xdist) -@nox.session(python="3.11") -def pytest_typeguard(session): - session.install(".[testing,other]") +@nox.session(python="3.13") +def pytest_typeguard(session: nox.Session) -> None: + """Run pytest with typeguard.""" + session.install(".[test,other]") session.run("coverage", "erase") - session.run("pytest", "--typeguard-packages=adaptive") + session.run("pytest", "--typeguard-packages=adaptive", *xdist) -@nox.session(python="3.11") -def coverage(session): - session.install("coverage") - session.install(".[testing,other]") - session.run("pytest") +@nox.session(python="3.13") +def coverage(session: nox.Session) -> None: + """Generate coverage report.""" + session.install(".[test,other]") + session.run("pytest", *xdist) session.run("coverage", "report") session.run("coverage", "xml") diff --git a/pyproject.toml b/pyproject.toml index f0ea8eddf..5c7186e98 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [build-system] build-backend = "setuptools.build_meta" -requires = ["setuptools ~= 65.0.0", "versioningit ~= 2.2.0", "wheel"] +requires = ["setuptools ~= 69.0.0", "versioningit ~= 3.0.0", "wheel"] [project] name = "adaptive" @@ -8,14 +8,14 @@ dynamic = ["version"] description = "Parallel active learning of mathematical functions" maintainers = [{ name = "Adaptive authors" }] license = { text = "BSD" } -requires-python = ">=3.9" +requires-python = ">=3.11" classifiers = [ "Development Status :: 4 - Beta", "License :: OSI Approved :: BSD License", "Intended Audience 
:: Science/Research", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", ] dependencies = [ "scipy", @@ -23,7 +23,6 @@ dependencies = [ "sortedcontainers >= 2.0", "cloudpickle", "loky >= 2.9", - "typing_extensions; python_version < '3.10'", "versioningit", ] @@ -31,16 +30,14 @@ dependencies = [ other = [ "dill", "distributed", - "ipyparallel>=6.2.5", # because of https://github.com/ipython/ipyparallel/issues/404 - "scikit-optimize>=0.8.1", # because of https://github.com/scikit-optimize/scikit-optimize/issues/931 - "scikit-learn", + "ipyparallel>=6.2.5", # because of https://github.com/ipython/ipyparallel/issues/404 "wexpect; os_name == 'nt'", "pexpect; os_name != 'nt'", ] notebook = [ "ipython", - "ipykernel>=4.8.0", # because https://github.com/ipython/ipykernel/issues/274 and https://github.com/ipython/ipykernel/issues/263 - "jupyter_client>=5.2.2", # because https://github.com/jupyter/jupyter_client/pull/314 + "ipykernel>=4.8.0", # because https://github.com/ipython/ipykernel/issues/274 and https://github.com/ipython/ipykernel/issues/263 + "jupyter_client>=5.2.2", # because https://github.com/jupyter/jupyter_client/pull/314 "holoviews>=1.9.1", "ipywidgets", "bokeh", @@ -48,15 +45,18 @@ notebook = [ "matplotlib", "plotly", ] -testing = [ +test = [ "flaky", "pytest", "pytest-cov", "pytest-randomly", "pytest-timeout", + "pytest-xdist", "pre_commit", "typeguard", + "coverage", ] +dev = ["adaptive[test,nox,notebook,other]"] [project.urls] homepage = "https://adaptive.readthedocs.io/" @@ -67,6 +67,12 @@ repository = "https://github.com/python-adaptive/adaptive" content-type = "text/markdown" file = "README.md" +[dependency-groups] +nox = [ + "nox", + "pytest-github-actions-annotate-failures", +] + [tool.setuptools.packages.find] include = ["adaptive.*", "adaptive"] @@ -92,11 +98,13 @@ output = ".coverage.xml" [tool.mypy] ignore_missing_imports = true -python_version = "3.9" +python_version = "3.11" [tool.ruff] -line-length = 150 -target-version = "py39" +line-length = 88 +target-version = "py311" + +[tool.ruff.lint] select = ["B", "C", "E", "F", "W", "T", "B9", "I", "UP"] ignore = [ "T20", # flake8-print @@ -107,16 +115,18 @@ ignore = [ "D402", # First line should not be the function's signature "PLW0603", # Using the global statement to update `X` is discouraged "D401", # First line of docstring should be in imperative mood + "E501", # Line too long + "B905", # `zip()` without an explicit `strict=` parameter ] -[tool.ruff.per-file-ignores] +[tool.ruff.lint.mccabe] +max-complexity = 18 + +[tool.ruff.lint.per-file-ignores] "tests/*" = ["SLF001"] "ci/*" = ["INP001"] "tests/test_examples.py" = ["E501"] -[tool.ruff.mccabe] -max-complexity = 18 - [tool.versioningit] [tool.versioningit.vcs] diff --git a/readthedocs.yml b/readthedocs.yml index 23fab10c6..dba656e3c 100644 --- a/readthedocs.yml +++ b/readthedocs.yml @@ -3,7 +3,14 @@ version: 2 build: os: "ubuntu-22.04" tools: - python: "mambaforge-4.10" + python: "mambaforge-latest" + jobs: + post_checkout: + - git fetch --unshallow || true + +sphinx: + configuration: docs/source/conf.py + builder: dirhtml conda: environment: docs/environment.yml