From 9d2b4de017aed592a8bf2a773930d12cf19cd7ea Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 31 Jul 2023 10:25:41 -0600 Subject: [PATCH 001/120] REL: Begin NumPy 1.26.0 development From f2b82bf0c9148b930856bed4b94407933c41b7db Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 31 Jul 2023 12:07:13 -0600 Subject: [PATCH 002/120] MAINT: Prepare 1.26.x branch for development (#24305) [skip ci] --- doc/source/release.rst | 1 + doc/source/release/1.26.0-notes.rst | 12 ++++++++++++ numpy/core/code_generators/cversions.txt | 1 + pavement.py | 2 +- 4 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 doc/source/release/1.26.0-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index ef1d35a74d2b..cef3a6d0510c 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 3 + 1.26.0 1.25.2 1.25.1 1.25.0 diff --git a/doc/source/release/1.26.0-notes.rst b/doc/source/release/1.26.0-notes.rst new file mode 100644 index 000000000000..c160706d6878 --- /dev/null +++ b/doc/source/release/1.26.0-notes.rst @@ -0,0 +1,12 @@ +.. currentmodule:: numpy + +========================== +NumPy 1.26.0 Release Notes +========================== + +The NumPy 1.26.0 release is a continuation of the 1.25.x release cycle, but +with the distutils based build replaced by meson in order to work with Python +3.12. + +The Python versions supported in this release are 3.9-3.12. + diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt index e52193d7a462..651d0fe6259e 100644 --- a/numpy/core/code_generators/cversions.txt +++ b/numpy/core/code_generators/cversions.txt @@ -70,4 +70,5 @@ 0x00000010 = 04a7bf1e65350926a0e528798da263c0 # Version 17 (NumPy 1.25) No actual change. +# Version 17 (NumPy 1.26) No change. 0x00000011 = ca1aebdad799358149567d9d93cbca09 diff --git a/pavement.py b/pavement.py index 9704b9ce067c..585194f39647 100644 --- a/pavement.py +++ b/pavement.py @@ -38,7 +38,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/1.25.2-notes.rst' +RELEASE_NOTES = 'doc/source/release/1.26.0-notes.rst' #------------------------------------------------------- From b46eef066dd672f3c3e2db88eba5b75a5acfd882 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 3 Aug 2023 02:01:19 -0600 Subject: [PATCH 003/120] MAINT: Massive update of files from main for numpy 1.26 (#24308) Rather than do the meson fixups bit by bit, The following files are simply checked out from the main branch, we can add stuff back if needed. The main missing bit is 32 bit wheels on Windows, we may need to just drop them. The numpy/f2py and numpy/random directories were copied wholesale. Note that the SIMD and 32 bit linux problems have also arrived and will need to be fixed along with the main branch. The test_mem_policy.py::test_new_policy test also fails, but no longer runs because it is so slow. 
- .circleci/config.yml - .cirrus.star - .github/workflows/build_test.yml - .github/workflows/emscripten.yml - .github/workflows/linux_meson.yml - .github/workflows/linux_musl.yml - .github/workflows/wheels.yml - .github/workflows/windows_meson.yml - .spin/cmds.py - azure-pipelines.yml - azure-steps-windows.yml - build_requirements.txt - meson_options.txt - numpy/_utils/__init__.py - numpy/_utils/_convertions.py - numpy/core/tests/test_mem_policy.py - numpy/core/tests/test_umath.py - numpy/f2py/capi_maps.py - numpy/f2py/cfuncs.py - numpy/f2py/tests/test_return_integer.py - numpy/f2py/tests/test_return_real.py - numpy/f2py/tests/util.py - numpy/meson.build - numpy/random/_examples/cython/setup.py - numpy/random/_generator.pyx - numpy/random/_mt19937.pyx - numpy/random/_pcg64.pyx - numpy/random/_philox.pyx - numpy/random/_sfc64.pyx - numpy/random/bit_generator.pyx - numpy/random/meson.build - numpy/random/mtrand.pyx - numpy/random/src/distributions/distributions.c - numpy/random/src/mt19937/randomkit.h - numpy/random/src/pcg64/pcg64.orig.h - numpy/random/tests/test_extending.py - numpy/random/tests/test_generator_mt19937.py - numpy/random/tests/test_generator_mt19937_regressions.py - numpy/random/tests/test_random.py - numpy/random/tests/test_randomstate.py - pyproject.toml - pyproject.toml.setuppy - test_requirements.txt - tools/ci/cirrus_macosx_arm64.yml - tools/ci/cirrus_wheels.yml - tools/openblas_support.py - tools/travis-before-install.sh - tools/travis-test.sh - tools/wheels/repair_windows.sh --- .circleci/config.yml | 3 +- .cirrus.star | 26 +- .github/workflows/build_test.yml | 36 +- .github/workflows/emscripten.yml | 7 +- .github/workflows/linux_meson.yml | 21 +- .github/workflows/linux_musl.yml | 4 +- .github/workflows/wheels.yml | 28 +- .github/workflows/windows_meson.yml | 41 +- .spin/cmds.py | 456 ++++++++++++++++++ azure-pipelines.yml | 66 +-- azure-steps-windows.yml | 81 ++-- build_requirements.txt | 2 +- meson_options.txt | 2 + numpy/_utils/__init__.py | 2 + numpy/_utils/_convertions.py | 18 + numpy/core/tests/test_mem_policy.py | 22 +- numpy/core/tests/test_umath.py | 9 + numpy/f2py/capi_maps.py | 108 ++--- numpy/f2py/cfuncs.py | 21 +- numpy/f2py/tests/test_return_integer.py | 16 +- numpy/f2py/tests/test_return_real.py | 16 +- numpy/f2py/tests/util.py | 6 +- numpy/meson.build | 140 ++++-- numpy/random/_examples/cython/setup.py | 46 -- numpy/random/_generator.pyx | 6 +- numpy/random/_mt19937.pyx | 2 + numpy/random/_pcg64.pyx | 2 + numpy/random/_philox.pyx | 16 +- numpy/random/_sfc64.pyx | 2 + numpy/random/bit_generator.pyx | 2 + numpy/random/meson.build | 1 - numpy/random/mtrand.pyx | 2 +- .../random/src/distributions/distributions.c | 22 +- numpy/random/src/mt19937/randomkit.h | 2 +- numpy/random/src/pcg64/pcg64.orig.h | 2 +- numpy/random/tests/test_extending.py | 65 ++- numpy/random/tests/test_generator_mt19937.py | 2 +- .../test_generator_mt19937_regressions.py | 11 + numpy/random/tests/test_random.py | 2 +- numpy/random/tests/test_randomstate.py | 2 +- pyproject.toml | 177 +++---- pyproject.toml.setuppy | 9 + test_requirements.txt | 3 +- tools/ci/cirrus_macosx_arm64.yml | 6 +- tools/ci/cirrus_wheels.yml | 16 +- tools/openblas_support.py | 34 +- tools/travis-before-install.sh | 17 +- tools/travis-test.sh | 26 +- tools/wheels/repair_windows.sh | 32 ++ 49 files changed, 1101 insertions(+), 537 deletions(-) create mode 100644 .spin/cmds.py create mode 100644 numpy/_utils/_convertions.py delete mode 100644 numpy/random/_examples/cython/setup.py create mode 100644 
pyproject.toml.setuppy create mode 100644 tools/wheels/repair_windows.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index f5bd44798965..c6b9a6c3ea28 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -61,10 +61,9 @@ jobs: name: build numpy command: | . venv/bin/activate - pip install --progress-bar=off --upgrade pip 'setuptools<49.2.0' pip install --progress-bar=off -r test_requirements.txt pip install --progress-bar=off -r doc_requirements.txt - pip install . + pip install . --config-settings=setup-args="-Dallow-noblas=true" - run: name: create release notes diff --git a/.cirrus.star b/.cirrus.star index 6b2203872394..6f331a7c5b66 100644 --- a/.cirrus.star +++ b/.cirrus.star @@ -26,14 +26,26 @@ def main(ctx): SHA = env.get("CIRRUS_CHANGE_IN_REPO") url = "https://api.github.com/repos/numpy/numpy/git/commits/" + SHA dct = http.get(url).json() - # if "[wheel build]" in dct["message"]: - # return fs.read("ci/cirrus_wheels.yml") - if "[skip cirrus]" in dct["message"] or "[skip ci]" in dct["message"]: + commit_msg = dct["message"] + if "[skip cirrus]" in commit_msg or "[skip ci]" in commit_msg: return [] - # add extra jobs to the cirrus run by += adding to config - config = fs.read("tools/ci/cirrus_wheels.yml") - config += fs.read("tools/ci/cirrus_macosx_arm64.yml") + wheel = False + labels = env.get("CIRRUS_PR_LABELS", "") + pr_number = env.get("CIRRUS_PR", "-1") + tag = env.get("CIRRUS_TAG", "") - return config + if "[wheel build]" in commit_msg: + wheel = True + + # if int(pr_number) > 0 and ("14 - Release" in labels or "36 - Build" in labels): + # wheel = True + + if tag.startswith("v") and "dev0" not in tag: + wheel = True + + if wheel: + return fs.read("tools/ci/cirrus_wheels.yml") + + return fs.read("tools/ci/cirrus_macosx_arm64.yml") diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index c6a592bd00f0..928018b13905 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -66,7 +66,7 @@ jobs: if: github.event_name != 'push' strategy: matrix: - python-version: ["3.9", "3.10", "3.11", "pypy3.9-v7.3.11"] + python-version: ["3.9", "3.10", "3.11", "pypy3.9-v7.3.12"] env: EXPECT_CPU_FEATURES: "SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL" steps: @@ -96,7 +96,7 @@ jobs: # for add-apt-repository sudo apt install software-properties-common -y sudo add-apt-repository ppa:deadsnakes/ppa -y - sudo apt install python3.9-dev -y + sudo apt install python3.9-dev ninja-build -y sudo ln -s /usr/bin/python3.9 /usr/bin/pythonx pythonx -m pip install --upgrade pip setuptools wheel pythonx -m pip install -r test_requirements.txt @@ -195,6 +195,7 @@ jobs: - uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: python-version: ${{ env.PYTHON_VERSION }} + - uses: ./.github/actions blas64: @@ -287,26 +288,6 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - uses: ./.github/actions - numpy2_flag: - needs: [smoke_test] - runs-on: ubuntu-latest - if: github.event_name != 'push' - env: - # Test for numpy-2.0 feature-flagged behavior. 
- NPY_NUMPY_2_BEHAVIOR: 1 - # Using the future "weak" state doesn't pass tests - # currently unfortunately - NPY_PROMOTION_STATE: legacy - steps: - - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - with: - submodules: recursive - fetch-depth: 0 - - uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # v4.6.1 - with: - python-version: ${{ env.PYTHON_VERSION }} - - uses: ./.github/actions - no_openblas: needs: [smoke_test] runs-on: ubuntu-latest @@ -361,11 +342,10 @@ jobs: sudo apt update sudo apt install -y gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf gfortran-arm-linux-gnueabihf - # Keep the `test_requirements.txt` dependency-subset synced - docker run --name the_container --interactive -v /:/host arm32v7/ubuntu:22.04 /bin/bash -c " + docker run --name the_container --interactive -v /:/host -v $(pwd):/numpy arm32v7/ubuntu:22.04 /bin/bash -c " apt update && apt install -y git python3 python3-dev python3-pip && - python3 -m pip install cython==0.29.34 setuptools\<49.2.0 hypothesis==6.23.3 pytest==6.2.5 'typing_extensions>=4.2.0' && + python3 -m pip install -r /numpy/test_requirements.txt ln -s /host/lib64 /lib64 && ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && ln -s /host/usr/arm-linux-gnueabihf /usr/arm-linux-gnueabihf && @@ -443,7 +423,7 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/751535/sde-external-9.14.0-2022-10-25-lin.tar.xz + curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/784319/sde-external-9.24.0-2023-07-13-lin.tar.xz mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde - name: Install dependencies @@ -454,14 +434,14 @@ jobs: run: | export CC=/usr/bin/gcc-12 export CXX=/usr/bin/g++-12 - python -m pip install -e . + python setup.py develop - name: Show config run: | python -c "import numpy as np; np.show_config()" # Run only a few tests, running everything in an SDE takes a long time # Using pytest directly, unable to use python runtests.py -n -t ... - # Disabled running in the SDE because of an SDE bug - name: Run linalg/ufunc/umath tests run: | python -m pytest numpy/core/tests/test_umath* numpy/core/tests/test_ufunc.py numpy/linalg/tests/test_* + # Can't run on SDE just yet: see https://github.com/numpy/numpy/issues/23545#issuecomment-1659047365 #sde -spr -- python -m pytest numpy/core/tests/test_umath* numpy/core/tests/test_ufunc.py numpy/linalg/tests/test_* diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index b33872d025eb..9a13dba61f61 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -55,7 +55,11 @@ jobs: run: pip install "pydantic<2" pyodide-build==$PYODIDE_VERSION - name: Build - run: CFLAGS=-g2 LDFLAGS=-g2 pyodide build + run: | + # Pyodide is still in the process of adding better/easier support for + # non-setup.py based builds. 
+ cp pyproject.toml.setuppy pyproject.toml + CFLAGS=-g2 LDFLAGS=-g2 pyodide build - name: set up node uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 @@ -67,6 +71,7 @@ jobs: pyodide venv .venv-pyodide source .venv-pyodide/bin/activate pip install dist/*.whl + python -c "import sys; print(sys.platform)" pip install -r test_requirements.txt - name: Test run: | diff --git a/.github/workflows/linux_meson.yml b/.github/workflows/linux_meson.yml index c66a8572c1b2..8ef0e5752119 100644 --- a/.github/workflows/linux_meson.yml +++ b/.github/workflows/linux_meson.yml @@ -24,6 +24,12 @@ jobs: meson_spin: if: "github.repository == 'numpy/numpy'" runs-on: ubuntu-latest + strategy: + matrix: + USE_NIGHTLY_OPENBLAS: [false, true] + env: + USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} + name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: @@ -35,7 +41,15 @@ jobs: - name: Install dependencies run: | pip install -r build_requirements.txt - sudo apt-get install -y libopenblas-serial-dev + # Install OpenBLAS + set -xe + if [[ $USE_NIGHTLY_OPENBLAS == "true" ]]; then + target=$(python tools/openblas_support.py --nightly) + else + target=$(python tools/openblas_support.py) + fi + sudo cp -r $target/lib/* /usr/lib + sudo cp $target/include/* /usr/include - name: Build shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: @@ -52,6 +66,7 @@ jobs: shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: TERM: xterm-256color + LD_LIBRARY_PATH: "/usr/local/lib/" # to find libopenblas.so.0 run: | - pip install pytest hypothesis typing_extensions - spin test + pip install pytest pytest-xdist hypothesis typing_extensions + spin test -j auto diff --git a/.github/workflows/linux_musl.yml b/.github/workflows/linux_musl.yml index 7d90c20edebf..d7d9258ce255 100644 --- a/.github/workflows/linux_musl.yml +++ b/.github/workflows/linux_musl.yml @@ -59,8 +59,8 @@ jobs: RUNNER_OS=Linux sh tools/wheels/cibw_before_build.sh . 
pip install -r build_requirements.txt - pip install pytest hypothesis typing_extensions + pip install pytest pytest-xdist hypothesis typing_extensions # use meson to build and test spin build - spin test + spin test -j auto diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f5b4fb03d61f..5a33200089ab 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -21,7 +21,7 @@ on: # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) # │ │ │ │ │ - - cron: "42 1 * * 4" + - cron: "42 2 * * SUN,WED" push: pull_request: types: [labeled, opened, synchronize, reopened] @@ -78,8 +78,7 @@ jobs: - [ubuntu-20.04, musllinux_x86_64] - [macos-12, macosx_x86_64] - [windows-2019, win_amd64] - - [windows-2019, win32] - python: ["cp39", "cp310", "cp311", "pp39"] + python: ["cp39", "cp310", "cp311", "cp312"] # "pp39" exclude: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32] @@ -101,25 +100,20 @@ jobs: # https://github.com/actions/checkout/issues/338 fetch-depth: 0 + - name: pkg-config-for-win + run: | + choco install -y --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + if: runner.os == 'windows' + # Used to push the built wheels - uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: python-version: "3.x" - # We need rtools 4.0 to have 32 bit support on windows - - if: runner.os == 'windows' - uses: r-windows/install-rtools@ca1090c210479e995c03019a22b9798cdf57073a # main - - - name: setup rtools for 32-bit - run: | - echo "PLAT=i686" >> $env:GITHUB_ENV - echo "PATH=c:\rtools40\mingw32\bin;$env:PATH" >> $env:GITHUB_ENV - gfortran --version - if: ${{ matrix.buildplat[1] == 'win32' }} - - name: Build wheels uses: pypa/cibuildwheel@f21bb8376a051ffb6cb5604b28ccaef7b90e8ab7 # v2.14.1 env: + CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 @@ -192,12 +186,14 @@ jobs: python-version: "3.9" - name: Build sdist run: | - python setup.py sdist + python -m pip install -U pip build + python -m build --sdist -Csetup-args=-Dallow-noblas=true - name: Test the sdist run: | # TODO: Don't run test suite, and instead build wheels from sdist # Depends on pypa/cibuildwheel#1020 - python -m pip install dist/*.gz + python -m pip install dist/*.gz -Csetup-args=-Dallow-noblas=true + pip install ninja pip install -r test_requirements.txt cd .. 
# Can't import numpy within numpy src directory python -c "import numpy, sys; print(numpy.__version__); sys.exit(numpy.test() is False)" diff --git a/.github/workflows/windows_meson.yml b/.github/workflows/windows_meson.yml index eac0f7e640be..97dfa41eaa2c 100644 --- a/.github/workflows/windows_meson.yml +++ b/.github/workflows/windows_meson.yml @@ -17,10 +17,10 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - meson: - name: Meson windows build/test + msvc_64bit_python_openblas: + name: MSVC, x86-64, LP64 OpenBLAS runs-on: windows-2019 - # if: "github.repository == 'numpy/numpy'" + if: "github.repository == 'numpy/numpy'" steps: - name: Checkout uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 @@ -86,3 +86,38 @@ jobs: echo "LASTEXITCODE is '$LASTEXITCODE'" python -c "import numpy, sys; sys.exit(numpy.test(verbose=3) is False)" echo "LASTEXITCODE is '$LASTEXITCODE'" + + msvc_32bit_python_openblas: + name: MSVC, 32-bit Python, no BLAS + runs-on: windows-2019 + if: "github.repository == 'numpy/numpy'" + steps: + - name: Checkout + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + with: + submodules: recursive + fetch-depth: 0 + + - name: Setup Python (32-bit) + uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 + with: + python-version: '3.10' + architecture: 'x86' + + - name: Setup MSVC (32-bit) + uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 + with: + architecture: 'x86' + + - name: Build and install + run: | + python -m pip install . -v -Ccompile-args="-j2" -Csetup-args="-Dallow-noblas=true" + + - name: Install test dependencies + run: | + python -m pip install -r test_requirements.txt + + - name: Run test suite (fast) + run: | + cd tools + python -m pytest --pyargs numpy -m "not slow" -n2 diff --git a/.spin/cmds.py b/.spin/cmds.py new file mode 100644 index 000000000000..05e619615e58 --- /dev/null +++ b/.spin/cmds.py @@ -0,0 +1,456 @@ +import os +import shutil +import sys +import argparse +import tempfile +import pathlib +import shutil + +import click +from spin.cmds import meson +from spin import util + + +@click.command() +@click.argument("sphinx_target", default="html") +@click.option( + "--clean", is_flag=True, + default=False, + help="Clean previously built docs before building" +) +@click.option( + "--build/--no-build", + "first_build", + default=True, + help="Build numpy before generating docs", +) +@click.option( + '--jobs', '-j', + metavar='N_JOBS', + default="auto", + help="Number of parallel build jobs" +) +@click.option( + "--install-deps/--no-install-deps", + default=True, + help="Install dependencies before building" +) +@click.pass_context +def docs(ctx, sphinx_target, clean, first_build, jobs, install_deps): + """📖 Build Sphinx documentation + + By default, SPHINXOPTS="-W", raising errors on warnings. 
+ To build without raising on warnings: + + SPHINXOPTS="" spin docs + + To list all Sphinx targets: + + spin docs targets + + To build another Sphinx target: + + spin docs TARGET + + """ + if sphinx_target not in ('targets', 'help'): + if install_deps: + util.run(['pip', 'install', '-q', '-r', 'doc_requirements.txt']) + + meson.docs.ignore_unknown_options = True + del ctx.params['install_deps'] + ctx.forward(meson.docs) + + +@click.command() +@click.argument("pytest_args", nargs=-1) +@click.option( + "-m", + "markexpr", + metavar='MARKEXPR', + default="not slow", + help="Run tests with the given markers" +) +@click.option( + "-j", + "n_jobs", + metavar='N_JOBS', + default="1", + help=("Number of parallel jobs for testing. " + "Can be set to `auto` to use all cores.") +) +@click.option( + "--tests", "-t", + metavar='TESTS', + help=(""" +Which tests to run. Can be a module, function, class, or method: + + \b + numpy.random + numpy.random.tests.test_generator_mt19937 + numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric + numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric::test_edge_cases + \b +""") +) +@click.option( + '--verbose', '-v', is_flag=True, default=False +) +@click.pass_context +def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose): + """🔧 Run tests + + PYTEST_ARGS are passed through directly to pytest, e.g.: + + spin test -- --pdb + + To run tests on a directory or file: + + \b + spin test numpy/linalg + spin test numpy/linalg/tests/test_linalg.py + + To report the durations of the N slowest tests: + + spin test -- --durations=N + + To run tests that match a given pattern: + + \b + spin test -- -k "geometric" + spin test -- -k "geometric and not rgeometric" + + By default, spin will run `-m 'not slow'`. To run the full test suite, use + `spin -m full` + + For more, see `pytest --help`. 
+ """ # noqa: E501 + if (not pytest_args) and (not tests): + pytest_args = ('numpy',) + + if '-m' not in pytest_args: + if markexpr != "full": + pytest_args = ('-m', markexpr) + pytest_args + + if (n_jobs != "1") and ('-n' not in pytest_args): + pytest_args = ('-n', str(n_jobs)) + pytest_args + + if tests and not ('--pyargs' in pytest_args): + pytest_args = ('--pyargs', tests) + pytest_args + + if verbose: + pytest_args = ('-v',) + pytest_args + + ctx.params['pytest_args'] = pytest_args + + for extra_param in ('markexpr', 'n_jobs', 'tests', 'verbose'): + del ctx.params[extra_param] + ctx.forward(meson.test) + + +@click.command() +@click.option('--code', '-c', help='Python program passed in as a string') +@click.argument('gdb_args', nargs=-1) +def gdb(code, gdb_args): + """👾 Execute a Python snippet with GDB + + spin gdb -c 'import numpy as np; print(np.__version__)' + + Or pass arguments to gdb: + + spin gdb -c 'import numpy as np; print(np.__version__)' -- --fullname + + Or run another program, they way you normally would with gdb: + + \b + spin gdb ls + spin gdb -- --args ls -al + + You can also run Python programs: + + \b + spin gdb my_tests.py + spin gdb -- my_tests.py --mytest-flag + """ + meson._set_pythonpath() + gdb_args = list(gdb_args) + + if gdb_args and gdb_args[0].endswith('.py'): + gdb_args = ['--args', sys.executable] + gdb_args + + if sys.version_info[:2] >= (3, 11): + PYTHON_FLAGS = ['-P'] + code_prefix = '' + else: + PYTHON_FLAGS = [] + code_prefix = 'import sys; sys.path.pop(0); ' + + if code: + PYTHON_ARGS = ['-c', code_prefix + code] + gdb_args += ['--args', sys.executable] + PYTHON_FLAGS + PYTHON_ARGS + + gdb_cmd = ['gdb', '-ex', 'set detach-on-fork on'] + gdb_args + util.run(gdb_cmd, replace=True) + + +# From scipy: benchmarks/benchmarks/common.py +def _set_mem_rlimit(max_mem=None): + """ + Set address space rlimit + """ + import resource + import psutil + + mem = psutil.virtual_memory() + + if max_mem is None: + max_mem = int(mem.total * 0.7) + cur_limit = resource.getrlimit(resource.RLIMIT_AS) + if cur_limit[0] > 0: + max_mem = min(max_mem, cur_limit[0]) + + try: + resource.setrlimit(resource.RLIMIT_AS, (max_mem, cur_limit[1])) + except ValueError: + # on macOS may raise: current limit exceeds maximum limit + pass + + +def _commit_to_sha(commit): + p = util.run(['git', 'rev-parse', commit], output=False, echo=False) + if p.returncode != 0: + raise( + click.ClickException( + f'Could not find SHA matching commit `{commit}`' + ) + ) + + return p.stdout.decode('ascii').strip() + + +def _dirty_git_working_dir(): + # Changes to the working directory + p0 = util.run(['git', 'diff-files', '--quiet']) + + # Staged changes + p1 = util.run(['git', 'diff-index', '--quiet', '--cached', 'HEAD']) + + return (p0.returncode != 0 or p1.returncode != 0) + + +def _run_asv(cmd): + # Always use ccache, if installed + PATH = os.environ['PATH'] + EXTRA_PATH = os.pathsep.join([ + '/usr/lib/ccache', '/usr/lib/f90cache', + '/usr/local/lib/ccache', '/usr/local/lib/f90cache' + ]) + env = os.environ + env['PATH'] = f'EXTRA_PATH:{PATH}' + + # Control BLAS/LAPACK threads + env['OPENBLAS_NUM_THREADS'] = '1' + env['MKL_NUM_THREADS'] = '1' + + # Limit memory usage + try: + _set_mem_rlimit() + except (ImportError, RuntimeError): + pass + + try: + util.run(cmd, cwd='benchmarks', env=env, sys_exit=False) + except FileNotFoundError: + click.secho(( + "Cannot find `asv`. 
" + "Please install Airspeed Velocity:\n\n" + " https://asv.readthedocs.io/en/latest/installing.html\n" + "\n" + "Depending on your system, one of the following should work:\n\n" + " pip install asv\n" + " conda install asv\n" + ), fg="red") + sys.exit(1) + + +@click.command() +@click.option( + '--tests', '-t', + default=None, metavar='TESTS', multiple=True, + help="Which tests to run" +) +@click.option( + '--compare', '-c', + is_flag=True, + default=False, + help="Compare benchmarks between the current branch and main " + "(unless other branches specified). " + "The benchmarks are each executed in a new isolated " + "environment." +) +@click.option( + '--verbose', '-v', is_flag=True, default=False +) +@click.argument( + 'commits', metavar='', + required=False, + nargs=-1 +) +@click.pass_context +def bench(ctx, tests, compare, verbose, commits): + """🏋 Run benchmarks. + + \b + Examples: + + \b + $ spin bench -t bench_lib + $ spin bench -t bench_random.Random + $ spin bench -t Random -t Shuffle + + Two benchmark runs can be compared. + By default, `HEAD` is compared to `main`. + You can also specify the branches/commits to compare: + + \b + $ spin bench --compare + $ spin bench --compare main + $ spin bench --compare main HEAD + + You can also choose which benchmarks to run in comparison mode: + + $ spin bench -t Random --compare + """ + if not commits: + commits = ('main', 'HEAD') + elif len(commits) == 1: + commits = commits + ('HEAD',) + elif len(commits) > 2: + raise click.ClickException( + 'Need a maximum of two revisions to compare' + ) + + bench_args = [] + for t in tests: + bench_args += ['--bench', t] + + if verbose: + bench_args = ['-v'] + bench_args + + if not compare: + # No comparison requested; we build and benchmark the current version + + click.secho( + "Invoking `build` prior to running benchmarks:", + bold=True, fg="bright_green" + ) + ctx.invoke(meson.build) + + meson._set_pythonpath() + + p = util.run( + ['python', '-c', 'import numpy as np; print(np.__version__)'], + cwd='benchmarks', + echo=False, + output=False + ) + os.chdir('..') + + np_ver = p.stdout.strip().decode('ascii') + click.secho( + f'Running benchmarks on NumPy {np_ver}', + bold=True, fg="bright_green" + ) + cmd = [ + 'asv', 'run', '--dry-run', '--show-stderr', '--python=same' + ] + bench_args + + _run_asv(cmd) + + else: + # Benchmark comparison + + # Ensure that we don't have uncommited changes + commit_a, commit_b = [_commit_to_sha(c) for c in commits] + + if commit_b == 'HEAD': + if _dirty_git_working_dir(): + click.secho( + "WARNING: you have uncommitted changes --- " + "these will NOT be benchmarked!", + fg="red" + ) + + cmd_compare = [ + 'asv', 'continuous', '--factor', '1.05', + ] + bench_args + [commit_a, commit_b] + + _run_asv(cmd_compare) + + +@click.command(context_settings={ + 'ignore_unknown_options': True +}) +@click.argument("python_args", metavar='', nargs=-1) +@click.pass_context +def python(ctx, python_args): + """🐍 Launch Python shell with PYTHONPATH set + + OPTIONS are passed through directly to Python, e.g.: + + spin python -c 'import sys; print(sys.path)' + """ + env = os.environ + env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') + ctx.invoke(meson.build) + ctx.forward(meson.python) + + +@click.command(context_settings={ + 'ignore_unknown_options': True +}) +@click.argument("ipython_args", metavar='', nargs=-1) +@click.pass_context +def ipython(ctx, ipython_args): + """💻 Launch IPython shell with PYTHONPATH set + + OPTIONS are passed through directly to IPython, e.g.: + + spin 
ipython -i myscript.py + """ + env = os.environ + env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') + + ctx.invoke(meson.build) + + ppath = meson._set_pythonpath() + + print(f'💻 Launching IPython with PYTHONPATH="{ppath}"') + preimport = (r"import numpy as np; " + r"print(f'\nPreimported NumPy {np.__version__} as np')") + util.run(["ipython", "--ignore-cwd", + f"--TerminalIPythonApp.exec_lines={preimport}"] + + list(ipython_args)) + + +@click.command(context_settings={"ignore_unknown_options": True}) +@click.argument("args", nargs=-1) +@click.pass_context +def run(ctx, args): + """🏁 Run a shell command with PYTHONPATH set + + \b + spin run make + spin run 'echo $PYTHONPATH' + spin run python -c 'import sys; del sys.path[0]; import mypkg' + + If you'd like to expand shell variables, like `$PYTHONPATH` in the example + above, you need to provide a single, quoted command to `run`: + + spin run 'echo $SHELL && echo $PWD' + + On Windows, all shell commands are run via Bash. + Install Git for Windows if you don't have Bash already. + """ + ctx.invoke(meson.build) + ctx.forward(meson.run) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index e99ee10023ae..d14e73b27edc 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -32,42 +32,6 @@ stages: - bash: echo "##vso[task.setvariable variable=start_main;isOutput=true]$RET" name: result -- stage: InitialTests - condition: and(succeeded(), eq(dependencies.Check.outputs['Skip.result.start_main'], 'true')) - dependsOn: Check - jobs: - - # Native build is based on gcc flag `-march=native` - - job: Linux_baseline_native - pool: - vmImage: 'ubuntu-20.04' - steps: - - script: | - git submodule update --init - displayName: 'Fetch submodules' - - script: | - if ! `gcc 2>/dev/null`; then - sudo apt install gcc - fi - sudo add-apt-repository ppa:deadsnakes/ppa -y - sudo apt install python3.9 - sudo apt install python3.9-dev - sudo apt install python3.9-distutils - # python3 has no setuptools, so install one to get us going - python3.9 -m pip install --user --upgrade pip 'setuptools<49.2.0' - python3.9 -m pip install --user -r test_requirements.txt - displayName: 'install python/requirements' - - script: | - python3.9 runtests.py --show-build-log --cpu-baseline=native --cpu-dispatch=none \ - --debug-info --mode=full -- -rsx --junitxml=junit/test-results.xml - displayName: 'Run native baseline Build / Tests' - - task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - failTaskOnFailedTests: true - testRunTitle: 'Publish test results for baseline/native' - - stage: ComprehensiveTests condition: and(succeeded(), eq(dependencies.Check.outputs['Skip.result.start_main'], 'true')) dependsOn: Check @@ -101,6 +65,7 @@ stages: git submodule update --init displayName: 'Fetch submodules' - script: | + # yum does not have a ninja package, so use the PyPI one docker run -v $(pwd):/numpy -e CFLAGS="-msse2 -std=c99 -UNDEBUG" \ -e F77=gfortran-5 -e F90=gfortran-5 quay.io/pypa/manylinux2014_i686 \ /bin/bash -xc " \ @@ -111,19 +76,13 @@ stages: target=\$(python3 tools/openblas_support.py) && \ cp -r \$target/lib/* /usr/lib && \ cp \$target/include/* /usr/include && \ + python3 -m pip install ninja && \ python3 -m pip install -r test_requirements.txt && \ echo CFLAGS \$CFLAGS && \ python3 -m pip install -v . 
&& \ - python3 runtests.py -n --debug-info --mode=full -- -rsx --junitxml=junit/test-results.xml && \ - python3 -m pip install threadpoolctl && \ - python3 tools/openblas_support.py --check_version" + cd tools && \ + python3 -m pytest --pyargs numpy" displayName: 'Run 32-bit manylinux2014 Docker Build / Tests' - - task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - failTaskOnFailedTests: true - testRunTitle: 'Publish test results for Python 3.9-32 bit full Linux' - job: macOS @@ -182,7 +141,7 @@ stages: - script: | python -m pip install -r test_requirements.txt # Don't use doc_requirements.txt since that messes up tests - python -m pip install vulture sphinx==4.3.0 numpydoc==1.4.0 + python -m pip install vulture sphinx==4.3.0 numpydoc==1.4.0 ninja displayName: 'Install dependencies; some are optional to avoid test skips' - script: /bin/bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/ | grep 'unreachable'" displayName: 'Check for unreachable code paths in Python modules' @@ -249,36 +208,27 @@ stages: pool: vmImage: 'windows-2019' strategy: - maxParallel: 5 + maxParallel: 3 matrix: - Python39-32bit-full: - PYTHON_VERSION: '3.9' - PYTHON_ARCH: 'x86' - TEST_MODE: full - BITS: 32 Python310-64bit-fast: PYTHON_VERSION: '3.10' PYTHON_ARCH: 'x64' TEST_MODE: fast BITS: 64 - Python311-32bit-fast: - PYTHON_VERSION: '3.11' - PYTHON_ARCH: 'x86' - TEST_MODE: fast - BITS: 32 Python311-64bit-full: PYTHON_VERSION: '3.11' PYTHON_ARCH: 'x64' TEST_MODE: full BITS: 64 NPY_USE_BLAS_ILP64: '1' - PyPy39-64bit-fast: PYTHON_VERSION: 'pypy3.9' PYTHON_ARCH: 'x64' TEST_MODE: fast BITS: 64 NPY_USE_BLAS_ILP64: '1' + # Broken - it builds but _multiarray_umath doesn't import - needs investigating + DISABLE_BLAS: '1' steps: - template: azure-steps-windows.yml diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml index a5404425cc12..e09663cd7fbc 100644 --- a/azure-steps-windows.yml +++ b/azure-steps-windows.yml @@ -14,67 +14,62 @@ steps: displayName: 'Install dependencies; some are optional to avoid test skips' - powershell: | - # rtools 42+ does not support 32 bits builds. - choco install --confirm --no-progress --allow-downgrade rtools --version=4.0.0.20220206 - echo "##vso[task.setvariable variable=RTOOLS40_HOME]c:\rtools40" - displayName: 'Install rtools' + # Note that rtools 42+ does not support 32 bits builds. We dropped testing + # those, but if there's a need to go back on that, use version 4.0.0.20220206 + choco install --confirm --no-progress --allow-downgrade rtools --version=4.3.5550 + choco install unzip -y + choco install -y --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + choco install ninja + echo "##vso[task.setvariable variable=RTOOLS43_HOME]c:\rtools43" + displayName: 'Install utilities' - powershell: | $ErrorActionPreference = "Stop" - # Download and get the path to "openblas". We cannot copy it - # to $PYTHON_EXE's directory since that is on a different drive which - # mingw does not like. 
Instead copy it to a directory and set OPENBLAS, - # since OPENBLAS will be picked up by the openblas discovery - $target = $(python tools/openblas_support.py) - mkdir openblas - echo "Copying $target to openblas/" - cp -r $target/* openblas/ - $env:OPENBLAS = $target + mkdir C:/opt/openblas/openblas_dll + mkdir C:/opt/32/lib/pkgconfig + mkdir C:/opt/64/lib/pkgconfig + $target=$(python -c "import tools.openblas_support as obs; plat=obs.get_plat(); ilp64=obs.get_ilp64(); target=f'openblas_{plat}.zip'; obs.download_openblas(target, plat, ilp64);print(target)") + unzip -o -d c:/opt/ $target + echo "##vso[task.setvariable variable=PKG_CONFIG_PATH]c:/opt/64/lib/pkgconfig" + copy C:/opt/64/bin/*.dll C:/opt/openblas/openblas_dll displayName: 'Download / Install OpenBLAS' -# NOTE: for Windows builds it seems much more tractable to use runtests.py -# vs. manual setup.py and then runtests.py for testing only - - powershell: | - ls openblas - If ($(BITS) -eq 32) { - $env:CFLAGS = "-m32" - $env:LDFLAGS = "-m32" - $env:PATH = "$env:RTOOLS40_HOME\\mingw32\\bin;$env:PATH" - } - Else - { - $env:PATH = "$env:RTOOLS40_HOME\\mingw64\\bin;$env:PATH" + # Note: ensure the `pip install .` command remains the last one here, to + # avoid "green on failure" issues + python -c "from tools import openblas_support; openblas_support.make_init('numpy')" + If ( Test-Path env:DISABLE_BLAS ) { + python -m pip install . -v -Csetup-args="--vsenv" -Csetup-args="-Dblas=none" -Csetup-args="-Dlapack=none" -Csetup-args="-Dallow-noblas=true" } - If ( Test-Path env:NPY_USE_BLAS_ILP64 ) { - $env:OPENBLAS64_ = "openblas" + elseif ( Test-Path env:NPY_USE_BLAS_ILP64 ) { + python -m pip install . -v -Csetup-args="--vsenv" -Csetup-args="-Duse-ilp64=true" -Csetup-args="-Dblas-symbol-suffix=64_" } else { - $env:OPENBLAS = "openblas" - } - python -c "from tools import openblas_support; openblas_support.make_init('numpy')" - python -m pip wheel -v -v -v --no-build-isolation --no-use-pep517 --wheel-dir=dist . - - ls dist -r | Foreach-Object { - python -m pip install $_.FullName + python -m pip install . 
-v -Csetup-args="--vsenv" } displayName: 'Build NumPy' +- powershell: | + # copy from c:/opt/openblas/openblas_dll to numpy/.libs to ensure it can + # get loaded when numpy is imported (no RPATH on Windows) + $target = $(python -c "import sysconfig; print(sysconfig.get_path('platlib'))") + mkdir $target/numpy/.libs + copy C:/opt/openblas/openblas_dll/*.dll $target/numpy/.libs + displayName: 'Copy OpenBLAS DLL to site-packages' + - script: | python -m pip install threadpoolctl python tools/openblas_support.py --check_version displayName: 'Check OpenBLAS version' - powershell: | - If ($(BITS) -eq 32) { - $env:CFLAGS = "-m32" - $env:LDFLAGS = "-m32" - $env:PATH = "$env:RTOOLS40_HOME\\mingw32\\bin;$env:PATH" - } - Else - { - $env:PATH = "$env:RTOOLS40_HOME\\mingw64\\bin;$env:PATH" + cd tools # avoid root dir to not pick up source tree + # Get a gfortran onto the path for f2py tests + $env:PATH = "$env:RTOOLS43_HOME\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" + If ( $env:TEST_MODE -eq "full" ) { + pytest --pyargs numpy -rsx --junitxml=junit/test-results.xml + } else { + pytest --pyargs numpy -m "not slow" -rsx --junitxml=junit/test-results.xml } - python runtests.py -n --show-build-log --mode=$(TEST_MODE) -- -rsx --junitxml=junit/test-results.xml displayName: 'Run NumPy Test Suite' - task: PublishTestResults@2 diff --git a/build_requirements.txt b/build_requirements.txt index a936715b9ac2..3627f1b91685 100644 --- a/build_requirements.txt +++ b/build_requirements.txt @@ -1,5 +1,5 @@ meson-python>=0.10.0 -Cython>=0.29.34,<3.0 +Cython wheel==0.38.1 ninja spin==0.4 diff --git a/meson_options.txt b/meson_options.txt index f18d1c0942ac..7ce4eefacd89 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -2,6 +2,8 @@ option('blas', type: 'string', value: 'openblas', description: 'option for BLAS library switching') option('lapack', type: 'string', value: 'openblas', description: 'option for LAPACK library switching') +option('allow-noblas', type: 'boolean', value: false, + description: 'If set to true, allow building with (slow!) internal fallback routines') option('use-ilp64', type: 'boolean', value: false, description: 'Use ILP64 (64-bit integer) BLAS and LAPACK interfaces') option('blas-symbol-suffix', type: 'string', value: '', diff --git a/numpy/_utils/__init__.py b/numpy/_utils/__init__.py index 60703f145afe..388dd9174f35 100644 --- a/numpy/_utils/__init__.py +++ b/numpy/_utils/__init__.py @@ -8,6 +8,8 @@ in ``numpy.core``. """ +from ._convertions import asunicode, asbytes + def set_module(module): """Private decorator for overriding __module__ on a function or class. diff --git a/numpy/_utils/_convertions.py b/numpy/_utils/_convertions.py new file mode 100644 index 000000000000..ab15a8ba019f --- /dev/null +++ b/numpy/_utils/_convertions.py @@ -0,0 +1,18 @@ +""" +A set of methods retained from np.compat module that +are still used across codebase. +""" + +__all__ = ["asunicode", "asbytes"] + + +def asunicode(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + + +def asbytes(s): + if isinstance(s, bytes): + return s + return str(s).encode('latin1') diff --git a/numpy/core/tests/test_mem_policy.py b/numpy/core/tests/test_mem_policy.py index 60cdaa8faa2a..bc3f330dc197 100644 --- a/numpy/core/tests/test_mem_policy.py +++ b/numpy/core/tests/test_mem_policy.py @@ -9,6 +9,11 @@ import sys +# FIXME: numpy.testing.extbuild uses `numpy.distutils`, so this won't work on +# Python 3.12 and up. It's an internal test utility, so for now we just skip +# these tests. 
+ + @pytest.fixture def get_module(tmp_path): """ Add a memory policy that returns a false pointer 64 bytes into the @@ -213,6 +218,7 @@ def get_module(tmp_path): more_init=more_init) +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_set_policy(get_module): get_handler_name = np.core.multiarray.get_handler_name @@ -241,6 +247,7 @@ def test_set_policy(get_module): assert get_handler_name() == orig_policy_name +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_default_policy_singleton(get_module): get_handler_name = np.core.multiarray.get_handler_name @@ -262,6 +269,7 @@ def test_default_policy_singleton(get_module): assert def_policy_1 is def_policy_2 is get_module.get_default_policy() +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_policy_propagation(get_module): # The memory policy goes hand-in-hand with flags.owndata @@ -320,6 +328,7 @@ async def async_test_context_locality(get_module): assert np.core.multiarray.get_handler_name() == orig_policy_name +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_context_locality(get_module): if (sys.implementation.name == 'pypy' and sys.pypy_version_info[:3] < (7, 3, 6)): @@ -341,6 +350,7 @@ def concurrent_thread2(get_module, event): get_module.set_secret_data_policy() +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_thread_locality(get_module): orig_policy_name = np.core.multiarray.get_handler_name() @@ -359,7 +369,8 @@ def test_thread_locality(get_module): assert np.core.multiarray.get_handler_name() == orig_policy_name -@pytest.mark.slow +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") +@pytest.mark.skip(reason="too slow, see gh-23975") def test_new_policy(get_module): a = np.arange(10) orig_policy_name = np.core.multiarray.get_handler_name(a) @@ -377,16 +388,19 @@ def test_new_policy(get_module): # # if needed, debug this by # - running tests with -- -s (to not capture stdout/stderr + # - setting verbose=2 # - setting extra_argv=['-vv'] here - assert np.core.test('full', verbose=2, extra_argv=['-vv']) + assert np.core.test('full', verbose=1, extra_argv=[]) # also try the ma tests, the pickling test is quite tricky - assert np.ma.test('full', verbose=2, extra_argv=['-vv']) + assert np.ma.test('full', verbose=1, extra_argv=[]) get_module.set_old_policy(orig_policy) c = np.arange(10) assert np.core.multiarray.get_handler_name(c) == orig_policy_name + +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") @pytest.mark.xfail(sys.implementation.name == "pypy", reason=("bad interaction between getenv and " "os.environ inside pytest")) @@ -419,6 +433,8 @@ def test_switch_owner(get_module, policy): if oldval is not None: np.core._multiarray_umath._set_numpy_warn_if_no_mem_policy(oldval) + +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_owner_is_base(get_module): a = get_module.get_array_with_base() with pytest.warns(UserWarning, match='warn_on_free'): diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 892226f993ca..0e07a6cb937b 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -1764,6 +1764,8 @@ def test_expm1(self): np.log, np.log2, np.log10, np.reciprocal, np.arccosh ] + @pytest.mark.skipif(sys.platform == "win32" and sys.maxsize < 2**31 + 1, + reason='failures on 32-bit Python, see FIXME below') 
@pytest.mark.parametrize("ufunc", UFUNCS_UNARY_FP) @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) @pytest.mark.parametrize("data, escape", ( @@ -1810,6 +1812,8 @@ def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): # FIXME: NAN raises FP invalid exception: # - ceil/float16 on MSVC:32-bit # - spacing/float16 on almost all platforms + # FIXME: skipped on MSVC:32-bit during switch to Meson, 10 cases fail + # when SIMD support not present / disabled if ufunc in (np.spacing, np.ceil) and dtype == 'e': return array = np.array(data, dtype=dtype) @@ -4173,6 +4177,11 @@ def test_against_cmath(self): b = cfunc(p) assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b)) + @pytest.mark.xfail( + # manylinux2014 uses glibc2.17 + _glibc_older_than("2.18"), + reason="Older glibc versions are imprecise (maybe passes with SIMD?)" + ) @pytest.mark.xfail(IS_MUSL, reason="gh23049") @pytest.mark.xfail(IS_WASM, reason="doesn't work") @pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex]) diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index f0a7221b7165..c7efe87e82ba 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -32,9 +32,6 @@ ] -# Numarray and Numeric users should set this False -using_newcore = True - depargs = [] lcb_map = {} lcb2_map = {} @@ -58,89 +55,48 @@ 'string': 'string', 'character': 'bytes', } + c2capi_map = {'double': 'NPY_DOUBLE', - 'float': 'NPY_FLOAT', - 'long_double': 'NPY_DOUBLE', # forced casting - 'char': 'NPY_STRING', - 'unsigned_char': 'NPY_UBYTE', - 'signed_char': 'NPY_BYTE', - 'short': 'NPY_SHORT', - 'unsigned_short': 'NPY_USHORT', - 'int': 'NPY_INT', - 'unsigned': 'NPY_UINT', - 'long': 'NPY_LONG', - 'long_long': 'NPY_LONG', # forced casting - 'complex_float': 'NPY_CFLOAT', - 'complex_double': 'NPY_CDOUBLE', - 'complex_long_double': 'NPY_CDOUBLE', # forced casting - 'string': 'NPY_STRING', - 'character': 'NPY_CHAR'} - -# These new maps aren't used anywhere yet, but should be by default -# unless building numeric or numarray extensions. 
-if using_newcore: - c2capi_map = {'double': 'NPY_DOUBLE', - 'float': 'NPY_FLOAT', - 'long_double': 'NPY_LONGDOUBLE', - 'char': 'NPY_BYTE', - 'unsigned_char': 'NPY_UBYTE', - 'signed_char': 'NPY_BYTE', - 'short': 'NPY_SHORT', - 'unsigned_short': 'NPY_USHORT', - 'int': 'NPY_INT', - 'unsigned': 'NPY_UINT', - 'long': 'NPY_LONG', - 'unsigned_long': 'NPY_ULONG', - 'long_long': 'NPY_LONGLONG', - 'unsigned_long_long': 'NPY_ULONGLONG', - 'complex_float': 'NPY_CFLOAT', - 'complex_double': 'NPY_CDOUBLE', - 'complex_long_double': 'NPY_CDOUBLE', - 'string': 'NPY_STRING', - 'character': 'NPY_STRING'} + 'float': 'NPY_FLOAT', + 'long_double': 'NPY_LONGDOUBLE', + 'char': 'NPY_BYTE', + 'unsigned_char': 'NPY_UBYTE', + 'signed_char': 'NPY_BYTE', + 'short': 'NPY_SHORT', + 'unsigned_short': 'NPY_USHORT', + 'int': 'NPY_INT', + 'unsigned': 'NPY_UINT', + 'long': 'NPY_LONG', + 'unsigned_long': 'NPY_ULONG', + 'long_long': 'NPY_LONGLONG', + 'unsigned_long_long': 'NPY_ULONGLONG', + 'complex_float': 'NPY_CFLOAT', + 'complex_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CDOUBLE', + 'string': 'NPY_STRING', + 'character': 'NPY_STRING'} c2pycode_map = {'double': 'd', 'float': 'f', - 'long_double': 'd', # forced casting - 'char': '1', - 'signed_char': '1', - 'unsigned_char': 'b', - 'short': 's', - 'unsigned_short': 'w', + 'long_double': 'g', + 'char': 'b', + 'unsigned_char': 'B', + 'signed_char': 'b', + 'short': 'h', + 'unsigned_short': 'H', 'int': 'i', - 'unsigned': 'u', + 'unsigned': 'I', 'long': 'l', - 'long_long': 'L', + 'unsigned_long': 'L', + 'long_long': 'q', + 'unsigned_long_long': 'Q', 'complex_float': 'F', 'complex_double': 'D', - 'complex_long_double': 'D', # forced casting - 'string': 'c', - 'character': 'c' - } - -if using_newcore: - c2pycode_map = {'double': 'd', - 'float': 'f', - 'long_double': 'g', - 'char': 'b', - 'unsigned_char': 'B', - 'signed_char': 'b', - 'short': 'h', - 'unsigned_short': 'H', - 'int': 'i', - 'unsigned': 'I', - 'long': 'l', - 'unsigned_long': 'L', - 'long_long': 'q', - 'unsigned_long_long': 'Q', - 'complex_float': 'F', - 'complex_double': 'D', - 'complex_long_double': 'G', - 'string': 'S', - 'character': 'c'} + 'complex_long_double': 'G', + 'string': 'S', + 'character': 'c'} # https://docs.python.org/3/c-api/arg.html#building-values -# c2buildvalue_map is NumPy agnostic, so no need to bother with using_newcore c2buildvalue_map = {'double': 'd', 'float': 'f', 'char': 'b', diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 2d27b652432b..f89793061bad 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -53,6 +53,7 @@ includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API #include "arrayobject.h"''' +includes['npy_math.h'] = '#include "numpy/npy_math.h"' includes['arrayobject.h'] = '#include "fortranobject.h"' includes['stdarg.h'] = '#include ' @@ -1096,7 +1097,7 @@ needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double', - 'complex_double_from_pyobj'] + 'complex_double_from_pyobj', 'npy_math.h'] cfuncs['complex_long_double_from_pyobj'] = """\ static int complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess) @@ -1108,8 +1109,8 @@ return 1; } else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { - (*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real; - (*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; + (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(obj)))); + (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(obj)))); return 1; } } @@ -1123,7 +1124,7 
@@ """ -needs['complex_double_from_pyobj'] = ['complex_double'] +needs['complex_double_from_pyobj'] = ['complex_double', 'npy_math.h'] cfuncs['complex_double_from_pyobj'] = """\ static int complex_double_from_pyobj(complex_double* v, PyObject *obj, const char *errmess) { @@ -1138,14 +1139,14 @@ if (PyArray_IsScalar(obj, CFloat)) { npy_cfloat new; PyArray_ScalarAsCtype(obj, &new); - (*v).r = (double)new.real; - (*v).i = (double)new.imag; + (*v).r = (double)npy_crealf(new); + (*v).i = (double)npy_cimagf(new); } else if (PyArray_IsScalar(obj, CLongDouble)) { npy_clongdouble new; PyArray_ScalarAsCtype(obj, &new); - (*v).r = (double)new.real; - (*v).i = (double)new.imag; + (*v).r = (double)npy_creall(new); + (*v).i = (double)npy_cimagl(new); } else { /* if (PyArray_IsScalar(obj, CDouble)) */ PyArray_ScalarAsCtype(obj, v); @@ -1163,8 +1164,8 @@ if (arr == NULL) { return 0; } - (*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; - (*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; + (*v).r = npy_creal(*(((npy_cdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimag(*(((npy_cdouble *)PyArray_DATA(arr)))); Py_DECREF(arr); return 1; } diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py index a43c677fd0af..3b2f42e2bff6 100644 --- a/numpy/f2py/tests/test_return_integer.py +++ b/numpy/f2py/tests/test_return_integer.py @@ -13,15 +13,13 @@ def check_function(self, t, tname): assert t([123]) == 123 assert t((123, )) == 123 assert t(array(123)) == 123 - assert t(array([123])) == 123 - assert t(array([[123]])) == 123 - assert t(array([123], "b")) == 123 - assert t(array([123], "h")) == 123 - assert t(array([123], "i")) == 123 - assert t(array([123], "l")) == 123 - assert t(array([123], "B")) == 123 - assert t(array([123], "f")) == 123 - assert t(array([123], "d")) == 123 + assert t(array(123, "b")) == 123 + assert t(array(123, "h")) == 123 + assert t(array(123, "i")) == 123 + assert t(array(123, "l")) == 123 + assert t(array(123, "B")) == 123 + assert t(array(123, "f")) == 123 + assert t(array(123, "d")) == 123 # pytest.raises(ValueError, t, array([123],'S3')) pytest.raises(ValueError, t, "abc") diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py index 9e76c151e88e..a15d6475a950 100644 --- a/numpy/f2py/tests/test_return_real.py +++ b/numpy/f2py/tests/test_return_real.py @@ -20,15 +20,13 @@ def check_function(self, t, tname): assert abs(t([234]) - 234) <= err assert abs(t((234, )) - 234.0) <= err assert abs(t(array(234)) - 234.0) <= err - assert abs(t(array([234])) - 234.0) <= err - assert abs(t(array([[234]])) - 234.0) <= err - assert abs(t(array([234]).astype("b")) + 22) <= err - assert abs(t(array([234], "h")) - 234.0) <= err - assert abs(t(array([234], "i")) - 234.0) <= err - assert abs(t(array([234], "l")) - 234.0) <= err - assert abs(t(array([234], "B")) - 234.0) <= err - assert abs(t(array([234], "f")) - 234.0) <= err - assert abs(t(array([234], "d")) - 234.0) <= err + assert abs(t(array(234).astype("b")) + 22) <= err + assert abs(t(array(234, "h")) - 234.0) <= err + assert abs(t(array(234, "i")) - 234.0) <= err + assert abs(t(array(234, "l")) - 234.0) <= err + assert abs(t(array(234, "B")) - 234.0) <= err + assert abs(t(array(234, "f")) - 234.0) <= err + assert abs(t(array(234, "d")) - 234.0) <= err if tname in ["t0", "t4", "s0", "s4"]: assert t(1e200) == t(1e300) # inf diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 26fa7e49d14e..75b257cdb825 100644 --- a/numpy/f2py/tests/util.py +++ 
b/numpy/f2py/tests/util.py @@ -20,7 +20,7 @@ import numpy from pathlib import Path -from numpy.compat import asbytes, asstr +from numpy._utils import asunicode from numpy.testing import temppath, IS_WASM from importlib import import_module @@ -144,7 +144,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): out, err = p.communicate() if p.returncode != 0: raise RuntimeError("Running f2py failed: %s\n%s" % - (cmd[4:], asstr(out))) + (cmd[4:], asunicode(out))) finally: os.chdir(cwd) @@ -318,7 +318,7 @@ def configuration(parent_name='',top_path=None): script = os.path.join(d, get_temp_module_name() + ".py") dst_sources.append(script) with open(script, "wb") as f: - f.write(asbytes(code)) + f.write(code.encode('latin1')) # Build cwd = os.getcwd() diff --git a/numpy/meson.build b/numpy/meson.build index c9a4970a8a51..76ef7b52ece5 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -1,5 +1,6 @@ # We need -lm for all C code (assuming it uses math functions, which is safe to -# assume for numpy). +# assume for NumPy). For C++ it isn't needed, because libstdc++/libc++ is +# guaranteed to depend on it. m_dep = cc.find_library('m', required : false) mlib_linkflag = '' if m_dep.found() @@ -48,7 +49,28 @@ else endif -# TODO: 64-bit BLAS and LAPACK +# This is currently injected directly into CFLAGS/CXXFLAGS for wheel builds +# (see cibuildwheel settings in pyproject.toml), but used by CI jobs already +blas_symbol_suffix = get_option('blas-symbol-suffix') + +use_ilp64 = get_option('use-ilp64') +if not use_ilp64 + # For now, keep supporting the `NPY_USE_BLAS_ILP64` environment variable too + # (same as in setup.py) `false is the default for the CLI flag, so check if + # env var was set + use_ilp64 = run_command(py, + [ + '-c', + 'import os; print(1) if os.environ.get("NPY_USE_BLAS_ILP64", "0") != "0" else print(0)' + ], + check: true + ).stdout().strip() == '1' +endif + + +# TODO: 64-bit (ILP64) BLAS and LAPACK support (e.g., check for more .pc files +# so we detect `openblas64_.so` directly). Partially supported now, needs more +# auto-detection. # # Note that this works as long as BLAS and LAPACK are detected properly via # pkg-config. By default we look for OpenBLAS, other libraries can be configured via @@ -62,13 +84,26 @@ lapack_name = get_option('lapack') # pkg-config uses a lower-case name while CMake uses a capitalized name, so try # that too to make the fallback detection with CMake work if blas_name == 'openblas' - blas = dependency(['openblas', 'OpenBLAS'], required: false) + if use_ilp64 + _openblas_names = ['openblas64', 'openblas', 'OpenBLAS'] + else + _openblas_names = ['openblas', 'OpenBLAS'] + endif + blas = dependency(_openblas_names, required: false) else blas = dependency(blas_name, required: false) endif have_blas = blas.found() cblas = [] if have_blas + # As noted above, at this point the BLAS_SYMBOL_SUFFIX may be injected into + # the CFLAGS directly, so this requires care to use that when it happens: + if blas_symbol_suffix != '' + probe_args = ['-DBLAS_SYMBOL_SUFFIX=' + blas_symbol_suffix] + else + probe_args = [] + endif + # Netlib BLAS has a separate `libcblas.so` which we use directly in the g77 # ABI wrappers, so detect it and error out if we cannot find it. OpenBLAS can # be built without CBLAS too (see gh-23909, done by Arch Linux until @@ -78,15 +113,22 @@ if have_blas # see https://github.com/mesonbuild/meson/pull/10921. 
have_cblas = false if cc.links(''' + #ifndef BLAS_SYMBOL_SUFFIX + # define BLAS_SYMBOL_SUFFIX + #endif + #define EXPAND(suffix) cblas_ddot ## suffix + #define DDOT(suffix) EXPAND(suffix) + #include int main(int argc, const char *argv[]) { double a[4] = {1,2,3,4}; double b[4] = {5,6,7,8}; - return cblas_ddot(4, a, 1, b, 1) > 10; + return DDOT(BLAS_SYMBOL_SUFFIX)(4, a, 1, b, 1) > 10; } ''', dependencies: blas, + args: probe_args, name: 'CBLAS', ) have_cblas = true @@ -98,53 +140,52 @@ if have_blas endif endif -if lapack_name == 'openblas' - lapack_name = ['openblas', 'OpenBLAS'] -endif -lapack_dep = dependency(lapack_name, required: false) -have_lapack = lapack_dep.found() - -dependency_map = { - 'BLAS': blas, - 'LAPACK': lapack_dep, -} - -use_ilp64 = get_option('use-ilp64') -if not use_ilp64 - # For now, keep supporting this environment variable too (same as in setup.py) - # `false is the default for the CLI flag, so check if env var was set - use_ilp64 = run_command(py, - [ - '-c', - 'import os; print(1) if os.environ.get("NPY_USE_BLAS_ILP64", "0") != "0" else print(0)' - ], - check: true - ).stdout().strip() == '1' -endif - -# BLAS and LAPACK are optional dependencies for NumPy. We can only use a BLAS -# which provides a CBLAS interface. -# TODO: add ILP64 support +# BLAS and LAPACK are dependencies for NumPy. Since NumPy 2.0, by default the +# build will fail if they are missing; the performance impact is large, so +# using fallback routines must be explicitly opted into by the user. xref +# gh-24200 for a discussion on this. +# +# Note that we can only use a BLAS which provides a CBLAS interface. So disable +# BLAS completely if CBLAS is not found. +allow_noblas = get_option('allow-noblas') if have_blas - c_args_blas = [] # note: used for C and C++ via `blas_dep` below + _args_blas = [] # note: used for C and C++ via `blas_dep` below if have_cblas - c_args_blas += ['-DHAVE_CBLAS'] + _args_blas += ['-DHAVE_CBLAS'] + elif not allow_noblas + error('No CBLAS interface detected! Install a BLAS library with CBLAS ' + \ + 'support, or use the `allow-noblas` build option (note, this ' + \ + 'may be up to 100x slower for some linear algebra operations).') endif if use_ilp64 - c_args_blas += ['-DHAVE_BLAS_ILP64'] + _args_blas += ['-DHAVE_BLAS_ILP64'] endif - # This is currently injected directly into CFLAGS/CXXFLAGS for wheel builds - # (see cibuildwheel settings in pyproject.toml) - blas_symbol_suffix = get_option('blas-symbol-suffix') if blas_symbol_suffix != '' - c_args_blas += ['-DBLAS_SYMBOL_SUFFIX=' + blas_symbol_suffix] + _args_blas += ['-DBLAS_SYMBOL_SUFFIX=' + blas_symbol_suffix] endif blas_dep = declare_dependency( dependencies: [blas, cblas], - compile_args: c_args_blas, + compile_args: _args_blas, ) else - blas_dep = [] + if allow_noblas + blas_dep = [] + else + error('No BLAS library detected! Install one, or use the ' + \ + '`allow-noblas` build option (note, this may be up to 100x slower ' + \ + 'for some linear algebra operations).') + endif +endif + +if lapack_name == 'openblas' + lapack_name = ['openblas', 'OpenBLAS'] +endif +lapack_dep = dependency(lapack_name, required: false) +have_lapack = lapack_dep.found() +if not have_lapack and not allow_noblas + error('No LAPACK library detected! 
Install one, or use the ' + \ + '`allow-noblas` build option (note, this may be up to 100x slower ' + \ + 'for some linear algebra operations).') endif # Copy the main __init__.py|pxd files to the build dir (needed for Cython) @@ -187,10 +228,6 @@ src_file = generator(src_file_cli, ) tempita_cli = find_program('_build_utils/tempita.py') -tempita = generator(tempita_cli, - arguments : ['@INPUT@', '--outfile', '@OUTPUT@'], - output : '@BASENAME@' -) pure_subdirs = [ '_pyinstaller', @@ -261,7 +298,20 @@ conf_data.set('CROSS_COMPILED', meson.is_cross_build()) conf_data.set('PYTHON_PATH', py.full_path()) conf_data.set('PYTHON_VERSION', py.language_version()) -# Dependencies information +# BLAS/LAPACK dependency info. Ensure we report dependencies correctly for +# `np.show_config()`; needs some special handling for the case BLAS was found +# but CBLAS not (and hence BLAS was also disabled) +dependency_map = { + 'LAPACK': lapack_dep, +} +if have_blas and have_cblas + dependency_map += {'BLAS': blas} +else + conf_data.set('BLAS_NAME', blas_name) + conf_data.set('BLAS_FOUND', false) +endif + + foreach name, dep : dependency_map conf_data.set(name + '_NAME', dep.name()) conf_data.set(name + '_FOUND', dep.found()) diff --git a/numpy/random/_examples/cython/setup.py b/numpy/random/_examples/cython/setup.py deleted file mode 100644 index e70a1fddca74..000000000000 --- a/numpy/random/_examples/cython/setup.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python3 -""" -Build the Cython demonstrations of low-level access to NumPy random - -Usage: python setup.py build_ext -i -""" -from os.path import dirname, join, abspath - -from setuptools import setup -from setuptools.extension import Extension - -import numpy as np -from Cython.Build import cythonize - - -path = dirname(__file__) -src_dir = join(dirname(path), '..', 'src') -defs = [('NPY_NO_DEPRECATED_API', 0)] -inc_path = np.get_include() -# Add paths for npyrandom and npymath libraries: -lib_path = [ - abspath(join(np.get_include(), '..', '..', 'random', 'lib')), - abspath(join(np.get_include(), '..', 'lib')) -] - -extending = Extension("extending", - sources=[join('.', 'extending.pyx')], - include_dirs=[ - np.get_include(), - join(path, '..', '..') - ], - define_macros=defs, - ) -distributions = Extension("extending_distributions", - sources=[join('.', 'extending_distributions.pyx')], - include_dirs=[inc_path], - library_dirs=lib_path, - libraries=['npyrandom', 'npymath'], - define_macros=defs, - ) - -extensions = [extending, distributions] - -setup( - ext_modules=cythonize(extensions) -) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index a30d116c2127..ee68810dd798 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -1,5 +1,5 @@ #!python -#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 +#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3, binding=True import operator import warnings from collections.abc import Sequence @@ -277,7 +277,7 @@ cdef class Generator: >>> entropy = 0x3034c61a9ae04ff8cb62ab8ec2c4b501 >>> rng = np.random.default_rng(entropy) - Create two new generators for example for parallel executation: + Create two new generators for example for parallel execution: >>> child_rng1, child_rng2 = rng.spawn(2) @@ -3477,7 +3477,7 @@ cdef class Generator: # answer = 0.003 ... pretty unlikely! 
""" - DEF HYPERGEOM_MAX = 10**9 + cdef double HYPERGEOM_MAX = 10**9 cdef bint is_scalar = True cdef np.ndarray ongood, onbad, onsample cdef int64_t lngood, lnbad, lnsample diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx index 8b991254aac7..50bc0084c6c2 100644 --- a/numpy/random/_mt19937.pyx +++ b/numpy/random/_mt19937.pyx @@ -1,3 +1,5 @@ +#cython: binding=True + import operator import numpy as np diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index f7891aa85b98..a924d75fdbf3 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -1,3 +1,5 @@ +#cython: binding=True + import numpy as np cimport numpy as np diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index e5353460c83b..d90da6a9b657 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -1,3 +1,5 @@ +#cython: binding=True + from cpython.pycapsule cimport PyCapsule_New import numpy as np @@ -11,7 +13,7 @@ __all__ = ['Philox'] np.import_array() -DEF PHILOX_BUFFER_SIZE=4 +cdef int PHILOX_BUFFER_SIZE=4 cdef extern from 'src/philox/philox.h': struct s_r123array2x64: @@ -30,7 +32,7 @@ cdef extern from 'src/philox/philox.h': philox4x64_ctr_t *ctr philox4x64_key_t *key int buffer_pos - uint64_t buffer[PHILOX_BUFFER_SIZE] + uint64_t *buffer int has_uint32 uint32_t uinteger @@ -193,11 +195,13 @@ cdef class Philox(BitGenerator): self._bitgen.next_raw = &philox_uint64 cdef _reset_state_variables(self): - self.rng_state.has_uint32 = 0 - self.rng_state.uinteger = 0 - self.rng_state.buffer_pos = PHILOX_BUFFER_SIZE + cdef philox_state *rng_state = &self.rng_state + + rng_state[0].has_uint32 = 0 + rng_state[0].uinteger = 0 + rng_state[0].buffer_pos = PHILOX_BUFFER_SIZE for i in range(PHILOX_BUFFER_SIZE): - self.rng_state.buffer[i] = 0 + rng_state[0].buffer[i] = 0 @property def state(self): diff --git a/numpy/random/_sfc64.pyx b/numpy/random/_sfc64.pyx index 419045c1d32f..9b38dff84122 100644 --- a/numpy/random/_sfc64.pyx +++ b/numpy/random/_sfc64.pyx @@ -1,3 +1,5 @@ +#cython: binding=True + import numpy as np cimport numpy as np diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index 83441747a316..bcc9e50a1804 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -1,3 +1,5 @@ +#cython: binding=True + """ BitGenerator base class and SeedSequence used to seed the BitGenerators. 
diff --git a/numpy/random/meson.build b/numpy/random/meson.build index c582428f9629..4980a80ba2c8 100644 --- a/numpy/random/meson.build +++ b/numpy/random/meson.build @@ -153,7 +153,6 @@ py.install_sources( '_examples/cython/extending.pyx', '_examples/cython/extending_distributions.pyx', '_examples/cython/meson.build', - '_examples/cython/setup.py', ], subdir: 'numpy/random/_examples/cython' ) diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index d14d081644af..752d9beaefd2 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -1,5 +1,5 @@ #!python -#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 +#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3, binding=True import operator import warnings from collections.abc import Sequence diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index cebeb07cf9f7..1241329151a9 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -403,11 +403,29 @@ float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) { return scale * random_standard_gamma_f(bitgen_state, shape); } +#define BETA_TINY_THRESHOLD 3e-103 + +/* + * Note: random_beta assumes that a != 0 and b != 0. + */ double random_beta(bitgen_t *bitgen_state, double a, double b) { double Ga, Gb; if ((a <= 1.0) && (b <= 1.0)) { double U, V, X, Y, XpY; + + if (a < BETA_TINY_THRESHOLD && b < BETA_TINY_THRESHOLD) { + /* + * When a and b are this small, the probability that the + * sample would be a double precision float that is not + * 0 or 1 is less than approx. 1e-100. So we use the + * proportion a/(a + b) and a single uniform sample to + * generate the result. + */ + U = next_double(bitgen_state); + return (a + b)*U < a; + } + /* Use Johnk's algorithm */ while (1) { @@ -417,8 +435,8 @@ double random_beta(bitgen_t *bitgen_state, double a, double b) { Y = pow(V, 1.0 / b); XpY = X + Y; /* Reject if both U and V are 0.0, which is approx 1 in 10^106 */ - if ((XpY <= 1.0) && (XpY > 0.0)) { - if (X + Y > 0) { + if ((XpY <= 1.0) && (U + V > 0.0)) { + if (XpY > 0) { return X / XpY; } else { double logX = log(U) / a; diff --git a/numpy/random/src/mt19937/randomkit.h b/numpy/random/src/mt19937/randomkit.h index abb082cb2ed8..5b933af2b218 100644 --- a/numpy/random/src/mt19937/randomkit.h +++ b/numpy/random/src/mt19937/randomkit.h @@ -177,7 +177,7 @@ extern void rk_random_uint8(npy_uint8 off, npy_uint8 rng, npy_intp cnt, /* * Fills an array with cnt random npy_bool between off and off + rng - * inclusive. It is assumed tha npy_bool as the same size as npy_uint8. + * inclusive. It is assumed that npy_bool is the same size as npy_uint8. */ extern void rk_random_bool(npy_bool off, npy_bool rng, npy_intp cnt, npy_bool *out, rk_state *state); diff --git a/numpy/random/src/pcg64/pcg64.orig.h b/numpy/random/src/pcg64/pcg64.orig.h index 74be91f31a50..a1b31bf889a5 100644 --- a/numpy/random/src/pcg64/pcg64.orig.h +++ b/numpy/random/src/pcg64/pcg64.orig.h @@ -766,7 +766,7 @@ inline void pcg_setseq_128_srandom_r(struct pcg_state_setseq_128 *rng, * such as raw LCGs do better using a technique based on division.) * Empricical tests show that division is preferable to modulus for * reducting the range of an RNG. It's faster, and sometimes it can - * even be statistically prefereable. + * even be statistically preferable. 
*/ /* Generation functions for XSH RS */ diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index 5ace080f1bca..8c684ca43f40 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -1,13 +1,18 @@ +from importlib.util import spec_from_file_location, module_from_spec import os +import pathlib import pytest import shutil import subprocess import sys +import sysconfig +import textwrap import warnings + import numpy as np -from numpy.distutils.misc_util import exec_mod_from_location from numpy.testing import IS_WASM + try: import cffi except ImportError: @@ -38,7 +43,7 @@ # other fixes in the 0.29 series that are needed even for earlier # Python versions. # Note: keep in sync with the one in pyproject.toml - required_version = '0.29.30' + required_version = '0.29.35' if _pep440.parse(cython_version) < _pep440.Version(required_version): # too old or wrong cython, skip the test cython = None @@ -48,20 +53,30 @@ @pytest.mark.skipif(cython is None, reason="requires cython") @pytest.mark.slow def test_cython(tmp_path): + import glob + # build the examples in a temporary directory srcdir = os.path.join(os.path.dirname(__file__), '..') shutil.copytree(srcdir, tmp_path / 'random') - # build the examples and "install" them into a temporary directory build_dir = tmp_path / 'random' / '_examples' / 'cython' - subprocess.check_call([sys.executable, 'setup.py', 'build', 'install', - '--prefix', str(tmp_path / 'installdir'), - '--single-version-externally-managed', - '--record', str(tmp_path/ 'tmp_install_log.txt'), - ], - cwd=str(build_dir), - ) + target_dir = build_dir / "build" + os.makedirs(target_dir, exist_ok=True) + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", str(build_dir)], + cwd=target_dir, + ) + else: + subprocess.check_call(["meson", "setup", str(build_dir)], + cwd=target_dir + ) + subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir) + # gh-16162: make sure numpy's __init__.pxd was used for cython # not really part of this test, but it is a convenient place to check - with open(build_dir / 'extending.c') as fid: + + g = glob.glob(str(target_dir / "*" / "extending.pyx.c")) + with open(g[0]) as fid: txt_to_find = 'NumPy API declarations from "numpy/__init__' for i, line in enumerate(fid): if txt_to_find in line: @@ -69,21 +84,21 @@ def test_cython(tmp_path): else: assert False, ("Could not find '{}' in C file, " "wrong pxd used".format(txt_to_find)) - # get the path to the so's - so1 = so2 = None - with open(tmp_path /'tmp_install_log.txt') as fid: - for line in fid: - if 'extending.' 
in line: - so1 = line.strip() - if 'extending_distributions' in line: - so2 = line.strip() - assert so1 is not None - assert so2 is not None - # import the so's without adding the directory to sys.path - exec_mod_from_location('extending', so1) - extending_distributions = exec_mod_from_location( - 'extending_distributions', so2) + # import without adding the directory to sys.path + suffix = sysconfig.get_config_var('EXT_SUFFIX') + + def load(modname): + so = (target_dir / modname).with_suffix(suffix) + spec = spec_from_file_location(modname, so) + mod = module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + # test that the module can be imported + load("extending") + load("extending_cpp") # actually test the cython c-extension + extending_distributions = load("extending_distributions") from numpy.random import PCG64 values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd') assert values.shape == (10,) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 5c4c2cbf92fe..1c57b3fa5d8d 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -644,7 +644,7 @@ def test_respect_dtype_singleton(self, endpoint): sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) assert_equal(sample.dtype, dt) - for dt in (bool, int, np.compat.long): + for dt in (bool, int): lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 ubnd = ubnd - 1 if endpoint else ubnd diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index 7c2b6867c0e1..f16af2b293ce 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -75,6 +75,17 @@ def test_beta_small_parameters(self): x = self.mt19937.beta(0.0001, 0.0001, size=100) assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta') + def test_beta_very_small_parameters(self): + # gh-24203: beta would hang with very small parameters. + self.mt19937.beta(1e-49, 1e-40) + + def test_beta_ridiculously_small_parameters(self): + # gh-24266: beta would generate nan when the parameters + # were subnormal or a small multiple of the smallest normal. + tiny = np.finfo(1.0).tiny + x = self.mt19937.beta(tiny/32, tiny/40, size=50) + assert not np.any(np.isnan(x)) + def test_choice_sum_of_probs_tolerance(self): # The sum of probs should be 1.0 with some tolerance. # For low precision dtypes the tolerance was too tight. 
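As context for the two beta regression tests just added: the fix they exercise is the tiny-parameter fast path in the numpy/random/src/distributions/distributions.c hunk earlier in this patch. When both shape parameters are below BETA_TINY_THRESHOLD, a Beta(a, b) draw is, to double precision, a Bernoulli draw returning 1.0 with probability a/(a + b), so a single uniform sample suffices and Johnk's rejection loop is skipped entirely. (With parameters that small, pow(U, 1.0/a) and pow(V, 1.0/b) both underflow to 0.0, and the old loop condition could then reject forever, which is the hang reported in gh-24203.) A minimal Python sketch of the idea follows; it is illustrative only and not part of the patch, and the constant, function name, and trailing check below are invented for the sketch:

import numpy as np

# Mirrors BETA_TINY_THRESHOLD from the distributions.c hunk above
# (value copied from that hunk; the Python names here are illustrative).
BETA_TINY_THRESHOLD = 3e-103

def beta_tiny_sketch(rng, a, b):
    # With a and b this small, Beta(a, b) is effectively two point masses
    # at 0.0 and 1.0 with P(1.0) == a / (a + b), so a single uniform draw
    # decides the sample, as in the C fast path.
    assert a < BETA_TINY_THRESHOLD and b < BETA_TINY_THRESHOLD
    u = rng.random()
    return 1.0 if (a + b) * u < a else 0.0

rng = np.random.default_rng(0)
draws = {beta_tiny_sketch(rng, 1e-120, 1e-121) for _ in range(100)}
assert draws <= {0.0, 1.0}

The regression tests above then only need to check that such calls neither hang nor produce NaN, which is exactly what test_beta_very_small_parameters and test_beta_ridiculously_small_parameters do.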
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 0f4e7925a501..e64ace711953 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -276,7 +276,7 @@ def test_respect_dtype_singleton(self): sample = self.rfunc(lbnd, ubnd, dtype=dt) assert_equal(sample.dtype, np.dtype(dt)) - for dt in (bool, int, np.compat.long): + for dt in (bool, int): lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index 3a296109890e..3099853d2a8e 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -422,7 +422,7 @@ def test_respect_dtype_singleton(self): sample = self.rfunc(lbnd, ubnd, dtype=dt) assert_equal(sample.dtype, np.dtype(dt)) - for dt in (bool, int, np.compat.long): + for dt in (bool, int): lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 diff --git a/pyproject.toml b/pyproject.toml index 759b538fb6e1..47fa0959bd3b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,79 +1,61 @@ [build-system] -# Uncomment this line, the `meson-python` requires line, and the [project] and -# [project.urls] tables below in order to build with Meson by default -#build-backend = "mesonpy" +build-backend = "mesonpy" requires = [ - # setuptools, wheel and Cython are needed for the setup.py based build - "setuptools==59.2.0", - # `wheel` is needed for non-isolated builds, given that `meson-python` - # doesn't list it as a runtime requirement (at least in 0.11.0) - it's - # likely to be removed as a dependency in meson-python 0.12.0. - "wheel==0.38.1", - "Cython>=0.29.34,<3.0", -# "meson-python>=0.10.0", + "Cython>=0.29.34,<3.1", + "meson-python>=0.13.1,<0.16.0", ] -#[project] -#name = "numpy" -# -## Using https://peps.python.org/pep-0639/ -## which is still in draft -#license = {text = "BSD-3-Clause"} -## Note: needed for Meson, but setuptools errors on it. Uncomment once Meson is default. -##license-files.paths = [ -## "LICENSE.txt", -## "LICENSES_bundles.txt" -##] -# -#description = "Fundamental package for array computing in Python" -#authors = [{name = "Travis E. Oliphant et al."}] -#maintainers = [ -# {name = "NumPy Developers", email="numpy-discussion@python.org"}, -#] -#requires-python = ">=3.9" -#readme = "README.md" -#classifiers = [ -# 'Development Status :: 5 - Production/Stable', -# 'Intended Audience :: Science/Research', -# 'Intended Audience :: Developers', -# 'License :: OSI Approved :: BSD License', -# 'Programming Language :: C', -# 'Programming Language :: Python', -# 'Programming Language :: Python :: 3', -# 'Programming Language :: Python :: 3.9', -# 'Programming Language :: Python :: 3.10', -# 'Programming Language :: Python :: 3.11', -# 'Programming Language :: Python :: 3 :: Only', -# 'Programming Language :: Python :: Implementation :: CPython', -# 'Topic :: Software Development', -# 'Topic :: Scientific/Engineering', -# 'Typing :: Typed', -# 'Operating System :: Microsoft :: Windows', -# 'Operating System :: POSIX', -# 'Operating System :: Unix', -# 'Operating System :: MacOS', -#] -#dynamic = ["version", "scripts"] -# -#[project.scripts] -## Note: this is currently dynamic, see setup.py. Can we get rid of that? 
-## see commit f22a33b71 for rationale for dynamic behavior -#'f2py = numpy.f2py.f2py2e:main' -#'f2py3 = numpy.f2py.f2py2e:main' -#'f2py3.MINOR_VERSION = numpy.f2py.f2py2e:main' -# -# When enabling this stanza, make sure to remove the meson-specific xfail from -# numpy/tests/test_public_api.py -#[project.entry-points] -#'array_api': 'numpy = numpy.array_api' -#'pyinstaller40': 'hook-dirs = numpy:_pyinstaller_hooks_dir' -# -#[project.urls] -#homepage = "https://numpy.org" -#documentation = "https://numpy.org/doc/" -#source = "https://github.com/numpy/numpy" -#download = "https://pypi.org/project/numpy/#files" -#tracker = "https://github.com/numpy/numpy/issues" +[project] +name = "numpy" +version = "2.0.0.dev0" +# TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) +license = {file = "LICENSE.txt"} + +description = "Fundamental package for array computing in Python" +authors = [{name = "Travis E. Oliphant et al."}] +maintainers = [ + {name = "NumPy Developers", email="numpy-discussion@python.org"}, +] +requires-python = ">=3.9" +readme = "README.md" +classifiers = [ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Science/Research', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: BSD License', + 'Programming Language :: C', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: Implementation :: CPython', + 'Topic :: Software Development', + 'Topic :: Scientific/Engineering', + 'Typing :: Typed', + 'Operating System :: Microsoft :: Windows', + 'Operating System :: POSIX', + 'Operating System :: Unix', + 'Operating System :: MacOS', +] + +[project.scripts] +f2py = 'numpy.f2py.f2py2e:main' + +[project.entry-points.array_api] +numpy = 'numpy.array_api' + +[project.entry-points.pyinstaller40] +hook-dirs = 'numpy:_pyinstaller_hooks_dir' + +[project.urls] +homepage = "https://numpy.org" +documentation = "https://numpy.org/doc/" +source = "https://github.com/numpy/numpy" +download = "https://pypi.org/project/numpy/#files" +tracker = "https://github.com/numpy/numpy/issues" [tool.towncrier] # Do no set this since it is hard to import numpy inside the source directory @@ -97,6 +79,11 @@ requires = [ name = "New functions" showcontent = true + [[tool.towncrier.type]] + directory = "python_removal" + name = "NumPy 2.0 Python API removals" + showcontent = true + [[tool.towncrier.type]] directory = "deprecation" name = "Deprecations" @@ -122,6 +109,11 @@ requires = [ name = "C API changes" showcontent = true + [[tool.towncrier.type]] + directory = "c_api_removal" + name = "NumPy 2.0 C API removals" + showcontent = true + [[tool.towncrier.type]] directory = "new_feature" name = "New Features" @@ -144,17 +136,25 @@ requires = [ [tool.cibuildwheel] -skip = "cp36-* cp37-* pp37-* *-manylinux_i686 *_ppc64le *_s390x *-musllinux_aarch64" +# Note: the below skip command doesn't do much currently, the platforms to +# build wheels for in CI are controlled in `.github/workflows/wheels.yml` and +# `tools/ci/cirrus_wheels.yml`. 
+skip = "cp36-* cp37-* cp-38* pp37-* *-manylinux_i686 *_ppc64le *_s390x *-musllinux_aarch64 *-win32" build-verbosity = "3" before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" -before-test = "pip install -r {project}/test_requirements.txt" +# meson has a hard dependency on ninja, and we need meson to build +# c-extensions in tests. There is a ninja PyPI package used in +# build_requirements.txt for macOS, windows, linux but it cannot be in +# test_requirements.txt since pyodide, which uses test_requirements.txt, does +# not have it. +before-test = "pip install ninja && pip install -r {project}/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" [tool.cibuildwheel.linux] manylinux-x86_64-image = "manylinux2014" manylinux-aarch64-image = "manylinux2014" musllinux-x86_64-image = "musllinux_1_1" -environment = { CFLAGS="-std=c99 -fno-strict-aliasing", LDFLAGS="-Wl,--strip-debug", OPENBLAS64_="/usr/local", NPY_USE_BLAS_ILP64="1", RUNNER_OS="Linux" } +environment = { CFLAGS="-fno-strict-aliasing -DBLAS_SYMBOL_SUFFIX=64_ -DHAVE_BLAS_ILP64", LDFLAGS="-Wl,--strip-debug", CXXFLAGS="-DBLAS_SYMBOL_SUFFIX=64_ -DHAVE_BLAS_ILP64", NPY_USE_BLAS_ILP64="1", RUNNER_OS="Linux"} [tool.cibuildwheel.macos] # For universal2 wheels, we will need to fuse them manually @@ -165,18 +165,29 @@ environment = { CFLAGS="-std=c99 -fno-strict-aliasing", LDFLAGS="-Wl,--strip-deb archs = "x86_64 arm64" test-skip = "*_universal2:arm64" # MACOS linker doesn't support stripping symbols -environment = { CFLAGS="-std=c99 -fno-strict-aliasing", OPENBLAS64_="/usr/local", NPY_USE_BLAS_ILP64="1", CC="clang", CXX = "clang++", RUNNER_OS="macOS" } +environment = {CFLAGS="-fno-strict-aliasing -DBLAS_SYMBOL_SUFFIX=64_ -DHAVE_BLAS_ILP64", CXXFLAGS="-DBLAS_SYMBOL_SUFFIX=64_ -DHAVE_BLAS_ILP64", NPY_USE_BLAS_ILP64="1", CC="clang", CXX = "clang++", RUNNER_OS="macOS"} [tool.cibuildwheel.windows] -environment = { OPENBLAS64_="openblas", OPENBLAS="", NPY_USE_BLAS_ILP64="1", CFLAGS="", LDFLAGS="" } - -[[tool.cibuildwheel.overrides]] -select = "*-win32" -environment = { OPENBLAS64_="", OPENBLAS="openblas", NPY_USE_BLAS_ILP64="0", CFLAGS="-m32", LDFLAGS="-m32" } +archs = ['AMD64'] +environment = {NPY_USE_BLAS_ILP64="1", CFLAGS="-DBLAS_SYMBOL_SUFFIX=64_ -DHAVE_BLAS_ILP64", CXXFLAGS="-DBLAS_SYMBOL_SUFFIX=64_ -DHAVE_BLAS_ILP64", LDFLAGS="", PKG_CONFIG_PATH="C:/opt/64/lib/pkgconfig"} +config-settings = "setup-args=--vsenv" +repair-wheel-command = "bash ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" + +#[[tool.cibuildwheel.overrides]] +# Note: 32-bit Python wheel builds are skipped right now; probably needs +# --native-file to build due to `arch != pyarch` check in Meson's `python` dependency +# Note: uses 32-bit rather than 64-bit OpenBLAS +#select = "*-win32" +#environment = CFLAGS="-m32", LDFLAGS="-m32", PKG_CONFIG_PATH="/opt/32/lib/pkgconfig"} [tool.spin] package = 'numpy' [tool.spin.commands] -"Build" = ["spin.cmds.meson.build", "spin.cmds.meson.test"] -"Environments" = ["spin.cmds.meson.shell", "spin.cmds.meson.ipython", "spin.cmds.meson.python"] +"Build" = ["spin.cmds.meson.build", ".spin/cmds.py:test"] +"Environments" = [ + ".spin/cmds.py:run", ".spin/cmds.py:ipython", + ".spin/cmds.py:python", ".spin/cmds.py:gdb" +] +"Documentation" = [".spin/cmds.py:docs"] +"Metrics" = [".spin/cmds.py:bench"] diff --git a/pyproject.toml.setuppy b/pyproject.toml.setuppy new file mode 100644 index 000000000000..044cf5538af4 --- /dev/null +++ b/pyproject.toml.setuppy @@ -0,0 +1,9 @@ +# 
pyproject.toml needed to build with setup.py +# This file is used temporarily to replace the main pyproject.toml when needing +# to avoid building with Meson (e.g., in the Emscripten/Pyodide CI job) +[build-system] +requires = [ + "setuptools==59.2.0", + "wheel==0.38.1", + "Cython>=0.29.34,<3.1", +] diff --git a/test_requirements.txt b/test_requirements.txt index 295be18d5bf6..91237409e0ac 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,4 +1,4 @@ -cython>=0.29.34,<3.0 +Cython wheel==0.38.1 setuptools==59.2.0 ; python_version < '3.12' setuptools ; python_version >= '3.12' @@ -6,6 +6,7 @@ hypothesis==6.81.1 pytest==7.4.0 pytz==2023.3 pytest-cov==4.1.0 +meson pytest-xdist # for numpy.random.test.test_extending cffi; python_version < '3.10' diff --git a/tools/ci/cirrus_macosx_arm64.yml b/tools/ci/cirrus_macosx_arm64.yml index 53e4afeb3b46..0dc97763e954 100644 --- a/tools/ci/cirrus_macosx_arm64.yml +++ b/tools/ci/cirrus_macosx_arm64.yml @@ -51,7 +51,7 @@ macos_arm64_test_task: popd pip install -r build_requirements.txt - pip install pytest hypothesis typing_extensions + pip install pytest pytest-xdist hypothesis typing_extensions - spin build - spin test + spin build -- -Dallow-noblas=true + spin test -j auto diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index e3bc2c680832..068fc0486d85 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -32,8 +32,13 @@ linux_aarch64_task: - env: CIRRUS_CLONE_SUBMODULES: true CIBW_BUILD: cp311-* + - env: + CIRRUS_CLONE_SUBMODULES: true + CIBW_PRERELEASE_PYTHONS: True + CIBW_BUILD: cp312-* build_script: | + apt update apt install -y python3-venv python-is-python3 gfortran libatlas-base-dev libgfortran5 eatmydata git fetch origin ./tools/travis-before-install.sh @@ -56,6 +61,10 @@ macosx_arm64_task: - env: CIRRUS_CLONE_SUBMODULES: true CIBW_BUILD: cp310-* cp311-* + - env: + CIRRUS_CLONE_SUBMODULES: true + CIBW_PRERELEASE_PYTHONS: True + CIBW_BUILD: cp312-* env: PATH: /opt/homebrew/opt/python@3.10/bin:/usr/local/lib:/usr/local/include:$PATH CIBW_ARCHS: arm64 @@ -68,7 +77,8 @@ macosx_arm64_task: RUNNER_OS=macOS SDKROOT=/Applications/Xcode-14.0.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.3.sdk LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH - CFLAGS="-std=c99 -fno-strict-aliasing" + CFLAGS="-fno-strict-aliasing -DBLAS_SYMBOL_SUFFIX=64_ -DHAVE_BLAS_ILP64" + CXXFLAGS="-DBLAS_SYMBOL_SUFFIX=64_ -DHAVE_BLAS_ILP64" OPENBLAS64_="/usr/local" NPY_USE_BLAS_ILP64="1" @@ -106,7 +116,7 @@ wheels_upload_task: env: NUMPY_STAGING_UPLOAD_TOKEN: ENCRYPTED[!5a69522ae0c2af9edb2bc1cdfeaca6292fb3666d9ecd82dca0615921834a6ce3b702352835d8bde4ea2a9ed5ef8424ac!] - NUMPY_NIGHTLY_UPLOAD_TOKEN: ENCRYPTED[!196422e6c3419a3b1d79815e1026094a215cb0f346fe34ed0f9d3ca1c19339df7398d04556491b1e0420fc1fe3713289!] 
+ NUMPY_NIGHTLY_UPLOAD_TOKEN: ENCRYPTED[ef04347663cfcb58d121385707e55951dc8e03b009edeed988aa4a33ba8205c54ca9980ac4da88e1adfdebff8b9d7ed4] upload_script: | apt-get update @@ -115,7 +125,7 @@ wheels_upload_task: export IS_PUSH="false" # cron job - if [[ "$CIRRUS_CRON" == "weekly" ]]; then + if [[ "$CIRRUS_CRON" == "nightly" ]]; then export IS_SCHEDULE_DISPATCH="true" fi diff --git a/tools/openblas_support.py b/tools/openblas_support.py index 47159d0fa3ed..d99a45b0bd0f 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -15,8 +15,9 @@ OPENBLAS_V = '0.3.23.dev' OPENBLAS_LONG = 'v0.3.23-246-g3d31191b' -BASE_LOC = 'https://anaconda.org/multibuild-wheels-staging/openblas-libs' -BASEURL = f'{BASE_LOC}/{OPENBLAS_LONG}/download' +BASE_LOC = ( + 'https://anaconda.org/scientific-python-nightly-wheels/openblas-libs' +) SUPPORTED_PLATFORMS = [ 'linux-aarch64', 'linux-x86_64', @@ -91,7 +92,7 @@ def get_linux(arch): return get_musllinux(arch) -def download_openblas(target, plat, ilp64): +def download_openblas(target, plat, ilp64, *, nightly=False): osname, arch = plat.split("-") fnsuffix = {None: "", "64_": "64_"}[ilp64] filename = '' @@ -120,7 +121,12 @@ def download_openblas(target, plat, ilp64): if not suffix: return None - filename = f'{BASEURL}/openblas{fnsuffix}-{OPENBLAS_LONG}-{suffix}' + openblas_version = "HEAD" if nightly else OPENBLAS_LONG + filename = ( + f'{BASE_LOC}/{openblas_version}/download/' + f'openblas{fnsuffix}-{openblas_version}-{suffix}' + ) + print(f'Attempting to download {filename}', file=sys.stderr) req = Request(url=filename, headers=headers) try: response = urlopen(req) @@ -141,7 +147,7 @@ def download_openblas(target, plat, ilp64): return typ -def setup_openblas(plat=get_plat(), ilp64=get_ilp64()): +def setup_openblas(plat=get_plat(), ilp64=get_ilp64(), nightly=False): ''' Download and setup an openblas library for building. If successful, the configuration script will find it automatically. @@ -155,7 +161,7 @@ def setup_openblas(plat=get_plat(), ilp64=get_ilp64()): _, tmp = mkstemp() if not plat: raise ValueError('unknown platform') - typ = download_openblas(tmp, plat, ilp64) + typ = download_openblas(tmp, plat, ilp64, nightly=nightly) if not typ: return '' osname, arch = plat.split("-") @@ -182,10 +188,11 @@ def unpack_windows_zip(fname, plat): # Copy the lib to openblas.lib. 
Once we can properly use pkg-config # this will not be needed lib = glob.glob(os.path.join(target, 'lib', '*.lib')) - assert len(lib) == 1 - for f in lib: - shutil.copy(f, os.path.join(target, 'lib', 'openblas.lib')) - shutil.copy(f, os.path.join(target, 'lib', 'openblas64_.lib')) + if len(lib) == 1: + # The 64-bit tarball already has these copied, no need to do it + for f in lib: + shutil.copy(f, os.path.join(target, 'lib', 'openblas.lib')) + shutil.copy(f, os.path.join(target, 'lib', 'openblas64_.lib')) # Copy the dll from bin to lib so system_info can pick it up dll = glob.glob(os.path.join(target, 'bin', '*.dll')) for f in dll: @@ -322,6 +329,9 @@ def test_version(expected_version=None): data = threadpoolctl.threadpool_info() if len(data) != 1: + if platform.python_implementation() == 'PyPy': + print(f"Not using OpenBLAS for PyPy in Azure CI, so skip this") + return raise ValueError(f"expected single threadpool_info result, got {data}") if not expected_version: expected_version = OPENBLAS_V @@ -342,11 +352,13 @@ def test_version(expected_version=None): parser.add_argument('--check_version', nargs='?', default='', help='Check provided OpenBLAS version string ' 'against available OpenBLAS') + parser.add_argument('--nightly', action='store_true', + help='If set, use nightly OpenBLAS build.') args = parser.parse_args() if args.check_version != '': test_version(args.check_version) elif args.test is None: - print(setup_openblas()) + print(setup_openblas(nightly=args.nightly)) else: if len(args.test) == 0 or 'all' in args.test: test_setup(SUPPORTED_PLATFORMS) diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh index 056e9747278f..2dd8bf59faa5 100755 --- a/tools/travis-before-install.sh +++ b/tools/travis-before-install.sh @@ -36,26 +36,11 @@ gcc --version popd -pip install --upgrade pip 'setuptools<49.2.0' wheel - -# 'setuptools', 'wheel' and 'cython' are build dependencies. This information -# is stored in pyproject.toml, but there is not yet a standard way to install -# those dependencies with, say, a pip command, so we'll just hard-code their -# installation here. We only need to install them separately for the cases -# where numpy is installed with setup.py, which is the case for the Travis jobs -# where the environment variables USE_DEBUG or USE_WHEEL are set. When pip is -# used to install numpy, pip gets the build dependencies from pyproject.toml. -# A specific version of cython is required, so we read the cython package -# requirement using `grep cython test_requirements.txt` instead of simply -# writing 'pip install setuptools wheel cython'. 
-pip install `grep cython test_requirements.txt` - if [ -n "$DOWNLOAD_OPENBLAS" ]; then - pwd target=$(python tools/openblas_support.py) sudo cp -r $target/lib/* /usr/lib sudo cp $target/include/* /usr/include fi -if [ -n "$USE_ASV" ]; then pip install asv; fi + diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 67e507eb3d52..73d9acc7c5b6 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -16,6 +16,11 @@ fi source builds/venv/bin/activate +pip install --upgrade pip 'setuptools<49.2.0' + +pip install -r build_requirements.txt + +if [ -n "$USE_ASV" ]; then pip install asv; fi # travis venv tests override python PYTHON=${PYTHON:-python} PIP=${PIP:-pip} @@ -35,7 +40,8 @@ setup_base() # use default python flags but remove sign-compare sysflags="$($PYTHON -c "import sysconfig; \ print (sysconfig.get_config_var('CFLAGS'))")" - export CFLAGS="$sysflags $werrors -Wlogical-op -Wno-sign-compare" + # For cython3.0 add -Wno-error=undef, see cython/cython#5557 + export CFLAGS="$sysflags $werrors -Wlogical-op -Wno-sign-compare -Wno-error=undef" build_args=() # Strictly disable all kinds of optimizations @@ -49,18 +55,11 @@ setup_base() else # SIMD extensions that need to be tested on both runtime and compile-time via (test_simd.py) # any specified features will be ignored if they're not supported by compiler or platform - # note: it almost the same default value of --simd-test execpt adding policy `$werror` to treat all + # note: it almost the same default value of --simd-test except adding policy `$werror` to treat all # warnings as errors build_args+=("--simd-test=\$werror BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F AVX512_SKX VSX VSX2 VSX3 NEON ASIMD VX VXE VXE2") fi if [ -z "$USE_DEBUG" ]; then - # activates '-Werror=undef' when DEBUG isn't enabled since _cffi_backend' - # extension breaks the build due to the following error: - # - # error: "HAVE_FFI_PREP_CIF_VAR" is not defined, evaluates to 0 [-Werror=undef] - # #if !HAVE_FFI_PREP_CIF_VAR && defined(__arm64__) && defined(__APPLE__) - # - export CFLAGS="$CFLAGS -Werror=undef" $PYTHON setup.py build "${build_args[@]}" install 2>&1 | tee log else # The job run with USE_DEBUG=1 on travis needs this. @@ -80,6 +79,9 @@ setup_base() run_test() { + # see note in pyproject.toml for why ninja is installed as a test requirement + PYTHONOPTIMIZE="" $PIP install ninja + # Install the test dependencies. # Clear PYTHONOPTIMIZE when running `pip install -r test_requirements.txt` # because version 2.19 of pycparser (a dependency of one of the packages @@ -148,7 +150,7 @@ EOF fi if [ -n "$RUN_FULL_TESTS" ]; then - # Travis has a limit on log length that is causeing test failutes. + # Travis has a limit on log length that is causing test failutes. # The fix here is to remove the "-v" from the runtest arguments. export PYTHONWARNINGS="ignore::DeprecationWarning:virtualenv" $PYTHON -b ../runtests.py -n --mode=full $DURATIONS_FLAG $COVERAGE_FLAG @@ -196,7 +198,7 @@ export PIP if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then # ensure some warnings are not issued - export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result" + export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result -Wno-error=undef" # adjust gcc flags if C coverage requested if [ -n "$RUN_COVERAGE" ]; then export NPY_DISTUTILS_APPEND_FLAGS=1 @@ -220,7 +222,7 @@ elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then # temporary workaround for sdist failures. 
$PYTHON -c "import fcntl; fcntl.fcntl(1, fcntl.F_SETFL, 0)" # ensure some warnings are not issued - export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result" + export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result -Wno-error=undef" $PYTHON setup.py sdist # Make another virtualenv to install into $PYTHON -m venv venv-for-wheel diff --git a/tools/wheels/repair_windows.sh b/tools/wheels/repair_windows.sh new file mode 100644 index 000000000000..a7aa209d21d9 --- /dev/null +++ b/tools/wheels/repair_windows.sh @@ -0,0 +1,32 @@ +set -xe + +WHEEL="$1" +DEST_DIR="$2" + +# create a temporary directory in the destination folder and unpack the wheel +# into there +pushd $DEST_DIR +mkdir -p tmp +pushd tmp +wheel unpack $WHEEL +pushd numpy* + +# To avoid DLL hell, the file name of libopenblas that's being vendored with +# the wheel has to be name-mangled. delvewheel is unable to name-mangle PYD +# containing extra data at the end of the binary, which frequently occurs when +# building with mingw. +# We therefore find each PYD in the directory structure and strip them. + +for f in $(find ./scipy* -name '*.pyd'); do strip $f; done + + +# now repack the wheel and overwrite the original +wheel pack . +mv -fv *.whl $WHEEL + +cd $DEST_DIR +rm -rf tmp + +# the libopenblas.dll is placed into this directory in the cibw_before_build +# script. +delvewheel repair --add-path /c/opt/openblas/openblas_dll -w $DEST_DIR $WHEEL From 8ec15334df91b36708cf3caf021c2ec83628df98 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 3 Aug 2023 11:27:58 +0200 Subject: [PATCH 004/120] BLD: update `tools/wheels/` for the move to Meson --- tools/wheels/check_license.py | 8 ++++++-- tools/wheels/cibw_before_build.sh | 33 +++++++++++++++++++++++-------- tools/wheels/cibw_test_command.sh | 7 +------ 3 files changed, 32 insertions(+), 16 deletions(-) diff --git a/tools/wheels/check_license.py b/tools/wheels/check_license.py index 8ced317d674c..7d0ef7921a4e 100644 --- a/tools/wheels/check_license.py +++ b/tools/wheels/check_license.py @@ -7,10 +7,10 @@ distribution. """ -import os import sys import re import argparse +import pathlib def check_text(text): @@ -33,8 +33,12 @@ def main(): __import__(args.module) mod = sys.modules[args.module] + # LICENSE.txt is installed in the .dist-info directory, so find it there + sitepkgs = pathlib.Path(mod.__file__).parent.parent + distinfo_path = [s for s in sitepkgs.glob("numpy-*.dist-info")][0] + # Check license text - license_txt = os.path.join(os.path.dirname(mod.__file__), "LICENSE.txt") + license_txt = distinfo_path / "LICENSE.txt" with open(license_txt, encoding="utf-8") as f: text = f.read() diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 493cceeae4b1..372b25af09b8 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -29,15 +29,32 @@ if [[ $RUNNER_OS == "Linux" || $RUNNER_OS == "macOS" ]] ; then cp $basedir/include/* /usr/local/include fi elif [[ $RUNNER_OS == "Windows" ]]; then + # delvewheel is the equivalent of delocate/auditwheel for windows. + python -m pip install delvewheel + + # make the DLL available for tools/wheels/repair_windows.sh. If you change + # this location you need to alter that script. 
+ mkdir -p /c/opt/openblas/openblas_dll + PYTHONPATH=tools python -c "import openblas_support; openblas_support.make_init('numpy')" - target=$(python tools/openblas_support.py) - ls /tmp - mkdir -p openblas - # bash on windows does not like cp -r $target/* openblas - for f in $(ls $target); do - cp -r $target/$f openblas - done - ls openblas + mkdir -p /c/opt/32/lib/pkgconfig + mkdir -p /c/opt/64/lib/pkgconfig + target=$(python -c "import tools.openblas_support as obs; plat=obs.get_plat(); ilp64=obs.get_ilp64(); target=f'openblas_{plat}.zip'; obs.download_openblas(target, plat, ilp64);print(target)") + if [[ $PLATFORM == 'win-32' ]]; then + # 32-bit openBLAS + # Download 32 bit openBLAS and put it into c/opt/32/lib + unzip -o -d /c/opt/ $target + cp /c/opt/32/bin/*.dll /c/opt/openblas/openblas_dll + else + # 64-bit openBLAS + unzip -o -d /c/opt/ $target + if [[ -f /c/opt/64/lib/pkgconfig/openblas64.pc ]]; then + # As of v0.3.23, the 64-bit interface has a openblas64.pc file, + # but this is wrong. It should be openblas.pc + cp /c/opt/64/lib/pkgconfig/openblas{64,}.pc + fi + cp /c/opt/64/bin/*.dll /c/opt/openblas/openblas_dll + fi fi if [[ $RUNNER_OS == "macOS" ]]; then diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 36c275f32d91..47a1a56e844f 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -26,10 +26,5 @@ fi # Set available memory value to avoid OOM problems on aarch64. # See gh-22418. export NPY_AVAILABLE_MEM="4 GB" -if [[ $(python -c "import sys; print(sys.implementation.name)") == "pypy" ]]; then - # make PyPy more verbose, try to catch a segfault - python -c "import sys; import numpy; sys.exit(not numpy.test(label='full', verbose=2))" -else - python -c "import sys; import numpy; sys.exit(not numpy.test(label='full'))" -fi +python -c "import sys; import numpy; sys.exit(not numpy.test(label='full'))" python $PROJECT_DIR/tools/wheels/check_license.py From dafd68ad396f3c78098dba5303bea2bae294f84d Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 30 Jul 2023 13:10:28 +0200 Subject: [PATCH 005/120] CI: ensure tests in cibuildwheel jobs run in parallel [skip azp] [skip circle] [skip travis] --- tools/wheels/cibw_test_command.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 47a1a56e844f..c35fb56832e5 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -26,5 +26,7 @@ fi # Set available memory value to avoid OOM problems on aarch64. # See gh-22418. export NPY_AVAILABLE_MEM="4 GB" -python -c "import sys; import numpy; sys.exit(not numpy.test(label='full'))" +# Run full tests with -n=auto. This makes pytest-xdist distribute tests across +# the available N CPU cores: 2 by default for Linux instances and 4 for macOS arm64 +python -c "import sys; import numpy; sys.exit(not numpy.test(label='full', extra_argv=['-n=auto']))" python $PROJECT_DIR/tools/wheels/check_license.py From 3b93cc89a23999e0c1b3c6e18e688b06aed621bb Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 30 Jul 2023 13:32:39 +0200 Subject: [PATCH 006/120] TST: disable mypy tests in test suite unless an environment variable is set These tests are super slow, and they're effectively always passing in CI. Running them on all "full" test suite runs is too expensive. Note that SciPy has an XSLOW mark, NumPy does not. So use an env var for now. 
[skip circle] [skip travis] [skip azp] --- .github/workflows/linux_meson.yml | 1 + numpy/typing/tests/test_typing.py | 15 +++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/.github/workflows/linux_meson.yml b/.github/workflows/linux_meson.yml index 8ef0e5752119..b489c9e3f12f 100644 --- a/.github/workflows/linux_meson.yml +++ b/.github/workflows/linux_meson.yml @@ -68,5 +68,6 @@ jobs: TERM: xterm-256color LD_LIBRARY_PATH: "/usr/local/lib/" # to find libopenblas.so.0 run: | + export NPY_RUN_MYPY_IN_TESTSUITE=1 pip install pytest pytest-xdist hypothesis typing_extensions spin test -j auto diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index bcaaf5250c9d..491431a86351 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -18,6 +18,21 @@ _C_INTP, ) + +# Only trigger a full `mypy` run if this environment variable is set +# Note that these tests tend to take over a minute even on a macOS M1 CPU, +# and more than that in CI. +RUN_MYPY = "NPY_RUN_MYPY_IN_TESTSUITE" in os.environ +if RUN_MYPY and RUN_MYPY not in ('0', '', 'false'): + RUN_MYPY = True + +# Skips all functions in this file +pytestmark = pytest.mark.skipif( + not RUN_MYPY, + reason="`NPY_RUN_MYPY_IN_TESTSUITE` not set" +) + + try: from mypy import api except ImportError: From eabc1a76f70a2daad57758d5ef45c190f5b619e3 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 3 Aug 2023 12:47:13 +0200 Subject: [PATCH 007/120] BLD: silence a build warning from a `run_command` call --- numpy/core/meson.build | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/core/meson.build b/numpy/core/meson.build index ab773c56d735..17760efa2fc0 100644 --- a/numpy/core/meson.build +++ b/numpy/core/meson.build @@ -50,7 +50,10 @@ C_API_VERSION = '0x00000011' # Check whether we have a mismatch between the set C API VERSION and the # actual C API VERSION. Will raise a MismatchCAPIError if so. -r = run_command('code_generators/verify_c_api_version.py', '--api-version', C_API_VERSION) +r = run_command( + 'code_generators/verify_c_api_version.py', '--api-version', C_API_VERSION, + check: true +) if r.returncode() != 0 error('verify_c_api_version.py failed with output:\n' + r.stderr().strip()) From 1648a6e7918ef2032aa8cb76c22217046624e793 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 3 Aug 2023 12:49:44 +0200 Subject: [PATCH 008/120] CI: also build PyPy 3.9 wheels --- .github/workflows/wheels.yml | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 5a33200089ab..506a6d59f3eb 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -78,7 +78,7 @@ jobs: - [ubuntu-20.04, musllinux_x86_64] - [macos-12, macosx_x86_64] - [windows-2019, win_amd64] - python: ["cp39", "cp310", "cp311", "cp312"] # "pp39" + python: ["cp39", "cp310", "cp311", "cp312", "pp39"] exclude: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32] diff --git a/pyproject.toml b/pyproject.toml index 47fa0959bd3b..68361be2a881 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -139,7 +139,7 @@ tracker = "https://github.com/numpy/numpy/issues" # Note: the below skip command doesn't do much currently, the platforms to # build wheels for in CI are controlled in `.github/workflows/wheels.yml` and # `tools/ci/cirrus_wheels.yml`. 
-skip = "cp36-* cp37-* cp-38* pp37-* *-manylinux_i686 *_ppc64le *_s390x *-musllinux_aarch64 *-win32" +skip = "cp36-* cp37-* cp-38* pp37-* pp38-* *-manylinux_i686 *_ppc64le *_s390x *-musllinux_aarch64 *-win32" build-verbosity = "3" before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" # meson has a hard dependency on ninja, and we need meson to build From 155d560d15d03ec088e61b92ecd87d4faf2695b4 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 3 Aug 2023 13:09:28 +0200 Subject: [PATCH 009/120] TST: fix issues with Python 3.12 support [wheel build] Taken over from PR 23991 --- numpy/__init__.py | 2 +- numpy/_pytesttester.py | 13 +- numpy/core/tests/test_array_interface.py | 3 + numpy/core/tests/test_dtype.py | 5 + numpy/core/tests/test_regression.py | 4 + numpy/lib/tests/test_format.py | 1 + numpy/tests/test_ctypeslib.py | 14 +- numpy/tests/test_public_api.py | 171 +++++++++++++---------- numpy/typing/tests/test_isfile.py | 4 +- 9 files changed, 128 insertions(+), 89 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index b4b33320b9f9..cf852aeadd14 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -66,7 +66,7 @@ NumPy testing tools distutils Enhancements to distutils with support for - Fortran compilers support and more. + Fortran compilers support and more (for Python <= 3.11). Utilities --------- diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index 01ddaaf98834..1c38291ae331 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -135,12 +135,13 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, # offset verbosity. The "-q" cancels a "-v". pytest_args += ["-q"] - with warnings.catch_warnings(): - warnings.simplefilter("always") - # Filter out distutils cpu warnings (could be localized to - # distutils tests). ASV has problems with top level import, - # so fetch module for suppression here. - from numpy.distutils import cpuinfo + if sys.version_info < (3, 12): + with warnings.catch_warnings(): + warnings.simplefilter("always") + # Filter out distutils cpu warnings (could be localized to + # distutils tests). ASV has problems with top level import, + # so fetch module for suppression here. + from numpy.distutils import cpuinfo with warnings.catch_warnings(record=True): # Ignore the warning from importing the array_api submodule. This diff --git a/numpy/core/tests/test_array_interface.py b/numpy/core/tests/test_array_interface.py index 8b1ab27c5cd3..16c719c5a5b9 100644 --- a/numpy/core/tests/test_array_interface.py +++ b/numpy/core/tests/test_array_interface.py @@ -128,6 +128,9 @@ def get_module(tmp_path): more_init=more_init) +# FIXME: numpy.testing.extbuild uses `numpy.distutils`, so this won't work on +# Python 3.12 and up. +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") @pytest.mark.slow def test_cstruct(get_module): diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 57831f46f431..81692015fc5e 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -756,6 +756,11 @@ def iter_struct_object_dtypes(): yield pytest.param(dt, p, 12, obj, id="") +@pytest.mark.skipif( + sys.version_info >= (3, 12), + reason="Python 3.12 has immortal refcounts, this test will no longer " + "work. 
See gh-23986" +) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") class TestStructuredObjectRefcounting: """These tests cover various uses of complicated structured types which diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index 841144790e31..678c727db479 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -1464,6 +1464,10 @@ def test_structured_arrays_with_objects1(self): x[x.nonzero()] = x.ravel()[:1] assert_(x[0, 1] == x[0, 0]) + @pytest.mark.skipif( + sys.version_info >= (3, 12), + reason="Python 3.12 has immortal refcounts, this test no longer works." + ) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_structured_arrays_with_objects2(self): # Ticket #1299 second test diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 701eebbf0023..3bbbb215bb77 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -527,6 +527,7 @@ def test_load_padded_dtype(tmpdir, dt): assert_array_equal(arr, arr1) +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="see gh-23988") @pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup") def test_python2_python3_interoperability(): fname = 'win64python2.npy' diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index 1ea0837008b7..63906b0f41cb 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -1,11 +1,12 @@ import sys -import pytest +import sysconfig import weakref from pathlib import Path +import pytest + import numpy as np from numpy.ctypeslib import ndpointer, load_library, as_array -from numpy.distutils.misc_util import get_shared_lib_extension from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal try: @@ -52,12 +53,9 @@ def test_basic2(self): # Regression for #801: load_library with a full library name # (including extension) does not work. try: - try: - so = get_shared_lib_extension(is_python_ext=True) - # Should succeed - load_library('_multiarray_umath%s' % so, np.core._multiarray_umath.__file__) - except ImportError: - print("No distutils available, skipping test.") + so_ext = sysconfig.get_config_var('EXT_SUFFIX') + load_library('_multiarray_umath%s' % so_ext, + np.core._multiarray_umath.__file__) except ImportError as e: msg = ("ctypes is not available on this python: skipping the test" " (import error was: %s)" % str(e)) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index eaa89aa6f749..555f1638413d 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -127,12 +127,6 @@ def test_NPY_NO_EXPORT(): "array_api", "array_api.linalg", "ctypeslib", - "distutils", - "distutils.cpuinfo", - "distutils.exec_command", - "distutils.misc_util", - "distutils.log", - "distutils.system_info", "doc", "doc.constants", "doc.ufuncs", @@ -165,6 +159,18 @@ def test_NPY_NO_EXPORT(): "typing.mypy_plugin", "version", ]] +if sys.version_info < (3, 12): + PUBLIC_MODULES += [ + 'numpy.' 
+ s for s in [ + "distutils", + "distutils.cpuinfo", + "distutils.exec_command", + "distutils.misc_util", + "distutils.log", + "distutils.system_info", + ] + ] + PUBLIC_ALIASED_MODULES = [ @@ -193,62 +199,6 @@ def test_NPY_NO_EXPORT(): "core.records", "core.shape_base", "core.umath", - "core.umath_tests", - "distutils.armccompiler", - "distutils.fujitsuccompiler", - "distutils.ccompiler", - 'distutils.ccompiler_opt', - "distutils.command", - "distutils.command.autodist", - "distutils.command.bdist_rpm", - "distutils.command.build", - "distutils.command.build_clib", - "distutils.command.build_ext", - "distutils.command.build_py", - "distutils.command.build_scripts", - "distutils.command.build_src", - "distutils.command.config", - "distutils.command.config_compiler", - "distutils.command.develop", - "distutils.command.egg_info", - "distutils.command.install", - "distutils.command.install_clib", - "distutils.command.install_data", - "distutils.command.install_headers", - "distutils.command.sdist", - "distutils.conv_template", - "distutils.core", - "distutils.extension", - "distutils.fcompiler", - "distutils.fcompiler.absoft", - "distutils.fcompiler.arm", - "distutils.fcompiler.compaq", - "distutils.fcompiler.environment", - "distutils.fcompiler.g95", - "distutils.fcompiler.gnu", - "distutils.fcompiler.hpux", - "distutils.fcompiler.ibm", - "distutils.fcompiler.intel", - "distutils.fcompiler.lahey", - "distutils.fcompiler.mips", - "distutils.fcompiler.nag", - "distutils.fcompiler.none", - "distutils.fcompiler.pathf95", - "distutils.fcompiler.pg", - "distutils.fcompiler.nv", - "distutils.fcompiler.sun", - "distutils.fcompiler.vast", - "distutils.fcompiler.fujitsu", - "distutils.from_template", - "distutils.intelccompiler", - "distutils.lib2def", - "distutils.line_endings", - "distutils.mingw32ccompiler", - "distutils.msvccompiler", - "distutils.npy_pkg_config", - "distutils.numpy_distribution", - "distutils.pathccompiler", - "distutils.unixccompiler", "f2py.auxfuncs", "f2py.capi_maps", "f2py.cb_rules", @@ -290,6 +240,66 @@ def test_NPY_NO_EXPORT(): "random.bit_generator", "testing.print_coercion_tables", ]] +if sys.version_info < (3, 12): + PRIVATE_BUT_PRESENT_MODULES += [ + 'numpy.' 
+ s for s in [ + "distutils.armccompiler", + "distutils.fujitsuccompiler", + "distutils.ccompiler", + 'distutils.ccompiler_opt', + "distutils.command", + "distutils.command.autodist", + "distutils.command.bdist_rpm", + "distutils.command.build", + "distutils.command.build_clib", + "distutils.command.build_ext", + "distutils.command.build_py", + "distutils.command.build_scripts", + "distutils.command.build_src", + "distutils.command.config", + "distutils.command.config_compiler", + "distutils.command.develop", + "distutils.command.egg_info", + "distutils.command.install", + "distutils.command.install_clib", + "distutils.command.install_data", + "distutils.command.install_headers", + "distutils.command.sdist", + "distutils.conv_template", + "distutils.core", + "distutils.extension", + "distutils.fcompiler", + "distutils.fcompiler.absoft", + "distutils.fcompiler.arm", + "distutils.fcompiler.compaq", + "distutils.fcompiler.environment", + "distutils.fcompiler.g95", + "distutils.fcompiler.gnu", + "distutils.fcompiler.hpux", + "distutils.fcompiler.ibm", + "distutils.fcompiler.intel", + "distutils.fcompiler.lahey", + "distutils.fcompiler.mips", + "distutils.fcompiler.nag", + "distutils.fcompiler.none", + "distutils.fcompiler.pathf95", + "distutils.fcompiler.pg", + "distutils.fcompiler.nv", + "distutils.fcompiler.sun", + "distutils.fcompiler.vast", + "distutils.fcompiler.fujitsu", + "distutils.from_template", + "distutils.intelccompiler", + "distutils.lib2def", + "distutils.line_endings", + "distutils.mingw32ccompiler", + "distutils.msvccompiler", + "distutils.npy_pkg_config", + "distutils.numpy_distribution", + "distutils.pathccompiler", + "distutils.unixccompiler", + ] + ] def is_unexpected(name): @@ -323,10 +333,14 @@ def is_unexpected(name): "numpy.core.code_generators.verify_c_api_version", "numpy.core.cversions", "numpy.core.generate_numpy_api", - "numpy.distutils.msvc9compiler", + "numpy.core.umath_tests", ] +if sys.version_info < (3, 12): + SKIP_LIST += ["numpy.distutils.msvc9compiler"] +# suppressing warnings from deprecated modules +@pytest.mark.filterwarnings("ignore:.*np.compat.*:DeprecationWarning") def test_all_modules_are_expected(): """ Test that we don't add anything that looks like a new public module by @@ -351,9 +365,6 @@ def test_all_modules_are_expected(): # below SKIP_LIST_2 = [ 'numpy.math', - 'numpy.distutils.log.sys', - 'numpy.distutils.log.logging', - 'numpy.distutils.log.warnings', 'numpy.doc.constants.re', 'numpy.doc.constants.textwrap', 'numpy.lib.emath', @@ -369,6 +380,12 @@ def test_all_modules_are_expected(): 'numpy.matlib.ctypeslib', 'numpy.matlib.ma', ] +if sys.version_info < (3, 12): + SKIP_LIST_2 += [ + 'numpy.distutils.log.sys', + 'numpy.distutils.log.logging', + 'numpy.distutils.log.warnings', + ] def test_all_modules_are_expected_2(): @@ -472,11 +489,7 @@ def check_importable(module_name): @pytest.mark.xfail( - hasattr(np.__config__, "_built_with_meson"), - reason = "Meson does not yet support entry points via pyproject.toml", -) -@pytest.mark.xfail( - sysconfig.get_config_var("Py_DEBUG") is not None, + sysconfig.get_config_var("Py_DEBUG") not in (None, 0, "0"), reason=( "NumPy possibly built with `USE_DEBUG=True ./tools/travis-test.sh`, " "which does not expose the `array_api` entry point. " @@ -488,6 +501,11 @@ def test_array_api_entry_point(): Entry point for Array API implementation can be found with importlib and returns the numpy.array_api namespace. 
""" + # For a development install that did not go through meson-python, + # the entrypoint will not have been installed. So ensure this test fails + # only if numpy is inside site-packages. + numpy_in_sitepackages = sysconfig.get_path('platlib') in np.__file__ + eps = importlib.metadata.entry_points() try: xp_eps = eps.select(group="array_api") @@ -497,12 +515,19 @@ def test_array_api_entry_point(): # Array API entry points so that running this test in <=3.9 will # still work - see https://github.com/numpy/numpy/pull/19800. xp_eps = eps.get("array_api", []) - assert len(xp_eps) > 0, "No entry points for 'array_api' found" + if len(xp_eps) == 0: + if numpy_in_sitepackages: + msg = "No entry points for 'array_api' found" + raise AssertionError(msg) from None + return try: ep = next(ep for ep in xp_eps if ep.name == "numpy") except StopIteration: - raise AssertionError("'numpy' not in array_api entry points") from None + if numpy_in_sitepackages: + msg = "'numpy' not in array_api entry points" + raise AssertionError(msg) from None + return xp = ep.load() msg = ( diff --git a/numpy/typing/tests/test_isfile.py b/numpy/typing/tests/test_isfile.py index a898b3e285b9..2ca2c9b21f94 100644 --- a/numpy/typing/tests/test_isfile.py +++ b/numpy/typing/tests/test_isfile.py @@ -1,4 +1,5 @@ import os +import sys from pathlib import Path import numpy as np @@ -10,7 +11,6 @@ ROOT / "__init__.pyi", ROOT / "ctypeslib.pyi", ROOT / "core" / "__init__.pyi", - ROOT / "distutils" / "__init__.pyi", ROOT / "f2py" / "__init__.pyi", ROOT / "fft" / "__init__.pyi", ROOT / "lib" / "__init__.pyi", @@ -21,6 +21,8 @@ ROOT / "random" / "__init__.pyi", ROOT / "testing" / "__init__.pyi", ] +if sys.version_info < (3, 12): + FILES += [ROOT / "distutils" / "__init__.pyi"] class TestIsFile: From 1974561c6abd5982700b0afb48e963744ccd6887 Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 1 Aug 2023 12:01:06 +0300 Subject: [PATCH 010/120] BLD: update openblas to newer version --- tools/openblas_support.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/openblas_support.py b/tools/openblas_support.py index d99a45b0bd0f..5fe0d662cdcd 100644 --- a/tools/openblas_support.py +++ b/tools/openblas_support.py @@ -14,7 +14,7 @@ from urllib.error import HTTPError OPENBLAS_V = '0.3.23.dev' -OPENBLAS_LONG = 'v0.3.23-246-g3d31191b' +OPENBLAS_LONG = 'v0.3.23-293-gc2f4bdbb' BASE_LOC = ( 'https://anaconda.org/scientific-python-nightly-wheels/openblas-libs' ) From 7de6fd0b72e68c223ca9c937c82631f296d12d5f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 27 Jul 2023 15:04:08 +0200 Subject: [PATCH 011/120] TYP: Trim down the `_NestedSequence.__getitem__` signature Remove the `slice`-based overload such that it successfully supertypes `deque.__getitem__` --- numpy/_typing/_nested_sequence.py | 8 +------- numpy/typing/tests/data/reveal/array_constructors.pyi | 2 ++ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/numpy/_typing/_nested_sequence.py b/numpy/_typing/_nested_sequence.py index 4b6cafc51009..3d0d25ae5b48 100644 --- a/numpy/_typing/_nested_sequence.py +++ b/numpy/_typing/_nested_sequence.py @@ -5,7 +5,6 @@ from collections.abc import Iterator from typing import ( Any, - overload, TypeVar, Protocol, runtime_checkable, @@ -62,12 +61,7 @@ def __len__(self, /) -> int: """Implement ``len(self)``.""" raise NotImplementedError - @overload - def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: ... - @overload - def __getitem__(self, index: slice, /) -> _NestedSequence[_T_co]: ... 
- - def __getitem__(self, index, /): + def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: """Implement ``self[x]``.""" raise NotImplementedError diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 2ff20e9aeeca..759d521c8d2a 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,5 +1,6 @@ from typing import Any, TypeVar from pathlib import Path +from collections import deque import numpy as np import numpy.typing as npt @@ -26,6 +27,7 @@ reveal_type(np.array(A)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.array(B)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.array(B, subok=True)) # E: SubClass[{float64}] reveal_type(np.array([1, 1.0])) # E: ndarray[Any, dtype[Any]] +reveal_type(np.array(deque([1, 2, 3]))) # E: ndarray[Any, dtype[Any]] reveal_type(np.array(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] reveal_type(np.array(A, dtype='c16')) # E: ndarray[Any, dtype[Any]] reveal_type(np.array(A, like=A)) # E: ndarray[Any, dtype[{float64}]] From ec9c0252db71dce8e738d0e2209c5b72156724e8 Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Sat, 15 Jul 2023 10:45:20 -0500 Subject: [PATCH 012/120] BUG: fix choose refcount leak * Fixes #22683 * use `copyswap` to avoid the reference count leaking reported above when `np.choose` is used with `out` * my impression from the ticket is that Sebastian doesn't think `copyswap` is a perfect solution, but may suffice short-term? --- numpy/core/src/multiarray/item_selection.c | 8 +++++--- numpy/core/tests/test_multiarray.py | 10 ++++++++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index f42ae7c2d0d8..aec68418fa4f 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -962,7 +962,8 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, { PyArrayObject *obj = NULL; PyArray_Descr *dtype; - int n, elsize; + PyArray_CopySwapFunc *copyswap; + int n, elsize, swap; npy_intp i; char *ret_data; PyArrayObject **mps, *ap; @@ -1042,6 +1043,8 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, } elsize = PyArray_DESCR(obj)->elsize; ret_data = PyArray_DATA(obj); + copyswap = dtype->f->copyswap; + swap = !PyArray_ISNBO(dtype->byteorder); while (PyArray_MultiIter_NOTDONE(multi)) { mi = *((npy_intp *)PyArray_MultiIter_DATA(multi, n)); @@ -1074,12 +1077,11 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, break; } } - memmove(ret_data, PyArray_MultiIter_DATA(multi, mi), elsize); + copyswap(ret_data, PyArray_MultiIter_DATA(multi, mi), swap, NULL); ret_data += elsize; PyArray_MultiIter_NEXT(multi); } - PyArray_INCREF(obj); Py_DECREF(multi); for (i = 0; i < n; i++) { Py_XDECREF(mps[i]); diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 514d271f0f6b..f4c472dd7282 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -10031,3 +10031,13 @@ def test_argsort_int(N, dtype): arr = rnd.randint(low=minv, high=maxv, size=N, dtype=dtype) arr[N-1] = maxv assert_arg_sorted(arr, np.argsort(arr, kind='quick')) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_gh_22683(): + a = np.ones(10000, dtype=object) + refc_start = sys.getrefcount(1) + np.choose(np.zeros(10000, dtype=int), 
[a], out=a) + np.choose(np.zeros(10000, dtype=int), [a], out=a) + refc_end = sys.getrefcount(1) + assert refc_end - refc_start < 10 From 6e41cd6ccbfdd6a8518ab98738feedaf799bf9cd Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Tue, 18 Jul 2023 14:47:00 -0600 Subject: [PATCH 013/120] BUG: PR 24188 revisions * remove copyswap, carefully controlling the reference counting to pass the testsuite --- numpy/core/src/multiarray/item_selection.c | 30 ++++++++++++++++++---- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index aec68418fa4f..79a647421f1d 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -962,8 +962,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, { PyArrayObject *obj = NULL; PyArray_Descr *dtype; - PyArray_CopySwapFunc *copyswap; - int n, elsize, swap; + int n, elsize; npy_intp i; char *ret_data; PyArrayObject **mps, *ap; @@ -1043,8 +1042,6 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, } elsize = PyArray_DESCR(obj)->elsize; ret_data = PyArray_DATA(obj); - copyswap = dtype->f->copyswap; - swap = !PyArray_ISNBO(dtype->byteorder); while (PyArray_MultiIter_NOTDONE(multi)) { mi = *((npy_intp *)PyArray_MultiIter_DATA(multi, n)); @@ -1077,11 +1074,34 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, break; } } - copyswap(ret_data, PyArray_MultiIter_DATA(multi, mi), swap, NULL); + if (out != NULL) { + char *args[2] = {PyArray_MultiIter_DATA(multi, mi), ret_data}; + npy_intp transfer_strides[2] = {elsize, elsize}; + npy_intp one = 1; + NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; + NPY_cast_info cast_info = {.func = NULL}; + PyArrayIterObject *ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)out); + int is_aligned = IsUintAligned(ind_it->ao); + PyArray_GetDTypeTransferFunction( + is_aligned, + PyArray_DESCR(mps[0])->elsize, + PyArray_DESCR(obj)->elsize, + PyArray_DESCR(mps[0]), + PyArray_DESCR(obj), 0, &cast_info, + &transfer_flags); + cast_info.func(&cast_info.context, args, &one, + transfer_strides, cast_info.auxdata); + } + else { + memmove(ret_data, PyArray_MultiIter_DATA(multi, mi), elsize); + } ret_data += elsize; PyArray_MultiIter_NEXT(multi); } + if (out == NULL) { + PyArray_INCREF(obj); + } Py_DECREF(multi); for (i = 0; i < n; i++) { Py_XDECREF(mps[i]); From fbf8dbca92e4e0afbfec39d45f7f077281ad0220 Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Tue, 25 Jul 2023 16:44:20 -0600 Subject: [PATCH 014/120] MAINT: PR 24188 revisions * hoist the special `out` handling code out of the inner loop (to the degree the testsuite allowed me to) * add a missing `NPY_cast_info_xfree` * adjust the regression test such that it fails before/passes after on both Python 3.11 and 3.12 beta 4, to deal with PEP 683 --- numpy/core/src/multiarray/item_selection.c | 31 +++++++++++++--------- numpy/core/tests/test_multiarray.py | 7 ++--- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 79a647421f1d..7b2f38a1a19c 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -1042,6 +1042,21 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, } elsize = PyArray_DESCR(obj)->elsize; ret_data = PyArray_DATA(obj); + npy_intp transfer_strides[2] = {elsize, elsize}; + npy_intp one = 1; + NPY_ARRAYMETHOD_FLAGS 
transfer_flags = 0; + NPY_cast_info cast_info = {.func = NULL}; + if (out != NULL) { + PyArrayIterObject *ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)out); + int is_aligned = IsUintAligned(ind_it->ao); + PyArray_GetDTypeTransferFunction( + is_aligned, + PyArray_DESCR(mps[0])->elsize, + PyArray_DESCR(obj)->elsize, + PyArray_DESCR(mps[0]), + PyArray_DESCR(obj), 0, &cast_info, + &transfer_flags); + } while (PyArray_MultiIter_NOTDONE(multi)) { mi = *((npy_intp *)PyArray_MultiIter_DATA(multi, n)); @@ -1076,19 +1091,6 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, } if (out != NULL) { char *args[2] = {PyArray_MultiIter_DATA(multi, mi), ret_data}; - npy_intp transfer_strides[2] = {elsize, elsize}; - npy_intp one = 1; - NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; - NPY_cast_info cast_info = {.func = NULL}; - PyArrayIterObject *ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)out); - int is_aligned = IsUintAligned(ind_it->ao); - PyArray_GetDTypeTransferFunction( - is_aligned, - PyArray_DESCR(mps[0])->elsize, - PyArray_DESCR(obj)->elsize, - PyArray_DESCR(mps[0]), - PyArray_DESCR(obj), 0, &cast_info, - &transfer_flags); cast_info.func(&cast_info.context, args, &one, transfer_strides, cast_info.auxdata); } @@ -1102,6 +1104,9 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, if (out == NULL) { PyArray_INCREF(obj); } + else { + NPY_cast_info_xfree(&cast_info); + } Py_DECREF(multi); for (i = 0; i < n; i++) { Py_XDECREF(mps[i]); diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index f4c472dd7282..869ebe4d8ac7 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -10035,9 +10035,10 @@ def test_argsort_int(N, dtype): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_gh_22683(): - a = np.ones(10000, dtype=object) - refc_start = sys.getrefcount(1) + b = 777.68760986 + a = np.array([b] * 10000, dtype=object) + refc_start = sys.getrefcount(b) np.choose(np.zeros(10000, dtype=int), [a], out=a) np.choose(np.zeros(10000, dtype=int), [a], out=a) - refc_end = sys.getrefcount(1) + refc_end = sys.getrefcount(b) assert refc_end - refc_start < 10 From e4b880e494ee0a0125ed01ed61b5b2e24802fa48 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 31 Jul 2023 11:58:43 +0200 Subject: [PATCH 015/120] MAINT: Use explicit copy path in choose based on refcheck --- numpy/core/src/multiarray/item_selection.c | 26 ++++++++++------------ 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 7b2f38a1a19c..29175c4844fa 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -1046,15 +1046,15 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, npy_intp one = 1; NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; NPY_cast_info cast_info = {.func = NULL}; - if (out != NULL) { + if (PyDataType_REFCHK(dtype)) { PyArrayIterObject *ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)out); int is_aligned = IsUintAligned(ind_it->ao); PyArray_GetDTypeTransferFunction( is_aligned, - PyArray_DESCR(mps[0])->elsize, - PyArray_DESCR(obj)->elsize, - PyArray_DESCR(mps[0]), - PyArray_DESCR(obj), 0, &cast_info, + dtype->elsize, + dtype->elsize, + dtype, + dtype, 0, &cast_info, &transfer_flags); } @@ -1089,10 +1089,12 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, break; } } - if (out != 
NULL) { + if (cast_info.func != NULL) { char *args[2] = {PyArray_MultiIter_DATA(multi, mi), ret_data}; - cast_info.func(&cast_info.context, args, &one, - transfer_strides, cast_info.auxdata); + if (cast_info.func(&cast_info.context, args, &one, + transfer_strides, cast_info.auxdata) < 0) { + goto fail; + } } else { memmove(ret_data, PyArray_MultiIter_DATA(multi, mi), elsize); @@ -1101,12 +1103,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, PyArray_MultiIter_NEXT(multi); } - if (out == NULL) { - PyArray_INCREF(obj); - } - else { - NPY_cast_info_xfree(&cast_info); - } + NPY_cast_info_xfree(&cast_info); Py_DECREF(multi); for (i = 0; i < n; i++) { Py_XDECREF(mps[i]); @@ -1122,6 +1119,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, return (PyObject *)obj; fail: + NPY_cast_info_xfree(&cast_info); Py_XDECREF(multi); for (i = 0; i < n; i++) { Py_XDECREF(mps[i]); From 1c22bf76e6926d2ce84fba4a3241f6de5d64b76e Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 31 Jul 2023 12:16:43 +0200 Subject: [PATCH 016/120] BUG: Remove unnecessary (and now also segfaulting) iterator creation Also hoist the `dtype` definition up and use it. --- numpy/core/src/multiarray/item_selection.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 29175c4844fa..e3cf1e471109 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -993,9 +993,10 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, if (multi == NULL) { goto fail; } + dtype = PyArray_DESCR(mps[0]); + /* Set-up return array */ if (out == NULL) { - dtype = PyArray_DESCR(mps[0]); Py_INCREF(dtype); obj = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(ap), dtype, @@ -1032,7 +1033,6 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, */ flags |= NPY_ARRAY_ENSURECOPY; } - dtype = PyArray_DESCR(mps[0]); Py_INCREF(dtype); obj = (PyArrayObject *)PyArray_FromArray(out, dtype, flags); } @@ -1040,15 +1040,14 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, if (obj == NULL) { goto fail; } - elsize = PyArray_DESCR(obj)->elsize; + elsize = dtype->elsize; ret_data = PyArray_DATA(obj); npy_intp transfer_strides[2] = {elsize, elsize}; npy_intp one = 1; NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; NPY_cast_info cast_info = {.func = NULL}; if (PyDataType_REFCHK(dtype)) { - PyArrayIterObject *ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)out); - int is_aligned = IsUintAligned(ind_it->ao); + int is_aligned = IsUintAligned(obj); PyArray_GetDTypeTransferFunction( is_aligned, dtype->elsize, From 87a93ef9a19bc3e8fd7d9c9150799895d18c99a3 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 31 Jul 2023 12:17:53 +0200 Subject: [PATCH 017/120] ENH: Use `memcpy` and swap order Not sure this makes a difference, but we check for memory overlap so `memmov` isn't necessary and if the compiler keeps the order intact, we want the `memcpy` path to be the hot one. 
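For reference, the user-visible leak that this series of commits addresses can be reproduced from Python with a short snippet (a minimal sketch mirroring the regression test added earlier in the series; the exact refcount delta is interpreter-dependent and the sentinel value is only illustrative):

    import sys
    import numpy as np

    sentinel = 777.68760986                      # any uniquely-referenced object
    a = np.array([sentinel] * 10000, dtype=object)
    before = sys.getrefcount(sentinel)
    np.choose(np.zeros(10000, dtype=int), [a], out=a)
    np.choose(np.zeros(10000, dtype=int), [a], out=a)
    after = sys.getrefcount(sentinel)
    # Before these changes the count grew by roughly the array size per call;
    # with the cast_info-based copy it stays (nearly) constant.
    print(after - before)
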
--- numpy/core/src/multiarray/item_selection.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index e3cf1e471109..e935a27edb6c 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -1088,16 +1088,17 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, break; } } - if (cast_info.func != NULL) { + if (cast_info.func == NULL) { + /* We ensure memory doesn't overlap, so can use memcpy */ + memcpy(ret_data, PyArray_MultiIter_DATA(multi, mi), elsize); + } + else { char *args[2] = {PyArray_MultiIter_DATA(multi, mi), ret_data}; if (cast_info.func(&cast_info.context, args, &one, transfer_strides, cast_info.auxdata) < 0) { goto fail; } } - else { - memmove(ret_data, PyArray_MultiIter_DATA(multi, mi), elsize); - } ret_data += elsize; PyArray_MultiIter_NEXT(multi); } From b2489f249c392edc5055ee289d38b8ab22fe18d8 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 23 Jul 2023 15:11:57 +0200 Subject: [PATCH 018/120] TST: fix running the test suite in builds without BLAS/LAPACK This was broken with `undefined symbol: dlapy3_` because the test suite imports `linalg.lapack_lite` directly. See gh-24200 for more details. --- numpy/linalg/tests/test_linalg.py | 7 +++++++ numpy/linalg/umath_linalg.cpp | 6 ++++++ numpy/testing/_private/utils.py | 4 ++-- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 17ee400422ab..5dabdfdf010a 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -21,6 +21,13 @@ assert_almost_equal, assert_allclose, suppress_warnings, assert_raises_regex, HAS_LAPACK64, IS_WASM ) +try: + import numpy.linalg.lapack_lite +except ImportError: + # May be broken when numpy was built without BLAS/LAPACK present + # If so, ensure we don't break the whole test suite - the `lapack_lite` + # submodule should be removed, it's only used in two tests in this file. 
+ pass def consistent_subclass(out, in_): diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index 68db2b2f1761..b0857ab8ba1d 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -4597,5 +4597,11 @@ PyMODINIT_FUNC PyInit__umath_linalg(void) return NULL; } +#ifdef HAVE_BLAS_ILP64 + PyDict_SetItemString(d, "_ilp64", Py_True); +#else + PyDict_SetItemString(d, "_ilp64", Py_False); +#endif + return m; } diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 3c3d3412a2e7..28dd656c4a4d 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -22,7 +22,7 @@ from numpy.core import ( intp, float32, empty, arange, array_repr, ndarray, isnat, array) from numpy import isfinite, isnan, isinf -import numpy.linalg.lapack_lite +import numpy.linalg._umath_linalg from io import StringIO @@ -54,7 +54,7 @@ class KnownFailureException(Exception): IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON -HAS_LAPACK64 = numpy.linalg.lapack_lite._ilp64 +HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 _OLD_PROMOTION = lambda: np._get_promotion_state() == 'legacy' From 341dd2e371173558e4185ab62d4c442185e24583 Mon Sep 17 00:00:00 2001 From: warren Date: Wed, 19 Jul 2023 14:39:33 -0400 Subject: [PATCH 019/120] BUG: random: Fix generation of nan by dirichlet. Don't call the C function random_beta() with both parameters `a` and `b` set to 0. In the case where this would occur, we know that the remaining values in the random vector being generated must be 0, so can break out of the loop early. After this change, when alpha is all zero, the random variates will also be all zero. Closes gh-24210. --- numpy/random/_generator.pyx | 29 ++++++++++++-------- numpy/random/tests/test_generator_mt19937.py | 24 ++++++++++++++-- numpy/random/tests/test_randomstate.py | 5 +--- 3 files changed, 41 insertions(+), 17 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index ee68810dd798..9bd09a2bca33 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -4408,6 +4408,7 @@ cdef class Generator: np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS) if np.any(np.less(alpha_arr, 0)): raise ValueError('alpha < 0') + alpha_data = np.PyArray_DATA(alpha_arr) if size is None: @@ -4467,17 +4468,23 @@ cdef class Generator: csum += alpha_data[j] alpha_csum_data[j] = csum - with self.lock, nogil: - while i < totsize: - acc = 1. - for j in range(k - 1): - v = random_beta(&self._bitgen, alpha_data[j], - alpha_csum_data[j + 1]) - val_data[i + j] = acc * v - acc *= (1. - v) - val_data[i + k - 1] = acc - i = i + k - + # If csum == 0, then all the values in alpha are 0, and there is + # nothing to do, because diric was created with np.zeros(). + if csum > 0: + with self.lock, nogil: + while i < totsize: + acc = 1. + for j in range(k - 1): + v = random_beta(&self._bitgen, alpha_data[j], + alpha_csum_data[j + 1]) + val_data[i + j] = acc * v + acc *= (1. - v) + if alpha_csum_data[j + 1] == 0: + # v must be 1, so acc is now 0. All + # remaining elements will be left at 0. 
+ break + val_data[i + k - 1] = acc + i = i + k else: # Standard case: Unit normalisation of a vector of gamma random # variates diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 1c57b3fa5d8d..e744f5ba611b 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -35,6 +35,7 @@ }, ] + @pytest.fixture(scope='module', params=[True, False]) def endpoint(request): return request.param @@ -145,6 +146,7 @@ def test_multinomial_pvals_float32(self): with pytest.raises(ValueError, match=match): random.multinomial(1, pvals) + class TestMultivariateHypergeometric: def setup_method(self): @@ -1238,6 +1240,25 @@ def test_dirichlet_moderately_small_alpha(self): sample_mean = sample.mean(axis=0) assert_allclose(sample_mean, exact_mean, rtol=1e-3) + # This set of parameters includes inputs with alpha.max() >= 0.1 and + # alpha.max() < 0.1 to exercise both generation methods within the + # dirichlet code. + @pytest.mark.parametrize( + 'alpha', + [[5, 9, 0, 8], + [0.5, 0, 0, 0], + [1, 5, 0, 0, 1.5, 0, 0, 0], + [0.01, 0.03, 0, 0.005], + [1e-5, 0, 0, 0], + [0.002, 0.015, 0, 0, 0.04, 0, 0, 0], + [0.0], + [0, 0, 0]], + ) + def test_dirichlet_multiple_zeros_in_alpha(self, alpha): + alpha = np.array(alpha) + y = random.dirichlet(alpha) + assert_equal(y[alpha == 0], 0.0) + def test_exponential(self): random = Generator(MT19937(self.seed)) actual = random.exponential(1.1234, size=(3, 2)) @@ -1467,7 +1488,7 @@ def test_multivariate_normal(self, method): mu, np.empty((3, 2))) assert_raises(ValueError, random.multivariate_normal, mu, np.eye(3)) - + @pytest.mark.parametrize('mean, cov', [([0], [[1+1j]]), ([0j], [[1]])]) def test_multivariate_normal_disallow_complex(self, mean, cov): random = Generator(MT19937(self.seed)) @@ -1847,7 +1868,6 @@ class TestBroadcast: def setup_method(self): self.seed = 123456789 - def test_uniform(self): random = Generator(MT19937(self.seed)) low = [0] diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index 3099853d2a8e..524ac7b7c5e0 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -812,10 +812,6 @@ def test_dirichlet_bad_alpha(self): alpha = np.array([5.4e-01, -1.0e-16]) assert_raises(ValueError, random.dirichlet, alpha) - def test_dirichlet_zero_alpha(self): - y = random.default_rng().dirichlet([5, 9, 0, 8]) - assert_equal(y[2], 0) - def test_dirichlet_alpha_non_contiguous(self): a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) alpha = a[::2] @@ -2061,6 +2057,7 @@ def test_randomstate_ctor_old_style_pickle(): assert_equal(state_a['has_gauss'], state_b['has_gauss']) assert_equal(state_a['gauss'], state_b['gauss']) + def test_hot_swap(restore_singleton_bitgen): # GH 21808 def_bg = np.random.default_rng(0) From 38f00b45104b1d2a86ff14117bba1fc8969e8c2f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 4 Aug 2023 12:02:25 -0600 Subject: [PATCH 020/120] MAINT: Dependabot updates from main --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/dependency-review.yml | 2 +- .github/workflows/linux_meson.yml | 1 - .github/workflows/scorecards.yml | 4 ++-- .github/workflows/windows_clangcl.yml | 2 +- 5 files changed, 7 insertions(+), 8 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 20f95a069558..3b34b3a87c99 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # 
Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@29b1f65c5e92e24fe6b6647da1eaabe529cec70f # v2.3.3 + uses: github/codeql-action/init@cdcdbb579706841c47f7063dda365e292e5cad7a # v2.13.4 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@29b1f65c5e92e24fe6b6647da1eaabe529cec70f # v2.3.3 + uses: github/codeql-action/autobuild@cdcdbb579706841c47f7063dda365e292e5cad7a # v2.13.4 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@29b1f65c5e92e24fe6b6647da1eaabe529cec70f # v2.3.3 + uses: github/codeql-action/analyze@cdcdbb579706841c47f7063dda365e292e5cad7a # v2.13.4 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 03f1343f6570..1f59fba0b1ff 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -17,4 +17,4 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: 'Dependency Review' - uses: actions/dependency-review-action@f46c48ed6d4f1227fb2d9ea62bf6bcbed315589e # v3.0.4 + uses: actions/dependency-review-action@1360a344ccb0ab6e9475edef90ad2f46bf8003b1 # v3.0.6 diff --git a/.github/workflows/linux_meson.yml b/.github/workflows/linux_meson.yml index b489c9e3f12f..8ef0e5752119 100644 --- a/.github/workflows/linux_meson.yml +++ b/.github/workflows/linux_meson.yml @@ -68,6 +68,5 @@ jobs: TERM: xterm-256color LD_LIBRARY_PATH: "/usr/local/lib/" # to find libopenblas.so.0 run: | - export NPY_RUN_MYPY_IN_TESTSUITE=1 pip install pytest pytest-xdist hypothesis typing_extensions spin test -j auto diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index f7cc4f2de12d..f4cf135a426e 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -30,7 +30,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@80e868c13c90f172d68d1f4501dee99e2479f7af # v2.1.3 + uses: ossf/scorecard-action@08b4669551908b1024bb425080c797723083c031 # v2.2.0 with: results_file: results.sarif results_format: sarif @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@29b1f65c5e92e24fe6b6647da1eaabe529cec70f # v2.1.27 + uses: github/codeql-action/upload-sarif@cdcdbb579706841c47f7063dda365e292e5cad7a # v2.1.27 with: sarif_file: results.sarif diff --git a/.github/workflows/windows_clangcl.yml b/.github/workflows/windows_clangcl.yml index e3ee856fd001..2b270d99eac0 100644 --- a/.github/workflows/windows_clangcl.yml +++ b/.github/workflows/windows_clangcl.yml @@ -28,7 +28,7 @@ jobs: submodules: recursive fetch-depth: 0 - name: Setup Python - uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # v4.6.1 + uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: python-version: ${{ env.PYTHON_VERSION }} From 6668f3a35efd52833f2a3c93986223eec2a60a3c Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 4 Aug 2023 19:47:54 -0600 Subject: [PATCH 021/120] MAINT: Add back NPY_RUN_MYPY_IN_TESTSUITE=1 --- .github/workflows/linux_meson.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/linux_meson.yml b/.github/workflows/linux_meson.yml index 8ef0e5752119..b489c9e3f12f 100644 --- a/.github/workflows/linux_meson.yml +++ b/.github/workflows/linux_meson.yml @@ -68,5 +68,6 @@ jobs: TERM: xterm-256color LD_LIBRARY_PATH: "/usr/local/lib/" # to find libopenblas.so.0 run: | + export NPY_RUN_MYPY_IN_TESTSUITE=1 pip install pytest pytest-xdist hypothesis typing_extensions spin test -j auto From 10177a71141a1e88d8e04b9e8fe04d6731dc7455 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 6 Aug 2023 18:29:35 -0600 Subject: [PATCH 022/120] MAINT: Update ``extbuild.py`` from main. We do not want to use distutils here. In order to avoid DeprecationWarnings in recent Python, we need to use a recent setuptools, and that breaks distutils. 
--- numpy/testing/_private/extbuild.py | 81 ++++++++++++++---------------- 1 file changed, 39 insertions(+), 42 deletions(-) diff --git a/numpy/testing/_private/extbuild.py b/numpy/testing/_private/extbuild.py index 9b4e95366d8d..541f551151f5 100644 --- a/numpy/testing/_private/extbuild.py +++ b/numpy/testing/_private/extbuild.py @@ -6,8 +6,10 @@ import os import pathlib +import subprocess import sys import sysconfig +import textwrap __all__ = ['build_and_import_extension', 'compile_extension_module'] @@ -51,8 +53,6 @@ def build_and_import_extension( >>> assert not mod.test_bytes(u'abc') >>> assert mod.test_bytes(b'abc') """ - from distutils.errors import CompileError - body = prologue + _make_methods(functions, modname) init = """PyObject *mod = PyModule_Create(&moduledef); """ @@ -67,7 +67,7 @@ def build_and_import_extension( try: mod_so = compile_extension_module( modname, build_dir, include_dirs, source_string) - except CompileError as e: + except Exception as e: # shorten the exception chain raise RuntimeError(f"could not compile in {build_dir}:") from e import importlib.util @@ -186,9 +186,9 @@ def _c_compile(cfile, outputfilename, include_dirs=[], libraries=[], elif sys.platform.startswith('linux'): compile_extra = [ "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"] - link_extra = None + link_extra = [] else: - compile_extra = link_extra = None + compile_extra = link_extra = [] pass if sys.platform == 'win32': link_extra = link_extra + ['/DEBUG'] # generate .pdb file @@ -202,49 +202,46 @@ def _c_compile(cfile, outputfilename, include_dirs=[], libraries=[], library_dirs.append(s + 'lib') outputfilename = outputfilename.with_suffix(get_so_suffix()) - saved_environ = os.environ.copy() - try: - build( - cfile, outputfilename, - compile_extra, link_extra, - include_dirs, libraries, library_dirs) - finally: - # workaround for a distutils bugs where some env vars can - # become longer and longer every time it is used - for key, value in saved_environ.items(): - if os.environ.get(key) != value: - os.environ[key] = value + build( + cfile, outputfilename, + compile_extra, link_extra, + include_dirs, libraries, library_dirs) return outputfilename def build(cfile, outputfilename, compile_extra, link_extra, include_dirs, libraries, library_dirs): - "cd into the directory where the cfile is, use distutils to build" - from numpy.distutils.ccompiler import new_compiler - - compiler = new_compiler(force=1, verbose=2) - compiler.customize('') - objects = [] - - old = os.getcwd() - os.chdir(cfile.parent) - try: - res = compiler.compile( - [str(cfile.name)], - include_dirs=include_dirs, - extra_preargs=compile_extra + "use meson to build" + + build_dir = cfile.parent / "build" + os.makedirs(build_dir, exist_ok=True) + so_name = outputfilename.parts[-1] + with open(cfile.parent / "meson.build", "wt") as fid: + includes = ['-I' + d for d in include_dirs] + link_dirs = ['-L' + d for d in library_dirs] + fid.write(textwrap.dedent(f"""\ + project('foo', 'c') + shared_module('{so_name}', '{cfile.parts[-1]}', + c_args: {includes} + {compile_extra}, + link_args: {link_dirs} + {link_extra}, + link_with: {libraries}, + name_prefix: '', + name_suffix: 'dummy', ) - objects += [str(cfile.parent / r) for r in res] - finally: - os.chdir(old) - - compiler.link_shared_object( - objects, str(outputfilename), - libraries=libraries, - extra_preargs=link_extra, - library_dirs=library_dirs) - - + """)) + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", 
".."], + cwd=build_dir, + ) + else: + subprocess.check_call(["meson", "setup", "--vsenv", ".."], + cwd=build_dir + ) + subprocess.check_call(["meson", "compile"], cwd=build_dir) + os.rename(str(build_dir / so_name) + ".dummy", cfile.parent / so_name) + def get_so_suffix(): ret = sysconfig.get_config_var('EXT_SUFFIX') assert ret From 89b62c58cc79e4fc1a3e018915ffc692c1d926b6 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 7 Aug 2023 11:58:16 +0200 Subject: [PATCH 023/120] TST: fix distutils tests for deprecations in recent setuptools versions Closes gh-24350 [skip circle] [skip travis] --- numpy/distutils/tests/test_system_info.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py index 66304a5e50fe..951ab8c1efca 100644 --- a/numpy/distutils/tests/test_system_info.py +++ b/numpy/distutils/tests/test_system_info.py @@ -3,6 +3,7 @@ import pytest from tempfile import mkstemp, mkdtemp from subprocess import Popen, PIPE +import importlib.metadata from distutils.errors import DistutilsError from numpy.testing import assert_, assert_equal, assert_raises @@ -13,6 +14,16 @@ from numpy.distutils import _shell_utils +try: + if importlib.metadata.version('setuptools') >= '60': + # pkg-resources gives deprecation warnings, and there may be more issues. + # we only support setuptools <60 + pytest.skip("setuptools is too new", allow_module_level=True) +except importlib.metadata.PackageNotFoundError: + # we don't require `setuptools`; if it is not found, continue + pass + + def get_class(name, notfound_action=1): """ notfound_action: From 8c84d42eb1e59569d057bdc723e285c6a387bd45 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 7 Aug 2023 08:32:33 -0600 Subject: [PATCH 024/120] MAINT: Fix long line --- numpy/distutils/tests/test_system_info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py index 951ab8c1efca..9bcc09050503 100644 --- a/numpy/distutils/tests/test_system_info.py +++ b/numpy/distutils/tests/test_system_info.py @@ -16,8 +16,8 @@ try: if importlib.metadata.version('setuptools') >= '60': - # pkg-resources gives deprecation warnings, and there may be more issues. - # we only support setuptools <60 + # pkg-resources gives deprecation warnings, and there may be more + # issues. We only support setuptools <60 pytest.skip("setuptools is too new", allow_module_level=True) except importlib.metadata.PackageNotFoundError: # we don't require `setuptools`; if it is not found, continue From 59d009a188be498a029920fa3aa55030a5bd1a56 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Aug 2023 15:04:26 +0000 Subject: [PATCH 025/120] MAINT: Bump pypa/cibuildwheel from 2.14.1 to 2.15.0 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.14.1 to 2.15.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/f21bb8376a051ffb6cb5604b28ccaef7b90e8ab7...39a63b5912f086dd459cf6fcb13dcdd3fe3bc24d) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 506a6d59f3eb..53c280afbff2 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -111,7 +111,7 @@ jobs: python-version: "3.x" - name: Build wheels - uses: pypa/cibuildwheel@f21bb8376a051ffb6cb5604b28ccaef7b90e8ab7 # v2.14.1 + uses: pypa/cibuildwheel@39a63b5912f086dd459cf6fcb13dcdd3fe3bc24d # v2.15.0 env: CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From ef6dc9b1fb4d32b40f846329f1bbf291683fb569 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 9 Aug 2023 09:26:24 -0600 Subject: [PATCH 026/120] MAINT: Update cibuildwheel for cirrus builds Python 3.12.0rc1 is supported in the new cibuildwheel. --- tools/ci/cirrus_wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index 068fc0486d85..8cb566bc5a33 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -1,6 +1,6 @@ build_and_store_wheels: &BUILD_AND_STORE_WHEELS install_cibuildwheel_script: - - python -m pip install cibuildwheel==2.14.0 + - python -m pip install cibuildwheel==2.15.0 cibuildwheel_script: - cibuildwheel wheels_artifacts: From af2190aa4de20e652bdaa348053b7fbac9f4d8d5 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 10 Aug 2023 05:19:48 -0600 Subject: [PATCH 027/120] MAINT: Fix codespaces setup.sh script (#24381) A change in how codespaces is configured upstream causes the installation script for micromamba to wait for input unless stdin is explicitly made empty, which causes codespaces creation to fail. [skip ci] Co-authored-by: melissawm --- .devcontainer/setup.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh index 4ea718ec927f..1f3005d30d2f 100755 --- a/.devcontainer/setup.sh +++ b/.devcontainer/setup.sh @@ -2,7 +2,7 @@ set -e -curl micro.mamba.pm/install.sh | bash +"${SHELL}" <(curl -Ls micro.mamba.pm/install.sh) < /dev/null conda init --all micromamba shell init -s bash @@ -11,3 +11,7 @@ micromamba env create -f environment.yml --yes # user (same applies to `conda activate`) git submodule update --init + +# Enables users to activate environment without having to specify the full path +echo "envs_dirs: + - /home/codespace/micromamba/envs" > /opt/conda/.condarc From d9c96d800978bb47664e98f80785ecd2b0a3dba0 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 9 Aug 2023 14:26:49 +0200 Subject: [PATCH 028/120] VENDOR: vendor our friendly fork of Meson We need this in order to add the "feature" module for SIMD support, which is at https://github.com/mesonbuild/meson/pull/11307. 
--- .gitmodules | 3 +++ LICENSES_bundled.txt | 9 +++++++++ vendored-meson/meson | 1 + 3 files changed, 13 insertions(+) create mode 160000 vendored-meson/meson diff --git a/.gitmodules b/.gitmodules index d849a3caf5c2..ce025743e7c1 100644 --- a/.gitmodules +++ b/.gitmodules @@ -7,3 +7,6 @@ [submodule "numpy/core/src/npysort/x86-simd-sort"] path = numpy/core/src/npysort/x86-simd-sort url = https://github.com/intel/x86-simd-sort +[submodule "vendored-meson/meson"] + path = vendored-meson/meson + url = https://github.com/numpy/meson.git diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt index 26c7a7829361..f2d9753b89de 100644 --- a/LICENSES_bundled.txt +++ b/LICENSES_bundled.txt @@ -20,3 +20,12 @@ Name: libdivide Files: numpy/core/include/numpy/libdivide/* License: Zlib For license text, see numpy/core/include/numpy/libdivide/LICENSE.txt + + +Note that the following files are vendored in the repository and sdist but not +installed in built numpy packages: + +Name: Meson +Files: vendored-meson/meson/* +License: Apache 2.0 + For license text, see vendored-meson/meson/COPYING diff --git a/vendored-meson/meson b/vendored-meson/meson new file mode 160000 index 000000000000..1f8351f16f9c --- /dev/null +++ b/vendored-meson/meson @@ -0,0 +1 @@ +Subproject commit 1f8351f16f9ce55965449b8e299c6d0fbca7f5df From 46798e43a48a0cb66f33a018df748703210a3aa4 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 9 Aug 2023 14:36:33 +0200 Subject: [PATCH 029/120] BLD: build with our numpy fork of meson --- .spin/cmds.py | 48 +++++++++++++++++++ pyproject.toml | 5 +- .../build-backend-wrapper/npbuild/__init__.py | 27 +++++++++++ vendored-meson/entrypoint/meson | 23 +++++++++ 4 files changed, 101 insertions(+), 2 deletions(-) create mode 100644 vendored-meson/build-backend-wrapper/npbuild/__init__.py create mode 100755 vendored-meson/entrypoint/meson diff --git a/.spin/cmds.py b/.spin/cmds.py index 05e619615e58..72e58c0f0233 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -11,6 +11,54 @@ from spin import util +# The numpy-vendored version of Meson. Put the directory that the executable +# `meson` is in at the front of the PATH. +curdir = pathlib.Path(__file__).parent.resolve() +meson_executable_dir = str(curdir.parent / 'vendored-meson' / 'entrypoint') +os.environ['PATH'] = meson_executable_dir + os.pathsep + os.environ['PATH'] + +# Check that the meson git submodule is present +meson_import_dir = curdir.parent / 'vendored-meson' / 'meson' / 'mesonbuild' +if not meson_import_dir.exists(): + raise RuntimeError( + 'The `vendored-meson/meson` git submodule does not exist! ' + + 'Run `git submodule update --init` to fix this problem.' + ) + + +@click.command() +@click.option( + "-j", "--jobs", + help="Number of parallel tasks to launch", + type=int +) +@click.option( + "--clean", is_flag=True, + help="Clean build directory before build" +) +@click.option( + "-v", "--verbose", is_flag=True, + help="Print all build output, even installation" +) +@click.argument("meson_args", nargs=-1) +@click.pass_context +def build(ctx, meson_args, jobs=None, clean=False, verbose=False): + """🔧 Build package with Meson/ninja and install + + MESON_ARGS are passed through e.g.: + + spin build -- -Dpkg_config_path=/lib64/pkgconfig + + The package is installed to build-install + + By default builds for release, to be able to use a debugger set CFLAGS + appropriately. 
For example, for linux use + + CFLAGS="-O0 -g" spin build + """ + ctx.forward(meson.build) + + @click.command() @click.argument("sphinx_target", default="html") @click.option( diff --git a/pyproject.toml b/pyproject.toml index 68361be2a881..9704e3d5cd26 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,6 @@ [build-system] -build-backend = "mesonpy" +build-backend = "npbuild" +backend-path = ['./vendored-meson/build-backend-wrapper'] requires = [ "Cython>=0.29.34,<3.1", "meson-python>=0.13.1,<0.16.0", @@ -184,7 +185,7 @@ repair-wheel-command = "bash ./tools/wheels/repair_windows.sh {wheel} {dest_dir} package = 'numpy' [tool.spin.commands] -"Build" = ["spin.cmds.meson.build", ".spin/cmds.py:test"] +"Build" = [".spin/cmds.py:build", ".spin/cmds.py:test"] "Environments" = [ ".spin/cmds.py:run", ".spin/cmds.py:ipython", ".spin/cmds.py:python", ".spin/cmds.py:gdb" diff --git a/vendored-meson/build-backend-wrapper/npbuild/__init__.py b/vendored-meson/build-backend-wrapper/npbuild/__init__.py new file mode 100644 index 000000000000..6c0711b2bd06 --- /dev/null +++ b/vendored-meson/build-backend-wrapper/npbuild/__init__.py @@ -0,0 +1,27 @@ +import os +import sys +import pathlib + +from mesonpy import ( + build_sdist, + build_wheel, + build_editable, + get_requires_for_build_sdist, + get_requires_for_build_wheel, + get_requires_for_build_editable, +) + + +# The numpy-vendored version of Meson. Put the directory that the executable +# `meson` is in at the front of the PATH. +curdir = pathlib.Path(__file__).parent.resolve() +meson_executable_dir = str(curdir.parent.parent / 'entrypoint') +os.environ['PATH'] = meson_executable_dir + os.pathsep + os.environ['PATH'] + +# Check that the meson git submodule is present +meson_import_dir = curdir.parent.parent / 'meson' / 'mesonbuild' +if not meson_import_dir.exists(): + raise RuntimeError( + 'The `vendored-meson/meson` git submodule does not exist! ' + + 'Run `git submodule update --init` to fix this problem.' + ) diff --git a/vendored-meson/entrypoint/meson b/vendored-meson/entrypoint/meson new file mode 100755 index 000000000000..f440b6ec55c0 --- /dev/null +++ b/vendored-meson/entrypoint/meson @@ -0,0 +1,23 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +import re +import sys +import pathlib + + +# The numpy-vendored version of Meson +meson_dir = str(pathlib.Path(__file__).resolve().parent.parent / 'meson') +sys.path.insert(0, meson_dir) + +from mesonbuild.mesonmain import main +import mesonbuild +if not 'vendored-meson' in mesonbuild.__path__[0]: + # Note: only the print statement will show most likely, not the exception. + # If this goes wrong, it first fails inside meson-python on the `meson + # --version` check. 
+ print(f'picking up the wrong `meson`: {mesonbuild.__path__}') + raise RuntimeError('incorrect mesonbuild module, exiting') + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) From 5cf6548842f9a5bddabe34146bcc24bbe93f177e Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 9 Aug 2023 21:39:18 +0200 Subject: [PATCH 030/120] DEV: disable auto-invoking `pip` on building docs with spin --- .spin/cmds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 72e58c0f0233..2e02baecebcb 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -80,7 +80,7 @@ def build(ctx, meson_args, jobs=None, clean=False, verbose=False): ) @click.option( "--install-deps/--no-install-deps", - default=True, + default=False, help="Install dependencies before building" ) @click.pass_context From f5560b143c3c1eb23f75e41897f9f86ea7980eb8 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 9 Aug 2023 22:17:54 +0200 Subject: [PATCH 031/120] CI: fix sdist and musllinux jobs --- .github/workflows/linux_musl.yml | 1 + tools/travis-test.sh | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/linux_musl.yml b/.github/workflows/linux_musl.yml index d7d9258ce255..593549485ba0 100644 --- a/.github/workflows/linux_musl.yml +++ b/.github/workflows/linux_musl.yml @@ -44,6 +44,7 @@ jobs: git checkout $GITHUB_BASE_REF git -c user.email="you@example.com" merge --no-commit my_ref_name fi + git submodule update --init ln -s /usr/local/bin/python3.10 /usr/local/bin/python diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 73d9acc7c5b6..637961ccc32b 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -16,7 +16,7 @@ fi source builds/venv/bin/activate -pip install --upgrade pip 'setuptools<49.2.0' +pip install --upgrade pip 'setuptools<49.2.0' build pip install -r build_requirements.txt @@ -223,7 +223,7 @@ elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then $PYTHON -c "import fcntl; fcntl.fcntl(1, fcntl.F_SETFL, 0)" # ensure some warnings are not issued export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result -Wno-error=undef" - $PYTHON setup.py sdist + $PYTHON -m build --sdist # Make another virtualenv to install into $PYTHON -m venv venv-for-wheel . 
venv-for-wheel/bin/activate From 2e13b8e653033d828da243e20e360fa28e55cd07 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 10 Aug 2023 21:48:57 +0200 Subject: [PATCH 032/120] BLD: use numpy's friendly fork of meson-python --- .gitmodules | 3 +++ LICENSES_bundled.txt | 5 +++++ pyproject.toml | 17 +++++++++++++---- vendored-meson/meson-python | 1 + 4 files changed, 22 insertions(+), 4 deletions(-) create mode 160000 vendored-meson/meson-python diff --git a/.gitmodules b/.gitmodules index ce025743e7c1..27ee5e6625eb 100644 --- a/.gitmodules +++ b/.gitmodules @@ -10,3 +10,6 @@ [submodule "vendored-meson/meson"] path = vendored-meson/meson url = https://github.com/numpy/meson.git +[submodule "vendored-meson/meson-python"] + path = vendored-meson/meson-python + url = https://github.com/numpy/meson-python.git diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt index f2d9753b89de..c1721b3ffb4a 100644 --- a/LICENSES_bundled.txt +++ b/LICENSES_bundled.txt @@ -29,3 +29,8 @@ Name: Meson Files: vendored-meson/meson/* License: Apache 2.0 For license text, see vendored-meson/meson/COPYING + +Name: meson-python +Files: vendored-meson/meson-python/* +License: MIT + For license text, see vendored-meson/meson-python/LICENSE diff --git a/pyproject.toml b/pyproject.toml index 9704e3d5cd26..2ad2085000c2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,18 @@ [build-system] -build-backend = "npbuild" -backend-path = ['./vendored-meson/build-backend-wrapper'] +build-backend = "mesonpy" +backend-path = ['./vendored-meson/meson-python'] requires = [ - "Cython>=0.29.34,<3.1", - "meson-python>=0.13.1,<0.16.0", + "Cython>=3.0", + # All dependencies of the vendored meson-python (except for meson, because + # we've got that vendored too - that's the point of this exercise). + 'pyproject-metadata >= 0.7.1', + 'tomli >= 1.0.0; python_version < "3.11"', + 'setuptools >= 60.0; python_version >= "3.12"', + 'colorama; os_name == "nt"', + # Note that `ninja` and (on Linux) `patchelf` are added dynamically by + # meson-python if those tools are not already present on the system. No + # need to worry about those unless one does a non-isolated build - in that + # case they must already be installed on the system. ] [project] diff --git a/vendored-meson/meson-python b/vendored-meson/meson-python new file mode 160000 index 000000000000..206a31a96458 --- /dev/null +++ b/vendored-meson/meson-python @@ -0,0 +1 @@ +Subproject commit 206a31a96458af6cf5e29272b4ea1f6ea500b91b From ab24e6f0699ea02fcee3742e8128a4a53fe69d98 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 8 Aug 2023 23:15:27 +0200 Subject: [PATCH 033/120] DEV: vendor spin's meson.py, modified to use our vendored Meson Note that this is a bit messy. I tried to vendor spin's `meson.py` separately, but it's not possible to do so cleanly as far as I can tell. The import machinery is unhappy bouncing around between `bin/spin`, `.spin/cmds.py and `.spin/meson.py`. So it was either folding the `spin/cmds/meson.py` content all into cmds.py, or vendor all of the spin package. This seems to work. 
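The key idea, stripped of the spin plumbing, is to always invoke the vendored Meson through the current interpreter rather than whatever `meson` happens to be on PATH; a rough sketch under the repository layout introduced above (paths and the subprocess call are illustrative):

    import pathlib
    import subprocess
    import sys

    repo_root = pathlib.Path(__file__).resolve().parent.parent
    meson_cli = [sys.executable,
                 str(repo_root / 'vendored-meson' / 'meson' / 'meson.py')]

    # e.g. configure a build directory with the vendored Meson
    subprocess.run(meson_cli + ['setup', 'build', '--prefix=/usr'], check=True)
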
--- .spin/LICENSE | 29 +++ .spin/cmds.py | 451 +++++++++++++++++++++++++++++++++++++++++-- LICENSES_bundled.txt | 5 + tools/lint_diff.ini | 2 +- 4 files changed, 472 insertions(+), 15 deletions(-) create mode 100644 .spin/LICENSE diff --git a/.spin/LICENSE b/.spin/LICENSE new file mode 100644 index 000000000000..22ab7d811ffc --- /dev/null +++ b/.spin/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2021--2022, Scientific Python project +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/.spin/cmds.py b/.spin/cmds.py index 2e02baecebcb..8e9fe86368f7 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -5,11 +5,434 @@ import tempfile import pathlib import shutil +import json +import pathlib import click -from spin.cmds import meson from spin import util +_run = util.run + +# START of spin/cmds/meson.py +install_dir = "build-install" + +# The numpy-vendored version of Meson +meson_cli = [sys.executable, + str(pathlib.Path(__file__).parent.parent.resolve() / + 'vendored-meson' / 'meson' / 'meson.py') + ] + + +def _set_pythonpath(quiet=False): + site_packages = _get_site_packages() + env = os.environ + + if "PYTHONPATH" in env: + env["PYTHONPATH"] = f"{site_packages}{os.pathsep}{env['PYTHONPATH']}" + else: + env["PYTHONPATH"] = site_packages + + if not quiet: + click.secho( + f'$ export PYTHONPATH="{site_packages}"', bold=True, fg="bright_blue" + ) + + return env["PYTHONPATH"] + + +def _get_site_packages(): + candidate_paths = [] + for root, dirs, _files in os.walk(install_dir): + for subdir in dirs: + if subdir == "site-packages" or subdir == "dist-packages": + candidate_paths.append(os.path.abspath(os.path.join(root, subdir))) + + X, Y = sys.version_info.major, sys.version_info.minor + + site_packages = None + if any(f"python{X}." 
in p for p in candidate_paths): + # We have a system that uses `python3.X/site-packages` or `python3.X/dist-packages` + site_packages = [p for p in candidate_paths if f"python{X}.{Y}" in p] + if len(site_packages) == 0: + raise FileNotFoundError( + f"No site-packages found in {install_dir} for Python {X}.{Y}" + ) + else: + site_packages = site_packages[0] + else: + # A naming scheme that does not encode the Python major/minor version is used, so return + # whatever site-packages path was found + if len(candidate_paths) > 1: + raise FileNotFoundError( + f"Multiple `site-packages` found in `{install_dir}`, but cannot use Python version to disambiguate" + ) + elif len(candidate_paths) == 1: + site_packages = candidate_paths[0] + + if site_packages is None: + raise FileNotFoundError( + f"No `site-packages` or `dist-packages` found under `{install_dir}`" + ) + + return site_packages + + +def _meson_version(): + try: + p = _run(meson_cli + ["--version"], output=False, echo=False) + return p.stdout.decode("ascii").strip() + except: + pass + + +def _meson_version_configured(): + try: + meson_info_fn = os.path.join("build", "meson-info", "meson-info.json") + meson_info = json.load(open(meson_info_fn)) + return meson_info["meson_version"]["full"] + except: + pass + + +@click.command() +@click.option("-j", "--jobs", help="Number of parallel tasks to launch", type=int) +@click.option("--clean", is_flag=True, help="Clean build directory before build") +@click.option( + "-v", "--verbose", is_flag=True, help="Print all build output, even installation" +) +@click.argument("meson_args", nargs=-1) +def meson_build(meson_args, jobs=None, clean=False, verbose=False): + """🔧 Build package with Meson/ninja and install + + MESON_ARGS are passed through e.g.: + + spin build -- -Dpkg_config_path=/lib64/pkgconfig + + The package is installed to build-install + + By default builds for release, to be able to use a debugger set CFLAGS + appropriately. For example, for linux use + + CFLAGS="-O0 -g" spin build + """ + build_dir = "build" + setup_cmd = meson_cli + ["setup", build_dir, "--prefix=/usr"] + list(meson_args) + + if clean: + print(f"Removing `{build_dir}`") + if os.path.isdir(build_dir): + shutil.rmtree(build_dir) + print(f"Removing `{install_dir}`") + if os.path.isdir(install_dir): + shutil.rmtree(install_dir) + + if not (os.path.exists(build_dir) and _meson_version_configured()): + p = _run(setup_cmd, sys_exit=False) + if p.returncode != 0: + raise RuntimeError( + "Meson configuration failed; please try `spin build` again with the `--clean` flag." + ) + else: + # Build dir has been configured; check if it was configured by + # current version of Meson + + if _meson_version() != _meson_version_configured(): + _run(setup_cmd + ["--reconfigure"]) + + # Any other conditions that warrant a reconfigure? 
+ + p = _run(meson_cli + ["compile", "-C", build_dir], sys_exit=False) + p = _run(meson_cli + + [ + "install", + "--only-changed", + "-C", + build_dir, + "--destdir", + f"../{install_dir}", + ], + output=verbose, + ) + + +def _get_configured_command(command_name): + from spin.cmds.util import get_commands + command_groups = get_commands() + commands = [cmd for section in command_groups for cmd in command_groups[section]] + return next((cmd for cmd in commands if cmd.name == command_name), None) + + +@click.command() +@click.argument("pytest_args", nargs=-1) +@click.pass_context +def meson_test(ctx, pytest_args): + """🔧 Run tests + + PYTEST_ARGS are passed through directly to pytest, e.g.: + + spin test -- -v + + To run tests on a directory or file: + + \b + spin test numpy/linalg + spin test numpy/linalg/tests/test_linalg.py + + To run specific tests, by module, function, class, or method: + + \b + spin test -- --pyargs numpy.random + spin test -- --pyargs numpy.random.tests.test_generator_mt19937 + spin test -- --pyargs numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric + spin test -- --pyargs numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric::test_edge_cases + + To report the durations of the N slowest tests: + + spin test -- --durations=N + + To run tests that match a given pattern: + + \b + spin test -- -k "geometric" + spin test -- -k "geometric and not rgeometric" + + To skip tests with a given marker: + + spin test -- -m "not slow" + + To parallelize test runs (requires `pytest-xdist`): + + spin test -- -n NUM_JOBS + + For more, see `pytest --help`. + + """ + from spin.cmds.util import get_config + cfg = get_config() + + build_cmd = _get_configured_command("build") + if build_cmd: + click.secho( + "Invoking `build` prior to running tests:", bold=True, fg="bright_green" + ) + ctx.invoke(build_cmd) + + package = cfg.get("tool.spin.package", None) + if not pytest_args: + pytest_args = (package,) + if pytest_args == (None,): + print( + "Please specify `package = packagename` under `tool.spin` section of `pyproject.toml`" + ) + sys.exit(1) + + site_path = _set_pythonpath() + + # Sanity check that library built properly + if sys.version_info[:2] >= (3, 11): + p = _run([sys.executable, "-P", "-c", f"import {package}"], sys_exit=False) + if p.returncode != 0: + print(f"As a sanity check, we tried to import {package}.") + print("Stopping. Please investigate the build error.") + sys.exit(1) + + print(f'$ export PYTHONPATH="{site_path}"') + _run( + [sys.executable, "-m", "pytest", f"--rootdir={site_path}"] + list(pytest_args), + cwd=site_path, + replace=True, + ) + + +@click.command() +@click.argument("ipython_args", nargs=-1) +def ipython(ipython_args): + """💻 Launch IPython shell with PYTHONPATH set + + IPYTHON_ARGS are passed through directly to IPython, e.g.: + + spin ipython -- -i myscript.py + """ + p = _set_pythonpath() + print(f'💻 Launching IPython with PYTHONPATH="{p}"') + _run(["ipython", "--ignore-cwd"] + list(ipython_args), replace=True) + + +@click.command() +@click.argument("shell_args", nargs=-1) +def meson_shell(shell_args=[]): + """💻 Launch shell with PYTHONPATH set + + SHELL_ARGS are passed through directly to the shell, e.g.: + + spin shell -- -c 'echo $PYTHONPATH' + + Ensure that your shell init file (e.g., ~/.zshrc) does not override + the PYTHONPATH. 
+ """ + p = _set_pythonpath() + shell = os.environ.get("SHELL", "sh") + cmd = [shell] + list(shell_args) + print(f'💻 Launching shell with PYTHONPATH="{p}"') + print("⚠ Change directory to avoid importing source instead of built package") + print("⚠ Ensure that your ~/.shellrc does not unset PYTHONPATH") + _run(cmd, replace=True) + + +@click.command() +@click.argument("python_args", nargs=-1) +def meson_python(python_args): + """🐍 Launch Python shell with PYTHONPATH set + + PYTHON_ARGS are passed through directly to Python, e.g.: + + spin python -- -c 'import sys; print(sys.path)' + """ + p = _set_pythonpath() + v = sys.version_info + if (v.major < 3) or (v.major == 3 and v.minor < 11): + print("We're sorry, but this feature only works on Python 3.11 and greater 😢") + print() + print( + "Why? Because we need the '-P' flag so the interpreter doesn't muck with PYTHONPATH" + ) + print() + print("However! You can still launch your own interpreter:") + print() + print(f" PYTHONPATH='{p}' python") + print() + print("And then call:") + print() + print("import sys; del(sys.path[0])") + sys.exit(-1) + + print(f'🐍 Launching Python with PYTHONPATH="{p}"') + + _run(["/usr/bin/env", "python", "-P"] + list(python_args), replace=True) + + +@click.command(context_settings={"ignore_unknown_options": True}) +@click.argument("args", nargs=-1) +def meson_run(args): + """🏁 Run a shell command with PYTHONPATH set + + \b + spin run make + spin run 'echo $PYTHONPATH' + spin run python -c 'import sys; del sys.path[0]; import mypkg' + + If you'd like to expand shell variables, like `$PYTHONPATH` in the example + above, you need to provide a single, quoted command to `run`: + + spin run 'echo $SHELL && echo $PWD' + + On Windows, all shell commands are run via Bash. + Install Git for Windows if you don't have Bash already. + """ + if not len(args) > 0: + raise RuntimeError("No command given") + + is_posix = sys.platform in ("linux", "darwin") + shell = len(args) == 1 + if shell: + args = args[0] + + if shell and not is_posix: + # On Windows, we're going to try to use bash + args = ["bash", "-c", args] + + _set_pythonpath(quiet=True) + _run(args, echo=False, shell=shell) + + +@click.command() +@click.argument("sphinx_target", default="html") +@click.option( + "--clean", + is_flag=True, + default=False, + help="Clean previously built docs before building", +) +@click.option( + "--build/--no-build", + "first_build", + default=True, + help="Build numpy before generating docs", +) +@click.option("--jobs", "-j", default="auto", help="Number of parallel build jobs") +@click.pass_context +def meson_docs(ctx, sphinx_target, clean, first_build, jobs): + """📖 Build Sphinx documentation + + By default, SPHINXOPTS="-W", raising errors on warnings. 
+ To build without raising on warnings: + + SPHINXOPTS="" spin docs + + To list all Sphinx targets: + + spin docs targets + + To build another Sphinx target: + + spin docs TARGET + + """ + # Detect docs dir + doc_dir_candidates = ("doc", "docs") + doc_dir = next((d for d in doc_dir_candidates if os.path.exists(d)), None) + if doc_dir is None: + print( + f"No documentation folder found; one of {', '.join(doc_dir_candidates)} must exist" + ) + sys.exit(1) + + if sphinx_target in ("targets", "help"): + clean = False + first_build = False + sphinx_target = "help" + + if clean: + doc_dirs = [ + "./doc/build/", + "./doc/source/api/", + "./doc/source/auto_examples/", + "./doc/source/jupyterlite_contents/", + ] + for doc_dir in doc_dirs: + if os.path.isdir(doc_dir): + print(f"Removing {doc_dir!r}") + shutil.rmtree(doc_dir) + + build_cmd = _get_configured_command("build") + + if build_cmd and first_build: + click.secho( + "Invoking `build` prior to building docs:", bold=True, fg="bright_green" + ) + ctx.invoke(build_cmd) + + try: + site_path = _get_site_packages() + except FileNotFoundError: + print("No built numpy found; run `spin build` first.") + sys.exit(1) + + opts = os.environ.get("SPHINXOPTS", "-W") + os.environ["SPHINXOPTS"] = f"{opts} -j {jobs}" + click.secho( + f"$ export SPHINXOPTS={os.environ['SPHINXOPTS']}", bold=True, fg="bright_blue" + ) + + os.environ["PYTHONPATH"] = f'{site_path}{os.sep}:{os.environ.get("PYTHONPATH", "")}' + click.secho( + f"$ export PYTHONPATH={os.environ['PYTHONPATH']}", bold=True, fg="bright_blue" + ) + _run(["make", "-C", "doc", sphinx_target], replace=True) + + +# END of spin/cmds/meson.py + # The numpy-vendored version of Meson. Put the directory that the executable # `meson` is in at the front of the PATH. @@ -56,7 +479,7 @@ def build(ctx, meson_args, jobs=None, clean=False, verbose=False): CFLAGS="-O0 -g" spin build """ - ctx.forward(meson.build) + ctx.forward(meson_build) @click.command() @@ -105,9 +528,9 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, install_deps): if install_deps: util.run(['pip', 'install', '-q', '-r', 'doc_requirements.txt']) - meson.docs.ignore_unknown_options = True + meson_docs.ignore_unknown_options = True del ctx.params['install_deps'] - ctx.forward(meson.docs) + ctx.forward(meson_docs) @click.command() @@ -193,7 +616,7 @@ def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose): for extra_param in ('markexpr', 'n_jobs', 'tests', 'verbose'): del ctx.params[extra_param] - ctx.forward(meson.test) + ctx.forward(meson_test) @click.command() @@ -220,7 +643,7 @@ def gdb(code, gdb_args): spin gdb my_tests.py spin gdb -- my_tests.py --mytest-flag """ - meson._set_pythonpath() + _set_pythonpath() gdb_args = list(gdb_args) if gdb_args and gdb_args[0].endswith('.py'): @@ -392,9 +815,9 @@ def bench(ctx, tests, compare, verbose, commits): "Invoking `build` prior to running benchmarks:", bold=True, fg="bright_green" ) - ctx.invoke(meson.build) + ctx.invoke(build) - meson._set_pythonpath() + _set_pythonpath() p = util.run( ['python', '-c', 'import numpy as np; print(np.__version__)'], @@ -450,8 +873,8 @@ def python(ctx, python_args): """ env = os.environ env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') - ctx.invoke(meson.build) - ctx.forward(meson.python) + ctx.invoke(build) + ctx.forward(meson_python) @click.command(context_settings={ @@ -469,9 +892,9 @@ def ipython(ctx, ipython_args): env = os.environ env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') - ctx.invoke(meson.build) + ctx.invoke(build) - ppath = 
meson._set_pythonpath() + ppath = _set_pythonpath() print(f'💻 Launching IPython with PYTHONPATH="{ppath}"') preimport = (r"import numpy as np; " @@ -500,5 +923,5 @@ def run(ctx, args): On Windows, all shell commands are run via Bash. Install Git for Windows if you don't have Bash already. """ - ctx.invoke(meson.build) - ctx.forward(meson.run) + ctx.invoke(build) + ctx.forward(meson_run) diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt index c1721b3ffb4a..26faf7ff3021 100644 --- a/LICENSES_bundled.txt +++ b/LICENSES_bundled.txt @@ -34,3 +34,8 @@ Name: meson-python Files: vendored-meson/meson-python/* License: MIT For license text, see vendored-meson/meson-python/LICENSE + +Name: spin +Files: .spin/cmds.py +License: BSD-3 + For license text, see .spin/LICENSE diff --git a/tools/lint_diff.ini b/tools/lint_diff.ini index cfa2c5af9dae..f73536e230c0 100644 --- a/tools/lint_diff.ini +++ b/tools/lint_diff.ini @@ -2,4 +2,4 @@ max_line_length = 79 statistics = True ignore = E121,E122,E123,E125,E126,E127,E128,E226,E241,E251,E265,E266,E302,E402,E704,E712,E721,E731,E741,W291,W293,W391,W503,W504 -exclude = versioneer.py,numpy/_version.py,numpy/__config__.py,numpy/typing/tests/data +exclude = versioneer.py,numpy/_version.py,numpy/__config__.py,numpy/typing/tests/data,.spin/cmds.py From 6e55a04f22b938c6cfac543ced3eb942e6eadc44 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 10 Aug 2023 15:09:34 +0200 Subject: [PATCH 034/120] CI: fix Windows GHA jobs for vendored meson-python and spin --- .github/workflows/windows_clangcl.yml | 55 +++++++++++---------------- .github/workflows/windows_meson.yml | 48 +++++++++-------------- 2 files changed, 40 insertions(+), 63 deletions(-) diff --git a/.github/workflows/windows_clangcl.yml b/.github/workflows/windows_clangcl.yml index 2b270d99eac0..223d4809da89 100644 --- a/.github/workflows/windows_clangcl.yml +++ b/.github/workflows/windows_clangcl.yml @@ -20,7 +20,7 @@ jobs: meson: name: Meson windows build/test runs-on: windows-2019 - # if: "github.repository == 'numpy/numpy'" + if: "github.repository == 'numpy/numpy'" steps: - name: Checkout uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 @@ -32,14 +32,14 @@ jobs: with: python-version: ${{ env.PYTHON_VERSION }} - - name: Install dependencies + - name: Install build dependencies from PyPI run: | - pip install -r build_requirements.txt - - name: openblas-libs + pip install spin Cython + + - name: Install OpenBLAS and Clang-cl run: | - # Download and install pre-built OpenBLAS library - # with 32-bit interfaces - # Unpack it in the pkg-config hardcoded path + # Download and install pre-built OpenBLAS library with 32-bit + # interfaces Unpack it in the pkg-config hardcoded path choco install unzip -y choco install wget -y # Install llvm, which contains clang-cl @@ -48,24 +48,23 @@ jobs: wget https://anaconda.org/multibuild-wheels-staging/openblas-libs/v0.3.21/download/openblas-v0.3.21-win_amd64-gcc_10_3_0.zip unzip -d c:\opt openblas-v0.3.21-win_amd64-gcc_10_3_0.zip echo "PKG_CONFIG_PATH=c:\opt\64\lib\pkgconfig;" >> $env:GITHUB_ENV - - name: meson-configure - run: | - "[binaries]","c = 'clang-cl'","cpp = 'clang-cl'","ar = 'llvm-lib'","c_ld = 'lld-link'","cpp_ld = 'lld-link'" | Out-File $PWD/clang-cl-build.ini -Encoding ascii - meson setup build --prefix=$PWD\build-install --native-file=$PWD/clang-cl-build.ini -Ddebug=false --optimization 2 --vsenv - - name: meson-build - run: | - meson compile -C build -v - - name: meson-install + - name: Write native file for Clang-cl binaries run: 
| - cd build - meson install --no-rebuild - - name: build-path + # TODO: this job is identical to the one in `windows_meson.yml` aside + # from installing Clang-cl and usage of this .ini file. So merge the + # two and use a matrix'ed CI job run. + "[binaries]","c = 'clang-cl'","cpp = 'clang-cl'","ar = 'llvm-lib'","c_ld = 'lld-link'","cpp_ld = 'lld-link'" | Out-File $PWD/clang-cl-build.ini -Encoding ascii + + - name: Install NumPy run: | - echo "installed_path=$PWD\build-install\Lib\site-packages" >> $env:GITHUB_ENV - - name: post-install + spin build -j2 -- --vsenv --native-file=$PWD/clang-cl-build.ini + + - name: Copy OpenBLAS DLL, write _distributor_init.py run: | - $numpy_path = "${env:installed_path}\numpy" + # Getting the OpenBLAS DLL to the right place so it loads + $installed_path = "$PWD\build-install\usr\Lib\site-packages" + $numpy_path = "${installed_path}\numpy" $libs_path = "${numpy_path}\.libs" mkdir ${libs_path} $ob_path = "C:/opt/64/bin/" @@ -73,19 +72,11 @@ jobs: # Write _distributor_init.py to load .libs DLLs. python -c "from tools import openblas_support; openblas_support.make_init(r'${numpy_path}')" - - name: prep-test + - name: Install test dependencies run: | - echo "PYTHONPATH=${env:installed_path}" >> $env:GITHUB_ENV python -m pip install -r test_requirements.txt python -m pip install threadpoolctl - - name: test + - name: Run test suite run: | - mkdir tmp - cd tmp - echo "============================================" - python -c "import numpy; print(numpy.show_runtime())" - echo "============================================" - echo "LASTEXITCODE is '$LASTEXITCODE'" - python -c "import numpy, sys; sys.exit(numpy.test(verbose=3) is False)" - echo "LASTEXITCODE is '$LASTEXITCODE'" + spin test diff --git a/.github/workflows/windows_meson.yml b/.github/workflows/windows_meson.yml index 97dfa41eaa2c..f6f5c932ee10 100644 --- a/.github/workflows/windows_meson.yml +++ b/.github/workflows/windows_meson.yml @@ -27,42 +27,36 @@ jobs: with: submodules: recursive fetch-depth: 0 + - name: Setup Python uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: python-version: ${{ env.PYTHON_VERSION }} - - name: Install dependencies + - name: Install build dependencies from PyPI run: | - pip install -r build_requirements.txt - - name: openblas-libs + python -m pip install spin Cython + + - name: Install OpenBLAS (MacPython build) run: | - # Download and install pre-built OpenBLAS library - # with 32-bit interfaces - # Unpack it in the pkg-config hardcoded path + # Download and install pre-built OpenBLAS library with 32-bit + # interfaces. 
Unpack it in the pkg-config hardcoded path choco install unzip -y choco install wget -y choco install -y --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite wget https://anaconda.org/multibuild-wheels-staging/openblas-libs/v0.3.21/download/openblas-v0.3.21-win_amd64-gcc_10_3_0.zip unzip -d c:\opt openblas-v0.3.21-win_amd64-gcc_10_3_0.zip echo "PKG_CONFIG_PATH=c:\opt\64\lib\pkgconfig;" >> $env:GITHUB_ENV - - name: meson-configure - run: | - meson setup build --prefix=$PWD\build-install -Ddebug=false --optimization 2 --vsenv - - name: meson-build - run: | - meson compile -C build -v - - name: meson-install - run: | - cd build - meson install --no-rebuild - - name: build-path + - name: Install NumPy run: | - echo "installed_path=$PWD\build-install\Lib\site-packages" >> $env:GITHUB_ENV - - name: post-install + spin build -j2 -- --vsenv + + - name: Copy OpenBLAS DLL, write _distributor_init.py run: | - $numpy_path = "${env:installed_path}\numpy" + # Getting the OpenBLAS DLL to the right place so it loads + $installed_path = "$PWD\build-install\usr\Lib\site-packages" + $numpy_path = "${installed_path}\numpy" $libs_path = "${numpy_path}\.libs" mkdir ${libs_path} $ob_path = "C:/opt/64/bin/" @@ -70,22 +64,14 @@ jobs: # Write _distributor_init.py to load .libs DLLs. python -c "from tools import openblas_support; openblas_support.make_init(r'${numpy_path}')" - - name: prep-test + - name: Install test dependencies run: | - echo "PYTHONPATH=${env:installed_path}" >> $env:GITHUB_ENV python -m pip install -r test_requirements.txt python -m pip install threadpoolctl - - name: test + - name: Run test suite run: | - mkdir tmp - cd tmp - echo "============================================" - python -c "import numpy; print(numpy.show_runtime())" - echo "============================================" - echo "LASTEXITCODE is '$LASTEXITCODE'" - python -c "import numpy, sys; sys.exit(numpy.test(verbose=3) is False)" - echo "LASTEXITCODE is '$LASTEXITCODE'" + spin test msvc_32bit_python_openblas: name: MSVC, 32-bit Python, no BLAS From ee6c23319a204c0ff9ae42b6c6212ca69cf06f1e Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Fri, 11 Aug 2023 22:27:02 +0400 Subject: [PATCH 035/120] BLD, SIMD: The meson CPU dispatcher implementation (#23096) Almost gives the same functionality as Distutils/CCompiler Opt, with a few changes to the way we specify the targets. Also, it abandons the idea of wrapping the dispatchable sources, instead it counts on static libraries to enable different paths and flags. 
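As a quick sanity check of what the dispatcher actually enabled in a given
build, the baseline and dispatched feature sets are visible from Python; the
attributes below already exist in NumPy and are populated from the generated
dispatch configuration (the example lists in the comments are illustrative
only and depend on the build options and host CPU):

    import numpy as np

    # Summary of detected SIMD extensions (baseline, found, not found).
    np.show_runtime()

    # Lower-level view: features compiled in unconditionally versus features
    # built as separate targets and selected at runtime.
    from numpy.core._multiarray_umath import __cpu_baseline__, __cpu_dispatch__
    print(__cpu_baseline__)   # e.g. ['SSE', 'SSE2', 'SSE3'] on x86_64
    print(__cpu_dispatch__)   # e.g. ['SSSE3', 'SSE41', ..., 'AVX512_SKX']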
--- .github/meson_actions/action.yml | 29 ++ .github/workflows/build_test.yml | 20 +- MANIFEST.in | 2 + build_requirements.txt | 4 +- doc/source/user/quickstart.rst | 2 +- meson.build | 3 +- meson_cpu/arm/meson.build | 58 +++ meson_cpu/main_config.h.in | 351 ++++++++++++++++ meson_cpu/meson.build | 307 ++++++++++++++ meson_cpu/ppc64/meson.build | 38 ++ meson_cpu/s390x/meson.build | 18 + meson_cpu/x86/meson.build | 227 ++++++++++ meson_options.txt | 24 +- numpy/core/meson.build | 443 +++++++++++++++----- numpy/core/src/_simd/_simd.c | 10 +- numpy/core/src/_simd/_simd.dispatch.c.src | 4 +- numpy/core/src/common/npy_cpu_dispatch.h | 3 +- numpy/core/src/common/simd/sse/arithmetic.h | 4 +- 18 files changed, 1421 insertions(+), 126 deletions(-) create mode 100644 .github/meson_actions/action.yml create mode 100644 meson_cpu/arm/meson.build create mode 100644 meson_cpu/main_config.h.in create mode 100644 meson_cpu/meson.build create mode 100644 meson_cpu/ppc64/meson.build create mode 100644 meson_cpu/s390x/meson.build create mode 100644 meson_cpu/x86/meson.build diff --git a/.github/meson_actions/action.yml b/.github/meson_actions/action.yml new file mode 100644 index 000000000000..aff70da169bc --- /dev/null +++ b/.github/meson_actions/action.yml @@ -0,0 +1,29 @@ +name: MesonBuildTest +description: "checkout repo, build, and test numpy" +runs: + using: composite + steps: + - name: Install dependencies + shell: bash + run: pip install -r build_requirements.txt + - name: Build + shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' + env: + TERM: xterm-256color + run: + spin build -- ${MESON_ARGS[@]} + - name: Check build-internal dependencies + shell: bash + run: + ninja -C build -t missingdeps + - name: Check installed test and stub files + shell: bash + run: + python tools/check_installed_files.py $(find ./build-install -path '*/site-packages/numpy') + - name: Test + shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' + env: + TERM: xterm-256color + run: | + pip install pytest pytest-xdist hypothesis typing_extensions + spin test -j auto diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 928018b13905..b0a24d7730a1 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -49,7 +49,7 @@ jobs: if: "github.repository == 'numpy/numpy'" runs-on: ubuntu-latest env: - WITHOUT_SIMD: 1 + MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" steps: - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: @@ -58,7 +58,7 @@ jobs: - uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: python-version: ${{ env.PYTHON_VERSION }} - - uses: ./.github/actions + - uses: ./.github/meson_actions basic: needs: [smoke_test] @@ -122,7 +122,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' env: - WITHOUT_OPTIMIZATIONS: 1 + MESON_ARGS: "-Dallow-noblas=true -Ddisable-optimization=true" steps: - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: @@ -131,14 +131,14 @@ jobs: - uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: python-version: ${{ env.PYTHON_VERSION }} - - uses: ./.github/actions + - uses: ./.github/meson_actions with_baseline_only: needs: [smoke_test] runs-on: ubuntu-latest if: github.event_name != 'push' env: - CPU_DISPATCH: "none" + MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 
with: @@ -147,14 +147,14 @@ jobs: - uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: python-version: ${{ env.PYTHON_VERSION }} - - uses: ./.github/actions + - uses: ./.github/meson_actions without_avx512: needs: [smoke_test] runs-on: ubuntu-latest if: github.event_name != 'push' env: - CPU_DISPATCH: "max -xop -fma4 -avx512f -avx512cd -avx512_knl -avx512_knm -avx512_skx -avx512_clx -avx512_cnl -avx512_icl" + MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C,AVX2,FMA3" steps: - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: @@ -163,14 +163,14 @@ jobs: - uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: python-version: ${{ env.PYTHON_VERSION }} - - uses: ./.github/actions + - uses: ./.github/meson_actions without_avx512_avx2_fma3: needs: [smoke_test] runs-on: ubuntu-latest if: github.event_name != 'push' env: - CPU_DISPATCH: "SSSE3 SSE41 POPCNT SSE42 AVX F16C" + MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C" steps: - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: @@ -179,7 +179,7 @@ jobs: - uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 with: python-version: ${{ env.PYTHON_VERSION }} - - uses: ./.github/actions + - uses: ./.github/meson_actions debug: needs: [smoke_test] diff --git a/MANIFEST.in b/MANIFEST.in index ab6ecd518e1b..4803b39131e1 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -21,6 +21,8 @@ recursive-include numpy/random *.pyx *.pxd *.pyx.in *.pxd.in include numpy/py.typed include numpy/random/include/* include numpy/*.pxd +# Meson CPU Dispatcher +recursive-include meson_cpu *.build *.in # Add build support that should go in sdist, but not go in bdist/be installed # Note that sub-directories that don't have __init__ are apparently not # included by 'recursive-include', so list those separately diff --git a/build_requirements.txt b/build_requirements.txt index 3627f1b91685..e7e776a7de89 100644 --- a/build_requirements.txt +++ b/build_requirements.txt @@ -1,5 +1,5 @@ -meson-python>=0.10.0 -Cython +meson-python>=0.13.1 +Cython>=3.0 wheel==0.38.1 ninja spin==0.4 diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 783d5a447df9..bc6c3b3818d2 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -517,7 +517,7 @@ and other Python sequences. >>> for i in a: ... print(i**(1 / 3.)) ... 
- 9.999999999999998 + 9.999999999999998 # may vary 1.0 9.999999999999998 3.0 diff --git a/meson.build b/meson.build index 8bfe987715d1..33d0e7b462ef 100644 --- a/meson.build +++ b/meson.build @@ -6,7 +6,7 @@ project( # See `numpy/__init__.py` version: '1.26.0.dev0', license: 'BSD-3', - meson_version: '>= 1.1.0', + meson_version: '>=1.2.99', # version in vendored-meson is 1.2.99 default_options: [ 'buildtype=debugoptimized', 'b_ndebug=if-release', @@ -80,4 +80,5 @@ else meson.add_dist_script(py, versioneer, '-o', '_version_meson.py') endif +subdir('meson_cpu') subdir('numpy') diff --git a/meson_cpu/arm/meson.build b/meson_cpu/arm/meson.build new file mode 100644 index 000000000000..f968b2e99682 --- /dev/null +++ b/meson_cpu/arm/meson.build @@ -0,0 +1,58 @@ +source_root = meson.project_source_root() +mod_features = import('features') +NEON = mod_features.new( + 'NEON', 1, + test_code: files(source_root + '/numpy/distutils/checks/cpu_neon.c')[0] +) +NEON_FP16 = mod_features.new( + 'NEON_FP16', 2, implies: NEON, + test_code: files(source_root + '/numpy/distutils/checks/cpu_neon_fp16.c')[0] +) +# FMA +NEON_VFPV4 = mod_features.new( + 'NEON_VFPV4', 3, implies: NEON_FP16, + test_code: files(source_root + '/numpy/distutils/checks/cpu_neon_vfpv4.c')[0] +) +# Advanced SIMD +ASIMD = mod_features.new( + 'ASIMD', 4, implies: NEON_VFPV4, detect: {'val': 'ASIMD', 'match': 'NEON.*'}, + test_code: files(source_root + '/numpy/distutils/checks/cpu_asimd.c')[0] +) +cpu_family = host_machine.cpu_family() +if cpu_family == 'aarch64' + # hardware baseline + NEON.update(implies: [NEON_FP16, NEON_VFPV4, ASIMD]) + NEON_FP16.update(implies: [NEON, NEON_VFPV4, ASIMD]) + NEON_VFPV4.update(implies: [NEON, NEON_FP16, ASIMD]) +elif cpu_family == 'arm' + NEON.update(args: '-mfpu=neon') + NEON_FP16.update(args: ['-mfp16-format=ieee', {'val': '-mfpu=neon-fp16', 'match': '-mfpu=.*'}]) + NEON_VFPV4.update(args: [{'val': '-mfpu=neon-vfpv4', 'match': '-mfpu=.*'}]) + ASIMD.update(args: [ + {'val': '-mfpu=neon-fp-armv8', 'match': '-mfpu=.*'}, + '-march=armv8-a+simd' + ]) +endif +# ARMv8.2 half-precision & vector arithm +ASIMDHP = mod_features.new( + 'ASIMDHP', 5, implies: ASIMD, + args: {'val': '-march=armv8.2-a+fp16', 'match': '-march=.*', 'mfilter': '\+.*'}, + test_code: files(source_root + '/numpy/distutils/checks/cpu_asimdhp.c')[0] +) +## ARMv8.2 dot product +ASIMDDP = mod_features.new( + 'ASIMDDP', 6, implies: ASIMD, + args: {'val': '-march=armv8.2-a+dotprod', 'match': '-march=.*', 'mfilter': '\+.*'}, + test_code: files(source_root + '/numpy/distutils/checks/cpu_asimddp.c')[0] +) +## ARMv8.2 Single & half-precision Multiply +ASIMDFHM = mod_features.new( + 'ASIMDFHM', 7, implies: ASIMDHP, + args: {'val': '-march=armv8.2-a+fp16fml', 'match': '-march=.*', 'mfilter': '\+.*'}, + test_code: files(source_root + '/numpy/distutils/checks/cpu_asimdfhm.c')[0] +) +# TODO: Add support for MSVC +ARM_FEATURES = { + 'NEON': NEON, 'NEON_FP16': NEON_FP16, 'NEON_VFPV4': NEON_VFPV4, + 'ASIMD': ASIMD, 'ASIMDHP': ASIMDHP, 'ASIMDFHM': ASIMDFHM +} diff --git a/meson_cpu/main_config.h.in b/meson_cpu/main_config.h.in new file mode 100644 index 000000000000..c7c13b2c7eb1 --- /dev/null +++ b/meson_cpu/main_config.h.in @@ -0,0 +1,351 @@ +/* + * Main configuration header of the CPU dispatcher. + * + * This header is autogenerated by the Meson build script located at `meson_cpu/meson.build`. + * It provides a set of utilities that are required for the runtime dispatching process. 
+ * + * The most important macros in this header are: + * - @ref @P@CPU_DISPATCH_DECLARE: Used to declare the dispatched functions and variables. + * - @ref @P@CPU_DISPATCH_CURFX: Used to define the dispatched functions with target-specific suffixes. + * - @ref @P@CPU_DISPATCH_CALL: Used for runtime dispatching of the exported functions and variables. + */ +#ifndef @P@_CPU_DISPATCHER_CONF_H_ +#define @P@_CPU_DISPATCHER_CONF_H_ +/// This definition is required to provides comptablity with NumPy distutils +#define @P@_CPU_MESON_BUILD +/** + * @def @P@WITH_CPU_BASELINE + * Enabled baseline features names as a single string where each is separated by a single space. + * For example: "SSE SSE2 SSE3" + * Required for logging purposes only. + */ +#define @P@WITH_CPU_BASELINE "@WITH_CPU_BASELINE@" +/** + * @def @P@WITH_CPU_BASELINE_N + * Number of enabled baseline features. + */ +#define @P@WITH_CPU_BASELINE_N @WITH_CPU_BASELINE_N@ +/** + * @def @P@WITH_CPU_DISPATCH + * Dispatched features names as a single string where each is separated by a single space. + */ +#define @P@WITH_CPU_DISPATCH "@WITH_CPU_DISPATCH@" +/** + * @def @P@WITH_CPU_DISPATCH_N + * Number of enabled dispatched features. + */ +#define @P@WITH_CPU_DISPATCH_N @WITH_CPU_DISPATCH_N@ +// Expand a macro, used by the following macros +#define @P@_CPU_EXPAND(X) X +#define @P@_CPU_CAT__(a, b) a ## b +#define @P@_CPU_CAT_(a, b) @P@_CPU_CAT__(a, b) +#define @P@_CPU_CAT(a, b) @P@_CPU_CAT_(a, b) + +/** + * @def @P@WITH_CPU_BASELINE_CALL(EXEC_CB, ...) + * Call each enabled baseline feature sorted by lowest interest + * using preprocessor callback without testing whiher the + * feature is supported by CPU or not. + * + * Required for logging purposes only, for example, generating + * a Python list to hold the information of the enabled features. + * + * Unwrapped Version: + * @code + * #define @P@WITH_CPU_BASELINE_CALL(EXEC_CB, ...) \ + * @P@_CPU_EXPAND(EXEC_CB(SSE, __VA_ARGS__)) \ + * @P@_CPU_EXPAND(EXEC_CB(SSE2, __VA_ARGS__)) \ + * @P@_CPU_EXPAND(EXEC_CB(SSE3, __VA_ARGS__)) + * @endcode + * + * @param EXEC_CB The preprocessor callback to be called for each enabled baseline feature. + * @param ... Additional arguments to be passed to the preprocessor callback. + */ +#define @P@WITH_CPU_BASELINE_CALL(EXEC_CB, ...) \ +@WITH_CPU_BASELINE_CALL@ + +/** + * @def @P@WITH_CPU_DISPATCH_CALL(EXEC_CB, ...) + * Similar to the above but for enabled dispatched features. + * + * @param EXEC_CB The preprocessor callback to be called for each enabled dispatched feature. + * @param ... Additional arguments to be passed to the preprocessor callback. + */ +#define @P@WITH_CPU_DISPATCH_CALL(EXEC_CB, ...) \ +@WITH_CPU_DISPATCH_CALL@ + +/* + * Defines the default behavior for the configurable macros derived from the configuration header + * that is generated by the meson function `mod_features.multi_targets()`. + * + * Note: Providing fallback in case of optimization disabled is no longer needed for meson + * since we always guarantee having configuration headers. + * + * However, it is still needed for compatibility with Numpy distutils. + */ +#ifndef @P@DISABLE_OPTIMIZATION + #define @P@MTARGETS_CONF_BASELINE(CB, ...) \ + &&"Expected config header that generated by mod_features.multi_targets()"; + #define @P@MTARGETS_CONF_DISPATCH(TEST_FEATURE_CB, CB, ...) \ + &&"Expected config header that generated by mod_features.multi_targets()"; +#else + #define @P@MTARGETS_CONF_BASELINE(CB, ...) 
@P@_CPU_EXPAND(CB(__VA_ARGS__)) + #define @P@MTARGETS_CONF_DISPATCH(CHK, CB, ...) +#endif +/** + * @def @P@CPU_DISPATCH_CURFX(NAME) + * + * Returns `NAME` suffixed with "_" + "the current target" during compiling + * the generated static libraries that are derived from the Meson function + * `mod_features.multi_targets()`. + * + * It also returns `NAME` as-is without any suffix when it comes to the baseline features or + * in case if the optimization is disabled. + * + * Note: `mod_features.multi_targets()` provides a unique target name within the compiler #definition + * called `@P@MTARGETS_CURRENT` on each generated library based on the specified features + * within its parameter 'dispatch:'. + * + * For example: + * + * @code + * # from meson + * mod_features.multi_targets( + * 'arithmetic.dispatch.h', 'arithmetic.c', + * baseline: [SSE3], dispatch: [AVX512_SKX, AVX2], + * prefix: '@P@' + * ) + * @code + * + * @code + * void @P@CPU_DISPATCH_CURFX(add)(const int *src0, const int *src1, int *dst) + * { + * #ifdef @P@HAVE_AVX512F // one of the implied feature of AVX512_SKX + * // code + * #elif defined(@P@HAVE_AVX2) + * // code + * #elif defined(@P@HAVE_SSE3) + * // CODE + * #else + * // Fallback code in case of features enabled + * #endif + * } + * @endif + * + * // Unwrapped version : + * void add_AVX512_SKX(const int *src0, const int *src1, int *dst) + * {...} + * void add_AVX2(const int *src0, const int *src1, int *dst) + * {...} + * // baseline + * void add(const int *src0, const int *src1, int *dst) + * {...} + * @endcode + * + * @param NAME The base name of the dispatched function or variable. + */ +#ifdef @P@MTARGETS_CURRENT + // '@P@MTARGETS_CURRENT': only defined by the dispatchable sources + #define @P@CPU_DISPATCH_CURFX(NAME) @P@_CPU_CAT(@P@_CPU_CAT(NAME, _), @P@MTARGETS_CURRENT) +#else + #define @P@CPU_DISPATCH_CURFX(NAME) @P@_CPU_EXPAND(NAME) +#endif + +/** + * @def @P@CPU_DISPATCH_DECLARE(...) + * + * Provides forward declarations for the exported variables and functions + * based on the enabled baseline and dispatched features. + * + * This macro requires include the config file that been generated + * by meson function `mod_features.multi_targets()` to determine the enabled + * baseline and dispatched features. + * + * For example: + * + * @code + * # from meson + * mod_features.multi_targets( + * 'arithmetic.dispatch.h', 'arithmetic.c', + * baseline: [SSE3], dispatch: [AVX512_SKX, AVX2], + * prefix: '@P@' + * ) + * @code + * + * @code + * // from C + * #include "arithmetic.dispatch.h" + * @P@CPU_DISPATCH_DECLARE(void add, (const int *src0, const int *src1, int *dst)) + * + * // Unwrapped version: + * void add_AVX512_SKX(const int *src0, const int *src1, int *dst); + * void add_AVX2(const int *src0, const int *src1, int *dst); + * void add(const int *src0, const int *src1, int *dst); // baseline + * @endcode + * + * @param ... The function or variable prototype to be declared, + * with the target-specific suffix added automatically. + */ +#define @P@CPU_DISPATCH_DECLARE(...) \ + @P@MTARGETS_CONF_DISPATCH(@P@CPU_DISPATCH_DECLARE_CHK_, @P@CPU_DISPATCH_DECLARE_CB_, __VA_ARGS__) \ + @P@MTARGETS_CONF_BASELINE(@P@CPU_DISPATCH_DECLARE_BASE_CB_, __VA_ARGS__) + +// Preprocessor callbacks +#define @P@CPU_DISPATCH_DECLARE_CB_(DUMMY, TARGET_NAME, LEFT, ...) \ + @P@_CPU_CAT(@P@_CPU_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__; +#define @P@CPU_DISPATCH_DECLARE_BASE_CB_(LEFT, ...) 
\ + LEFT __VA_ARGS__; +// Dummy CPU runtime checking +#define @P@CPU_DISPATCH_DECLARE_CHK_(FEATURE_NAME) + +/** + * @def @P@CPU_DISPATCH_DECLARE_XB(LEFT, ...) + * + * Same as `@P@CPU_DISPATCH_DECLARE` but exclude the baseline declaration even + * if it was enabled within `mod_features.multi_targets()`. + */ +#define @P@CPU_DISPATCH_DECLARE_XB(...) \ + @P@MTARGETS_CONF_DISPATCH(@P@CPU_DISPATCH_DECLARE_CHK_, @P@CPU_DISPATCH_DECLARE_CB_, __VA_ARGS__) + +/** + * @def @P@CPU_DISPATCH_CALL(...) + * + * Helper macro used for runtime dispatching of the exported functions and variables + * within the meson `mod_features.multi_targets()` function. + * + * This macro dispatches only one symbol based on the order of the specified features within the meson function + * `mod_features.multi_targets()`. For example, if `mod_features.multi_targets()` is called with + * `dispatch: [features_highest_1, features_highest_2]`, the macro will test each enabled feature against + * the CPU at runtime. Once it fails, it will move to the next order until falling back to the baseline. + * + * Similar to `@P@CPU_DISPATCH_DECLARE`, this macro requires including the config file that has been generated + * by the meson function `mod_features.multi_targets()` to determine the enabled baseline and dispatched features. + * + * Example usage: + * + * @code + * # from meson + * mod_features.multi_targets( + * 'arithmetic.dispatch.h', 'arithmetic.c', + * baseline: [SSE3], dispatch: [AVX512_SKX, AVX2], + * prefix: '@P@' + * ) + * @endcode + * + * @code + * // from C + * #include "arithmetic.dispatch.h" + * + * // Example 1: + * @P@CPU_DISPATCH_CALL(add, (src0, src1, dst)); + * + * // Unwrapped version: + * @P@CPU_HAVE(AVX512_SKX) ? add_AVX512_SKX(src0, src1, dst) : + * (@P@CPU_HAVE(AVX2) ? add_AVX2(src0, src1, dst) : + * add(src0, src1, dst); // baseline + * + * // Example 2: + * typedef void (*func_type)(const int*, const int*, int*); + * func_type func = @P@CPU_DISPATCH_CALL(add); + * + * // Unwrapped version: + * func_type func2 = @P@CPU_HAVE(AVX512_SKX) ? add_AVX512_SKX : + * (@P@CPU_HAVE(AVX2) ? add_AVX2 : + * add; // baseline + * + * // Example 3: + * func_type func3; + * @P@CPU_DISPATCH_CALL(func3 = add); + * + * // Unwrapped version: + * func_type func2 = @P@CPU_HAVE(AVX512_SKX) ? func3 = add_AVX512_SKX : + * (@P@CPU_HAVE(AVX2) ? func3 = add_AVX2 : + * func3 = add; // baseline + * + * @endcode + * + * @param ... The function or variable prototype to be called or assigned, + * with the target-specific suffix added automatically. + */ +#define @P@CPU_DISPATCH_CALL(...) \ + @P@MTARGETS_CONF_DISPATCH(@P@CPU_HAVE, @P@CPU_DISPATCH_CALL_CB_, __VA_ARGS__) \ + @P@MTARGETS_CONF_BASELINE(@P@CPU_DISPATCH_CALL_BASE_CB_, __VA_ARGS__) +// Preprocessor callbacks +#define @P@CPU_DISPATCH_CALL_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \ + (TESTED_FEATURES) ? (@P@_CPU_CAT(@P@_CPU_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) : +#define @P@CPU_DISPATCH_CALL_BASE_CB_(LEFT, ...) \ + (LEFT __VA_ARGS__) + +/** + * @def @P@CPU_DISPATCH_CALL_XB(LEFT, ...) + * + * Same as `@P@CPU_DISPATCH_CALL` but exclude the baseline call even + * if it was provided within meson `mod_features.multi_targets()`. + * + * Note: This macro returns void + */ +#define @P@CPU_DISPATCH_CALL_XB_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \ + (TESTED_FEATURES) ? (void) (@P@_CPU_CAT(@P@_CPU_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) : +#define @P@CPU_DISPATCH_CALL_XB(...) 
\ + @P@MTARGETS_CONF_DISPATCH(@P@CPU_HAVE, @P@CPU_DISPATCH_CALL_XB_CB_, __VA_ARGS__) \ + ((void) 0 /* discarded expression value */) + +/** + * Macro @P@CPU_DISPATCH_CALL_ALL(...) + * + * Same as `@P@CPU_DISPATCH_CALL` but dispatching all the required optimizations for + * the exported functions and variables instead of highest interested one. + * Returns void. + */ +#define @P@CPU_DISPATCH_CALL_ALL(...) \ + (@P@MTARGETS_CONF_DISPATCH(@P@CPU_HAVE, @P@CPU_DISPATCH_CALL_ALL_CB_, __VA_ARGS__) \ + @P@MTARGETS_CONF_BASELINE(@P@CPU_DISPATCH_CALL_ALL_BASE_CB_, __VA_ARGS__)) +// Preprocessor callbacks +#define @P@CPU_DISPATCH_CALL_ALL_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \ + ((TESTED_FEATURES) ? (@P@_CPU_CAT(@P@_CPU_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) : (void) 0), +#define @P@CPU_DISPATCH_CALL_ALL_BASE_CB_(LEFT, ...) \ + ( LEFT __VA_ARGS__ ) + +// Brings the headers files of enabled CPU features +#ifdef @P@HAVE_SSE + #include +#endif +#ifdef @P@HAVE_SSE2 + #include +#endif +#ifdef @P@HAVE_SSE3 + #include +#endif +#ifdef @P@HAVE_SSSE3 + #include +#endif +#ifdef @P@HAVE_SSE41 + #include +#endif +#ifdef @P@HAVE_POPCNT + #ifdef _MSC_VER + #include + #else + #include + #endif +#endif +#ifdef @P@HAVE_AVX + #include +#endif + +#if defined(@P@HAVE_XOP) || defined(@P@HAVE_FMA4) + #include +#endif + +#ifdef @P@HAVE_VSX + #include +#endif + +#ifdef @P@HAVE_VX + #include +#endif + +#ifdef @P@HAVE_NEON + #include +#endif +#endif // @P@_CPU_DISPATCHER_CONF_H_ diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build new file mode 100644 index 000000000000..b99638bfc24f --- /dev/null +++ b/meson_cpu/meson.build @@ -0,0 +1,307 @@ +# The CPU Dispatcher implementation. +# +# This script handles the CPU dispatcher and requires the Meson module +# 'features'. +# +# The CPU dispatcher script is responsible for three main tasks: +# +# 1. Defining the enabled baseline and dispatched features by parsing build +# options or compiler arguments, including detection of native flags. +# +# 2. Specifying the baseline arguments and definitions across all sources. +# +# 3. Generating the main configuration file, which contains information about +# the enabled features, along with a collection of C macros necessary for +# runtime dispatching. For more details, see the template file +# `main_config.h.in`. +# +# This script exposes the following variables: +# +# - `CPU_BASELINE`: A set of CPU feature objects obtained from +# `mod_features.new()`, representing the minimum CPU features +# specified by the build option `-Dcpu-baseline`. +# +# - `CPU_BASELINE_NAMES`: A set of enabled CPU feature names, representing the +# minimum CPU features specified by the build option +# `-Dcpu-baseline`. +# +# - `CPU_DISPATCH_NAMES`: A set of enabled CPU feature names, representing the +# additional CPU features that can be dispatched at +# runtime, specified by the build option +# `-Dcpu-dispatch`. +# +# - `CPU_FEATURES`: A dictionary containing all supported CPU feature objects. +# +# Additionally, this script exposes a set of variables that represent each +# supported feature to be used within the Meson function +# `mod_features.multi_targets()`. 
+ +# Prefix used by all macros and features definitions +CPU_CONF_PREFIX = 'NPY_' +# main configuration name +CPU_CONF_CONFIG = 'npy_cpu_dispatch_config.h' + +if get_option('disable-optimization') + add_project_arguments('-D' + CPU_CONF_PREFIX + 'DISABLE_OPTIMIZATION', language: ['c', 'cpp']) + CPU_CONF_BASELINE = 'none' + CPU_CONF_DISPATCH = 'none' +else + baseline_detect = false + c_args = get_option('c_args') + foreach arg : c_args + foreach carch : ['-march', '-mcpu', '-xhost', '/QxHost'] + if arg.contains(carch) + message('Appending option "detect" to "cpu-baseline" due to detecting global architecture c_arg "' + arg + '"') + baseline_detect = true + break + endif + endforeach + if baseline_detect + break + endif + endforeach + # The required minimal set of required CPU features. + CPU_CONF_BASELINE = get_option('cpu-baseline') + if baseline_detect + CPU_CONF_BASELINE += '+detect' + endif + # The required dispatched set of additional CPU features. + CPU_CONF_DISPATCH = get_option('cpu-dispatch') +endif + +# Initialize the CPU features Export the X86 features objects 'SSE', 'AVX', +# etc. plus a dictionary "X86_FEATURES" which maps to each object by its name +subdir('x86') +subdir('ppc64') +subdir('s390x') +subdir('arm') + +CPU_FEATURES = {} +CPU_FEATURES += ARM_FEATURES +CPU_FEATURES += X86_FEATURES +CPU_FEATURES += PPC64_FEATURES +CPU_FEATURES += S390X_FEATURES + +# Parse the requsted baseline (CPU_CONF_BASELINE) and dispatch features +# (CPU_CONF_DISPATCH). +cpu_family = host_machine.cpu_family() +# Used by build option 'min' +min_features = { + 'x86': [SSE2], + 'x86_64': [SSE3], + 'ppc64': [], + 's390x': [], + 'arm': [], + 'aarch64': [ASIMD] +}.get(cpu_family, []) +if host_machine.endian() == 'little' and cpu_family == 'ppc64' + min_features = [VSX2] +endif + +# Used by build option 'max' +max_features_dict = { + 'x86': X86_FEATURES, + 'x86_64': X86_FEATURES, + 'ppc64': PPC64_FEATURES, + 's390x': S390X_FEATURES, + 'arm': ARM_FEATURES, + 'aarch64': ARM_FEATURES, +}.get(cpu_family, []) +max_features = [] +foreach fet_name, fet_obj : max_features_dict + max_features += [fet_obj] +endforeach + +parse_options = { + 'cpu-baseline': CPU_CONF_BASELINE, + 'cpu-dispatch': CPU_CONF_DISPATCH +} +parse_result = { + 'cpu-baseline': [], + 'cpu-dispatch': [] +} +mod_features = import('features') +foreach opt_name, conf : parse_options + # no support for regex :(? 
+ tokens = conf.replace(',', ' ').replace('+', ' + ').replace('-', ' - ').strip().to_upper().split() + result = [] + ignored = [] + # append is the default + append = true + foreach tok : tokens + if tok == '+' + append = true + continue + elif tok == '-' + append = false + continue + elif tok == 'NONE' + continue + elif tok == 'NATIVE' + if not is_variable('cpu_native_features') + compiler_id = meson.get_compiler('c').get_id() + native_flags = { + 'intel': '-xHost', + 'intel-cl': '/QxHost', + # FIXME: Add support for fcc(-mcpu=a64fx) compiler + }.get(compiler_id, '-march=native') + test_native = mod_features.test( + max_features, anyfet: true, + force_args: [native_flags] + '-DDETECT_FEATURES' + ) + if not test_native[0] + error('Option "native" doesn\'t support compiler', compiler_id) + endif + cpu_native_features = [] + foreach fet_name : test_native[1].get('features') + cpu_native_features += CPU_FEATURES[fet_name] + endforeach + endif + accumulate = cpu_native_features + elif tok == 'DETECT' + if not is_variable('cpu_detect_features') + test_detect = mod_features.test( + max_features, anyfet: true, + force_args: ['-DDETECT_FEATURES'] + get_option('c_args') + ) + cpu_detect_features = [] + foreach fet_name : test_detect[1].get('features') + cpu_detect_features += CPU_FEATURES[fet_name] + endforeach + endif + accumulate = cpu_detect_features + elif tok == 'MIN' + accumulate = min_features + elif tok == 'MAX' + accumulate = max_features + elif tok in CPU_FEATURES + tokobj = CPU_FEATURES[tok] + if tokobj not in max_features + ignored += tok + continue + endif + accumulate = [tokobj] + else + error('Invalid token "'+tok+'" within option --'+opt_name) + endif + if append + foreach fet : accumulate + if fet not in result + result += fet + endif + endforeach + else + filterd = [] + foreach fet : result + if fet not in accumulate + filterd += fet + endif + endforeach + result = filterd + endif # append + endforeach # tok : tokens + if ignored.length() > 0 + message( + 'During parsing ' + opt_name + + ': The following CPU features were ignored due to platform ' + + 'incompatibility or lack of support:\n"' + ' '.join(ignored) + '"' + ) + endif + if result.length() > 0 + parse_result += {opt_name: mod_features.implicit_c(result)} + endif +endforeach # opt_name, conf : parse_options + +# Test the baseline and dispatch features and set their flags and #definitions +# across all sources. +# +# It is important to know that this test enables the maximum supported features +# by the platform depending on the required features. +# +# For example, if the user specified `--cpu-baseline=avx512_skx`, and the +# compiler doesn't support it, but still supports any of the implied features, +# then we enable the maximum supported implied features, e.g., AVX2, which can +# be done by specifying `anyfet: true` to the test function. +if parse_result['cpu-baseline'].length() > 0 + baseline = mod_features.test(parse_result['cpu-baseline'], anyfet: true)[1] + baseline_args = baseline['args'] + foreach baseline_fet : baseline['defines'] + baseline_args += ['-D' + CPU_CONF_PREFIX + 'HAVE_' + baseline_fet] + endforeach + add_project_arguments(baseline_args, language: ['c', 'cpp']) +else + baseline = {} +endif +# The name of the baseline features including its implied features. 
+CPU_BASELINE_NAMES = baseline.get('features', []) +CPU_BASELINE = [] +foreach fet_name : CPU_BASELINE_NAMES + CPU_BASELINE += [CPU_FEATURES[fet_name]] +endforeach +# Loop over all initialized features and disable any feature that is not part +# of the requested baseline and dispatch features to avoid it enabled by +# import('feature').multi_targets +foreach fet_name, fet_obj : CPU_FEATURES + if fet_obj in parse_result['cpu-dispatch'] or fet_name in CPU_BASELINE_NAMES + continue + endif + fet_obj.update(disable: 'Not part of the requsted features') +endforeach + +CPU_DISPATCH_NAMES = [] +foreach fet_obj : parse_result['cpu-dispatch'] + # skip baseline features + if fet_obj.get('name') in CPU_BASELINE_NAMES + continue + endif + fet_test = mod_features.test(fet_obj) + if not fet_test[0] + continue + endif + CPU_DISPATCH_NAMES += [fet_obj.get('name')] +endforeach +# Generate main configuration header see 'main_config.h.in' for more +# clarification. +main_config = { + 'P': CPU_CONF_PREFIX, + 'WITH_CPU_BASELINE': ' '.join(CPU_BASELINE_NAMES), + 'WITH_CPU_BASELINE_N': CPU_BASELINE_NAMES.length(), + 'WITH_CPU_DISPATCH': ' '.join(CPU_DISPATCH_NAMES), + 'WITH_CPU_DISPATCH_N': CPU_DISPATCH_NAMES.length(), +} +clines = [] +macro_tpl = '@0@_CPU_EXPAND(EXEC_CB(@1@, __VA_ARGS__)) \\' +foreach fet : CPU_BASELINE_NAMES + clines += macro_tpl.format(CPU_CONF_PREFIX, fet) +endforeach +main_config += {'WITH_CPU_BASELINE_CALL': '\n'.join(clines)} +clines = [] +foreach fet : CPU_DISPATCH_NAMES + clines += macro_tpl.format(CPU_CONF_PREFIX, fet) +endforeach +main_config += {'WITH_CPU_DISPATCH_CALL': '\n'.join(clines)} + +configure_file( + input : 'main_config.h.in', + output : CPU_CONF_CONFIG, + configuration : configuration_data(main_config) +) +add_project_arguments( + '-I' + meson.current_build_dir(), + language: ['c', 'cpp'] +) + +message( +''' +CPU Optimization Options + baseline: + Requested : @0@ + Enabled : @1@ + dispatch: + Requested : @2@ + Enabled : @3@ +'''.format( + CPU_CONF_BASELINE, ' '.join(CPU_BASELINE_NAMES), + CPU_CONF_DISPATCH, ' '.join(CPU_DISPATCH_NAMES) + ) +) diff --git a/meson_cpu/ppc64/meson.build b/meson_cpu/ppc64/meson.build new file mode 100644 index 000000000000..d14b23703fe3 --- /dev/null +++ b/meson_cpu/ppc64/meson.build @@ -0,0 +1,38 @@ +source_root = meson.project_source_root() +mod_features = import('features') +compiler_id = meson.get_compiler('c').get_id() + +VSX = mod_features.new( + 'VSX', 1, args: '-mvsx', + test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx.c')[0], + extra_tests: { + 'VSX_ASM': files(source_root + '/numpy/distutils/checks/extra_vsx_asm.c')[0] + } +) +if compiler_id == 'clang' + VSX.update(args: ['-mvsx', '-maltivec']) +endif +VSX2 = mod_features.new( + 'VSX2', 2, implies: VSX, args: {'val': '-mcpu=power8', 'match': '.*vsx'}, + detect: {'val': 'VSX2', 'match': 'VSX'}, + test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx2.c')[0], +) +# VSX2 is hardware baseline feature on ppc64le since the first little-endian +# support was part of Power8 +if host_machine.endian() == 'little' + VSX.update(implies: VSX2) +endif +VSX3 = mod_features.new( + 'VSX3', 3, implies: VSX2, args: {'val': '-mcpu=power9', 'match': '.*[mcpu=|vsx].*'}, + detect: {'val': 'VSX3', 'match': 'VSX.*'}, + test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx3.c')[0], +) +VSX4 = mod_features.new( + 'VSX4', 4, implies: VSX3, args: {'val': '-mcpu=power10', 'match': '.*[mcpu=|vsx].*'}, + detect: {'val': 'VSX4', 'match': 'VSX.*'}, + test_code: files(source_root + 
'/numpy/distutils/checks/cpu_vsx3.c')[0], + extra_tests: { + 'VSX4_MMA': files(source_root + '/numpy/distutils/checks/extra_vsx4_mma.c')[0] + } +) +PPC64_FEATURES = {'VSX': VSX, 'VSX2': VSX2, 'VSX3': VSX3, 'VSX4': VSX4} diff --git a/meson_cpu/s390x/meson.build b/meson_cpu/s390x/meson.build new file mode 100644 index 000000000000..a69252d1607c --- /dev/null +++ b/meson_cpu/s390x/meson.build @@ -0,0 +1,18 @@ +source_root = meson.project_source_root() +mod_features = import('features') + +VX = mod_features.new( + 'VX', 1, args: ['-mzvector', '-march=arch11'], + test_code: files(source_root + '/numpy/distutils/checks/cpu_vx.c')[0], +) +VXE = mod_features.new( + 'VXE', 2, implies: VX, args: {'val': '-march=arch12', 'match': '-march=.*'}, + detect: {'val': 'VXE', 'match': 'VX'}, + test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe.c')[0], +) +VXE2 = mod_features.new( + 'VXE2', 3, implies: VXE, args: {'val': '-march=arch13', 'match': '-march=.*'}, + detect: {'val': 'VXE2', 'match': 'VX.*'}, + test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe2.c')[0], +) +S390X_FEATURES = {'VX': VX, 'VXE': VXE, 'VXE2': VXE2} diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build new file mode 100644 index 000000000000..caf6bf09c14e --- /dev/null +++ b/meson_cpu/x86/meson.build @@ -0,0 +1,227 @@ +source_root = meson.project_source_root() +mod_features = import('features') + +SSE = mod_features.new( + 'SSE', 1, args: '-msse', + test_code: files(source_root + '/numpy/distutils/checks/cpu_sse.c')[0] +) +SSE2 = mod_features.new( + 'SSE2', 2, implies: SSE, + args: '-msse2', + test_code: files(source_root + '/numpy/distutils/checks/cpu_sse2.c')[0] +) +# enabling SSE without SSE2 is useless also it's non-optional for x86_64 +SSE.update(implies: SSE2) +SSE3 = mod_features.new( + 'SSE3', 3, implies: SSE2, + args: '-msse3', + test_code: files(source_root + '/numpy/distutils/checks/cpu_sse3.c')[0] +) +SSSE3 = mod_features.new( + 'SSSE3', 4, implies: SSE3, + args: '-mssse3', + test_code: files(source_root + '/numpy/distutils/checks/cpu_ssse3.c')[0] +) +SSE41 = mod_features.new( + 'SSE41', 5, implies: SSSE3, + args: '-msse4.1', + test_code: files(source_root + '/numpy/distutils/checks/cpu_sse41.c')[0] +) +POPCNT = mod_features.new( + 'POPCNT', 6, implies: SSE41, + args: '-mpopcnt', + test_code: files(source_root + '/numpy/distutils/checks/cpu_popcnt.c')[0] +) +SSE42 = mod_features.new( + 'SSE42', 7, implies: POPCNT, args: '-msse4.2', + test_code: files(source_root + '/numpy/distutils/checks/cpu_sse42.c')[0] +) +# 7-20 left as margin for any extra features +AVX = mod_features.new( + 'AVX', 20, implies: SSE42, args: '-mavx', + detect: {'val': 'AVX', 'match': '.*SSE.*'}, + test_code: files(source_root + '/numpy/distutils/checks/cpu_avx.c')[0] +) +XOP = mod_features.new( + 'XOP', 21, implies: AVX, args: '-mxop', + test_code: files(source_root + '/numpy/distutils/checks/cpu_xop.c')[0] +) +FMA4 = mod_features.new( + 'FMA4', 22, implies: AVX, args: '-mfma4', + test_code: files(source_root + '/numpy/distutils/checks/cpu_fma4.c')[0] +) +# x86 half-precision +F16C = mod_features.new( + 'F16C', 23, implies: AVX, args: '-mf16c', + test_code: files(source_root + '/numpy/distutils/checks/cpu_f16c.c')[0] +) +FMA3 = mod_features.new( + 'FMA3', 24, implies: F16C, args: '-mfma', + test_code: files(source_root + '/numpy/distutils/checks/cpu_fma3.c')[0] +) +AVX2 = mod_features.new( + 'AVX2', 25, implies: F16C, args: '-mavx2', + test_code: files(source_root + '/numpy/distutils/checks/cpu_avx2.c')[0] +) +# 
25-40 left as margin for any extra features +AVX512F = mod_features.new( + 'AVX512F', 40, implies: [FMA3, AVX2], + # Disables mmx because of stack corruption that may happen during mask + # conversions. + # TODO (seiko2plus): provide more clarification + args: ['-mno-mmx', '-mavx512f'], + detect: {'val': 'AVX512F', 'match': '.*'}, + test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512f.c')[0], + extra_tests: { + 'AVX512F_REDUCE': files(source_root + '/numpy/distutils/checks/extra_avx512f_reduce.c')[0] + } +) +AVX512CD = mod_features.new( + 'AVX512CD', 41, implies: AVX512F, args: '-mavx512cd', + test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512cd.c')[0] +) +AVX512_KNL = mod_features.new( + 'AVX512_KNL', 42, implies: AVX512CD, args: ['-mavx512er', '-mavx512pf'], + group: ['AVX512ER', 'AVX512PF'], + test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_knl.c')[0] +) +AVX512_KNM = mod_features.new( + 'AVX512_KNM', 43, implies: AVX512_KNL, + args: ['-mavx5124fmaps', '-mavx5124vnniw', '-mavx512vpopcntdq'], + group: ['AVX5124FMAPS', 'AVX5124VNNIW', 'AVX512VPOPCNTDQ'], + test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_knm.c')[0] +) +AVX512_SKX = mod_features.new( + 'AVX512_SKX', 50, implies: AVX512CD, + args: ['-mavx512vl', '-mavx512bw', '-mavx512dq'], + group: ['AVX512VL', 'AVX512BW', 'AVX512DQ'], + test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_skx.c')[0], + extra_tests: { + 'AVX512BW_MASK': files(source_root + '/numpy/distutils/checks/extra_avx512bw_mask.c')[0], + 'AVX512DQ_MASK': files(source_root + '/numpy/distutils/checks/extra_avx512dq_mask.c')[0] + } +) +AVX512_CLX = mod_features.new( + 'AVX512_CLX', 51, implies: AVX512_SKX, args: '-mavx512vnni', + group: ['AVX512VNNI'], + test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_clx.c')[0] +) +AVX512_CNL = mod_features.new( + 'AVX512_CNL', 52, implies: AVX512_SKX, + args: ['-mavx512ifma', '-mavx512vbmi'], + group: ['AVX512IFMA', 'AVX512VBMI'], + test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_cnl.c')[0] +) +AVX512_ICL = mod_features.new( + 'AVX512_ICL', 53, implies: [AVX512_CLX, AVX512_CNL], + args: ['-mavx512vbmi2', '-mavx512bitalg', '-mavx512vpopcntdq'], + group: ['AVX512VBMI2', 'AVX512BITALG', 'AVX512VPOPCNTDQ'], + test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_icl.c')[0] +) +# TODO add support for zen4 +AVX512_SPR = mod_features.new( + 'AVX512_SPR', 55, implies: AVX512_ICL, + args: ['-mavx512fp16'], + group: ['AVX512FP16'], + test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_spr.c')[0] +) + +# Specializations for non unix-like compilers +# ------------------------------------------- +cpu_family = host_machine.cpu_family() +compiler_id = meson.get_compiler('c').get_id() +if compiler_id not in ['gcc', 'clang'] + AVX512_SPR.update(disable: compiler_id + ' compiler does not support it') +endif + +# Common specializations between both Intel compilers (unix-like and msvc-like) +if compiler_id in ['intel', 'intel-cl'] + # POPCNT, and F16C don't own private FLAGS however the compiler still + # provides ISA capability for them. 
+ POPCNT.update(args: '') + F16C.update(args: '') + # Intel compilers don't support the following features independently + FMA3.update(implies: [F16C, AVX2]) + AVX2.update(implies: [F16C, FMA3]) + AVX512F.update(implies: [AVX2, AVX512CD, AVX512_SKX]) + AVX512CD.update(implies: [AVX512F, AVX512_SKX]) + XOP.update(disable: 'Intel Compiler does not support it') + FMA4.update(disable: 'Intel Compiler does not support it') +endif + +if compiler_id == 'intel-cl' + foreach fet : [SSE, SSE2, SSE3, SSSE3, AVX] + fet.update(args: {'val': '/arch:' + fet.get('name'), 'match': '/arch:.*'}) + endforeach + SSE41.update(args: {'val': '/arch:SSE4.1', 'match': '/arch:.*'}) + SSE42.update(args: {'val': '/arch:SSE4.2', 'match': '/arch:.*'}) + FMA3.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) + AVX2.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) + AVX512F.update(args: {'val': '/Qx:COMMON-AVX512', 'match': '/arch:.*'}) + AVX512CD.update(args: {'val': '/Qx:COMMON-AVX512', 'match': '/arch:.*'}) + AVX512_KNL.update(args: {'val': '/Qx:KNL', 'match': '/[arch|Qx]:.*'}) + AVX512_KNM.update(args: {'val': '/Qx:KNM', 'match': '/[arch|Qx]:.*'}) + AVX512_SKX.update(args: {'val': '/Qx:SKYLAKE-AVX512', 'match': '/[arch|Qx]:.*'}) + AVX512_CLX.update(args: {'val': '/Qx:CASCADELAKE', 'match': '/[arch|Qx]:.*'}) + AVX512_CNL.update(args: {'val': '/Qx:CANNONLAKE', 'match': '/[arch|Qx]:.*'}) + AVX512_ICL.update(args: {'val': '/Qx:ICELAKE-CLIENT', 'match': '/[arch|Qx]:.*'}) +endif + +if compiler_id == 'intel' + clear_m = '^(-mcpu=|-march=)' + clear_any = '^(-mcpu=|-march=|-x[A-Z0-9\-])' + FMA3.update(args: {'val': '-march=core-avx2', 'match': clear_m}) + AVX2.update(args: {'val': '-march=core-avx2', 'match': clear_m}) + AVX512F.update(args: {'val': '-march=common-avx512', 'match': clear_m}) + AVX512CD.update(args: {'val': '-march=common-avx512', 'match': clear_m}) + AVX512_KNL.update(args: {'val': '-xKNL', 'match': clear_any}) + AVX512_KNM.update(args: {'val': '-xKNM', 'match': clear_any}) + AVX512_SKX.update(args: {'val': '-xSKYLAKE-AVX512', 'match': clear_any}) + AVX512_CLX.update(args: {'val': '-xCASCADELAKE', 'match': clear_any}) + AVX512_CNL.update(args: {'val': '-xCANNONLAKE', 'match': clear_any}) + AVX512_ICL.update(args: {'val': '-xICELAKE-CLIENT', 'match': clear_any}) +endif + +if compiler_id == 'msvc' + # MSVC compiler doesn't support the following features + foreach fet : [AVX512_KNL, AVX512_KNM] + fet.update(disable: compiler_id + ' compiler does not support it') + endforeach + # The following features don't own private FLAGS, however the compiler still + # provides ISA capability for them. + foreach fet : [ + SSE3, SSSE3, SSE41, POPCNT, SSE42, AVX, F16C, XOP, FMA4, + AVX512F, AVX512CD, AVX512_CLX, AVX512_CNL, + AVX512_ICL + ] + fet.update(args: '') + endforeach + # MSVC compiler doesn't support the following features independently + FMA3.update(implies: [F16C, AVX2]) + AVX2.update(implies: [F16C, FMA3]) + AVX512F.update(implies: [AVX2, AVX512CD, AVX512_SKX]) + AVX512CD.update(implies: [AVX512F, AVX512_SKX]) + clear_arch = '/arch:.*' + # only available on 32-bit. 
It's enabled by default on 64-bit mode
+  foreach fet : [SSE, SSE2]
+    if cpu_family == 'x86'
+      fet.update(args: {'val': '/arch:' + fet.get('name'), 'match': clear_arch})
+    else
+      fet.update(args: '')
+    endif
+  endforeach
+  FMA3.update(args: {'val': '/arch:AVX2', 'match': clear_arch})
+  AVX2.update(args: {'val': '/arch:AVX2', 'match': clear_arch})
+  AVX512_SKX.update(args: {'val': '/arch:AVX512', 'match': clear_arch})
+endif
+
+X86_FEATURES = {
+  'SSE': SSE, 'SSE2': SSE2, 'SSE3': SSE3, 'SSSE3': SSSE3,
+  'SSE41': SSE41, 'POPCNT': POPCNT, 'SSE42': SSE42, 'AVX': AVX,
+  'XOP': XOP, 'FMA4': FMA4, 'F16C': F16C, 'FMA3': FMA3,
+  'AVX2': AVX2, 'AVX512F': AVX512F, 'AVX512CD': AVX512CD,
+  'AVX512_KNL': AVX512_KNL, 'AVX512_KNM': AVX512_KNM,
+  'AVX512_SKX': AVX512_SKX, 'AVX512_CLX': AVX512_CLX,
+  'AVX512_CNL': AVX512_CNL, 'AVX512_ICL': AVX512_ICL,
+  'AVX512_SPR': AVX512_SPR
+}
diff --git a/meson_options.txt b/meson_options.txt
index 7ce4eefacd89..8b1fad6c4041 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -1,7 +1,7 @@
 option('blas', type: 'string', value: 'openblas',
-        description: 'option for BLAS library switching')
+        description: 'Option for BLAS library switching')
 option('lapack', type: 'string', value: 'openblas',
-        description: 'option for LAPACK library switching')
+        description: 'Option for LAPACK library switching')
 option('allow-noblas', type: 'boolean', value: false,
         description: 'If set to true, allow building with (slow!) internal fallback routines')
 option('use-ilp64', type: 'boolean', value: false,
@@ -12,8 +12,22 @@ option('disable-svml', type: 'boolean', value: false,
         description: 'Disable building against SVML')
 option('disable-threading', type: 'boolean', value: false,
         description: 'Disable threading support (see `NPY_ALLOW_THREADS` docs)')
-# TODO: flip value to 'false' once we have `npy_cpu_dispatch_config.h` & co.
-option('disable-simd-optimizations', type: 'boolean', value: true,
-        description: 'Disable SIMD features beyond the baseline ones')
+option('disable-optimization', type: 'boolean', value: false,
+        description: 'Disable CPU optimized code (dispatch,simd,unroll...)')
+option('cpu-baseline', type: 'string', value: 'min',
+        description: 'Minimal set of required CPU features')
+option('cpu-dispatch', type: 'string', value: 'max -xop -fma4',
+        description: 'Dispatched set of additional CPU features')
+option('test-simd', type: 'array',
+        value: [
+          'BASELINE', 'SSE2', 'SSE42', 'XOP', 'FMA4',
+          'AVX2', 'FMA3', 'AVX2,FMA3', 'AVX512F', 'AVX512_SKX',
+          'VSX', 'VSX2', 'VSX3', 'VSX4',
+          'NEON', 'ASIMD',
+          'VX', 'VXE', 'VXE2',
+        ],
+        description: 'Specify a list of CPU features to be tested against NumPy SIMD interface')
+option('test-simd-args', type: 'string', value: '',
+        description: 'Extra args to be passed to the `_simd` module that is used for testing the NumPy SIMD interface')
 option('relaxed-strides-debug', type: 'boolean', value: false,
         description: 'Enable relaxed strides debug mode (see `NPY_RELAXED_STRIDES_DEBUG` docs)')
diff --git a/numpy/core/meson.build b/numpy/core/meson.build
index 17760efa2fc0..ccc060aacb96 100644
--- a/numpy/core/meson.build
+++ b/numpy/core/meson.build
@@ -84,6 +84,7 @@ cdata.set('NPY_API_VERSION', C_API_VERSION)
 use_svml = (
   host_machine.system() == 'linux' and
   host_machine.cpu_family() == 'x86_64' and
+  ('AVX512_SKX' in CPU_DISPATCH_NAMES or 'AVX512_SKX' in CPU_BASELINE_NAMES) and
   not get_option('disable-svml')
 )
 if use_svml
@@ -291,9 +292,6 @@ endforeach

 # SSE headers only enabled automatically on amd64/x32 builds
 optional_headers = [
-  'xmmintrin.h',  # SSE
-  'emmintrin.h',  # SSE2
-  'immintrin.h',  # AVX
   'features.h',  # for glibc version linux
   'xlocale.h',  # see GH#8367
   'dlfcn.h',  # dladdr
@@ -322,6 +320,15 @@ optional_function_attributes = [
 #   endif
 #endforeach

+# Max possible optimization flags. We pass these flags to all our dispatch-able
+# (multi_targets) sources.
+compiler_id = cc.get_id()
+max_opt = {
+  'msvc': ['/O2'],
+  'intel-cl': ['/O3'],
+}.get(compiler_id, ['-O3'])
+max_opt = cc.has_multi_arguments(max_opt) ? max_opt : []
+
 # Optional GCC compiler builtins and their call arguments.
 # If given, a required header and definition name (HAVE_ prepended)
 # Call arguments are required as the compiler will do strict signature checking
@@ -513,12 +520,6 @@ if cc.get_id() == 'msvc'
     staticlib_cflags += '-d2VolatileMetadata-'
   endif
 endif
-# TODO: change to "feature" option in meson_options.txt? See
-# https://mesonbuild.com/Build-options.html#build-options
-if get_option('disable-simd-optimizations')
-  staticlib_cflags += '-DNPY_DISABLE_OPTIMIZATION'
-  staticlib_cppflags += '-DNPY_DISABLE_OPTIMIZATION'
-endif

 npy_math_internal_h = custom_target(
   output: 'npy_math_internal.h',
@@ -626,19 +627,10 @@ src_ufunc_api = custom_target('__ufunc_api',
 # Set common build flags for C and C++ code
 # -----------------------------------------
-
-# TODO: change to "feature" option in meson_options.txt?
See -# https://mesonbuild.com/Build-options.html#build-options -disable_simd_optimizations = [] -if get_option('disable-simd-optimizations') - disable_simd_optimizations = '-DNPY_DISABLE_OPTIMIZATION' -endif - # Common build flags c_args_common = [ '-DNPY_INTERNAL_BUILD', '-DHAVE_NPY_CONFIG_H', - disable_simd_optimizations, cflags_large_file_support, ] @@ -667,11 +659,9 @@ np_core_dep = declare_dependency( '.', 'include', 'src/common', - ], - compile_args: disable_simd_optimizations + ] ) - # Build multiarray_tests module # ----------------------------- py.extension_module('_multiarray_tests', @@ -691,15 +681,30 @@ py.extension_module('_multiarray_tests', subdir: 'numpy/core', ) +_umath_tests_mtargets = mod_features.multi_targets( + '_umath_tests.dispatch.h', + 'src/umath/_umath_tests.dispatch.c', + dispatch: [ + AVX2, SSE41, SSE2, + ASIMDHP, ASIMD, NEON, + VSX3, VSX2, VSX, + VXE, VX, + ], + baseline: CPU_BASELINE, + prefix: 'NPY_', + dependencies: [py_dep, np_core_dep] +) + test_modules_src = [ ['_umath_tests', [ src_file.process('src/umath/_umath_tests.c.src'), - 'src/umath/_umath_tests.dispatch.c', 'src/common/npy_cpu_features.c', - ]], - ['_rational_tests', 'src/umath/_rational_tests.c'], - ['_struct_ufunc_tests', 'src/umath/_struct_ufunc_tests.c'], - ['_operand_flag_tests', 'src/umath/_operand_flag_tests.c'], + ], + _umath_tests_mtargets.static_lib('_umath_tests_mtargets') + ], + ['_rational_tests', 'src/umath/_rational_tests.c', []], + ['_struct_ufunc_tests', 'src/umath/_struct_ufunc_tests.c', []], + ['_operand_flag_tests', 'src/umath/_operand_flag_tests.c', []], ] foreach gen: test_modules_src py.extension_module(gen[0], @@ -709,7 +714,261 @@ foreach gen: test_modules_src dependencies: np_core_dep, install: true, subdir: 'numpy/core', + link_with: gen[2], + ) +endforeach + +# Build multiarray dispatch-able sources +# -------------------------------------- +multiarray_gen_headers = [ + src_file.process('src/multiarray/arraytypes.h.src'), + src_file.process('src/common/npy_sort.h.src'), +] +foreach gen_mtargets : [ + [ + 'argfunc.dispatch.h', + src_file.process('src/multiarray/argfunc.dispatch.c.src'), + [ + AVX512_SKX, AVX2, XOP, SSE42, SSE2, + VSX2, + ASIMD, NEON, + VXE, VX + ] + ], +] + mtargets = mod_features.multi_targets( + gen_mtargets[0], multiarray_gen_headers + gen_mtargets[1], + dispatch: gen_mtargets[2], + baseline: CPU_BASELINE, + prefix: 'NPY_', + dependencies: [py_dep, np_core_dep], + c_args: c_args_common + max_opt, + cpp_args: cpp_args_common + max_opt, + include_directories: [ + 'include', + 'src/common', + 'src/multiarray', + 'src/npymath', + 'src/umath' + ] + ) + if not is_variable('multiarray_umath_mtargets') + multiarray_umath_mtargets = mtargets + else + multiarray_umath_mtargets.extend(mtargets) + endif +endforeach + +# Build npysort dispatch-able sources +# ----------------------------------- +foreach gen_mtargets : [ + [ + 'simd_qsort.dispatch.h', + 'src/npysort/simd_qsort.dispatch.cpp', + [AVX512_SKX] + ], + [ + 'simd_qsort_16bit.dispatch.h', + 'src/npysort/simd_qsort_16bit.dispatch.cpp', + [AVX512_SPR, AVX512_ICL] + ], +] + mtargets = mod_features.multi_targets( + gen_mtargets[0], multiarray_gen_headers + gen_mtargets[1], + dispatch: gen_mtargets[2], + # baseline: CPU_BASELINE, it doesn't provide baseline fallback + prefix: 'NPY_', + dependencies: [py_dep, np_core_dep], + c_args: c_args_common + max_opt, + cpp_args: cpp_args_common + max_opt, + include_directories: [ + 'include', + 'src/common', + 'src/multiarray', + 'src/npymath', + 'src/umath' + ] ) + if 
not is_variable('multiarray_umath_mtargets') + multiarray_umath_mtargets = mtargets + else + multiarray_umath_mtargets.extend(mtargets) + endif +endforeach + +# Build umath dispatch-able sources +# --------------------------------- +mod_features = import('features') +umath_gen_headers = [ + src_file.process('src/umath/loops.h.src'), + src_file.process('src/umath/loops_utils.h.src'), +] + +foreach gen_mtargets : [ + [ + 'loops_arithm_fp.dispatch.h', + src_file.process('src/umath/loops_arithm_fp.dispatch.c.src'), + [ + [AVX2, FMA3], SSE2, + ASIMD, NEON, + VSX3, VSX2, + VXE, VX, + ] + ], + [ + 'loops_arithmetic.dispatch.h', + src_file.process('src/umath/loops_arithmetic.dispatch.c.src'), + [ + AVX512_SKX, AVX512F, AVX2, SSE41, SSE2, + NEON, + VSX4, VSX2, + VX, + ] + ], + [ + 'loops_comparison.dispatch.h', + src_file.process('src/umath/loops_comparison.dispatch.c.src'), + [ + AVX512_SKX, AVX512F, AVX2, SSE42, SSE2, + VSX3, VSX2, + NEON, + VXE, VX, + ] + ], + [ + 'loops_exponent_log.dispatch.h', + src_file.process('src/umath/loops_exponent_log.dispatch.c.src'), + # Enabling SIMD on clang-cl raises spurious FP exceptions + # TODO (seiko2plus): debug spurious FP exceptions for single-precision log/exp + compiler_id == 'clang-cl' ? [] : [ + AVX512_SKX, AVX512F, [AVX2, FMA3] + ] + ], + [ + 'loops_hyperbolic.dispatch.h', + src_file.process('src/umath/loops_hyperbolic.dispatch.c.src'), + [ + AVX512_SKX, [AVX2, FMA3], + VSX4, VSX2, + NEON_VFPV4, + VXE, VX + ] + ], + [ + 'loops_logical.dispatch.h', + src_file.process('src/umath/loops_logical.dispatch.c.src'), + [ + ASIMD, NEON, + AVX512_SKX, AVX2, SSE2, + VSX2, + VX, + ] + ], + [ + 'loops_minmax.dispatch.h', + src_file.process('src/umath/loops_minmax.dispatch.c.src'), + [ + ASIMD, NEON, + AVX512_SKX, AVX2, SSE2, + VSX2, + VXE, VX, + ] + ], + [ + 'loops_modulo.dispatch.h', + src_file.process('src/umath/loops_modulo.dispatch.c.src'), + [ + VSX4 + ] + ], + [ + 'loops_trigonometric.dispatch.h', + src_file.process('src/umath/loops_trigonometric.dispatch.c.src'), + # Enabling SIMD on clang-cl raises spurious FP exceptions + # TODO (seiko2plus): debug spurious FP exceptions for single-precision sin/cos + compiler_id == 'clang-cl' ? 
[] : [ + AVX512F, [AVX2, FMA3], + VSX4, VSX3, VSX2, + NEON_VFPV4, + VXE2, VXE + ] + ], + [ + 'loops_umath_fp.dispatch.h', + src_file.process('src/umath/loops_umath_fp.dispatch.c.src'), + [AVX512_SKX] + ], + [ + 'loops_unary.dispatch.h', + src_file.process('src/umath/loops_unary.dispatch.c.src'), + [ + ASIMD, NEON, + AVX512_SKX, AVX2, SSE2, + VSX2, + VXE, VX + ] + ], + [ + 'loops_unary_fp.dispatch.h', + src_file.process('src/umath/loops_unary_fp.dispatch.c.src'), + [ + SSE41, SSE2, + VSX2, + ASIMD, NEON, + VXE, VX + ] + ], + [ + 'loops_unary_fp_le.dispatch.h', + src_file.process('src/umath/loops_unary_fp_le.dispatch.c.src'), + [ + SSE41, SSE2, + VSX2, + ASIMD, NEON, + ] + ], + [ + 'loops_unary_complex.dispatch.h', + src_file.process('src/umath/loops_unary_complex.dispatch.c.src'), + [ + AVX512F, [AVX2, FMA3], SSE2, + ASIMD, NEON, + VSX3, VSX2, + VXE, VX, + ] + ], + [ + 'loops_autovec.dispatch.h', + src_file.process('src/umath/loops_autovec.dispatch.c.src'), + [ + AVX2, SSE2, + NEON, + VSX2, + VX, + ] + ], +] + mtargets = mod_features.multi_targets( + gen_mtargets[0], umath_gen_headers + gen_mtargets[1], + dispatch: gen_mtargets[2], + baseline: CPU_BASELINE, + prefix: 'NPY_', + dependencies: [py_dep, np_core_dep], + c_args: c_args_common + max_opt, + cpp_args: cpp_args_common + max_opt, + include_directories: [ + 'include', + 'src/common', + 'src/multiarray', + 'src/npymath', + 'src/umath' + ] + ) + if not is_variable('multiarray_umath_mtargets') + multiarray_umath_mtargets = mtargets + else + multiarray_umath_mtargets.extend(mtargets) + endif endforeach # Build _multiarray_umath module @@ -733,12 +992,10 @@ if have_blas ] endif -src_multiarray = [ +src_multiarray = multiarray_gen_headers + [ 'src/multiarray/abstractdtypes.c', 'src/multiarray/alloc.c', - src_file.process('src/multiarray/argfunc.dispatch.c.src'), 'src/multiarray/arrayobject.c', - src_file.process('src/multiarray/arraytypes.h.src'), 'src/multiarray/array_coercion.c', 'src/multiarray/array_method.c', 'src/multiarray/array_assign_scalar.c', @@ -792,9 +1049,6 @@ src_multiarray = [ 'src/multiarray/typeinfo.c', 'src/multiarray/usertypes.c', 'src/multiarray/vdot.c', - src_file.process('src/common/npy_sort.h.src'), - 'src/npysort/simd_qsort.dispatch.cpp', - 'src/npysort/simd_qsort_16bit.dispatch.cpp', 'src/npysort/quicksort.cpp', 'src/npysort/mergesort.cpp', 'src/npysort/timsort.cpp', @@ -817,26 +1071,9 @@ src_multiarray = [ 'src/npymath/arm64_exports.c', ] -src_umath = [ +src_umath = umath_gen_headers + [ src_file.process('src/umath/funcs.inc.src'), - src_file.process('src/umath/loops.h.src'), - src_file.process('src/umath/loops_utils.h.src'), src_file.process('src/umath/loops.c.src'), - src_file.process('src/umath/loops_arithm_fp.dispatch.c.src'), - src_file.process('src/umath/loops_arithmetic.dispatch.c.src'), - src_file.process('src/umath/loops_comparison.dispatch.c.src'), - src_file.process('src/umath/loops_exponent_log.dispatch.c.src'), - src_file.process('src/umath/loops_hyperbolic.dispatch.c.src'), - src_file.process('src/umath/loops_logical.dispatch.c.src'), - src_file.process('src/umath/loops_minmax.dispatch.c.src'), - src_file.process('src/umath/loops_modulo.dispatch.c.src'), - src_file.process('src/umath/loops_trigonometric.dispatch.c.src'), - src_file.process('src/umath/loops_umath_fp.dispatch.c.src'), - src_file.process('src/umath/loops_unary.dispatch.c.src'), - src_file.process('src/umath/loops_unary_fp.dispatch.c.src'), - src_file.process('src/umath/loops_unary_fp_le.dispatch.c.src'), - 
src_file.process('src/umath/loops_unary_complex.dispatch.c.src'), - src_file.process('src/umath/loops_autovec.dispatch.c.src'), src_file.process('src/umath/matmul.c.src'), src_file.process('src/umath/matmul.h.src'), 'src/umath/ufunc_type_resolution.c', @@ -863,52 +1100,24 @@ src_umath = [ # may be able to avoid the accuracy regressions in SVML. svml_objects = [] if use_svml - svml_objects += [ - 'src/umath/svml/linux/avx512/svml_z0_acos_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_acos_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_acosh_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_acosh_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_asin_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_asin_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_asinh_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_asinh_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_atan2_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_atan2_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_atan_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_atan_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_atanh_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_atanh_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_cbrt_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_cbrt_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_cos_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_cos_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_cosh_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_cosh_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_exp2_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_exp2_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_exp_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_exp_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_expm1_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_expm1_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_log10_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_log10_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_log1p_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_log1p_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_log2_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_log2_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_log_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_log_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_pow_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_pow_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_sin_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_sin_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_sinh_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_sinh_s_la.s', - 'src/umath/svml/linux/avx512/svml_z0_tan_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_tan_s_la.s', - # 'src/umath/svml/linux/avx512/svml_z0_tanh_d_la.s', - 'src/umath/svml/linux/avx512/svml_z0_tanh_s_la.s', + foreach svml_func : [ + 'acos', 'acosh', 'asin', + 'asinh', 'atan2', + 'atan', 'atanh', + 'cbrt', 'cos', + 'cosh', 'exp2', + 'exp', 'expm1', + 'log10', 'log1p', + 'log2', 'log', + 'pow', 'sin', 'sinh', 'tan', + 'tanh' ] + foreach svml_sfx : ['d_la', 's_la', 'd_ha', 's_la'] + svml_objects += [ + 'src/umath/svml/linux/avx512/svml_z0_'+svml_func+'_'+svml_sfx+'.s' + ] + endforeach + endforeach endif py.extension_module('_multiarray_umath', @@ -934,26 +1143,60 @@ py.extension_module('_multiarray_umath', 'src/umath', ], dependencies: blas_dep, - link_with: npymath_lib, + link_with: [npymath_lib, multiarray_umath_mtargets.static_lib('_multiarray_umath_mtargets')], install: true, subdir: 'numpy/core', ) # Build SIMD module # ----------------- +_simd_dispatch = [] +_simd_baseline = [] +foreach target : 
get_option('test-simd') + target = target.strip().to_upper().split(',') + mfeatures = [] + foreach fet_name : target + if fet_name == 'BASELINE' + _simd_baseline = CPU_BASELINE + break + endif + if fet_name not in CPU_FEATURES + error('Expected a valid feature name, got('+fet_name+')') + endif + mfeatures += CPU_FEATURES[fet_name] + endforeach + _simd_dispatch += [mfeatures] +endforeach -py.extension_module('_simd', +_simd_mtargets = mod_features.multi_targets( + '_simd.dispatch.h', [ - 'src/common/npy_cpu_features.c', - 'src/_simd/_simd.c', src_file.process('src/_simd/_simd_inc.h.src'), src_file.process('src/_simd/_simd_data.inc.src'), src_file.process('src/_simd/_simd.dispatch.c.src'), ], + # Skip validating the order of `_simd_dispatch` because we execute all these + # features, not just the highest interest one. The sorting doesn't matter + # here, given the nature of this testing unit. + keep_sort: true, + dispatch: _simd_dispatch, + baseline: _simd_baseline, + prefix: 'NPY_', + dependencies: [py_dep, np_core_dep], + include_directories: ['src/_simd', 'src/npymath'], + c_args: c_args_common, + cpp_args: cpp_args_common, +) + +py.extension_module('_simd', + [ + 'src/common/npy_cpu_features.c', + 'src/_simd/_simd.c', + ], c_args: c_args_common, include_directories: ['src/_simd', 'src/npymath'], dependencies: np_core_dep, - link_with: npymath_lib, + link_with: [npymath_lib, _simd_mtargets.static_lib('_simd_mtargets')], install: true, subdir: 'numpy/core', ) diff --git a/numpy/core/src/_simd/_simd.c b/numpy/core/src/_simd/_simd.c index 52b66e7652a8..5a113fe57876 100644 --- a/numpy/core/src/_simd/_simd.c +++ b/numpy/core/src/_simd/_simd.c @@ -85,9 +85,13 @@ PyMODINIT_FUNC PyInit__simd(void) goto err; \ } \ } - - NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, ATTACH_MODULE, MAKE_MSVC_HAPPY) - NPY__CPU_DISPATCH_BASELINE_CALL(ATTACH_BASELINE_MODULE, MAKE_MSVC_HAPPY) + #ifdef NPY__CPU_MESON_BUILD + NPY_MTARGETS_CONF_DISPATCH(NPY_CPU_HAVE, ATTACH_MODULE, MAKE_MSVC_HAPPY) + NPY_MTARGETS_CONF_BASELINE(ATTACH_BASELINE_MODULE, MAKE_MSVC_HAPPY) + #else + NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, ATTACH_MODULE, MAKE_MSVC_HAPPY) + NPY__CPU_DISPATCH_BASELINE_CALL(ATTACH_BASELINE_MODULE, MAKE_MSVC_HAPPY) + #endif return m; err: Py_DECREF(m); diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src index f532c9e022f7..51f5ddd54b22 100644 --- a/numpy/core/src/_simd/_simd.dispatch.c.src +++ b/numpy/core/src/_simd/_simd.dispatch.c.src @@ -919,7 +919,9 @@ NPY_CPU_DISPATCH_CURFX(simd_create_module)(void) { static struct PyModuleDef defs = { .m_base = PyModuleDef_HEAD_INIT, - #ifdef NPY__CPU_TARGET_CURRENT + #if defined(NPY_MTARGETS_CURRENT) // meson build + .m_name = "numpy.core._simd." NPY_TOSTRING(NPY_MTARGETS_CURRENT), + #elif defined(NPY__CPU_TARGET_CURRENT) .m_name = "numpy.core._simd." NPY_TOSTRING(NPY__CPU_TARGET_CURRENT), #else .m_name = "numpy.core._simd.baseline", diff --git a/numpy/core/src/common/npy_cpu_dispatch.h b/numpy/core/src/common/npy_cpu_dispatch.h index 4d5addec809e..699f8536f6a2 100644 --- a/numpy/core/src/common/npy_cpu_dispatch.h +++ b/numpy/core/src/common/npy_cpu_dispatch.h @@ -43,6 +43,7 @@ #endif #endif #endif // !NPY_DISABLE_OPTIMIZATION +#ifndef NPY__CPU_MESON_BUILD /** * Macro NPY_CPU_DISPATCH_CURFX(NAME) * @@ -261,5 +262,5 @@ ((TESTED_FEATURES) ? (NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) : (void) 0), #define NPY_CPU_DISPATCH_CALL_ALL_BASE_CB_(LEFT, ...) 
\ ( LEFT __VA_ARGS__ ) - +#endif // NPY__CPU_MESON_BUILD #endif // NUMPY_CORE_SRC_COMMON_NPY_CPU_DISPATCH_H_ diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h index 72a87eac1715..357b136d25cd 100644 --- a/numpy/core/src/common/simd/sse/arithmetic.h +++ b/numpy/core/src/common/simd/sse/arithmetic.h @@ -321,7 +321,7 @@ NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) NPY_FINLINE npyv_f32 npyv_muladdsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c) { npyv_f32 m = npyv_mul_f32(a, b); - #if NPY_HAVE_SSE3 + #ifdef NPY_HAVE_SSE3 return _mm_addsub_ps(m, c); #else const npyv_f32 msign = npyv_set_f32(-0.0f, 0.0f, -0.0f, 0.0f); @@ -331,7 +331,7 @@ NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) NPY_FINLINE npyv_f64 npyv_muladdsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c) { npyv_f64 m = npyv_mul_f64(a, b); - #if NPY_HAVE_SSE3 + #ifdef NPY_HAVE_SSE3 return _mm_addsub_pd(m, c); #else const npyv_f64 msign = npyv_set_f64(-0.0, 0.0); From 7eb8cd9a4aed64d02b0eb4454921b3adcf3411ef Mon Sep 17 00:00:00 2001 From: Stefan van der Walt Date: Fri, 11 Aug 2023 13:40:16 -0700 Subject: [PATCH 036/120] MAINT: Remove versioneer (#24196) Co-authored-by: Ralf Gommers --- .gitattributes | 5 +- .gitignore | 2 + MANIFEST.in | 2 +- doc/Makefile | 4 +- generate_version.py | 40 - meson.build | 26 +- numpy/__init__.py | 8 +- numpy/__init__.pyi | 1 - numpy/_build_utils/gitversion.py | 98 + numpy/_version.py | 658 ------ numpy/core/__init__.py | 5 +- numpy/core/setup.py | 1 - numpy/lib/__init__.py | 2 - numpy/meson.build | 33 +- numpy/tests/test_numpy_version.py | 9 +- numpy/tests/test_public_api.py | 2 +- numpy/typing/tests/data/pass/modules.py | 1 - numpy/typing/tests/data/reveal/modules.pyi | 1 - numpy/typing/tests/data/reveal/version.pyi | 8 - numpy/version.py | 23 - pyproject.toml.setuppy | 4 + setup.py | 27 +- tools/lint_diff.ini | 2 +- versioneer.py | 2194 -------------------- 24 files changed, 169 insertions(+), 2987 deletions(-) delete mode 100644 generate_version.py create mode 100644 numpy/_build_utils/gitversion.py delete mode 100644 numpy/_version.py delete mode 100644 numpy/typing/tests/data/reveal/version.pyi delete mode 100644 numpy/version.py delete mode 100644 versioneer.py diff --git a/.gitattributes b/.gitattributes index 537650d395fe..9282851b8014 100644 --- a/.gitattributes +++ b/.gitattributes @@ -17,10 +17,9 @@ numpy/core/src/common/dlpack/dlpack.h linguist-vendored # Mark some files as generated numpy/linalg/lapack_lite/f2c_*.c linguist-generated numpy/linalg/lapack_lite/lapack_lite_names.h linguist-generated -numpy/_version.py linguist-generated -# versioneer config -numpy/_version.py export-subst +# version generated from pyproject.toml during build +numpy/version.py linguist-generated # Configuration files *.ini text diff --git a/.gitignore b/.gitignore index e5784971e9df..860aa9aa81d4 100644 --- a/.gitignore +++ b/.gitignore @@ -119,6 +119,8 @@ doc/source/savefig/ # Things specific to this project # ################################### +# The line below should change to numpy/_version.py for NumPy 2.0 +numpy/version.py numpy/core/__svn_version__.py doc/numpy.scipy.org/_build numpy/__config__.py diff --git a/MANIFEST.in b/MANIFEST.in index 4803b39131e1..fc498962a642 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -16,7 +16,7 @@ exclude azure-*.yml include .coveragerc # Sub-directories. 
Included are: numpy/, doc/, benchmarks/, tools/ -include numpy/_version.py +include numpy/version.py recursive-include numpy/random *.pyx *.pxd *.pyx.in *.pxd.in include numpy/py.typed include numpy/random/include/* diff --git a/doc/Makefile b/doc/Makefile index 6d3c93203b55..7e81d95058e0 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -81,8 +81,8 @@ gitwash-update: # #SPHINXBUILD="LANG=C sphinx-build" -NUMPYVER:=$(shell $(PYTHON) -c "import numpy; print(numpy.version.git_revision[:10])" 2>/dev/null) -GITVER ?= $(shell cd ..; $(PYTHON) -c "import versioneer as v; print(v.get_versions()['full-revisionid'][:10])") +NUMPYVER:=$(shell $(PYTHON) -c "import numpy; print(numpy.version.git_revision[:7])" 2>/dev/null) +GITVER ?= $(shell (cd ..; set -o pipefail && git rev-parse HEAD 2>/dev/null | cut -c1-7) || echo Unknown) version-check: ifeq "$(GITVER)" "Unknown" diff --git a/generate_version.py b/generate_version.py deleted file mode 100644 index b3c8a39789a1..000000000000 --- a/generate_version.py +++ /dev/null @@ -1,40 +0,0 @@ -# Note: This file has to live next to versioneer.py or it will not work -import argparse -import os - -import versioneer - - -def write_version_info(path): - vinfo = versioneer.get_versions() - full_version = vinfo['version'] - git_revision = vinfo['full-revisionid'] - - if os.environ.get("MESON_DIST_ROOT"): - path = os.path.join(os.environ.get("MESON_DIST_ROOT"), path) - - with open(path, "w") as f: - f.write("def get_versions():\n") - f.write(" return {\n") - f.write(f" 'full-revisionid': '{git_revision}',\n") - f.write(f" 'version': '{full_version}'\n") - f.write("}") - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument( - "-o", "--outfile", type=str, help="Path to write version info to" - ) - args = parser.parse_args() - - if not args.outfile.endswith(".py"): - raise ValueError( - f"Output file must be a Python file. " - f"Got: {args.outfile} as filename instead" - ) - - write_version_info(args.outfile) - - -main() diff --git a/meson.build b/meson.build index 33d0e7b462ef..a9e68c2eb94b 100644 --- a/meson.build +++ b/meson.build @@ -1,10 +1,10 @@ project( 'NumPy', 'c', 'cpp', 'cython', - # Note that the git commit hash cannot be added dynamically here - # It is dynamically added upon import by versioneer - # See `numpy/__init__.py` - version: '1.26.0.dev0', + version: run_command( + # This should become `numpy/_version.py` in NumPy 2.0 + ['python', 'numpy/_build_utils/gitversion.py'], + check: true).stdout().strip(), license: 'BSD-3', meson_version: '>=1.2.99', # version in vendored-meson is 1.2.99 default_options: [ @@ -62,23 +62,5 @@ if cc.get_id() == 'clang' ) endif -# Generate version number. Note that this will not (yet) update the version -# number seen by pip or reflected in wheel filenames. See -# https://github.com/mesonbuild/meson-python/issues/159 for that. 
-versioneer = files('generate_version.py') -if fs.exists('_version_meson.py') - py.install_sources('_version_meson.py', subdir: 'numpy') -else - custom_target('write_version_file', - output: '_version_meson.py', - command: [py, versioneer, '-o', '@OUTPUT@'], - build_by_default: true, - build_always_stale: true, - install: true, - install_dir: py.get_install_dir() / 'numpy' - ) - meson.add_dist_script(py, versioneer, '-o', '_version_meson.py') -endif - subdir('meson_cpu') subdir('numpy') diff --git a/numpy/__init__.py b/numpy/__init__.py index cf852aeadd14..47703b7d492d 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -108,6 +108,11 @@ ComplexWarning, ModuleDeprecationWarning, VisibleDeprecationWarning, TooHardError, AxisError) + +# If a version with git hash was stored, use that instead +from . import version +from .version import __version__ + # We first need to detect if we're being called as part of the numpy setup # procedure itself in a reliable manner. try: @@ -447,8 +452,5 @@ def _pyinstaller_hooks_dir(): del os -# get the version using versioneer -from .version import __version__, git_revision as __git_version__ - # Remove symbols imported for internal use del sys, warnings diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 45c3023c6a0d..6f6acd893aea 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -666,7 +666,6 @@ class _SupportsWrite(Protocol[_AnyStr_contra]): __all__: list[str] __path__: list[str] __version__: str -__git_version__: str test: PytestTester # TODO: Move placeholders to their respective module once diff --git a/numpy/_build_utils/gitversion.py b/numpy/_build_utils/gitversion.py new file mode 100644 index 000000000000..6d98de6eacf5 --- /dev/null +++ b/numpy/_build_utils/gitversion.py @@ -0,0 +1,98 @@ +import os +import textwrap + + +def init_version(): + init = os.path.join(os.path.dirname(__file__), '../../pyproject.toml') + data = open(init).readlines() + + version_line = next( + line for line in data if line.startswith('version =') + ) + + version = version_line.strip().split(' = ')[1] + version = version.replace('"', '').replace("'", '') + + return version + + +def git_version(version): + # Append last commit date and hash to dev version information, + # if available + + import subprocess + import os.path + + try: + p = subprocess.Popen( + ['git', 'log', '-1', '--format="%H %aI"'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=os.path.dirname(__file__), + ) + except FileNotFoundError: + pass + else: + out, err = p.communicate() + if p.returncode == 0: + git_hash, git_date = ( + out.decode('utf-8') + .strip() + .replace('"', '') + .split('T')[0] + .replace('-', '') + .split() + ) + + # Only attach git tag to development versions + if 'dev' in version: + version += f'+git{git_date}.{git_hash[:7]}' + else: + git_hash = '' + + return version, git_hash + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--write', help="Save version to this file") + parser.add_argument( + '--meson-dist', + help='Output path is relative to MESON_DIST_ROOT', + action='store_true' + ) + args = parser.parse_args() + + version, git_hash = git_version(init_version()) + + # For NumPy 2.0, this should only have one field: `version` + template = textwrap.dedent(f''' + version = "{version}" + __version__ = version + full_version = version + + git_revision = "{git_hash}" + release = 'dev' not in version and '+' not in version + short_version = version.split("+")[0] + ''') + + if args.write: + 
outfile = args.write + if args.meson_dist: + outfile = os.path.join( + os.environ.get('MESON_DIST_ROOT', ''), + outfile + ) + + # Print human readable output path + relpath = os.path.relpath(outfile) + if relpath.startswith('.'): + relpath = outfile + + with open(outfile, 'w') as f: + print(f'Saving version to {relpath}') + f.write(template) + else: + print(version) diff --git a/numpy/_version.py b/numpy/_version.py deleted file mode 100644 index 565eb317bf17..000000000000 --- a/numpy/_version.py +++ /dev/null @@ -1,658 +0,0 @@ - -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. -# Generated by versioneer-0.26 -# https://github.com/python-versioneer/python-versioneer - -"""Git implementation of _version.py.""" - -import errno -import os -import re -import subprocess -import sys -from typing import Callable, Dict -import functools - - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). - git_refnames = "$Format:%d$" - git_full = "$Format:%H$" - git_date = "$Format:%ci$" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "pep440" - cfg.tag_prefix = "v" - cfg.parentdir_prefix = "numpy-" - cfg.versionfile_source = "numpy/_version.py" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY: Dict[str, str] = {} -HANDLERS: Dict[str, Dict[str, Callable]] = {} - - -def register_vcs_handler(vcs, method): # decorator - """Create decorator to mark a method as the handler of a VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - process = None - - popen_kwargs = {} - if sys.platform == "win32": - # This hides the console window if pythonw.exe is used - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - popen_kwargs["startupinfo"] = startupinfo - - for command in commands: - try: - dispcmd = str([command] + args) - # remember shell=False, so use git.cmd on windows, not just git - process = subprocess.Popen([command] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None), **popen_kwargs) - break - except OSError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, 
None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = process.communicate()[0].strip().decode() - if process.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) - return None, process.returncode - return stdout, process.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for _ in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - with open(versionfile_abs, "r") as fobj: - for line in fobj: - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - except OSError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if "refnames" not in keywords: - raise NotThisMethod("Short version file found") - date = keywords.get("date") - if date is not None: - # Use only the last line. Previous lines may contain GPG signature - # information. - date = date.splitlines()[-1] - - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = {r.strip() for r in refnames.strip("()").split(",")} - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. 
We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = {r for r in refs if re.search(r'\d', r)} - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - # Filter out refs that exactly match prefix or that don't start - # with a number once the prefix is stripped (mostly a concern - # when prefix is '') - if not re.match(r'\d', r): - continue - if verbose: - print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - # GIT_DIR can interfere with correct operation of Versioneer. - # It may be intended to be passed to the Versioneer-versioned project, - # but that should not change where we get our version from. - env = os.environ.copy() - env.pop("GIT_DIR", None) - runner = functools.partial(runner, env=env) - - _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=not verbose) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = runner(GITS, [ - "describe", "--tags", "--dirty=", "--always", "--long", - "--match", f"{tag_prefix}[[:digit:]]*" - ], cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], - cwd=root) - # --abbrev-ref was added in git-1.6.3 - if rc != 0 or branch_name is None: - raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") - branch_name = branch_name.strip() - - if branch_name == "HEAD": - # If we aren't exactly on a branch, pick a branch which represents - # the current commit. 
If all else fails, we are on a branchless - # commit. - branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) - # --contains was added in git-1.5.4 - if rc != 0 or branches is None: - raise NotThisMethod("'git branch --contains' returned error") - branches = branches.split("\n") - - # Remove the first line if we're running detached - if "(" in branches[0]: - branches.pop(0) - - # Strip off the leading "* " from the list of branches. - branches = [branch[2:] for branch in branches] - if "master" in branches: - branch_name = "master" - elif not branches: - branch_name = None - else: - # Pick the first branch that is returned. Good or bad. - branch_name = branches[0] - - pieces["branch"] = branch_name - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparsable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) - pieces["distance"] = len(out.split()) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() - # Use only the last line. Previous lines may contain GPG signature - # information. - date = date.splitlines()[-1] - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_branch(pieces): - """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . - - The ".dev0" means not master branch. Note that .dev0 sorts backwards - (a feature branch will appear "older" than the master branch). - - Exceptions: - 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0" - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += "+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def pep440_split_post(ver): - """Split pep440 version string at the post-release segment. - - Returns the release segments before the post-release and the - post-release version number (or -1 if no post-release segment is present). - """ - vc = str.split(ver, ".post") - return vc[0], int(vc[1] or 0) if len(vc) == 2 else None - - -def render_pep440_pre(pieces): - """TAG[.postN.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post0.devDISTANCE - """ - if pieces["closest-tag"]: - if pieces["distance"]: - # update the post release segment - tag_version, post_version = pep440_split_post(pieces["closest-tag"]) - rendered = tag_version - if post_version is not None: - rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) - else: - rendered += ".post0.dev%d" % (pieces["distance"]) - else: - # no commits, use the tag as the version - rendered = pieces["closest-tag"] - else: - # exception #1 - rendered = "0.post0.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_post_branch(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . - - The ".dev0" means not master branch. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Exceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-branch": - rendered = render_pep440_branch(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-post-branch": - rendered = render_pep440_post_branch(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. 
- for _ in cfg.versionfile_source.split('/'): - root = os.path.dirname(root) - except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py index 08e717363635..2d59b89e6b8a 100644 --- a/numpy/core/__init__.py +++ b/numpy/core/__init__.py @@ -6,11 +6,12 @@ """ -from numpy.version import version as __version__ - import os import warnings +from numpy.version import version as __version__ + + # disables OpenBLAS affinity setting of the main thread that limits # python threads or processes to one core env_added = [] diff --git a/numpy/core/setup.py b/numpy/core/setup.py index c6cdd4025966..a05b5da7963b 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -417,7 +417,6 @@ def configuration(parent_package='',top_path=None): exec_mod_from_location) from numpy.distutils.system_info import (get_info, blas_opt_info, lapack_opt_info) - from numpy.version import release as is_released config = Configuration('core', parent_package, top_path) local_dir = config.local_path diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index d3cc9fee4aeb..cbab200e0918 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -12,8 +12,6 @@ """ -from numpy.version import version as __version__ - # Public submodules # Note: recfunctions and (maybe) format are public too, but not imported from . 
import mixins diff --git a/numpy/meson.build b/numpy/meson.build index 76ef7b52ece5..9b9a3581a9c6 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -188,6 +188,14 @@ if not have_lapack and not allow_noblas 'for some linear algebra operations).') endif +# Generate version.py for sdist +gitversion = files('_build_utils/gitversion.py')[0] +python_bin = py.full_path() +meson.add_dist_script( + py, + [gitversion, '--meson-dist', '--write', 'numpy/version.py'] +) + # Copy the main __init__.py|pxd files to the build dir (needed for Cython) __init__py = fs.copyfile('__init__.py') __init__pxd = fs.copyfile('__init__.pxd') @@ -203,7 +211,6 @@ python_sources = [ '_globals.py', '_pytesttester.py', '_pytesttester.pyi', - '_version.py', 'conftest.py', 'ctypeslib.py', 'ctypeslib.pyi', @@ -212,8 +219,7 @@ python_sources = [ 'dtypes.py', 'dtypes.pyi', 'matlib.py', - 'py.typed', - 'version.py' + 'py.typed' ] py.install_sources( @@ -243,7 +249,7 @@ pure_subdirs = [ 'polynomial', 'testing', 'tests', - 'typing', + 'typing' ] if py.version().version_compare('<3.12') pure_subdirs += 'distutils' @@ -251,6 +257,25 @@ endif np_dir = py.get_install_dir() / 'numpy' +if not fs.exists('version.py') + generate_version = custom_target( + 'generate-version', + install: true, + build_always_stale: true, + build_by_default: true, + output: 'version.py', + input: '_build_utils/gitversion.py', + command: [py, '@INPUT@', '--write', '@OUTPUT@'], + install_dir: np_dir + ) +else + # When building from sdist, version.py exists and should be included + py.install_sources( + ['version.py'], + subdir : 'numpy' + ) +endif + foreach subdir: pure_subdirs install_subdir(subdir, install_dir: np_dir) endforeach diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py index bccbcb8e9cf7..61643426c8d7 100644 --- a/numpy/tests/test_numpy_version.py +++ b/numpy/tests/test_numpy_version.py @@ -24,12 +24,9 @@ def test_valid_numpy_version(): # Verify that the numpy version is a valid one (no .post suffix or other # nonsense). See gh-6431 for an issue caused by an invalid version. - version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(a[0-9]|b[0-9]|rc[0-9]|)" - dev_suffix = r"(\.dev0|)(\+[0-9]*\.g[0-9a-f]+|)" - if np.version.release: - res = re.match(version_pattern + '$', np.__version__) - else: - res = re.match(version_pattern + dev_suffix + '$', np.__version__) + version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(a[0-9]|b[0-9]|rc[0-9])?" + dev_suffix = r"(\.dev[0-9]+(\+git[0-9]+\.[0-9a-f]+)?)?" 
+ res = re.match(version_pattern + dev_suffix + '$', np.__version__) assert_(res is not None, np.__version__) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 555f1638413d..54bf3dacf972 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -157,7 +157,7 @@ def test_NPY_NO_EXPORT(): "testing.overrides", "typing", "typing.mypy_plugin", - "version", + "version" # Should be removed for NumPy 2.0 ]] if sys.version_info < (3, 12): PUBLIC_MODULES += [ diff --git a/numpy/typing/tests/data/pass/modules.py b/numpy/typing/tests/data/pass/modules.py index 9261874d565a..f2d779e20e63 100644 --- a/numpy/typing/tests/data/pass/modules.py +++ b/numpy/typing/tests/data/pass/modules.py @@ -29,7 +29,6 @@ np.__path__ np.__version__ -np.__git_version__ np.__all__ np.char.__all__ diff --git a/numpy/typing/tests/data/reveal/modules.pyi b/numpy/typing/tests/data/reveal/modules.pyi index 4191c564af8c..48730110e1e3 100644 --- a/numpy/typing/tests/data/reveal/modules.pyi +++ b/numpy/typing/tests/data/reveal/modules.pyi @@ -33,7 +33,6 @@ reveal_type(np.polynomial.polynomial) # E: ModuleType reveal_type(np.__path__) # E: list[builtins.str] reveal_type(np.__version__) # E: str -reveal_type(np.__git_version__) # E: str reveal_type(np.test) # E: _pytesttester.PytestTester reveal_type(np.test.module_name) # E: str diff --git a/numpy/typing/tests/data/reveal/version.pyi b/numpy/typing/tests/data/reveal/version.pyi deleted file mode 100644 index e53837647655..000000000000 --- a/numpy/typing/tests/data/reveal/version.pyi +++ /dev/null @@ -1,8 +0,0 @@ -import numpy.version - -reveal_type(numpy.version.version) # E: str -reveal_type(numpy.version.__version__) # E: str -reveal_type(numpy.version.full_version) # E: str -reveal_type(numpy.version.git_revision) # E: str -reveal_type(numpy.version.release) # E: bool -reveal_type(numpy.version.short_version) # E: str diff --git a/numpy/version.py b/numpy/version.py deleted file mode 100644 index d9d2fe1b7616..000000000000 --- a/numpy/version.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import annotations - -from ._version import get_versions - -__ALL__ = ['version', '__version__', 'full_version', 'git_revision', 'release'] - - -_built_with_meson = False -try: - from ._version_meson import get_versions - _built_with_meson = True -except ImportError: - from ._version import get_versions - -vinfo: dict[str, str] = get_versions() -version = vinfo["version"] -__version__ = vinfo.get("closest-tag", vinfo["version"]) -git_revision = vinfo['full-revisionid'] -release = 'dev0' not in version and '+' not in version -full_version = version -short_version = version.split("+")[0] - -del get_versions, vinfo diff --git a/pyproject.toml.setuppy b/pyproject.toml.setuppy index 044cf5538af4..d6c0f3d72688 100644 --- a/pyproject.toml.setuppy +++ b/pyproject.toml.setuppy @@ -1,6 +1,10 @@ # pyproject.toml needed to build with setup.py # This file is used temporarily to replace the main pyproject.toml when needing # to avoid building with Meson (e.g., in the Emscripten/Pyodide CI job) +[project] +name = "numpy" +version = "2.0.0.dev0" + [build-system] requires = [ "setuptools==59.2.0", diff --git a/setup.py b/setup.py index c924bb999ebc..6bd2153d7835 100755 --- a/setup.py +++ b/setup.py @@ -22,9 +22,6 @@ raise RuntimeError("Python version >= 3.9 required.") -import versioneer - - # This is a bit hackish: we are setting a global variable so that the main # numpy __init__ can detect if it is being loaded by the setup routine, to # avoid 
attempting to load components that aren't built yet. While ugly, it's @@ -34,7 +31,16 @@ # Needed for backwards code compatibility below and in some CI scripts. # The version components are changed from ints to strings, but only VERSION # seems to matter outside of this module and it was already a str. -FULLVERSION = versioneer.get_version() +FULLVERSION = subprocess.check_output([ + sys.executable, + 'numpy/_build_utils/gitversion.py' +]).strip().decode('ascii') + +# Write git version to disk +subprocess.check_output([ + sys.executable, + 'numpy/_build_utils/gitversion.py', '--write', 'numpy/version.py' +]) # Capture the version string: # 1.22.0.dev0+ ... -> ISRELEASED == False, VERSION == 1.22.0 @@ -80,10 +86,6 @@ raise RuntimeError("setuptools versions >= '60.0.0' require " "SETUPTOOLS_USE_DISTUTILS=stdlib in the environment") -# Initialize cmdclass from versioneer -from numpy.distutils.core import numpy_cmdclass -cmdclass = versioneer.get_cmdclass(numpy_cmdclass) - CLASSIFIERS = """\ Development Status :: 5 - Production/Stable Intended Audience :: Science/Research @@ -106,7 +108,6 @@ Operating System :: MacOS """ - def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration @@ -173,10 +174,9 @@ def __exit__(self, exception_type, exception_value, traceback): with open(self.f1, 'w') as f: f.write(self.bsd_text) - # Need to inherit from versioneer version of sdist to get the encoded # version information. -class sdist_checked(cmdclass['sdist']): +class sdist_checked: """ check submodules on sdist to prevent incomplete tarballs """ def run(self): check_submodules() @@ -480,6 +480,8 @@ def get_docs_url(): return "https://numpy.org/doc/{}.{}".format(MAJOR, MINOR) +from numpy.distutils.core import numpy_cmdclass as cmdclass + def setup_package(): src_path = os.path.dirname(os.path.abspath(__file__)) old_path = os.getcwd() @@ -498,7 +500,6 @@ def setup_package(): 'f2py%s.%s = numpy.f2py.f2py2e:main' % sys.version_info[:2], ] - cmdclass["sdist"] = sdist_checked metadata = dict( name='numpy', maintainer="NumPy Developers", @@ -518,7 +519,7 @@ def setup_package(): classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], test_suite='pytest', - version=versioneer.get_version(), + version=VERSION, cmdclass=cmdclass, python_requires='>=3.9', zip_safe=False, diff --git a/tools/lint_diff.ini b/tools/lint_diff.ini index f73536e230c0..dbebe483b4ab 100644 --- a/tools/lint_diff.ini +++ b/tools/lint_diff.ini @@ -2,4 +2,4 @@ max_line_length = 79 statistics = True ignore = E121,E122,E123,E125,E126,E127,E128,E226,E241,E251,E265,E266,E302,E402,E704,E712,E721,E731,E741,W291,W293,W391,W503,W504 -exclude = versioneer.py,numpy/_version.py,numpy/__config__.py,numpy/typing/tests/data,.spin/cmds.py +exclude = numpy/__config__.py,numpy/typing/tests/data,.spin/cmds.py diff --git a/versioneer.py b/versioneer.py deleted file mode 100644 index 07ecc67bf2b4..000000000000 --- a/versioneer.py +++ /dev/null @@ -1,2194 +0,0 @@ - -# Version: 0.26 - -"""The Versioneer - like a rocketeer, but for versions. - -The Versioneer -============== - -* like a rocketeer, but for versions! 
-* https://github.com/python-versioneer/python-versioneer -* Brian Warner -* License: Public Domain (Unlicense) -* Compatible with: Python 3.7, 3.8, 3.9, 3.10 and pypy3 -* [![Latest Version][pypi-image]][pypi-url] -* [![Build Status][travis-image]][travis-url] - -This is a tool for managing a recorded version number in setuptools-based -python projects. The goal is to remove the tedious and error-prone "update -the embedded version string" step from your release process. Making a new -release should be as easy as recording a new tag in your version-control -system, and maybe making new tarballs. - - -## Quick Install - -Versioneer provides two installation modes. The "classic" vendored mode installs -a copy of versioneer into your repository. The experimental build-time dependency mode -is intended to allow you to skip this step and simplify the process of upgrading. - -### Vendored mode - -* `pip install versioneer` to somewhere in your $PATH -* add a `[tool.versioneer]` section to your `pyproject.toml or a - `[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md)) -* run `versioneer install --vendor` in your source tree, commit the results -* verify version information with `python setup.py version` - -### Build-time dependency mode - -* `pip install versioneer` to somewhere in your $PATH -* add a `[tool.versioneer]` section to your `pyproject.toml or a - `[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md)) -* add `versioneer` to the `requires` key of the `build-system` table in - `pyproject.toml`: - ```toml - [build-system] - requires = ["setuptools", "versioneer"] - build-backend = "setuptools.build_meta" - ``` -* run `versioneer install --no-vendor` in your source tree, commit the results -* verify version information with `python setup.py version` - -## Version Identifiers - -Source trees come from a variety of places: - -* a version-control system checkout (mostly used by developers) -* a nightly tarball, produced by build automation -* a snapshot tarball, produced by a web-based VCS browser, like github's - "tarball from tag" feature -* a release tarball, produced by "setup.py sdist", distributed through PyPI - -Within each source tree, the version identifier (either a string or a number, -this tool is format-agnostic) can come from a variety of places: - -* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows - about recent "tags" and an absolute revision-id -* the name of the directory into which the tarball was unpacked -* an expanded VCS keyword ($Id$, etc) -* a `_version.py` created by some earlier build step - -For released software, the version identifier is closely related to a VCS -tag. Some projects use tag names that include more than just the version -string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool -needs to strip the tag prefix to extract the version identifier. For -unreleased software (between tags), the version identifier should provide -enough information to help developers recreate the same tree, while also -giving them an idea of roughly how old the tree is (after version 1.2, before -version 1.3). Many VCS systems can report a description that captures this, -for example `git describe --tags --dirty --always` reports things like -"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the -0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has -uncommitted changes). 
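As a minimal sketch (not taken from the patch itself), this is how a describe string like the "0.7-1-g574ab98-dirty" example above decomposes into the `pieces` dictionary consumed by the render functions, mirroring the parsing done in `git_pieces_from_vcs`:

    import re

    describe = "0.7-1-g574ab98-dirty"      # output of `git describe --tags --dirty --always`
    dirty = describe.endswith("-dirty")    # trailing marker for uncommitted changes
    if dirty:
        describe = describe[:describe.rindex("-dirty")]
    # TAG-NUM-gHEX, where TAG itself may contain hyphens
    mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", describe)
    pieces = {
        "closest-tag": mo.group(1),    # "0.7"
        "distance": int(mo.group(2)),  # 1 commit past the tag
        "short": mo.group(3),          # "574ab98"
        "dirty": dirty,                # True
    }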
- -The version identifier is used for multiple purposes: - -* to allow the module to self-identify its version: `myproject.__version__` -* to choose a name and prefix for a 'setup.py sdist' tarball - -## Theory of Operation - -Versioneer works by adding a special `_version.py` file into your source -tree, where your `__init__.py` can import it. This `_version.py` knows how to -dynamically ask the VCS tool for version information at import time. - -`_version.py` also contains `$Revision$` markers, and the installation -process marks `_version.py` to have this marker rewritten with a tag name -during the `git archive` command. As a result, generated tarballs will -contain enough information to get the proper version. - -To allow `setup.py` to compute a version too, a `versioneer.py` is added to -the top level of your source tree, next to `setup.py` and the `setup.cfg` -that configures it. This overrides several distutils/setuptools commands to -compute the version when invoked, and changes `setup.py build` and `setup.py -sdist` to replace `_version.py` with a small static file that contains just -the generated version data. - -## Installation - -See [INSTALL.md](./INSTALL.md) for detailed installation instructions. - -## Version-String Flavors - -Code which uses Versioneer can learn about its version string at runtime by -importing `_version` from your main `__init__.py` file and running the -`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can -import the top-level `versioneer.py` and run `get_versions()`. - -Both functions return a dictionary with different flavors of version -information: - -* `['version']`: A condensed version string, rendered using the selected - style. This is the most commonly used value for the project's version - string. The default "pep440" style yields strings like `0.11`, - `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section - below for alternative styles. - -* `['full-revisionid']`: detailed revision identifier. For Git, this is the - full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". - -* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the - commit date in ISO 8601 format. This will be None if the date is not - available. - -* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that - this is only accurate if run in a VCS checkout, otherwise it is likely to - be False or None - -* `['error']`: if the version string could not be computed, this will be set - to a string describing the problem, otherwise it will be None. It may be - useful to throw an exception in setup.py if this is set, to avoid e.g. - creating tarballs with a version string of "unknown". - -Some variants are more useful than others. Including `full-revisionid` in a -bug report should allow developers to reconstruct the exact code being tested -(or indicate the presence of local changes that should be shared with the -developers). `version` is suitable for display in an "about" box or a CLI -`--version` output: it can be easily compared against release notes and lists -of bugs fixed in various releases. - -The installer adds the following text to your `__init__.py` to place a basic -version in `YOURPROJECT.__version__`: - - from ._version import get_versions - __version__ = get_versions()['version'] - del get_versions - -## Styles - -The setup.cfg `style=` configuration controls how the VCS information is -rendered into a version string. 
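For concreteness, the flavor dictionary described above might look like this for a checkout two commits past a "0.11" tag (all values are illustrative, not taken from the patch):

    >>> import versioneer
    >>> versioneer.get_versions()
    {'version': '0.11+2.g1076c97',
     'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
     'dirty': False,
     'error': None,
     'date': '2016-05-31T13:02:11-0600'}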
- -The default style, "pep440", produces a PEP440-compliant string, equal to the -un-prefixed tag name for actual releases, and containing an additional "local -version" section with more detail for in-between builds. For Git, this is -TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags ---dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the -tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and -that this commit is two revisions ("+2") beyond the "0.11" tag. For released -software (exactly equal to a known tag), the identifier will only contain the -stripped tag, e.g. "0.11". - -Other styles are available. See [details.md](details.md) in the Versioneer -source tree for descriptions. - -## Debugging - -Versioneer tries to avoid fatal errors: if something goes wrong, it will tend -to return a version of "0+unknown". To investigate the problem, run `setup.py -version`, which will run the version-lookup code in a verbose mode, and will -display the full contents of `get_versions()` (including the `error` string, -which may help identify what went wrong). - -## Known Limitations - -Some situations are known to cause problems for Versioneer. This details the -most significant ones. More can be found on Github -[issues page](https://github.com/python-versioneer/python-versioneer/issues). - -### Subprojects - -Versioneer has limited support for source trees in which `setup.py` is not in -the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are -two common reasons why `setup.py` might not be in the root: - -* Source trees which contain multiple subprojects, such as - [Buildbot](https://github.com/buildbot/buildbot), which contains both - "master" and "slave" subprojects, each with their own `setup.py`, - `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI - distributions (and upload multiple independently-installable tarballs). -* Source trees whose main purpose is to contain a C library, but which also - provide bindings to Python (and perhaps other languages) in subdirectories. - -Versioneer will look for `.git` in parent directories, and most operations -should get the right version string. However `pip` and `setuptools` have bugs -and implementation details which frequently cause `pip install .` from a -subproject directory to fail to find a correct version string (so it usually -defaults to `0+unknown`). - -`pip install --editable .` should work correctly. `setup.py install` might -work too. - -Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in -some later version. - -[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking -this issue. The discussion in -[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the -issue from the Versioneer side in more detail. -[pip PR#3176](https://github.com/pypa/pip/pull/3176) and -[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve -pip to let Versioneer work correctly. - -Versioneer-0.16 and earlier only looked for a `.git` directory next to the -`setup.cfg`, so subprojects were completely unsupported with those releases. - -### Editable installs with setuptools <= 18.5 - -`setup.py develop` and `pip install --editable .` allow you to install a -project into a virtualenv once, then continue editing the source code (and -test) without re-installing after every change. 
- -"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a -convenient way to specify executable scripts that should be installed along -with the python package. - -These both work as expected when using modern setuptools. When using -setuptools-18.5 or earlier, however, certain operations will cause -`pkg_resources.DistributionNotFound` errors when running the entrypoint -script, which must be resolved by re-installing the package. This happens -when the install happens with one version, then the egg_info data is -regenerated while a different version is checked out. Many setup.py commands -cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into -a different virtualenv), so this can be surprising. - -[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes -this one, but upgrading to a newer version of setuptools should probably -resolve it. - - -## Updating Versioneer - -To upgrade your project to a new release of Versioneer, do the following: - -* install the new Versioneer (`pip install -U versioneer` or equivalent) -* edit `setup.cfg` and `pyproject.toml`, if necessary, - to include any new configuration settings indicated by the release notes. - See [UPGRADING](./UPGRADING.md) for details. -* re-run `versioneer install --[no-]vendor` in your source tree, to replace - `SRC/_version.py` -* commit any changed files - -## Future Directions - -This tool is designed to make it easily extended to other version-control -systems: all VCS-specific components are in separate directories like -src/git/ . The top-level `versioneer.py` script is assembled from these -components by running make-versioneer.py . In the future, make-versioneer.py -will take a VCS name as an argument, and will construct a version of -`versioneer.py` that is specific to the given VCS. It might also take the -configuration arguments that are currently provided manually during -installation by editing setup.py . Alternatively, it might go the other -direction and include code from all supported VCS systems, reducing the -number of intermediate scripts. - -## Similar projects - -* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time - dependency -* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of - versioneer -* [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools - plugin - -## License - -To make Versioneer easier to embed, all its code is dedicated to the public -domain. The `_version.py` that it creates is also in the public domain. -Specifically, both are released under the Creative Commons "Public Domain -Dedication" license (CC0-1.0), as described in -https://creativecommons.org/publicdomain/zero/1.0/ . 
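Tying the "Styles" section above to the `render_*` helpers being removed in this patch, a rough worked example (assuming a tree two commits past a "0.11" tag with local modifications; the `pieces` values are illustrative):

    pieces = {"closest-tag": "0.11", "distance": 2, "short": "1076c97",
              "long": "1076c978a8d3cfc70f408fe5974aa6c092c949ac",
              "dirty": True, "error": None, "date": None, "branch": "master"}

    render_pep440(pieces)        # -> '0.11+2.g1076c97.dirty'
    render_pep440_post(pieces)   # -> '0.11.post2.dev0+g1076c97'
    render_git_describe(pieces)  # -> '0.11-2-g1076c97-dirty'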
- -[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg -[pypi-url]: https://pypi.python.org/pypi/versioneer/ -[travis-image]: -https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg -[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer - -""" -# pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring -# pylint:disable=missing-class-docstring,too-many-branches,too-many-statements -# pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error -# pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with -# pylint:disable=attribute-defined-outside-init,too-many-arguments - -import configparser -import errno -import json -import os -import re -import subprocess -import sys -from pathlib import Path -from typing import Callable, Dict -import functools -try: - import tomli - have_tomli = True -except ImportError: - have_tomli = False - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_root(): - """Get the project root directory. - - We require that all commands are run from the project root, i.e. the - directory that contains setup.py, setup.cfg, and versioneer.py . - """ - root = os.path.realpath(os.path.abspath(os.getcwd())) - setup_py = os.path.join(root, "setup.py") - versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - # allow 'python path/to/setup.py COMMAND' - root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) - setup_py = os.path.join(root, "setup.py") - versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - err = ("Versioneer was unable to run the project root directory. " - "Versioneer requires setup.py to be executed from " - "its immediate directory (like 'python setup.py COMMAND'), " - "or in a way that lets it use sys.argv[0] to find the root " - "(like 'python path/to/setup.py COMMAND').") - raise VersioneerBadRootError(err) - try: - # Certain runtime workflows (setup.py install/develop in a setuptools - # tree) execute all dependencies in a single python process, so - # "versioneer" may be imported multiple times, and python's shared - # module-import table will cache the first one. So we can't use - # os.path.dirname(__file__), as that will find whichever - # versioneer.py was first imported, even in later projects. - my_path = os.path.realpath(os.path.abspath(__file__)) - me_dir = os.path.normcase(os.path.splitext(my_path)[0]) - vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) - if me_dir != vsr_dir and "VERSIONEER_PEP518" not in globals(): - print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(my_path), versioneer_py)) - except NameError: - pass - return root - - -def get_config_from_root(root): - """Read the project setup.cfg file to determine Versioneer config.""" - # This might raise OSError (if setup.cfg is missing), or - # configparser.NoSectionError (if it lacks a [versioneer] section), or - # configparser.NoOptionError (if it lacks "VCS="). See the docstring at - # the top of versioneer.py for instructions on writing your setup.cfg . 
- root = Path(root) - pyproject_toml = root / "pyproject.toml" - setup_cfg = root / "setup.cfg" - section = None - if pyproject_toml.exists() and have_tomli: - try: - with open(pyproject_toml, 'rb') as fobj: - pp = tomli.load(fobj) - section = pp['tool']['versioneer'] - except (tomli.TOMLDecodeError, KeyError): - pass - if not section: - parser = configparser.ConfigParser() - with open(setup_cfg) as cfg_file: - parser.read_file(cfg_file) - parser.get("versioneer", "VCS") # raise error if missing - - section = parser["versioneer"] - - cfg = VersioneerConfig() - cfg.VCS = section['VCS'] - cfg.style = section.get("style", "") - cfg.versionfile_source = section.get("versionfile_source") - cfg.versionfile_build = section.get("versionfile_build") - cfg.tag_prefix = section.get("tag_prefix") - if cfg.tag_prefix in ("''", '""', None): - cfg.tag_prefix = "" - cfg.parentdir_prefix = section.get("parentdir_prefix") - cfg.verbose = section.get("verbose") - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -# these dictionaries contain VCS-specific tools -LONG_VERSION_PY: Dict[str, str] = {} -HANDLERS: Dict[str, Dict[str, Callable]] = {} - - -def register_vcs_handler(vcs, method): # decorator - """Create decorator to mark a method as the handler of a VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - HANDLERS.setdefault(vcs, {})[method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - process = None - - popen_kwargs = {} - if sys.platform == "win32": - # This hides the console window if pythonw.exe is used - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - popen_kwargs["startupinfo"] = startupinfo - - for command in commands: - try: - dispcmd = str([command] + args) - # remember shell=False, so use git.cmd on windows, not just git - process = subprocess.Popen([command] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None), **popen_kwargs) - break - except OSError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = process.communicate()[0].strip().decode() - if process.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) - return None, process.returncode - return stdout, process.returncode - - -LONG_VERSION_PY['git'] = r''' -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. 
-# Generated by versioneer-0.26 -# https://github.com/python-versioneer/python-versioneer - -"""Git implementation of _version.py.""" - -import errno -import os -import re -import subprocess -import sys -from typing import Callable, Dict -import functools - - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). - git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" - git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" - git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "%(STYLE)s" - cfg.tag_prefix = "%(TAG_PREFIX)s" - cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" - cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY: Dict[str, str] = {} -HANDLERS: Dict[str, Dict[str, Callable]] = {} - - -def register_vcs_handler(vcs, method): # decorator - """Create decorator to mark a method as the handler of a VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - process = None - - popen_kwargs = {} - if sys.platform == "win32": - # This hides the console window if pythonw.exe is used - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - popen_kwargs["startupinfo"] = startupinfo - - for command in commands: - try: - dispcmd = str([command] + args) - # remember shell=False, so use git.cmd on windows, not just git - process = subprocess.Popen([command] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None), **popen_kwargs) - break - except OSError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %%s" %% dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %%s" %% (commands,)) - return None, None - stdout = process.communicate()[0].strip().decode() - if process.returncode != 0: - if verbose: - print("unable to run %%s (error)" %% dispcmd) - print("stdout was %%s" %% stdout) - return None, process.returncode - return stdout, process.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. 
We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for _ in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %%s but none started with prefix %%s" %% - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - with open(versionfile_abs, "r") as fobj: - for line in fobj: - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - except OSError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if "refnames" not in keywords: - raise NotThisMethod("Short version file found") - date = keywords.get("date") - if date is not None: - # Use only the last line. Previous lines may contain GPG signature - # information. - date = date.splitlines()[-1] - - # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = {r.strip() for r in refnames.strip("()").split(",")} - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %%d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = {r for r in refs if re.search(r'\d', r)} - if verbose: - print("discarding '%%s', no digits" %% ",".join(refs - tags)) - if verbose: - print("likely tags: %%s" %% ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. 
"2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - # Filter out refs that exactly match prefix or that don't start - # with a number once the prefix is stripped (mostly a concern - # when prefix is '') - if not re.match(r'\d', r): - continue - if verbose: - print("picking %%s" %% r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - # GIT_DIR can interfere with correct operation of Versioneer. - # It may be intended to be passed to the Versioneer-versioned project, - # but that should not change where we get our version from. - env = os.environ.copy() - env.pop("GIT_DIR", None) - runner = functools.partial(runner, env=env) - - _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=not verbose) - if rc != 0: - if verbose: - print("Directory %%s not under git control" %% root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = runner(GITS, [ - "describe", "--tags", "--dirty=", "--always", "--long", - "--match", f"{tag_prefix}[[:digit:]]*" - ], cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], - cwd=root) - # --abbrev-ref was added in git-1.6.3 - if rc != 0 or branch_name is None: - raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") - branch_name = branch_name.strip() - - if branch_name == "HEAD": - # If we aren't exactly on a branch, pick a branch which represents - # the current commit. If all else fails, we are on a branchless - # commit. - branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) - # --contains was added in git-1.5.4 - if rc != 0 or branches is None: - raise NotThisMethod("'git branch --contains' returned error") - branches = branches.split("\n") - - # Remove the first line if we're running detached - if "(" in branches[0]: - branches.pop(0) - - # Strip off the leading "* " from the list of branches. - branches = [branch[2:] for branch in branches] - if "master" in branches: - branch_name = "master" - elif not branches: - branch_name = None - else: - # Pick the first branch that is returned. Good or bad. 
- branch_name = branches[0] - - pieces["branch"] = branch_name - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparsable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%%s'" - %% describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%%s' doesn't start with prefix '%%s'" - print(fmt %% (full_tag, tag_prefix)) - pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" - %% (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) - pieces["distance"] = len(out.split()) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() - # Use only the last line. Previous lines may contain GPG signature - # information. - date = date.splitlines()[-1] - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_branch(pieces): - """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . - - The ".dev0" means not master branch. Note that .dev0 sorts backwards - (a feature branch will appear "older" than the master branch). - - Exceptions: - 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0" - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += "+untagged.%%d.g%%s" %% (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def pep440_split_post(ver): - """Split pep440 version string at the post-release segment. - - Returns the release segments before the post-release and the - post-release version number (or -1 if no post-release segment is present). - """ - vc = str.split(ver, ".post") - return vc[0], int(vc[1] or 0) if len(vc) == 2 else None - - -def render_pep440_pre(pieces): - """TAG[.postN.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post0.devDISTANCE - """ - if pieces["closest-tag"]: - if pieces["distance"]: - # update the post release segment - tag_version, post_version = pep440_split_post(pieces["closest-tag"]) - rendered = tag_version - if post_version is not None: - rendered += ".post%%d.dev%%d" %% (post_version + 1, pieces["distance"]) - else: - rendered += ".post0.dev%%d" %% (pieces["distance"]) - else: - # no commits, use the tag as the version - rendered = pieces["closest-tag"] - else: - # exception #1 - rendered = "0.post0.dev%%d" %% pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%%s" %% pieces["short"] - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%%s" %% pieces["short"] - return rendered - - -def render_pep440_post_branch(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . - - The ".dev0" means not master branch. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%%s" %% pieces["short"] - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += "+g%%s" %% pieces["short"] - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Exceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-branch": - rendered = render_pep440_branch(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-post-branch": - rendered = render_pep440_post_branch(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%%s'" %% style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. 
- for _ in cfg.versionfile_source.split('/'): - root = os.path.dirname(root) - except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} -''' - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - with open(versionfile_abs, "r") as fobj: - for line in fobj: - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - except OSError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if "refnames" not in keywords: - raise NotThisMethod("Short version file found") - date = keywords.get("date") - if date is not None: - # Use only the last line. Previous lines may contain GPG signature - # information. - date = date.splitlines()[-1] - - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = {r.strip() for r in refnames.strip("()").split(",")} - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". 
- tags = {r for r in refs if re.search(r'\d', r)} - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - # Filter out refs that exactly match prefix or that don't start - # with a number once the prefix is stripped (mostly a concern - # when prefix is '') - if not re.match(r'\d', r): - continue - if verbose: - print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - # GIT_DIR can interfere with correct operation of Versioneer. - # It may be intended to be passed to the Versioneer-versioned project, - # but that should not change where we get our version from. - env = os.environ.copy() - env.pop("GIT_DIR", None) - runner = functools.partial(runner, env=env) - - _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=not verbose) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = runner(GITS, [ - "describe", "--tags", "--dirty=", "--always", "--long", - "--match", f"{tag_prefix}[[:digit:]]*" - ], cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], - cwd=root) - # --abbrev-ref was added in git-1.6.3 - if rc != 0 or branch_name is None: - raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") - branch_name = branch_name.strip() - - if branch_name == "HEAD": - # If we aren't exactly on a branch, pick a branch which represents - # the current commit. If all else fails, we are on a branchless - # commit. 
- branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) - # --contains was added in git-1.5.4 - if rc != 0 or branches is None: - raise NotThisMethod("'git branch --contains' returned error") - branches = branches.split("\n") - - # Remove the first line if we're running detached - if "(" in branches[0]: - branches.pop(0) - - # Strip off the leading "* " from the list of branches. - branches = [branch[2:] for branch in branches] - if "master" in branches: - branch_name = "master" - elif not branches: - branch_name = None - else: - # Pick the first branch that is returned. Good or bad. - branch_name = branches[0] - - pieces["branch"] = branch_name - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparsable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) - pieces["distance"] = len(out.split()) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() - # Use only the last line. Previous lines may contain GPG signature - # information. - date = date.splitlines()[-1] - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def do_vcs_install(versionfile_source, ipy): - """Git-specific installation logic for Versioneer. - - For Git, this means creating/changing .gitattributes to mark _version.py - for export-subst keyword substitution. 
- """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - files = [versionfile_source] - if ipy: - files.append(ipy) - if "VERSIONEER_PEP518" not in globals(): - try: - my_path = __file__ - if my_path.endswith(".pyc") or my_path.endswith(".pyo"): - my_path = os.path.splitext(my_path)[0] + ".py" - versioneer_file = os.path.relpath(my_path) - except NameError: - versioneer_file = "versioneer.py" - files.append(versioneer_file) - present = False - try: - with open(".gitattributes", "r") as fobj: - for line in fobj: - if line.strip().startswith(versionfile_source): - if "export-subst" in line.strip().split()[1:]: - present = True - break - except OSError: - pass - if not present: - with open(".gitattributes", "a+") as fobj: - fobj.write(f"{versionfile_source} export-subst\n") - files.append(".gitattributes") - run_command(GITS, ["add", "--"] + files) - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for _ in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -SHORT_VERSION_PY = """ -# This file was generated by 'versioneer.py' (0.26) from -# revision-control system data, or from the parent directory name of an -# unpacked source archive. Distribution tarballs contain a pre-generated copy -# of this file. - -import json - -version_json = ''' -%s -''' # END VERSION_JSON - - -def get_versions(): - return json.loads(version_json) -""" - - -def versions_from_file(filename): - """Try to determine the version from _version.py if present.""" - try: - with open(filename) as f: - contents = f.read() - except OSError: - raise NotThisMethod("unable to read _version.py") - mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) - if not mo: - mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) - if not mo: - raise NotThisMethod("no version_json in _version.py") - return json.loads(mo.group(1)) - - -def write_to_version_file(filename, versions): - """Write the given version number to the given _version.py file.""" - os.unlink(filename) - contents = json.dumps(versions, sort_keys=True, - indent=1, separators=(",", ": ")) - with open(filename, "w") as f: - f.write(SHORT_VERSION_PY % contents) - - print("set %s to '%s'" % (filename, versions["version"])) - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_branch(pieces): - """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . - - The ".dev0" means not master branch. Note that .dev0 sorts backwards - (a feature branch will appear "older" than the master branch). - - Exceptions: - 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0" - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += "+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def pep440_split_post(ver): - """Split pep440 version string at the post-release segment. - - Returns the release segments before the post-release and the - post-release version number (or -1 if no post-release segment is present). - """ - vc = str.split(ver, ".post") - return vc[0], int(vc[1] or 0) if len(vc) == 2 else None - - -def render_pep440_pre(pieces): - """TAG[.postN.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post0.devDISTANCE - """ - if pieces["closest-tag"]: - if pieces["distance"]: - # update the post release segment - tag_version, post_version = pep440_split_post(pieces["closest-tag"]) - rendered = tag_version - if post_version is not None: - rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) - else: - rendered += ".post0.dev%d" % (pieces["distance"]) - else: - # no commits, use the tag as the version - rendered = pieces["closest-tag"] - else: - # exception #1 - rendered = "0.post0.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_post_branch(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . - - The ".dev0" means not master branch. - - Exceptions: - 1: no tags. 
0.postDISTANCE[.dev0]+gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["branch"] != "master": - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-branch": - rendered = render_pep440_branch(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-post-branch": - rendered = render_pep440_post_branch(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -class VersioneerBadRootError(Exception): - """The project root directory is unknown or missing key files.""" - - -def get_versions(verbose=False): - """Get the project version from whatever source is available. - - Returns dict with two keys: 'version' and 'full'. 
- """ - if "versioneer" in sys.modules: - # see the discussion in cmdclass.py:get_cmdclass() - del sys.modules["versioneer"] - - root = get_root() - cfg = get_config_from_root(root) - - assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" - handlers = HANDLERS.get(cfg.VCS) - assert handlers, "unrecognized VCS '%s'" % cfg.VCS - verbose = verbose or cfg.verbose - assert cfg.versionfile_source is not None, \ - "please set versioneer.versionfile_source" - assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" - - versionfile_abs = os.path.join(root, cfg.versionfile_source) - - # extract version from first of: _version.py, VCS command (e.g. 'git - # describe'), parentdir. This is meant to work for developers using a - # source checkout, for users of a tarball created by 'setup.py sdist', - # and for users of a tarball/zipball created by 'git archive' or github's - # download-from-tag feature or the equivalent in other VCSes. - - get_keywords_f = handlers.get("get_keywords") - from_keywords_f = handlers.get("keywords") - if get_keywords_f and from_keywords_f: - try: - keywords = get_keywords_f(versionfile_abs) - ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) - if verbose: - print("got version from expanded keyword %s" % ver) - return ver - except NotThisMethod: - pass - - try: - ver = versions_from_file(versionfile_abs) - if verbose: - print("got version from file %s %s" % (versionfile_abs, ver)) - return ver - except NotThisMethod: - pass - - from_vcs_f = handlers.get("pieces_from_vcs") - if from_vcs_f: - try: - pieces = from_vcs_f(cfg.tag_prefix, root, verbose) - ver = render(pieces, cfg.style) - if verbose: - print("got version from VCS %s" % ver) - return ver - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - if verbose: - print("got version from parentdir %s" % ver) - return ver - except NotThisMethod: - pass - - if verbose: - print("unable to compute version") - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, "error": "unable to compute version", - "date": None} - - -def get_version(): - """Get the short version string for this project.""" - return get_versions()["version"] - - -def get_cmdclass(cmdclass=None): - """Get the custom setuptools subclasses used by Versioneer. - - If the package uses a different cmdclass (e.g. one from numpy), it - should be provide as an argument. - """ - if "versioneer" in sys.modules: - del sys.modules["versioneer"] - # this fixes the "python setup.py develop" case (also 'install' and - # 'easy_install .'), in which subdependencies of the main project are - # built (using setup.py bdist_egg) in the same python process. Assume - # a main project A and a dependency B, which use different versions - # of Versioneer. A's setup.py imports A's Versioneer, leaving it in - # sys.modules by the time B's setup.py is executed, causing B to run - # with the wrong versioneer. Setuptools wraps the sub-dep builds in a - # sandbox that restores sys.modules to it's pre-build state, so the - # parent is protected against the child's "import versioneer". By - # removing ourselves from sys.modules here, before the child build - # happens, we protect the child from the parent's versioneer too. 
- # Also see https://github.com/python-versioneer/python-versioneer/issues/52 - - cmds = {} if cmdclass is None else cmdclass.copy() - - # we add "version" to setuptools - from setuptools import Command - - class cmd_version(Command): - description = "report generated version string" - user_options = [] - boolean_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - vers = get_versions(verbose=True) - print("Version: %s" % vers["version"]) - print(" full-revisionid: %s" % vers.get("full-revisionid")) - print(" dirty: %s" % vers.get("dirty")) - print(" date: %s" % vers.get("date")) - if vers["error"]: - print(" error: %s" % vers["error"]) - cmds["version"] = cmd_version - - # we override "build_py" in setuptools - # - # most invocation pathways end up running build_py: - # distutils/build -> build_py - # distutils/install -> distutils/build ->.. - # setuptools/bdist_wheel -> distutils/install ->.. - # setuptools/bdist_egg -> distutils/install_lib -> build_py - # setuptools/install -> bdist_egg ->.. - # setuptools/develop -> ? - # pip install: - # copies source tree to a tempdir before running egg_info/etc - # if .git isn't copied too, 'git describe' will fail - # then does setup.py bdist_wheel, or sometimes setup.py install - # setup.py egg_info -> ? - - # pip install -e . and setuptool/editable_wheel will invoke build_py - # but the build_py command is not expected to copy any files. - - # we override different "build_py" commands for both environments - if 'build_py' in cmds: - _build_py = cmds['build_py'] - else: - from setuptools.command.build_py import build_py as _build_py - - class cmd_build_py(_build_py): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - _build_py.run(self) - if getattr(self, "editable_mode", False): - # During editable installs `.py` and data files are - # not copied to build_lib - return - # now locate _version.py in the new build/ directory and replace - # it with an updated value - if cfg.versionfile_build: - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - cmds["build_py"] = cmd_build_py - - if 'build_ext' in cmds: - _build_ext = cmds['build_ext'] - else: - from setuptools.command.build_ext import build_ext as _build_ext - - class cmd_build_ext(_build_ext): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - _build_ext.run(self) - if self.inplace: - # build_ext --inplace will only build extensions in - # build/lib<..> dir with no _version.py to write to. - # As in place builds will already have a _version.py - # in the module dir, we do not need to write one. - return - # now locate _version.py in the new build/ directory and replace - # it with an updated value - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) - if not os.path.exists(target_versionfile): - print(f"Warning: {target_versionfile} does not exist, skipping " - "version update. This can happen if you are running build_ext " - "without first running build_py.") - return - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - cmds["build_ext"] = cmd_build_ext - - if "cx_Freeze" in sys.modules: # cx_freeze enabled? 
- from cx_Freeze.dist import build_exe as _build_exe - # nczeczulin reports that py2exe won't like the pep440-style string - # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. - # setup(console=[{ - # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION - # "product_version": versioneer.get_version(), - # ... - - class cmd_build_exe(_build_exe): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - _build_exe.run(self) - os.unlink(target_versionfile) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - cmds["build_exe"] = cmd_build_exe - del cmds["build_py"] - - if 'py2exe' in sys.modules: # py2exe enabled? - try: - from py2exe.setuptools_buildexe import py2exe as _py2exe - except ImportError: - from py2exe.distutils_buildexe import py2exe as _py2exe - - class cmd_py2exe(_py2exe): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - _py2exe.run(self) - os.unlink(target_versionfile) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - cmds["py2exe"] = cmd_py2exe - - # sdist farms its file list building out to egg_info - if 'egg_info' in cmds: - _egg_info = cmds['egg_info'] - else: - from setuptools.command.egg_info import egg_info as _egg_info - - class cmd_egg_info(_egg_info): - def find_sources(self): - # egg_info.find_sources builds the manifest list and writes it - # in one shot - super().find_sources() - - # Modify the filelist and normalize it - root = get_root() - cfg = get_config_from_root(root) - self.filelist.append('versioneer.py') - if cfg.versionfile_source: - # There are rare cases where versionfile_source might not be - # included by default, so we must be explicit - self.filelist.append(cfg.versionfile_source) - self.filelist.sort() - self.filelist.remove_duplicates() - - # The write method is hidden in the manifest_maker instance that - # generated the filelist and was thrown away - # We will instead replicate their final normalization (to unicode, - # and POSIX-style paths) - from setuptools import unicode_utils - normalized = [unicode_utils.filesys_decode(f).replace(os.sep, '/') - for f in self.filelist.files] - - manifest_filename = os.path.join(self.egg_info, 'SOURCES.txt') - with open(manifest_filename, 'w') as fobj: - fobj.write('\n'.join(normalized)) - - cmds['egg_info'] = cmd_egg_info - - # we override different "sdist" commands for both environments - if 'sdist' in cmds: - _sdist = cmds['sdist'] - else: - from setuptools.command.sdist import sdist as _sdist - - class cmd_sdist(_sdist): - def run(self): - versions = get_versions() - self._versioneer_generated_versions = versions - # unless we update this, the command will keep using the old - # version - self.distribution.metadata.version = versions["version"] - return _sdist.run(self) 
- - def make_release_tree(self, base_dir, files): - root = get_root() - cfg = get_config_from_root(root) - _sdist.make_release_tree(self, base_dir, files) - # now locate _version.py in the new base_dir directory - # (remembering that it may be a hardlink) and replace it with an - # updated value - target_versionfile = os.path.join(base_dir, cfg.versionfile_source) - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, - self._versioneer_generated_versions) - cmds["sdist"] = cmd_sdist - - return cmds - - -CONFIG_ERROR = """ -setup.cfg is missing the necessary Versioneer configuration. You need -a section like: - - [versioneer] - VCS = git - style = pep440 - versionfile_source = src/myproject/_version.py - versionfile_build = myproject/_version.py - tag_prefix = - parentdir_prefix = myproject- - -You will also need to edit your setup.py to use the results: - - import versioneer - setup(version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), ...) - -Please read the docstring in ./versioneer.py for configuration instructions, -edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. -""" - -SAMPLE_CONFIG = """ -# See the docstring in versioneer.py for instructions. Note that you must -# re-run 'versioneer.py setup' after changing this section, and commit the -# resulting files. - -[versioneer] -#VCS = git -#style = pep440 -#versionfile_source = -#versionfile_build = -#tag_prefix = -#parentdir_prefix = - -""" - -OLD_SNIPPET = """ -from ._version import get_versions -__version__ = get_versions()['version'] -del get_versions -""" - -INIT_PY_SNIPPET = """ -from . import {0} -__version__ = {0}.get_versions()['version'] -""" - - -def do_setup(): - """Do main VCS-independent setup function for installing Versioneer.""" - root = get_root() - try: - cfg = get_config_from_root(root) - except (OSError, configparser.NoSectionError, - configparser.NoOptionError) as e: - if isinstance(e, (OSError, configparser.NoSectionError)): - print("Adding sample versioneer config to setup.cfg", - file=sys.stderr) - with open(os.path.join(root, "setup.cfg"), "a") as f: - f.write(SAMPLE_CONFIG) - print(CONFIG_ERROR, file=sys.stderr) - return 1 - - print(" creating %s" % cfg.versionfile_source) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - - ipy = os.path.join(os.path.dirname(cfg.versionfile_source), - "__init__.py") - if os.path.exists(ipy): - try: - with open(ipy, "r") as f: - old = f.read() - except OSError: - old = "" - module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0] - snippet = INIT_PY_SNIPPET.format(module) - if OLD_SNIPPET in old: - print(" replacing boilerplate in %s" % ipy) - with open(ipy, "w") as f: - f.write(old.replace(OLD_SNIPPET, snippet)) - elif snippet not in old: - print(" appending to %s" % ipy) - with open(ipy, "a") as f: - f.write(snippet) - else: - print(" %s unmodified" % ipy) - else: - print(" %s doesn't exist, ok" % ipy) - ipy = None - - # Make VCS-specific changes. For git, this means creating/changing - # .gitattributes to mark _version.py for export-subst keyword - # substitution. 
- do_vcs_install(cfg.versionfile_source, ipy) - return 0 - - -def scan_setup_py(): - """Validate the contents of setup.py against Versioneer's expectations.""" - found = set() - setters = False - errors = 0 - with open("setup.py", "r") as f: - for line in f.readlines(): - if "import versioneer" in line: - found.add("import") - if "versioneer.get_cmdclass()" in line: - found.add("cmdclass") - if "versioneer.get_version()" in line: - found.add("get_version") - if "versioneer.VCS" in line: - setters = True - if "versioneer.versionfile_source" in line: - setters = True - if len(found) != 3: - print("") - print("Your setup.py appears to be missing some important items") - print("(but I might be wrong). Please make sure it has something") - print("roughly like the following:") - print("") - print(" import versioneer") - print(" setup( version=versioneer.get_version(),") - print(" cmdclass=versioneer.get_cmdclass(), ...)") - print("") - errors += 1 - if setters: - print("You should remove lines like 'versioneer.VCS = ' and") - print("'versioneer.versionfile_source = ' . This configuration") - print("now lives in setup.cfg, and should be removed from setup.py") - print("") - errors += 1 - return errors - - -def setup_command(): - """Set up Versioneer and exit with appropriate error code.""" - errors = do_setup() - errors += scan_setup_py() - sys.exit(1 if errors else 0) - - -if __name__ == "__main__": - cmd = sys.argv[1] - if cmd == "setup": - setup_command() From 0065919c36b875e9b43d1c9d1ce77a12bbfe825c Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 12 Aug 2023 12:15:17 -0600 Subject: [PATCH 037/120] REL: Prepare for the NumPy 1.26.0b1 release. - Create 1.26.0-changelog.rst - Update 1.26.0-notes.rst - Update pyproject.toml - Update pyproject.toml.setuppy - Skip failing test [wheel build] --- doc/changelog/1.26.0-changelog.rst | 41 ++++++++++ doc/source/release/1.26.0-notes.rst | 111 +++++++++++++++++++++++++++- numpy/tests/test_ctypeslib.py | 4 + pyproject.toml | 6 +- pyproject.toml.setuppy | 2 +- 5 files changed, 157 insertions(+), 7 deletions(-) create mode 100644 doc/changelog/1.26.0-changelog.rst diff --git a/doc/changelog/1.26.0-changelog.rst b/doc/changelog/1.26.0-changelog.rst new file mode 100644 index 000000000000..9f0cd79d23b7 --- /dev/null +++ b/doc/changelog/1.26.0-changelog.rst @@ -0,0 +1,41 @@ + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Bas van Beek +* Charles Harris +* Matti Picus +* Melissa Weber Mendonça +* Ralf Gommers +* Sayed Adel +* Sebastian Berg +* Stefan van der Walt +* Tyler Reddy +* Warren Weckesser + +Pull requests merged +==================== + +A total of 18 pull requests were merged for this release. + +* `#24305 `__: MAINT: Prepare 1.26.x branch for development +* `#24308 `__: MAINT: Massive update of files from main for numpy 1.26 +* `#24322 `__: CI: fix wheel builds on the 1.26.x branch +* `#24326 `__: BLD: update openblas to newer version +* `#24327 `__: TYP: Trim down the ``_NestedSequence.__getitem__`` signature +* `#24328 `__: BUG: fix choose refcount leak +* `#24337 `__: TST: fix running the test suite in builds without BLAS/LAPACK +* `#24338 `__: BUG: random: Fix generation of nan by dirichlet. +* `#24340 `__: MAINT: Dependabot updates from main +* `#24342 `__: MAINT: Add back NPY_RUN_MYPY_IN_TESTSUITE=1 +* `#24353 `__: MAINT: Update ``extbuild.py`` from main. 
+* `#24356 `__: TST: fix distutils tests for deprecations in recent setuptools...
+* `#24375 `__: MAINT: Update cibuildwheel to version 2.15.0
+* `#24381 `__: MAINT: Fix codespaces setup.sh script
+* `#24403 `__: ENH: Vendor meson for multi-target build support
+* `#24404 `__: BLD: vendor meson-python to make the Windows builds with SIMD...
+* `#24405 `__: BLD, SIMD: The meson CPU dispatcher implementation
+* `#24406 `__: MAINT: Remove versioneer
diff --git a/doc/source/release/1.26.0-notes.rst b/doc/source/release/1.26.0-notes.rst
index c160706d6878..0e7f10e03787 100644
--- a/doc/source/release/1.26.0-notes.rst
+++ b/doc/source/release/1.26.0-notes.rst
@@ -4,9 +4,114 @@
 NumPy 1.26.0 Release Notes
 ==========================
 
-The NumPy 1.26.0 release is a continuation of the 1.25.x release cycle, but
-with the distutils based build replaced by meson in order to work with Python
-3.12.
+The NumPy 1.26.0 release is a continuation of the 1.25.x release cycle with the
+addition of Python 3.12.0 support. Python 3.12 dropped distutils, consequently
+supporting it required finding a replacement for the setup.py/distutils based
+build system NumPy was using. We have chosen to use the Meson build system
+instead, and this is the first NumPy release supporting it. This is also the
+first release that supports Cython 3.0 in addition to retaining 0.29.X
+compatibility. Supporting those two upgrades was a large project, over 100
+files have been touched in this release. The changelog doesn't capture the full
+extent of the work, special thanks to Ralf Gommers, Sayed Adel, Stéfan van der
+Walt, and Matti Picus who did much of the work in the main development branch.
+
+The highlights of this release are:
+
+- Python 3.12.0 support.
+- Cython 3.0.0 compatibility.
+- Use of the Meson build system
+- Updated SIMD support
 
 The Python versions supported in this release are 3.9-3.12.
 
+Build system changes
+====================
+
+In this release, NumPy has switched to Meson as the build system and
+meson-python as the build backend. Installing NumPy or building a wheel can be
+done with standard tools like ``pip`` and ``pypa/build``. The following are
+supported:
+
+- Regular installs: ``pip install numpy`` or (in a cloned repo)
+  ``pip install .``
+- Building a wheel: ``python -m build`` (preferred), or ``pip wheel .``
+- Editable installs: ``pip install -e . --no-build-isolation``
+- Development builds through the custom CLI implemented with
+  `spin `__: ``spin build``.
+
+All the regular ``pip`` and ``pypa/build`` flags (e.g.,
+``--no-build-isolation``) should work as expected.
+
+NumPy-specific build customization
+----------------------------------
+
+Many of the NumPy-specific ways of customizing builds have changed.
+The ``NPY_*`` environment variables which control BLAS/LAPACK, SIMD, threading,
+and other such options are no longer supported, nor is a ``site.cfg`` file to
+select BLAS and LAPACK. Instead, there are command-line flags that can be
+passed to the build via ``pip``/``build``'s config-settings interface. These
+flags are all listed in the ``meson_options.txt`` file in the root of the repo.
+Detailed documentation will be available before the final 1.26.0 release; for now
+please see `the SciPy "building from source" docs
+`__ since most build
+customization works in an almost identical way in SciPy as it does in NumPy.
+
+Build dependencies
+------------------
+
+While the runtime dependencies of NumPy have not changed, the build
+dependencies have. 
Because we temporarily vendor Meson and meson-python, +there are several new dependencies - please see the ``[build-system]`` section +of ``pyproject.toml`` for details. + +Troubleshooting +--------------- + +This build system change is quite large. In case of unexpected issues, it is +still possible to use a ``setup.py``-based build as a temporary workaround (on +Python 3.9-3.11, not 3.12), by copying ``pyproject.toml.setuppy`` to +``pyproject.toml``. However, please open an issue with details on the NumPy +issue tracker. We aim to phase out ``setup.py`` builds as soon as possible, and +therefore would like to see all potential blockers surfaced early on in the +1.26.0 release cycle. + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Bas van Beek +* Charles Harris +* Matti Picus +* Melissa Weber Mendonça +* Ralf Gommers +* Sayed Adel +* Sebastian Berg +* Stefan van der Walt +* Tyler Reddy +* Warren Weckesser + +Pull requests merged +==================== + +A total of 18 pull requests were merged for this release. + +* `#24305 `__: MAINT: Prepare 1.26.x branch for development +* `#24308 `__: MAINT: Massive update of files from main for numpy 1.26 +* `#24322 `__: CI: fix wheel builds on the 1.26.x branch +* `#24326 `__: BLD: update openblas to newer version +* `#24327 `__: TYP: Trim down the ``_NestedSequence.__getitem__`` signature +* `#24328 `__: BUG: fix choose refcount leak +* `#24337 `__: TST: fix running the test suite in builds without BLAS/LAPACK +* `#24338 `__: BUG: random: Fix generation of nan by dirichlet. +* `#24340 `__: MAINT: Dependabot updates from main +* `#24342 `__: MAINT: Add back NPY_RUN_MYPY_IN_TESTSUITE=1 +* `#24353 `__: MAINT: Update ``extbuild.py`` from main. +* `#24356 `__: TST: fix distutils tests for deprecations in recent setuptools... +* `#24375 `__: MAINT: Update cibuildwheel to version 2.15.0 +* `#24381 `__: MAINT: Fix codespaces setup.sh script +* `#24403 `__: ENH: Vendor meson for multi-target build support +* `#24404 `__: BLD: vendor meson-python to make the Windows builds with SIMD... +* `#24405 `__: BLD, SIMD: The meson CPU dispatcher implementation +* `#24406 `__: MAINT: Remove versioneer diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index 63906b0f41cb..965e547e7c97 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -213,6 +213,10 @@ def test_pointer(self): # shape argument is required assert_raises(TypeError, as_array, p) + @pytest.mark.skipif( + sys.version_info == (3, 12, 0, "candidate", 1), + reason="Broken in 3.12.0rc1, see gh-24399", + ) def test_struct_array_pointer(self): from ctypes import c_int16, Structure, pointer diff --git a/pyproject.toml b/pyproject.toml index 2ad2085000c2..b75b81931974 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ build-backend = "mesonpy" backend-path = ['./vendored-meson/meson-python'] requires = [ - "Cython>=3.0", + "Cython>=0.29.34,<3.1", # All dependencies of the vendored meson-python (except for meson, because # we've got that vendored too - that's the point of this exercise). 'pyproject-metadata >= 0.7.1', @@ -17,7 +17,7 @@ requires = [ [project] name = "numpy" -version = "2.0.0.dev0" +version = "1.26.0b1" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} @@ -26,7 +26,7 @@ authors = [{name = "Travis E. 
Oliphant et al."}] maintainers = [ {name = "NumPy Developers", email="numpy-discussion@python.org"}, ] -requires-python = ">=3.9" +requires-python = ">=3.9,<3.13" readme = "README.md" classifiers = [ 'Development Status :: 5 - Production/Stable', diff --git a/pyproject.toml.setuppy b/pyproject.toml.setuppy index d6c0f3d72688..ceef9ac2a692 100644 --- a/pyproject.toml.setuppy +++ b/pyproject.toml.setuppy @@ -3,7 +3,7 @@ # to avoid building with Meson (e.g., in the Emscripten/Pyodide CI job) [project] name = "numpy" -version = "2.0.0.dev0" +version = "1.26.0b1" [build-system] requires = [ From e442aa414aa9dbc670547729fef34f2dcfc2fd2a Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 17 Aug 2023 13:30:17 -0600 Subject: [PATCH 038/120] MAINT: Pin upper version of sphinx. The 7.2.x versions of sphinx broke the circleci document builds. [skip azp] [skip cirrus] --- doc_requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc_requirements.txt b/doc_requirements.txt index 9811ef746ab7..b2e43dceeadd 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,5 +1,5 @@ # doxygen required, use apt-get or dnf -sphinx>=4.5.0 +sphinx>=4.5.0,<7.2.0 numpydoc==1.4 pydata-sphinx-theme==0.13.3 sphinx-design @@ -11,4 +11,4 @@ breathe>4.33.0 # needed to build release notes towncrier -toml \ No newline at end of file +toml From a2864da874207e091fbb660b5e4556e789c03a81 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Fri, 18 Aug 2023 23:15:12 +0400 Subject: [PATCH 039/120] ENH: Add prefix to _ALIGN Macro This change adds a prefix to the `_ALIGN` macro to prevent compiler warnings on OpenBSD, where the macro's definition conflicts with system headers. --- .../src/multiarray/_multiarray_tests.c.src | 2 +- numpy/core/src/multiarray/arraytypes.c.src | 40 +++++++++---------- numpy/core/src/multiarray/common.h | 16 ++++---- .../multiarray/lowlevel_strided_loops.c.src | 32 +++++++-------- numpy/core/src/umath/_scaled_float_dtype.c | 2 +- 5 files changed, 46 insertions(+), 46 deletions(-) diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src index 482596f97654..cd2aa20f8593 100644 --- a/numpy/core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/core/src/multiarray/_multiarray_tests.c.src @@ -2026,7 +2026,7 @@ get_struct_alignments(PyObject *NPY_UNUSED(self), PyObject *args) { /**begin repeat * #N = 1,2,3# */ - alignment = PyLong_FromLong(_ALIGN(struct TestStruct@N@)); + alignment = PyLong_FromLong(NPY_ALIGNOF(struct TestStruct@N@)); size = PyLong_FromLong(sizeof(struct TestStruct@N@)); val = PyTuple_Pack(2, alignment, size); Py_DECREF(alignment); diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index b84625c776cd..bc3f743727e7 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -392,7 +392,7 @@ NPY_NO_EXPORT int return -1; } if (ap == NULL || PyArray_ISBEHAVED(ap)) { - assert(npy_is_aligned(ov, _ALIGN(@type@))); + assert(npy_is_aligned(ov, NPY_ALIGNOF(@type@))); *((@type@ *)ov)=temp; } else { @@ -809,7 +809,7 @@ STRING_setitem(PyObject *op, void *ov, void *vap) /* OBJECT */ -#define __ALIGNED(obj, sz) ((((size_t) obj) % (sz))==0) +#define NPY__ALIGNED(obj, sz) ((((size_t) obj) % (sz))==0) static PyObject * OBJECT_getitem(void *ip, void *NPY_UNUSED(ap)) @@ -2426,10 +2426,10 @@ OBJECT_copyswapn(PyObject **dst, npy_intp dstride, PyObject **src, { npy_intp i; if (src != NULL) { - if (__ALIGNED(dst, 
sizeof(PyObject **)) - && __ALIGNED(src, sizeof(PyObject **)) - && __ALIGNED(dstride, sizeof(PyObject **)) - && __ALIGNED(sstride, sizeof(PyObject **))) { + if (NPY__ALIGNED(dst, sizeof(PyObject **)) + && NPY__ALIGNED(src, sizeof(PyObject **)) + && NPY__ALIGNED(dstride, sizeof(PyObject **)) + && NPY__ALIGNED(sstride, sizeof(PyObject **))) { dstride /= sizeof(PyObject **); sstride /= sizeof(PyObject **); for (i = 0; i < n; i++) { @@ -2466,8 +2466,8 @@ OBJECT_copyswap(PyObject **dst, PyObject **src, int NPY_UNUSED(swap), { if (src != NULL) { - if (__ALIGNED(dst,sizeof(PyObject **)) && - __ALIGNED(src,sizeof(PyObject **))) { + if (NPY__ALIGNED(dst,sizeof(PyObject **)) && + NPY__ALIGNED(src,sizeof(PyObject **))) { Py_XINCREF(*src); Py_XDECREF(*dst); *dst = *src; @@ -2927,7 +2927,7 @@ VOID_nonzero (char *ip, PyArrayObject *ap) } dummy_fields.descr = new; - if ((new->alignment > 1) && !__ALIGNED(ip + offset, + if ((new->alignment > 1) && !NPY__ALIGNED(ip + offset, new->alignment)) { PyArray_CLEARFLAGS(dummy_arr, NPY_ARRAY_ALIGNED); } @@ -2952,7 +2952,7 @@ VOID_nonzero (char *ip, PyArrayObject *ap) return nonz; } -#undef __ALIGNED +#undef NPY__ALIGNED /* @@ -4326,7 +4326,7 @@ static PyArray_Descr @from@_Descr = { /* elsize */ 0, /* alignment */ - _ALIGN(@align@), + NPY_ALIGNOF(@align@), /* subarray */ NULL, /* fields */ @@ -4476,7 +4476,7 @@ NPY_NO_EXPORT PyArray_Descr @from@_Descr = { /* elsize */ sizeof(@fromtype@), /* alignment */ - _ALIGN(@fromtype@), + NPY_ALIGNOF(@fromtype@), /* subarray */ NULL, /* fields */ @@ -4815,7 +4815,7 @@ set_typeinfo(PyObject *dict) */ s = PyArray_typeinforanged( - NPY_@name@LTR, NPY_@name@, NPY_BITSOF_@uname@, _ALIGN(@type@), + NPY_@name@LTR, NPY_@name@, NPY_BITSOF_@uname@, NPY_ALIGNOF(@type@), Py_BuildValue("@cx@", @max@), Py_BuildValue("@cn@", @min@), &Py@Name@ArrType_Type @@ -4846,7 +4846,7 @@ set_typeinfo(PyObject *dict) */ s = PyArray_typeinfo( NPY_@name@LTR, NPY_@name@, NPY_BITSOF_@name@, - _ALIGN(@type@), &Py@Name@ArrType_Type + NPY_ALIGNOF(@type@), &Py@Name@ArrType_Type ); if (s == NULL) { Py_DECREF(infodict); @@ -4863,7 +4863,7 @@ set_typeinfo(PyObject *dict) s = PyArray_typeinfo( NPY_OBJECTLTR, NPY_OBJECT, sizeof(PyObject *) * CHAR_BIT, - _ALIGN(PyObject *), + NPY_ALIGNOF(PyObject *), &PyObjectArrType_Type ); if (s == NULL) { @@ -4877,7 +4877,7 @@ set_typeinfo(PyObject *dict) return -1; } s = PyArray_typeinfo( - NPY_STRINGLTR, NPY_STRING, 0, _ALIGN(char), + NPY_STRINGLTR, NPY_STRING, 0, NPY_ALIGNOF(char), &PyStringArrType_Type ); if (s == NULL) { @@ -4891,7 +4891,7 @@ set_typeinfo(PyObject *dict) return -1; } s = PyArray_typeinfo( - NPY_UNICODELTR, NPY_UNICODE, 0, _ALIGN(npy_ucs4), + NPY_UNICODELTR, NPY_UNICODE, 0, NPY_ALIGNOF(npy_ucs4), &PyUnicodeArrType_Type ); if (s == NULL) { @@ -4905,7 +4905,7 @@ set_typeinfo(PyObject *dict) return -1; } s = PyArray_typeinfo( - NPY_VOIDLTR, NPY_VOID, 0, _ALIGN(char), + NPY_VOIDLTR, NPY_VOID, 0, NPY_ALIGNOF(char), &PyVoidArrType_Type ); if (s == NULL) { @@ -4920,7 +4920,7 @@ set_typeinfo(PyObject *dict) } s = PyArray_typeinforanged( NPY_DATETIMELTR, NPY_DATETIME, NPY_BITSOF_DATETIME, - _ALIGN(npy_datetime), + NPY_ALIGNOF(npy_datetime), MyPyLong_FromInt64(NPY_MAX_DATETIME), MyPyLong_FromInt64(NPY_MIN_DATETIME), &PyDatetimeArrType_Type @@ -4937,7 +4937,7 @@ set_typeinfo(PyObject *dict) } s = PyArray_typeinforanged( NPY_TIMEDELTALTR, NPY_TIMEDELTA, NPY_BITSOF_TIMEDELTA, - _ALIGN(npy_timedelta), + NPY_ALIGNOF(npy_timedelta), MyPyLong_FromInt64(NPY_MAX_TIMEDELTA), MyPyLong_FromInt64(NPY_MIN_TIMEDELTA), 
&PyTimedeltaArrType_Type diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h index cb9fadc4e9f6..b9d2b69e306e 100644 --- a/numpy/core/src/multiarray/common.h +++ b/numpy/core/src/multiarray/common.h @@ -175,7 +175,7 @@ check_and_adjust_axis(int *axis, int ndim) } /* used for some alignment checks */ -/* +/* * GCC releases before GCC 4.9 had a bug in _Alignof. See GCC bug 52023 * . * clang versions < 8.0.0 have the same bug. @@ -184,11 +184,11 @@ check_and_adjust_axis(int *axis, int ndim) || (defined __GNUC__ && __GNUC__ < 4 + (__GNUC_MINOR__ < 9) \ && !defined __clang__) \ || (defined __clang__ && __clang_major__ < 8)) -# define _ALIGN(type) offsetof(struct {char c; type v;}, v) +# define NPY_ALIGNOF(type) offsetof(struct {char c; type v;}, v) #else -# define _ALIGN(type) _Alignof(type) +# define NPY_ALIGNOF(type) _Alignof(type) #endif -#define _UINT_ALIGN(type) npy_uint_alignment(sizeof(type)) +#define NPY_ALIGNOF_UINT(type) npy_uint_alignment(sizeof(type)) /* * Disable harmless compiler warning "4116: unnamed type definition in * parentheses" which is caused by the _ALIGN macro. @@ -223,20 +223,20 @@ npy_uint_alignment(int itemsize) case 1: return 1; case 2: - alignment = _ALIGN(npy_uint16); + alignment = NPY_ALIGNOF(npy_uint16); break; case 4: - alignment = _ALIGN(npy_uint32); + alignment = NPY_ALIGNOF(npy_uint32); break; case 8: - alignment = _ALIGN(npy_uint64); + alignment = NPY_ALIGNOF(npy_uint64); break; case 16: /* * 16 byte types are copied using 2 uint64 assignments. * See the strided copy function in lowlevel_strided_loops.c. */ - alignment = _ALIGN(npy_uint64); + alignment = NPY_ALIGNOF(npy_uint64); break; default: break; diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src index 2c48ae58bf21..16020ee7a772 100644 --- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src @@ -134,8 +134,8 @@ static int #if @is_aligned@ /* sanity check */ - assert(N == 0 || npy_is_aligned(dst, _UINT_ALIGN(@type@))); - assert(N == 0 || npy_is_aligned(src, _UINT_ALIGN(@type@))); + assert(N == 0 || npy_is_aligned(dst, NPY_ALIGNOF_UINT(@type@))); + assert(N == 0 || npy_is_aligned(src, NPY_ALIGNOF_UINT(@type@))); #endif /*printf("fn @prefix@_@oper@_size@elsize@\n");*/ while (N > 0) { @@ -220,8 +220,8 @@ static NPY_GCC_OPT_3 int } #if @is_aligned@ && @elsize@ != 16 /* sanity check */ - assert(N == 0 || npy_is_aligned(dst, _UINT_ALIGN(@type@))); - assert(N == 0 || npy_is_aligned(src, _UINT_ALIGN(@type@))); + assert(N == 0 || npy_is_aligned(dst, NPY_ALIGNOF_UINT(@type@))); + assert(N == 0 || npy_is_aligned(src, NPY_ALIGNOF_UINT(@type@))); #endif #if @elsize@ == 1 && @dst_contig@ memset(dst, *src, N); @@ -864,8 +864,8 @@ static NPY_GCC_OPT_3 int #if @aligned@ /* sanity check */ - assert(N == 0 || npy_is_aligned(src, _ALIGN(_TYPE1))); - assert(N == 0 || npy_is_aligned(dst, _ALIGN(_TYPE2))); + assert(N == 0 || npy_is_aligned(src, NPY_ALIGNOF(_TYPE1))); + assert(N == 0 || npy_is_aligned(dst, NPY_ALIGNOF(_TYPE2))); #endif /*printf("@prefix@_cast_@name1@_to_@name2@\n");*/ @@ -1515,7 +1515,7 @@ mapiter_trivial_@name@( while (itersize--) { char * self_ptr; npy_intp indval = *((npy_intp*)ind_ptr); - assert(npy_is_aligned(ind_ptr, _UINT_ALIGN(npy_intp))); + assert(npy_is_aligned(ind_ptr, NPY_ALIGNOF_UINT(npy_intp))); #if @isget@ if (check_and_adjust_index(&indval, fancy_dim, 0, _save) < 0 ) { return -1; @@ -1529,8 +1529,8 @@ mapiter_trivial_@name@( 
#if @isget@ #if @elsize@ - assert(npy_is_aligned(result_ptr, _UINT_ALIGN(@copytype@))); - assert(npy_is_aligned(self_ptr, _UINT_ALIGN(@copytype@))); + assert(npy_is_aligned(result_ptr, NPY_ALIGNOF_UINT(@copytype@))); + assert(npy_is_aligned(self_ptr, NPY_ALIGNOF_UINT(@copytype@))); *(@copytype@ *)result_ptr = *(@copytype@ *)self_ptr; #else char *args[2] = {self_ptr, result_ptr}; @@ -1544,8 +1544,8 @@ mapiter_trivial_@name@( #else /* !@isget@ */ #if @elsize@ - assert(npy_is_aligned(result_ptr, _UINT_ALIGN(@copytype@))); - assert(npy_is_aligned(self_ptr, _UINT_ALIGN(@copytype@))); + assert(npy_is_aligned(result_ptr, NPY_ALIGNOF_UINT(@copytype@))); + assert(npy_is_aligned(self_ptr, NPY_ALIGNOF_UINT(@copytype@))); *(@copytype@ *)self_ptr = *(@copytype@ *)result_ptr; #else char *args[2] = {result_ptr, self_ptr}; @@ -1672,7 +1672,7 @@ mapiter_@name@( for (i=0; i < @numiter@; i++) { npy_intp indval = *((npy_intp*)outer_ptrs[i]); assert(npy_is_aligned(outer_ptrs[i], - _UINT_ALIGN(npy_intp))); + NPY_ALIGNOF_UINT(npy_intp))); #if @isget@ && @one_iter@ if (check_and_adjust_index(&indval, fancy_dims[i], @@ -1693,9 +1693,9 @@ mapiter_@name@( #if @isget@ #if @elsize@ assert(npy_is_aligned(outer_ptrs[i], - _UINT_ALIGN(@copytype@))); + NPY_ALIGNOF_UINT(@copytype@))); assert(npy_is_aligned(self_ptr, - _UINT_ALIGN(@copytype@))); + NPY_ALIGNOF_UINT(@copytype@))); *(@copytype@ *)(outer_ptrs[i]) = *(@copytype@ *)self_ptr; #else char *args[2] = {self_ptr, outer_ptrs[i]}; @@ -1709,9 +1709,9 @@ mapiter_@name@( #else /* !@isget@ */ #if @elsize@ assert(npy_is_aligned(outer_ptrs[i], - _UINT_ALIGN(@copytype@))); + NPY_ALIGNOF_UINT(@copytype@))); assert(npy_is_aligned(self_ptr, - _UINT_ALIGN(@copytype@))); + NPY_ALIGNOF_UINT(@copytype@))); *(@copytype@ *)self_ptr = *(@copytype@ *)(outer_ptrs[i]); #else char *args[2] = {outer_ptrs[i], self_ptr}; diff --git a/numpy/core/src/umath/_scaled_float_dtype.c b/numpy/core/src/umath/_scaled_float_dtype.c index c26ace9f1396..7489e9a766fd 100644 --- a/numpy/core/src/umath/_scaled_float_dtype.c +++ b/numpy/core/src/umath/_scaled_float_dtype.c @@ -149,7 +149,7 @@ static PyArray_SFloatDescr SFloatSingleton = {{ .flags = NPY_USE_GETITEM|NPY_USE_SETITEM, .type_num = -1, .elsize = sizeof(double), - .alignment = _ALIGN(double), + .alignment = NPY_ALIGNOF(double), .f = &sfloat_slots.f, }, .scaling = 1, From 999ac383c75915da1b587d4979dc12610ca299f0 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 14 Aug 2023 15:57:13 +0300 Subject: [PATCH 040/120] BUG: cleanup warnings [skip azp][skip circle][skip travis][skip cirrus] --- numpy/_build_utils/gitversion.py | 3 ++- numpy/core/src/multiarray/compiled_base.c | 11 +++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/numpy/_build_utils/gitversion.py b/numpy/_build_utils/gitversion.py index 6d98de6eacf5..d910581a85d4 100644 --- a/numpy/_build_utils/gitversion.py +++ b/numpy/_build_utils/gitversion.py @@ -4,7 +4,8 @@ def init_version(): init = os.path.join(os.path.dirname(__file__), '../../pyproject.toml') - data = open(init).readlines() + with open(init) as fid: + data = fid.readlines() version_line = next( line for line in data if line.startswith('version =') diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 22f2547ada6f..15b7b43a970a 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1412,7 +1412,18 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t static char *msg = "already has a 
different docstring"; /* Don't add docstrings */ +#if PY_VERSION_HEX > 0x030b0000 + static long optimize = -1000; + if (optimize < 0) { + PyObject *flags = PySys_GetObject("flags"); /* borrowed object */ + PyObject *level = PyObject_GetAttrString(flags, "optimize"); + optimize = PyLong_AsLong(level); + Py_DECREF(level); + } + if (optimize > 1) { +#else if (Py_OptimizeFlag > 1) { +#endif Py_RETURN_NONE; } From 7a7ffe43a81d880705298efddd0f5ed445b9b7dd Mon Sep 17 00:00:00 2001 From: Stefan van der Walt Date: Fri, 18 Aug 2023 17:45:28 -0700 Subject: [PATCH 041/120] Upgrade to spin 0.5 This version of spin allows us to use a vendored meson CLI. --- .spin/cmds.py | 460 +++-------------------------------------- build_requirements.txt | 2 +- pyproject.toml | 9 +- 3 files changed, 32 insertions(+), 439 deletions(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 8e9fe86368f7..656625afe2a9 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -10,437 +10,11 @@ import click from spin import util +from spin.cmds import meson -_run = util.run - -# START of spin/cmds/meson.py -install_dir = "build-install" - -# The numpy-vendored version of Meson -meson_cli = [sys.executable, - str(pathlib.Path(__file__).parent.parent.resolve() / - 'vendored-meson' / 'meson' / 'meson.py') - ] - - -def _set_pythonpath(quiet=False): - site_packages = _get_site_packages() - env = os.environ - - if "PYTHONPATH" in env: - env["PYTHONPATH"] = f"{site_packages}{os.pathsep}{env['PYTHONPATH']}" - else: - env["PYTHONPATH"] = site_packages - - if not quiet: - click.secho( - f'$ export PYTHONPATH="{site_packages}"', bold=True, fg="bright_blue" - ) - - return env["PYTHONPATH"] - - -def _get_site_packages(): - candidate_paths = [] - for root, dirs, _files in os.walk(install_dir): - for subdir in dirs: - if subdir == "site-packages" or subdir == "dist-packages": - candidate_paths.append(os.path.abspath(os.path.join(root, subdir))) - - X, Y = sys.version_info.major, sys.version_info.minor - - site_packages = None - if any(f"python{X}." 
in p for p in candidate_paths): - # We have a system that uses `python3.X/site-packages` or `python3.X/dist-packages` - site_packages = [p for p in candidate_paths if f"python{X}.{Y}" in p] - if len(site_packages) == 0: - raise FileNotFoundError( - f"No site-packages found in {install_dir} for Python {X}.{Y}" - ) - else: - site_packages = site_packages[0] - else: - # A naming scheme that does not encode the Python major/minor version is used, so return - # whatever site-packages path was found - if len(candidate_paths) > 1: - raise FileNotFoundError( - f"Multiple `site-packages` found in `{install_dir}`, but cannot use Python version to disambiguate" - ) - elif len(candidate_paths) == 1: - site_packages = candidate_paths[0] - - if site_packages is None: - raise FileNotFoundError( - f"No `site-packages` or `dist-packages` found under `{install_dir}`" - ) - - return site_packages - - -def _meson_version(): - try: - p = _run(meson_cli + ["--version"], output=False, echo=False) - return p.stdout.decode("ascii").strip() - except: - pass - - -def _meson_version_configured(): - try: - meson_info_fn = os.path.join("build", "meson-info", "meson-info.json") - meson_info = json.load(open(meson_info_fn)) - return meson_info["meson_version"]["full"] - except: - pass - - -@click.command() -@click.option("-j", "--jobs", help="Number of parallel tasks to launch", type=int) -@click.option("--clean", is_flag=True, help="Clean build directory before build") -@click.option( - "-v", "--verbose", is_flag=True, help="Print all build output, even installation" -) -@click.argument("meson_args", nargs=-1) -def meson_build(meson_args, jobs=None, clean=False, verbose=False): - """🔧 Build package with Meson/ninja and install - - MESON_ARGS are passed through e.g.: - - spin build -- -Dpkg_config_path=/lib64/pkgconfig - - The package is installed to build-install - - By default builds for release, to be able to use a debugger set CFLAGS - appropriately. For example, for linux use - - CFLAGS="-O0 -g" spin build - """ - build_dir = "build" - setup_cmd = meson_cli + ["setup", build_dir, "--prefix=/usr"] + list(meson_args) - - if clean: - print(f"Removing `{build_dir}`") - if os.path.isdir(build_dir): - shutil.rmtree(build_dir) - print(f"Removing `{install_dir}`") - if os.path.isdir(install_dir): - shutil.rmtree(install_dir) - - if not (os.path.exists(build_dir) and _meson_version_configured()): - p = _run(setup_cmd, sys_exit=False) - if p.returncode != 0: - raise RuntimeError( - "Meson configuration failed; please try `spin build` again with the `--clean` flag." - ) - else: - # Build dir has been configured; check if it was configured by - # current version of Meson - - if _meson_version() != _meson_version_configured(): - _run(setup_cmd + ["--reconfigure"]) - - # Any other conditions that warrant a reconfigure? 
- - p = _run(meson_cli + ["compile", "-C", build_dir], sys_exit=False) - p = _run(meson_cli + - [ - "install", - "--only-changed", - "-C", - build_dir, - "--destdir", - f"../{install_dir}", - ], - output=verbose, - ) - - -def _get_configured_command(command_name): - from spin.cmds.util import get_commands - command_groups = get_commands() - commands = [cmd for section in command_groups for cmd in command_groups[section]] - return next((cmd for cmd in commands if cmd.name == command_name), None) - - -@click.command() -@click.argument("pytest_args", nargs=-1) -@click.pass_context -def meson_test(ctx, pytest_args): - """🔧 Run tests - - PYTEST_ARGS are passed through directly to pytest, e.g.: - - spin test -- -v - - To run tests on a directory or file: - - \b - spin test numpy/linalg - spin test numpy/linalg/tests/test_linalg.py - - To run specific tests, by module, function, class, or method: - - \b - spin test -- --pyargs numpy.random - spin test -- --pyargs numpy.random.tests.test_generator_mt19937 - spin test -- --pyargs numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric - spin test -- --pyargs numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric::test_edge_cases - - To report the durations of the N slowest tests: - - spin test -- --durations=N - - To run tests that match a given pattern: - - \b - spin test -- -k "geometric" - spin test -- -k "geometric and not rgeometric" - - To skip tests with a given marker: - - spin test -- -m "not slow" - - To parallelize test runs (requires `pytest-xdist`): - - spin test -- -n NUM_JOBS - - For more, see `pytest --help`. - - """ - from spin.cmds.util import get_config - cfg = get_config() - - build_cmd = _get_configured_command("build") - if build_cmd: - click.secho( - "Invoking `build` prior to running tests:", bold=True, fg="bright_green" - ) - ctx.invoke(build_cmd) - - package = cfg.get("tool.spin.package", None) - if not pytest_args: - pytest_args = (package,) - if pytest_args == (None,): - print( - "Please specify `package = packagename` under `tool.spin` section of `pyproject.toml`" - ) - sys.exit(1) - - site_path = _set_pythonpath() - - # Sanity check that library built properly - if sys.version_info[:2] >= (3, 11): - p = _run([sys.executable, "-P", "-c", f"import {package}"], sys_exit=False) - if p.returncode != 0: - print(f"As a sanity check, we tried to import {package}.") - print("Stopping. Please investigate the build error.") - sys.exit(1) - - print(f'$ export PYTHONPATH="{site_path}"') - _run( - [sys.executable, "-m", "pytest", f"--rootdir={site_path}"] + list(pytest_args), - cwd=site_path, - replace=True, - ) - - -@click.command() -@click.argument("ipython_args", nargs=-1) -def ipython(ipython_args): - """💻 Launch IPython shell with PYTHONPATH set - - IPYTHON_ARGS are passed through directly to IPython, e.g.: - - spin ipython -- -i myscript.py - """ - p = _set_pythonpath() - print(f'💻 Launching IPython with PYTHONPATH="{p}"') - _run(["ipython", "--ignore-cwd"] + list(ipython_args), replace=True) - - -@click.command() -@click.argument("shell_args", nargs=-1) -def meson_shell(shell_args=[]): - """💻 Launch shell with PYTHONPATH set - - SHELL_ARGS are passed through directly to the shell, e.g.: - - spin shell -- -c 'echo $PYTHONPATH' - - Ensure that your shell init file (e.g., ~/.zshrc) does not override - the PYTHONPATH. 
- """ - p = _set_pythonpath() - shell = os.environ.get("SHELL", "sh") - cmd = [shell] + list(shell_args) - print(f'💻 Launching shell with PYTHONPATH="{p}"') - print("⚠ Change directory to avoid importing source instead of built package") - print("⚠ Ensure that your ~/.shellrc does not unset PYTHONPATH") - _run(cmd, replace=True) - - -@click.command() -@click.argument("python_args", nargs=-1) -def meson_python(python_args): - """🐍 Launch Python shell with PYTHONPATH set - - PYTHON_ARGS are passed through directly to Python, e.g.: - - spin python -- -c 'import sys; print(sys.path)' - """ - p = _set_pythonpath() - v = sys.version_info - if (v.major < 3) or (v.major == 3 and v.minor < 11): - print("We're sorry, but this feature only works on Python 3.11 and greater 😢") - print() - print( - "Why? Because we need the '-P' flag so the interpreter doesn't muck with PYTHONPATH" - ) - print() - print("However! You can still launch your own interpreter:") - print() - print(f" PYTHONPATH='{p}' python") - print() - print("And then call:") - print() - print("import sys; del(sys.path[0])") - sys.exit(-1) - - print(f'🐍 Launching Python with PYTHONPATH="{p}"') - - _run(["/usr/bin/env", "python", "-P"] + list(python_args), replace=True) - - -@click.command(context_settings={"ignore_unknown_options": True}) -@click.argument("args", nargs=-1) -def meson_run(args): - """🏁 Run a shell command with PYTHONPATH set - - \b - spin run make - spin run 'echo $PYTHONPATH' - spin run python -c 'import sys; del sys.path[0]; import mypkg' - - If you'd like to expand shell variables, like `$PYTHONPATH` in the example - above, you need to provide a single, quoted command to `run`: - - spin run 'echo $SHELL && echo $PWD' - - On Windows, all shell commands are run via Bash. - Install Git for Windows if you don't have Bash already. - """ - if not len(args) > 0: - raise RuntimeError("No command given") - - is_posix = sys.platform in ("linux", "darwin") - shell = len(args) == 1 - if shell: - args = args[0] - - if shell and not is_posix: - # On Windows, we're going to try to use bash - args = ["bash", "-c", args] - - _set_pythonpath(quiet=True) - _run(args, echo=False, shell=shell) - - -@click.command() -@click.argument("sphinx_target", default="html") -@click.option( - "--clean", - is_flag=True, - default=False, - help="Clean previously built docs before building", -) -@click.option( - "--build/--no-build", - "first_build", - default=True, - help="Build numpy before generating docs", -) -@click.option("--jobs", "-j", default="auto", help="Number of parallel build jobs") -@click.pass_context -def meson_docs(ctx, sphinx_target, clean, first_build, jobs): - """📖 Build Sphinx documentation - - By default, SPHINXOPTS="-W", raising errors on warnings. 
- To build without raising on warnings: - - SPHINXOPTS="" spin docs - - To list all Sphinx targets: - - spin docs targets - - To build another Sphinx target: - - spin docs TARGET - - """ - # Detect docs dir - doc_dir_candidates = ("doc", "docs") - doc_dir = next((d for d in doc_dir_candidates if os.path.exists(d)), None) - if doc_dir is None: - print( - f"No documentation folder found; one of {', '.join(doc_dir_candidates)} must exist" - ) - sys.exit(1) - - if sphinx_target in ("targets", "help"): - clean = False - first_build = False - sphinx_target = "help" - - if clean: - doc_dirs = [ - "./doc/build/", - "./doc/source/api/", - "./doc/source/auto_examples/", - "./doc/source/jupyterlite_contents/", - ] - for doc_dir in doc_dirs: - if os.path.isdir(doc_dir): - print(f"Removing {doc_dir!r}") - shutil.rmtree(doc_dir) - - build_cmd = _get_configured_command("build") - - if build_cmd and first_build: - click.secho( - "Invoking `build` prior to building docs:", bold=True, fg="bright_green" - ) - ctx.invoke(build_cmd) - - try: - site_path = _get_site_packages() - except FileNotFoundError: - print("No built numpy found; run `spin build` first.") - sys.exit(1) - - opts = os.environ.get("SPHINXOPTS", "-W") - os.environ["SPHINXOPTS"] = f"{opts} -j {jobs}" - click.secho( - f"$ export SPHINXOPTS={os.environ['SPHINXOPTS']}", bold=True, fg="bright_blue" - ) - - os.environ["PYTHONPATH"] = f'{site_path}{os.sep}:{os.environ.get("PYTHONPATH", "")}' - click.secho( - f"$ export PYTHONPATH={os.environ['PYTHONPATH']}", bold=True, fg="bright_blue" - ) - _run(["make", "-C", "doc", sphinx_target], replace=True) - - -# END of spin/cmds/meson.py - - -# The numpy-vendored version of Meson. Put the directory that the executable -# `meson` is in at the front of the PATH. -curdir = pathlib.Path(__file__).parent.resolve() -meson_executable_dir = str(curdir.parent / 'vendored-meson' / 'entrypoint') -os.environ['PATH'] = meson_executable_dir + os.pathsep + os.environ['PATH'] # Check that the meson git submodule is present +curdir = pathlib.Path(__file__).parent meson_import_dir = curdir.parent / 'vendored-meson' / 'meson' / 'mesonbuild' if not meson_import_dir.exists(): raise RuntimeError( @@ -479,7 +53,7 @@ def build(ctx, meson_args, jobs=None, clean=False, verbose=False): CFLAGS="-O0 -g" spin build """ - ctx.forward(meson_build) + ctx.forward(meson.build) @click.command() @@ -528,9 +102,9 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, install_deps): if install_deps: util.run(['pip', 'install', '-q', '-r', 'doc_requirements.txt']) - meson_docs.ignore_unknown_options = True + meson.docs.ignore_unknown_options = True del ctx.params['install_deps'] - ctx.forward(meson_docs) + ctx.forward(meson.docs) @click.command() @@ -616,7 +190,7 @@ def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose): for extra_param in ('markexpr', 'n_jobs', 'tests', 'verbose'): del ctx.params[extra_param] - ctx.forward(meson_test) + ctx.forward(meson.test) @click.command() @@ -643,7 +217,7 @@ def gdb(code, gdb_args): spin gdb my_tests.py spin gdb -- my_tests.py --mytest-flag """ - _set_pythonpath() + meson._set_pythonpath() gdb_args = list(gdb_args) if gdb_args and gdb_args[0].endswith('.py'): @@ -817,7 +391,7 @@ def bench(ctx, tests, compare, verbose, commits): ) ctx.invoke(build) - _set_pythonpath() + meson._set_pythonpath() p = util.run( ['python', '-c', 'import numpy as np; print(np.__version__)'], @@ -874,7 +448,7 @@ def python(ctx, python_args): env = os.environ env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') 
ctx.invoke(build) - ctx.forward(meson_python) + ctx.forward(meson.python) @click.command(context_settings={ @@ -894,7 +468,7 @@ def ipython(ctx, ipython_args): ctx.invoke(build) - ppath = _set_pythonpath() + ppath = meson._set_pythonpath() print(f'💻 Launching IPython with PYTHONPATH="{ppath}"') preimport = (r"import numpy as np; " @@ -924,4 +498,16 @@ def run(ctx, args): Install Git for Windows if you don't have Bash already. """ ctx.invoke(build) - ctx.forward(meson_run) + ctx.forward(meson.run) + + +@click.command(context_settings={"ignore_unknown_options": True}) +@click.pass_context +def mypy(ctx): + """🦆 Run Mypy tests for NumPy + """ + env = os.environ + env['NPY_RUN_MYPY_IN_TESTSUITE'] = '1' + ctx.params['pytest_args'] = [os.path.join('numpy', 'typing')] + ctx.params['markexpr'] = 'full' + ctx.forward(test) diff --git a/build_requirements.txt b/build_requirements.txt index e7e776a7de89..e12ac1cf4e2c 100644 --- a/build_requirements.txt +++ b/build_requirements.txt @@ -2,4 +2,4 @@ meson-python>=0.13.1 Cython>=3.0 wheel==0.38.1 ninja -spin==0.4 +spin==0.5 diff --git a/pyproject.toml b/pyproject.toml index b75b81931974..3f3bc121ad10 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -193,8 +193,15 @@ repair-wheel-command = "bash ./tools/wheels/repair_windows.sh {wheel} {dest_dir} [tool.spin] package = 'numpy' +[tool.spin.meson] +cli = 'vendored-meson/meson/meson.py' + [tool.spin.commands] -"Build" = [".spin/cmds.py:build", ".spin/cmds.py:test"] +"Build" = [ + ".spin/cmds.py:build", + ".spin/cmds.py:test", + ".spin/cmds.py:mypy", +] "Environments" = [ ".spin/cmds.py:run", ".spin/cmds.py:ipython", ".spin/cmds.py:python", ".spin/cmds.py:gdb" From 0a64aafd766ee935815e645ee70c8926114ce2b7 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 21 Aug 2023 19:49:02 -0600 Subject: [PATCH 042/120] BUG: ``asv dev`` has been removed, use ``asv run``. --- tools/travis-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 637961ccc32b..9664b83c3c73 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -183,7 +183,7 @@ EOF pushd ../benchmarks $PYTHON `which asv` check --python=same $PYTHON `which asv` machine --machine travis - $PYTHON `which asv` dev -q 2>&1| tee asv-output.log + $PYTHON `which asv` run -q 2>&1| tee asv-output.log if grep -q Traceback asv-output.log; then echo "Some benchmarks have errors!" exit 1 From 4d3a1bc62784c443419a83931012076b544871f7 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Mon, 21 Aug 2023 01:00:57 +0400 Subject: [PATCH 043/120] BUG: Fix meson build failure due to uncleaned inplace auto-generated dispatch config headers Ensure that the distutils generated config files and wrapped sources, derived from dispatch-able sources are consistently generated within the build directory when the inplace build option is enabled. This change is crucial to prevent conflicts with meson-generated config headers. Given that `spin build --clean` does not remove these headers, which requires cleaning up the numpy root via `git clean` otherwise the build will fails. 
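To illustrate the failure mode (the commands below are indicative only, not part of this patch): an in-place distutils build used to leave stale dispatch config headers and wrapped sources in the source tree, and the only reliable cleanup before a Meson build was something along the lines of

    # hypothetical cleanup; keeps the build directories, removes stray generated files
    git clean -xdf -e build -e build-install

The change below instead writes those generated files under the build directory in all cases, so Meson's own generated config headers in the source tree are no longer overwritten.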
--- .gitignore | 5 ----- numpy/distutils/command/build_clib.py | 4 ++-- numpy/distutils/command/build_ext.py | 13 ++++++++++++- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/.gitignore b/.gitignore index 860aa9aa81d4..3de77ebc4cac 100644 --- a/.gitignore +++ b/.gitignore @@ -208,11 +208,6 @@ tools/swig/test/Array.py # SIMD generated files # ################################### -# config headers of dispatchable sources -*.dispatch.h -# wrapped sources of dispatched targets, e.g. *.dispatch.avx2.c -*.dispatch.*.c -*.dispatch.*.cpp # _simd module numpy/core/src/_simd/_simd.dispatch.c numpy/core/src/_simd/_simd_data.inc diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py index 11999dae2322..6cd2f3e7eeca 100644 --- a/numpy/distutils/command/build_clib.py +++ b/numpy/distutils/command/build_clib.py @@ -320,8 +320,8 @@ def build_a_library(self, build_info, lib_name, libraries): dispatch_hpath = os.path.join("numpy", "distutils", "include") dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) include_dirs.append(dispatch_hpath) - - copt_build_src = None if self.inplace else bsrc_dir + # copt_build_src = None if self.inplace else bsrc_dir + copt_build_src = bsrc_dir for _srcs, _dst, _ext in ( ((c_sources,), copt_c_sources, ('.dispatch.c',)), ((c_sources, cxx_sources), copt_cxx_sources, diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index 68b13c0dd370..5c62d90c5768 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -458,7 +458,18 @@ def build_extension(self, ext): dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) include_dirs.append(dispatch_hpath) - copt_build_src = None if self.inplace else bsrc_dir + # copt_build_src = None if self.inplace else bsrc_dir + # Always generate the generated config files and + # dispatch-able sources inside the build directory, + # even if the build option `inplace` is enabled. + # This approach prevents conflicts with Meson-generated + # config headers. Since `spin build --clean` will not remove + # these headers, they might overwrite the generated Meson headers, + # causing compatibility issues. Maintaining separate directories + # ensures compatibility between distutils dispatch config headers + # and Meson headers, avoiding build disruptions. + # See gh-24450 for more details. 
+ copt_build_src = bsrc_dir for _srcs, _dst, _ext in ( ((c_sources,), copt_c_sources, ('.dispatch.c',)), ((c_sources, cxx_sources), copt_cxx_sources, From 6adf17429c8e0a3e8fb78152b377176572bbe6b6 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 24 Aug 2023 05:18:55 +0200 Subject: [PATCH 044/120] BUG: fix issue with git-version script, needs a shebang to run Closes gh-24514 --- meson.build | 2 +- numpy/_build_utils/gitversion.py | 1 + numpy/meson.build | 13 +++++-------- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/meson.build b/meson.build index a9e68c2eb94b..0469f7f4590b 100644 --- a/meson.build +++ b/meson.build @@ -3,7 +3,7 @@ project( 'c', 'cpp', 'cython', version: run_command( # This should become `numpy/_version.py` in NumPy 2.0 - ['python', 'numpy/_build_utils/gitversion.py'], + ['numpy/_build_utils/gitversion.py'], check: true).stdout().strip(), license: 'BSD-3', meson_version: '>=1.2.99', # version in vendored-meson is 1.2.99 diff --git a/numpy/_build_utils/gitversion.py b/numpy/_build_utils/gitversion.py index d910581a85d4..1d7898b3409e 100644 --- a/numpy/_build_utils/gitversion.py +++ b/numpy/_build_utils/gitversion.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 import os import textwrap diff --git a/numpy/meson.build b/numpy/meson.build index 9b9a3581a9c6..40766081d140 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -188,14 +188,6 @@ if not have_lapack and not allow_noblas 'for some linear algebra operations).') endif -# Generate version.py for sdist -gitversion = files('_build_utils/gitversion.py')[0] -python_bin = py.full_path() -meson.add_dist_script( - py, - [gitversion, '--meson-dist', '--write', 'numpy/version.py'] -) - # Copy the main __init__.py|pxd files to the build dir (needed for Cython) __init__py = fs.copyfile('__init__.py') __init__pxd = fs.copyfile('__init__.pxd') @@ -257,6 +249,11 @@ endif np_dir = py.get_install_dir() / 'numpy' +# Generate version.py for sdist +meson.add_dist_script( + ['_build_utils/gitversion.py', '--meson-dist', '--write', + 'numpy/version.py'] +) if not fs.exists('version.py') generate_version = custom_target( 'generate-version', From 8a3a15ba811b4014cfd9f53fb392a9df9ef1e390 Mon Sep 17 00:00:00 2001 From: mattip Date: Wed, 23 Aug 2023 16:01:40 +0300 Subject: [PATCH 045/120] use a default assignment for git_hash [skip ci] --- numpy/_build_utils/gitversion.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/_build_utils/gitversion.py b/numpy/_build_utils/gitversion.py index d910581a85d4..6686ddb51fdd 100644 --- a/numpy/_build_utils/gitversion.py +++ b/numpy/_build_utils/gitversion.py @@ -24,6 +24,7 @@ def git_version(version): import subprocess import os.path + git_hash = '' try: p = subprocess.Popen( ['git', 'log', '-1', '--format="%H %aI"'], @@ -48,8 +49,6 @@ def git_version(version): # Only attach git tag to development versions if 'dev' in version: version += f'+git{git_date}.{git_hash[:7]}' - else: - git_hash = '' return version, git_hash From 973850a17cf79bc5233d0b8192d7398d3f46fc6e Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 21 Aug 2023 15:45:11 -0600 Subject: [PATCH 046/120] BUG: fix NPY_cast_info error handling in choose --- numpy/core/src/multiarray/item_selection.c | 2 +- numpy/core/tests/test_multiarray.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index e935a27edb6c..c1d4a9508158 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ 
b/numpy/core/src/multiarray/item_selection.c @@ -968,6 +968,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, PyArrayObject **mps, *ap; PyArrayMultiIterObject *multi = NULL; npy_intp mi; + NPY_cast_info cast_info = {.func = NULL}; ap = NULL; /* @@ -1045,7 +1046,6 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, npy_intp transfer_strides[2] = {elsize, elsize}; npy_intp one = 1; NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; - NPY_cast_info cast_info = {.func = NULL}; if (PyDataType_REFCHK(dtype)) { int is_aligned = IsUintAligned(obj); PyArray_GetDTypeTransferFunction( diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 869ebe4d8ac7..b9021ccb24cf 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -10042,3 +10042,9 @@ def test_gh_22683(): np.choose(np.zeros(10000, dtype=int), [a], out=a) refc_end = sys.getrefcount(b) assert refc_end - refc_start < 10 + + +def test_gh_24459(): + a = np.zeros((50, 3), dtype=np.float64) + with pytest.raises(TypeError): + np.choose(a, [3, -1]) From 5db99053918b743dfeea9eb7766a306321d09dbf Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Wed, 23 Nov 2022 05:25:15 +0000 Subject: [PATCH 047/120] BUG: Hotfix for handling common blocks in f2py --- numpy/f2py/crackfortran.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index cce93dd7f530..002a2edb1275 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -614,7 +614,8 @@ def readfortrancode(ffile, dowithline=show, istop=1): r'endinterface|endsubroutine|endfunction') endpattern = re.compile( beforethisafter % ('', groupends, groupends, '.*'), re.I), 'end' -endifs = r'end\s*(if|do|where|select|while|forall|associate|block|' + \ +# block, the Fortran 2008 construct needs special handling in the rest of the file +endifs = r'end\s*(if|do|where|select|while|forall|associate|' + \ r'critical|enum|team)' endifpattern = re.compile( beforethisafter % (r'[\w]*?', endifs, endifs, '.*'), re.I), 'endif' From 215cbe0eb67037eb931a5ff4d9583e636469db98 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 29 Jan 2023 23:01:50 +0000 Subject: [PATCH 048/120] TST: Add a test for gh-22648 --- numpy/f2py/tests/src/crackfortran/gh22648.pyf | 7 +++++++ numpy/f2py/tests/test_crackfortran.py | 10 ++++++++++ 2 files changed, 17 insertions(+) create mode 100644 numpy/f2py/tests/src/crackfortran/gh22648.pyf diff --git a/numpy/f2py/tests/src/crackfortran/gh22648.pyf b/numpy/f2py/tests/src/crackfortran/gh22648.pyf new file mode 100644 index 000000000000..b3454f18635f --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/gh22648.pyf @@ -0,0 +1,7 @@ +python module iri16py ! in + interface ! in :iri16py + block data ! in :iri16py:iridreg_modified.for + COMMON /fircom/ eden,tabhe,tabla,tabmo,tabza,tabfl + end block data + end interface +end python module iri16py diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index c07ae036c0e5..c8d9ddb88460 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -8,6 +8,8 @@ from . 
import util from numpy.f2py import crackfortran import textwrap +import contextlib +import io class TestNoSpace(util.F2PyTest): @@ -338,3 +340,11 @@ def test_end_if_comment(self): crackfortran.crackfortran([str(fpath)]) except Exception as exc: assert False, f"'crackfortran.crackfortran' raised an exception {exc}" + + +class TestF77CommonBlockReader(): + def test_gh22648(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "gh22648.pyf") + with contextlib.redirect_stdout(io.StringIO()) as stdout_f2py: + mod = crackfortran.crackfortran([str(fpath)]) + assert "Mismatch" not in stdout_f2py.getvalue() From 63f4d5dbad2a49280d21ff63ef653d6df1c12692 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 24 Apr 2023 17:30:24 +0200 Subject: [PATCH 049/120] CI,TYP: Bump mypy to 1.3.0 --- environment.yml | 3 +- numpy/typing/tests/data/fail/modules.pyi | 2 +- numpy/typing/tests/data/fail/npyio.pyi | 3 - numpy/typing/tests/data/mypy.ini | 6 - numpy/typing/tests/data/pass/lib_utils.py | 5 +- numpy/typing/tests/data/pass/ufunc_config.py | 26 ++++- numpy/typing/tests/data/reveal/arithmetic.pyi | 104 ++++++++---------- .../typing/tests/data/reveal/bitwise_ops.pyi | 16 +-- numpy/typing/tests/data/reveal/mod.pyi | 30 ++--- .../tests/data/reveal/nbit_base_example.pyi | 4 +- numpy/typing/tests/data/reveal/type_check.pyi | 8 +- test_requirements.txt | 2 +- 12 files changed, 93 insertions(+), 116 deletions(-) diff --git a/environment.yml b/environment.yml index a6a309c8de29..7c92abbb0e9c 100644 --- a/environment.yml +++ b/environment.yml @@ -24,8 +24,7 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - mypy=0.981 - - typing_extensions>=4.2.0 + - mypy=1.3.0 # For building docs - sphinx>=4.5.0 - sphinx-design diff --git a/numpy/typing/tests/data/fail/modules.pyi b/numpy/typing/tests/data/fail/modules.pyi index 59e724f22b48..c86627e0c8ea 100644 --- a/numpy/typing/tests/data/fail/modules.pyi +++ b/numpy/typing/tests/data/fail/modules.pyi @@ -6,7 +6,7 @@ np.bob # E: Module has no attribute # Stdlib modules in the namespace by accident np.warnings # E: Module has no attribute np.sys # E: Module has no attribute -np.os # E: Module has no attribute +np.os # E: Module "numpy" does not explicitly export np.math # E: Module has no attribute # Public sub-modules that are not imported to their parent module by default; diff --git a/numpy/typing/tests/data/fail/npyio.pyi b/numpy/typing/tests/data/fail/npyio.pyi index c91b4c9cb846..1749a6847e9b 100644 --- a/numpy/typing/tests/data/fail/npyio.pyi +++ b/numpy/typing/tests/data/fail/npyio.pyi @@ -13,13 +13,10 @@ AR_i8: npt.NDArray[np.int64] np.load(str_file) # E: incompatible type np.save(bytes_path, AR_i8) # E: incompatible type -np.save(str_file, AR_i8) # E: incompatible type np.savez(bytes_path, AR_i8) # E: incompatible type -np.savez(str_file, AR_i8) # E: incompatible type np.savez_compressed(bytes_path, AR_i8) # E: incompatible type -np.savez_compressed(str_file, AR_i8) # E: incompatible type np.loadtxt(bytes_path) # E: incompatible type diff --git a/numpy/typing/tests/data/mypy.ini b/numpy/typing/tests/data/mypy.ini index baad759bbdc3..13928c2bca3a 100644 --- a/numpy/typing/tests/data/mypy.ini +++ b/numpy/typing/tests/data/mypy.ini @@ -2,9 +2,3 @@ plugins = numpy.typing.mypy_plugin show_absolute_path = True implicit_reexport = False - -[mypy-numpy] -ignore_errors = True - -[mypy-numpy.*] -ignore_errors = True diff --git a/numpy/typing/tests/data/pass/lib_utils.py b/numpy/typing/tests/data/pass/lib_utils.py index 
65640c28873d..53a3e17432e4 100644 --- a/numpy/typing/tests/data/pass/lib_utils.py +++ b/numpy/typing/tests/data/pass/lib_utils.py @@ -7,7 +7,10 @@ FILE = StringIO() AR = np.arange(10, dtype=np.float64) -def func(a: int) -> bool: ... + +def func(a: int) -> bool: + return True + np.deprecate(func) np.deprecate() diff --git a/numpy/typing/tests/data/pass/ufunc_config.py b/numpy/typing/tests/data/pass/ufunc_config.py index 2d13142457df..58dd3e550a51 100644 --- a/numpy/typing/tests/data/pass/ufunc_config.py +++ b/numpy/typing/tests/data/pass/ufunc_config.py @@ -2,18 +2,32 @@ import numpy as np -def func1(a: str, b: int) -> None: ... -def func2(a: str, b: int, c: float = ...) -> None: ... -def func3(a: str, b: int) -> int: ... + +def func1(a: str, b: int) -> None: + return None + + +def func2(a: str, b: int, c: float = 1.0) -> None: + return None + + +def func3(a: str, b: int) -> int: + return 0 + class Write1: - def write(self, a: str) -> None: ... + def write(self, a: str) -> None: + return None + class Write2: - def write(self, a: str, b: int = ...) -> None: ... + def write(self, a: str, b: int = 1) -> None: + return None + class Write3: - def write(self, a: str) -> int: ... + def write(self, a: str) -> int: + return 0 _err_default = np.geterr() diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 0ca5e9772958..fe983cca1f2b 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -343,184 +343,168 @@ reveal_type(c8 / b_) # E: {complex64} # Complex -reveal_type(c16 + f16) # E: {complex256} +reveal_type(c16 + f16) # E: complexfloating[Union[_64Bit, _128Bit], Union[_64Bit, _128Bit]] reveal_type(c16 + c16) # E: {complex128} reveal_type(c16 + f8) # E: {complex128} reveal_type(c16 + i8) # E: {complex128} -reveal_type(c16 + c8) # E: {complex128} -reveal_type(c16 + f4) # E: {complex128} -reveal_type(c16 + i4) # E: {complex128} +reveal_type(c16 + c8) # E: complexfloating[Union[_64Bit, _32Bit], Union[_64Bit, _32Bit]] +reveal_type(c16 + f4) # E: complexfloating[Union[_64Bit, _32Bit], Union[_64Bit, _32Bit]] +reveal_type(c16 + i4) # E: complexfloating[Union[_64Bit, _32Bit], Union[_64Bit, _32Bit]] reveal_type(c16 + b_) # E: {complex128} reveal_type(c16 + b) # E: {complex128} reveal_type(c16 + c) # E: {complex128} reveal_type(c16 + f) # E: {complex128} -reveal_type(c16 + i) # E: {complex128} reveal_type(c16 + AR_f) # E: Any -reveal_type(f16 + c16) # E: {complex256} +reveal_type(f16 + c16) # E: complexfloating[Union[_64Bit, _128Bit], Union[_64Bit, _128Bit]] reveal_type(c16 + c16) # E: {complex128} reveal_type(f8 + c16) # E: {complex128} reveal_type(i8 + c16) # E: {complex128} -reveal_type(c8 + c16) # E: {complex128} -reveal_type(f4 + c16) # E: {complex128} -reveal_type(i4 + c16) # E: {complex128} +reveal_type(c8 + c16) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] +reveal_type(f4 + c16) # E: complexfloating[Union[_64Bit, _32Bit], Union[_64Bit, _32Bit]] +reveal_type(i4 + c16) # E: complexfloating[Union[_64Bit, _32Bit], Union[_64Bit, _32Bit]] reveal_type(b_ + c16) # E: {complex128} reveal_type(b + c16) # E: {complex128} reveal_type(c + c16) # E: {complex128} reveal_type(f + c16) # E: {complex128} -reveal_type(i + c16) # E: {complex128} reveal_type(AR_f + c16) # E: Any -reveal_type(c8 + f16) # E: {complex256} -reveal_type(c8 + c16) # E: {complex128} -reveal_type(c8 + f8) # E: {complex128} -reveal_type(c8 + i8) # E: {complex128} +reveal_type(c8 + f16) # E: 
complexfloating[Union[_32Bit, _128Bit], Union[_32Bit, _128Bit]] +reveal_type(c8 + c16) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] +reveal_type(c8 + f8) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] +reveal_type(c8 + i8) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] reveal_type(c8 + c8) # E: {complex64} reveal_type(c8 + f4) # E: {complex64} reveal_type(c8 + i4) # E: {complex64} reveal_type(c8 + b_) # E: {complex64} reveal_type(c8 + b) # E: {complex64} -reveal_type(c8 + c) # E: {complex128} -reveal_type(c8 + f) # E: {complex128} -reveal_type(c8 + i) # E: complexfloating[{_NBitInt}, {_NBitInt}] +reveal_type(c8 + c) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] +reveal_type(c8 + f) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] reveal_type(c8 + AR_f) # E: Any -reveal_type(f16 + c8) # E: {complex256} -reveal_type(c16 + c8) # E: {complex128} -reveal_type(f8 + c8) # E: {complex128} -reveal_type(i8 + c8) # E: {complex128} +reveal_type(f16 + c8) # E: complexfloating[Union[_32Bit, _128Bit], Union[_32Bit, _128Bit]] +reveal_type(c16 + c8) # E: complexfloating[Union[_64Bit, _32Bit], Union[_64Bit, _32Bit]] +reveal_type(f8 + c8) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] +reveal_type(i8 + c8) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] reveal_type(c8 + c8) # E: {complex64} reveal_type(f4 + c8) # E: {complex64} reveal_type(i4 + c8) # E: {complex64} reveal_type(b_ + c8) # E: {complex64} reveal_type(b + c8) # E: {complex64} -reveal_type(c + c8) # E: {complex128} -reveal_type(f + c8) # E: {complex128} -reveal_type(i + c8) # E: complexfloating[{_NBitInt}, {_NBitInt}] +reveal_type(c + c8) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] +reveal_type(f + c8) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] reveal_type(AR_f + c8) # E: Any # Float -reveal_type(f8 + f16) # E: {float128} +reveal_type(f8 + f16) # E: floating[Union[_64Bit, _128Bit]] reveal_type(f8 + f8) # E: {float64} reveal_type(f8 + i8) # E: {float64} -reveal_type(f8 + f4) # E: {float64} -reveal_type(f8 + i4) # E: {float64} +reveal_type(f8 + f4) # E: floating[Union[_64Bit, _32Bit]] +reveal_type(f8 + i4) # E: floating[Union[_64Bit, _32Bit]] reveal_type(f8 + b_) # E: {float64} reveal_type(f8 + b) # E: {float64} reveal_type(f8 + c) # E: {complex128} reveal_type(f8 + f) # E: {float64} -reveal_type(f8 + i) # E: {float64} reveal_type(f8 + AR_f) # E: Any -reveal_type(f16 + f8) # E: {float128} +reveal_type(f16 + f8) # E: floating[Union[_128Bit, _64Bit]] reveal_type(f8 + f8) # E: {float64} reveal_type(i8 + f8) # E: {float64} -reveal_type(f4 + f8) # E: {float64} -reveal_type(i4 + f8) # E: {float64} +reveal_type(f4 + f8) # E: floating[Union[_32Bit, _64Bit]] +reveal_type(i4 + f8) # E: floating[Union[_64Bit, _32Bit]] reveal_type(b_ + f8) # E: {float64} reveal_type(b + f8) # E: {float64} reveal_type(c + f8) # E: {complex128} reveal_type(f + f8) # E: {float64} -reveal_type(i + f8) # E: {float64} reveal_type(AR_f + f8) # E: Any -reveal_type(f4 + f16) # E: {float128} -reveal_type(f4 + f8) # E: {float64} -reveal_type(f4 + i8) # E: {float64} +reveal_type(f4 + f16) # E: floating[Union[_32Bit, _128Bit]] +reveal_type(f4 + f8) # E: floating[Union[_32Bit, _64Bit]] +reveal_type(f4 + i8) # E: floating[Union[_32Bit, _64Bit]] reveal_type(f4 + f4) # E: {float32} reveal_type(f4 + i4) # E: {float32} reveal_type(f4 + b_) # E: {float32} reveal_type(f4 + b) # E: {float32} -reveal_type(f4 + c) # 
E: {complex128} -reveal_type(f4 + f) # E: {float64} -reveal_type(f4 + i) # E: floating[{_NBitInt}] +reveal_type(f4 + c) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] +reveal_type(f4 + f) # E: floating[Union[_32Bit, _64Bit]] reveal_type(f4 + AR_f) # E: Any -reveal_type(f16 + f4) # E: {float128} -reveal_type(f8 + f4) # E: {float64} -reveal_type(i8 + f4) # E: {float64} +reveal_type(f16 + f4) # E: floating[Union[_128Bit, _32Bit]] +reveal_type(f8 + f4) # E: floating[Union[_64Bit, _32Bit]] +reveal_type(i8 + f4) # E: floating[Union[_32Bit, _64Bit]] reveal_type(f4 + f4) # E: {float32} reveal_type(i4 + f4) # E: {float32} reveal_type(b_ + f4) # E: {float32} reveal_type(b + f4) # E: {float32} -reveal_type(c + f4) # E: {complex128} -reveal_type(f + f4) # E: {float64} -reveal_type(i + f4) # E: floating[{_NBitInt}] +reveal_type(c + f4) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] +reveal_type(f + f4) # E: floating[Union[_32Bit, _64Bit]] reveal_type(AR_f + f4) # E: Any # Int reveal_type(i8 + i8) # E: {int64} reveal_type(i8 + u8) # E: Any -reveal_type(i8 + i4) # E: {int64} +reveal_type(i8 + i4) # E: signedinteger[Union[_64Bit, _32Bit]] reveal_type(i8 + u4) # E: Any reveal_type(i8 + b_) # E: {int64} reveal_type(i8 + b) # E: {int64} reveal_type(i8 + c) # E: {complex128} reveal_type(i8 + f) # E: {float64} -reveal_type(i8 + i) # E: {int64} reveal_type(i8 + AR_f) # E: Any reveal_type(u8 + u8) # E: {uint64} reveal_type(u8 + i4) # E: Any -reveal_type(u8 + u4) # E: {uint64} +reveal_type(u8 + u4) # E: signedinteger[Union[_64Bit, _32Bit]] reveal_type(u8 + b_) # E: {uint64} reveal_type(u8 + b) # E: {uint64} reveal_type(u8 + c) # E: {complex128} reveal_type(u8 + f) # E: {float64} -reveal_type(u8 + i) # E: Any reveal_type(u8 + AR_f) # E: Any reveal_type(i8 + i8) # E: {int64} reveal_type(u8 + i8) # E: Any -reveal_type(i4 + i8) # E: {int64} +reveal_type(i4 + i8) # E: signedinteger[Union[_32Bit, _64Bit]] reveal_type(u4 + i8) # E: Any reveal_type(b_ + i8) # E: {int64} reveal_type(b + i8) # E: {int64} reveal_type(c + i8) # E: {complex128} reveal_type(f + i8) # E: {float64} -reveal_type(i + i8) # E: {int64} reveal_type(AR_f + i8) # E: Any reveal_type(u8 + u8) # E: {uint64} reveal_type(i4 + u8) # E: Any -reveal_type(u4 + u8) # E: {uint64} +reveal_type(u4 + u8) # E: unsignedinteger[Union[_32Bit, _64Bit]] reveal_type(b_ + u8) # E: {uint64} reveal_type(b + u8) # E: {uint64} reveal_type(c + u8) # E: {complex128} reveal_type(f + u8) # E: {float64} -reveal_type(i + u8) # E: Any reveal_type(AR_f + u8) # E: Any -reveal_type(i4 + i8) # E: {int64} +reveal_type(i4 + i8) # E: signedinteger[Union[_32Bit, _64Bit]] reveal_type(i4 + i4) # E: {int32} -reveal_type(i4 + i) # E: {int_} reveal_type(i4 + b_) # E: {int32} reveal_type(i4 + b) # E: {int32} reveal_type(i4 + AR_f) # E: Any reveal_type(u4 + i8) # E: Any reveal_type(u4 + i4) # E: Any -reveal_type(u4 + u8) # E: {uint64} +reveal_type(u4 + u8) # E: unsignedinteger[Union[_32Bit, _64Bit]] reveal_type(u4 + u4) # E: {uint32} -reveal_type(u4 + i) # E: Any reveal_type(u4 + b_) # E: {uint32} reveal_type(u4 + b) # E: {uint32} reveal_type(u4 + AR_f) # E: Any -reveal_type(i8 + i4) # E: {int64} +reveal_type(i8 + i4) # E: signedinteger[Union[_64Bit, _32Bit]] reveal_type(i4 + i4) # E: {int32} -reveal_type(i + i4) # E: {int_} reveal_type(b_ + i4) # E: {int32} reveal_type(b + i4) # E: {int32} reveal_type(AR_f + i4) # E: Any reveal_type(i8 + u4) # E: Any reveal_type(i4 + u4) # E: Any -reveal_type(u8 + u4) # E: {uint64} +reveal_type(u8 + u4) # E: 
unsignedinteger[Union[_64Bit, _32Bit]] reveal_type(u4 + u4) # E: {uint32} reveal_type(b_ + u4) # E: {uint32} reveal_type(b + u4) # E: {uint32} -reveal_type(i + u4) # E: Any reveal_type(AR_f + u4) # E: Any diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index f293ef65b58b..3273f8226776 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -33,17 +33,11 @@ reveal_type(i4 | i4) # E: {int32} reveal_type(i4 ^ i4) # E: {int32} reveal_type(i4 & i4) # E: {int32} -reveal_type(i8 << i4) # E: {int64} -reveal_type(i8 >> i4) # E: {int64} -reveal_type(i8 | i4) # E: {int64} -reveal_type(i8 ^ i4) # E: {int64} -reveal_type(i8 & i4) # E: {int64} - -reveal_type(i8 << i) # E: {int64} -reveal_type(i8 >> i) # E: {int64} -reveal_type(i8 | i) # E: {int64} -reveal_type(i8 ^ i) # E: {int64} -reveal_type(i8 & i) # E: {int64} +reveal_type(i8 << i4) # E: signedinteger[Union[_64Bit, _32Bit]] +reveal_type(i8 >> i4) # E: signedinteger[Union[_64Bit, _32Bit]] +reveal_type(i8 | i4) # E: signedinteger[Union[_64Bit, _32Bit]] +reveal_type(i8 ^ i4) # E: signedinteger[Union[_64Bit, _32Bit]] +reveal_type(i8 & i4) # E: signedinteger[Union[_64Bit, _32Bit]] reveal_type(i8 << b_) # E: {int64} reveal_type(i8 >> b_) # E: {int64} diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index b2790b7f3973..d22f2df12af1 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -70,45 +70,41 @@ reveal_type(divmod(AR_b, b_)) # E: ndarray[Any, dtype[{int8}]], ndarray[Any, dt # int reveal_type(i8 % b) # E: {int64} -reveal_type(i8 % i) # E: {int64} reveal_type(i8 % f) # E: {float64} reveal_type(i8 % i8) # E: {int64} reveal_type(i8 % f8) # E: {float64} -reveal_type(i4 % i8) # E: {int64} -reveal_type(i4 % f8) # E: {float64} +reveal_type(i4 % i8) # E: signedinteger[Union[_32Bit, _64Bit]] +reveal_type(i4 % f8) # E: floating[Union[_64Bit, _32Bit]] reveal_type(i4 % i4) # E: {int32} reveal_type(i4 % f4) # E: {float32} reveal_type(i8 % AR_b) # E: ndarray[Any, dtype[signedinteger[Any]]] reveal_type(divmod(i8, b)) # E: Tuple[{int64}, {int64}] -reveal_type(divmod(i8, i)) # E: Tuple[{int64}, {int64}] reveal_type(divmod(i8, f)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(i8, i8)) # E: Tuple[{int64}, {int64}] reveal_type(divmod(i8, f8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(i8, i4)) # E: Tuple[{int64}, {int64}] -reveal_type(divmod(i8, f4)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i8, i4)) # E: Tuple[signedinteger[Union[_64Bit, _32Bit]], signedinteger[Union[_64Bit, _32Bit]]] +reveal_type(divmod(i8, f4)) # E: Tuple[floating[Union[_32Bit, _64Bit]], floating[Union[_32Bit, _64Bit]]] reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}] reveal_type(divmod(i4, f4)) # E: Tuple[{float32}, {float32}] reveal_type(divmod(i8, AR_b)) # E: Tuple[ndarray[Any, dtype[signedinteger[Any]]], ndarray[Any, dtype[signedinteger[Any]]]] reveal_type(b % i8) # E: {int64} -reveal_type(i % i8) # E: {int64} reveal_type(f % i8) # E: {float64} reveal_type(i8 % i8) # E: {int64} reveal_type(f8 % i8) # E: {float64} -reveal_type(i8 % i4) # E: {int64} -reveal_type(f8 % i4) # E: {float64} +reveal_type(i8 % i4) # E: signedinteger[Union[_64Bit, _32Bit]] +reveal_type(f8 % i4) # E: floating[Union[_64Bit, _32Bit]] reveal_type(i4 % i4) # E: {int32} reveal_type(f4 % i4) # E: {float32} reveal_type(AR_b % i8) # E: ndarray[Any, dtype[signedinteger[Any]]] 
reveal_type(divmod(b, i8)) # E: Tuple[{int64}, {int64}] -reveal_type(divmod(i, i8)) # E: Tuple[{int64}, {int64}] reveal_type(divmod(f, i8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(i8, i8)) # E: Tuple[{int64}, {int64}] reveal_type(divmod(f8, i8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(i4, i8)) # E: Tuple[{int64}, {int64}] -reveal_type(divmod(f4, i8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i4, i8)) # E: Tuple[signedinteger[Union[_32Bit, _64Bit]], signedinteger[Union[_32Bit, _64Bit]]] +reveal_type(divmod(f4, i8)) # E: Tuple[floating[Union[_32Bit, _64Bit]], floating[Union[_32Bit, _64Bit]]] reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}] reveal_type(divmod(f4, i4)) # E: Tuple[{float32}, {float32}] reveal_type(divmod(AR_b, i8)) # E: Tuple[ndarray[Any, dtype[signedinteger[Any]]], ndarray[Any, dtype[signedinteger[Any]]]] @@ -116,22 +112,19 @@ reveal_type(divmod(AR_b, i8)) # E: Tuple[ndarray[Any, dtype[signedinteger[Any]] # float reveal_type(f8 % b) # E: {float64} -reveal_type(f8 % i) # E: {float64} reveal_type(f8 % f) # E: {float64} -reveal_type(i8 % f4) # E: {float64} +reveal_type(i8 % f4) # E: floating[Union[_32Bit, _64Bit]] reveal_type(f4 % f4) # E: {float32} reveal_type(f8 % AR_b) # E: ndarray[Any, dtype[floating[Any]]] reveal_type(divmod(f8, b)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(f8, i)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f8, f)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(f8, f4)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f8, f4)) # E: Tuple[floating[Union[_64Bit, _32Bit]], floating[Union[_64Bit, _32Bit]]] reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}] reveal_type(divmod(f8, AR_b)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] reveal_type(b % f8) # E: {float64} -reveal_type(i % f8) # E: {float64} reveal_type(f % f8) # E: {float64} reveal_type(f8 % f8) # E: {float64} reveal_type(f8 % f8) # E: {float64} @@ -139,9 +132,8 @@ reveal_type(f4 % f4) # E: {float32} reveal_type(AR_b % f8) # E: ndarray[Any, dtype[floating[Any]]] reveal_type(divmod(b, f8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(i, f8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f, f8)) # E: Tuple[{float64}, {float64}] reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(f4, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f4, f8)) # E: Tuple[floating[Union[_32Bit, _64Bit]], floating[Union[_32Bit, _64Bit]]] reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}] reveal_type(divmod(AR_b, f8)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index a7cc681947ea..f8a30689dc8b 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -16,6 +16,6 @@ f8: np.float64 f4: np.float32 reveal_type(add(f8, i8)) # E: {float64} -reveal_type(add(f4, i8)) # E: {float64} -reveal_type(add(f8, i4)) # E: {float64} +reveal_type(add(f4, i8)) # E: floating[Union[_32Bit, _64Bit]] +reveal_type(add(f8, i4)) # E: floating[Union[_64Bit, _32Bit]] reveal_type(add(f4, i4)) # E: {float32} diff --git a/numpy/typing/tests/data/reveal/type_check.pyi b/numpy/typing/tests/data/reveal/type_check.pyi index ddd319a94adf..af3d1dd41c1d 100644 --- 
a/numpy/typing/tests/data/reveal/type_check.pyi +++ b/numpy/typing/tests/data/reveal/type_check.pyi @@ -67,7 +67,7 @@ reveal_type(np.typename("S1")) # E: Literal['character'] reveal_type(np.common_type(AR_i4)) # E: Type[{float64}] reveal_type(np.common_type(AR_f2)) # E: Type[{float16}] -reveal_type(np.common_type(AR_f2, AR_i4)) # E: Type[{float64}] -reveal_type(np.common_type(AR_f16, AR_i4)) # E: Type[{float128}] -reveal_type(np.common_type(AR_c8, AR_f2)) # E: Type[{complex64}] -reveal_type(np.common_type(AR_f2, AR_c8, AR_i4)) # E: Type[{complex128}] +reveal_type(np.common_type(AR_f2, AR_i4)) # E: Type[floating[Union[_16Bit, _64Bit]]] +reveal_type(np.common_type(AR_f16, AR_i4)) # E: Type[floating[Union[_128Bit, _64Bit]]] +reveal_type(np.common_type(AR_c8, AR_f2)) # E: Type[complexfloating[Union[_16Bit, _32Bit], Union[_16Bit, _32Bit]]] +reveal_type(np.common_type(AR_f2, AR_c8, AR_i4)) # E: Type[complexfloating[Union[_64Bit, _16Bit, _32Bit], Union[_64Bit, _16Bit, _32Bit]]] diff --git a/test_requirements.txt b/test_requirements.txt index 91237409e0ac..b0ef738eb2ec 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -13,7 +13,7 @@ cffi; python_version < '3.10' # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==0.981; platform_python_implementation != "PyPy" +mypy==1.3.0; platform_python_implementation != "PyPy" typing_extensions>=4.2.0 # for optional f2py encoding detection charset-normalizer From 5428852cd78680e47f194cd2d8d562144d3b9d3b Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 15 May 2023 13:58:50 +0200 Subject: [PATCH 050/120] TYP: Improve function-based annotations with `typing.Concatenate` --- numpy/lib/shape_base.pyi | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/numpy/lib/shape_base.pyi b/numpy/lib/shape_base.pyi index 1b718da221e0..7cd9608b42fc 100644 --- a/numpy/lib/shape_base.pyi +++ b/numpy/lib/shape_base.pyi @@ -1,6 +1,12 @@ +import sys from collections.abc import Callable, Sequence from typing import TypeVar, Any, overload, SupportsIndex, Protocol +if sys.version_info >= (3, 10): + from typing import ParamSpec, Concatenate +else: + from typing_extensions import ParamSpec, Concatenate + from numpy import ( generic, integer, @@ -28,6 +34,7 @@ from numpy._typing import ( from numpy.core.shape_base import vstack +_P = ParamSpec("_P") _SCT = TypeVar("_SCT", bound=generic) # The signatures of `__array_wrap__` and `__array_prepare__` are the same; @@ -73,23 +80,21 @@ def put_along_axis( axis: None | int, ) -> None: ... -# TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate` -# xref python/mypy#8645 @overload def apply_along_axis( - func1d: Callable[..., _ArrayLike[_SCT]], + func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_SCT]], axis: SupportsIndex, arr: ArrayLike, - *args: Any, - **kwargs: Any, + *args: _P.args, + **kwargs: _P.kwargs, ) -> NDArray[_SCT]: ... @overload def apply_along_axis( - func1d: Callable[..., ArrayLike], + func1d: Callable[Concatenate[NDArray[Any], _P], ArrayLike], axis: SupportsIndex, arr: ArrayLike, - *args: Any, - **kwargs: Any, + *args: _P.args, + **kwargs: _P.kwargs, ) -> NDArray[Any]: ... 
def apply_over_axes( From 95f0a43538736049f05ed4d0dfb7c02d2a608b4c Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 17 May 2023 17:00:53 +0200 Subject: [PATCH 051/120] CI: Avoid overriding `MYPYPATH` This practice is actively discouraged by mypy --- runtests.py | 5 ++--- tools/travis-test.sh | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/runtests.py b/runtests.py index 8b026bec7b89..fa78f9e9c390 100755 --- a/runtests.py +++ b/runtests.py @@ -194,8 +194,8 @@ def main(argv): sys.path.insert(0, site_dir_noarch) os.environ['PYTHONPATH'] = \ os.pathsep.join(( - site_dir, - site_dir_noarch, + site_dir, + site_dir_noarch, os.environ.get('PYTHONPATH', '') )) else: @@ -256,7 +256,6 @@ def main(argv): "pip install -r test_requirements.txt from the repo root" ) - os.environ['MYPYPATH'] = site_dir # By default mypy won't color the output since it isn't being # invoked from a tty. os.environ['MYPY_FORCE_COLOR'] = '1' diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 9664b83c3c73..90e1e14537cb 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -93,7 +93,6 @@ run_test() if [ -n "$USE_DEBUG" ]; then export PYTHONPATH=$PWD - export MYPYPATH=$PWD fi if [ -n "$RUN_COVERAGE" ]; then From 800edbb2429a9a843049ae21dfec2823aa705902 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 23 Aug 2023 15:27:49 +0200 Subject: [PATCH 052/120] TYP: Bump mypy to 1.5.1 --- environment.yml | 2 +- test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 7c92abbb0e9c..0b25b888c0c1 100644 --- a/environment.yml +++ b/environment.yml @@ -24,7 +24,7 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - mypy=1.3.0 + - mypy=1.5.1 # For building docs - sphinx>=4.5.0 - sphinx-design diff --git a/test_requirements.txt b/test_requirements.txt index b0ef738eb2ec..ff1ed284e37d 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -13,7 +13,7 @@ cffi; python_version < '3.10' # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.3.0; platform_python_implementation != "PyPy" +mypy==1.5.1; platform_python_implementation != "PyPy" typing_extensions>=4.2.0 # for optional f2py encoding detection charset-normalizer From bfa0681abde7b69b5ff96155f81cd8702be3a561 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 23 Aug 2023 19:24:56 +0200 Subject: [PATCH 053/120] TYP: Lower the mypy pin to 1.4.1 --- environment.yml | 2 +- test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 0b25b888c0c1..e23f07ba9334 100644 --- a/environment.yml +++ b/environment.yml @@ -24,7 +24,7 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - mypy=1.5.1 + - mypy=1.4.1 # For building docs - sphinx>=4.5.0 - sphinx-design diff --git a/test_requirements.txt b/test_requirements.txt index ff1ed284e37d..5d52d9843432 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -13,7 +13,7 @@ cffi; python_version < '3.10' # For testing types. 
Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.5.1; platform_python_implementation != "PyPy" +mypy==1.4.1; platform_python_implementation != "PyPy" typing_extensions>=4.2.0 # for optional f2py encoding detection charset-normalizer From 864e3b7ac16102fa4f78636ac1d41e4896ca3938 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Wed, 23 Aug 2023 19:35:06 +0200 Subject: [PATCH 054/120] TYP,TST: Adapt the typing test suite to mypy >=1.4 changes --- .../tests/data/reveal/array_constructors.pyi | 8 +- .../typing/tests/data/reveal/arraysetops.pyi | 30 +++--- numpy/typing/tests/data/reveal/dtype.pyi | 2 +- numpy/typing/tests/data/reveal/einsumfunc.pyi | 16 ++-- numpy/typing/tests/data/reveal/histograms.pyi | 16 ++-- .../typing/tests/data/reveal/index_tricks.pyi | 16 ++-- .../tests/data/reveal/lib_function_base.pyi | 8 +- .../tests/data/reveal/lib_polynomial.pyi | 20 ++-- numpy/typing/tests/data/reveal/lib_utils.pyi | 4 +- numpy/typing/tests/data/reveal/linalg.pyi | 6 +- numpy/typing/tests/data/reveal/mod.pyi | 94 +++++++++---------- numpy/typing/tests/data/reveal/multiarray.pyi | 6 +- .../typing/tests/data/reveal/ndarray_misc.pyi | 2 +- .../typing/tests/data/reveal/numerictypes.pyi | 2 +- numpy/typing/tests/data/reveal/random.pyi | 2 +- numpy/typing/tests/data/reveal/scalars.pyi | 10 +- .../typing/tests/data/reveal/twodim_base.pyi | 18 ++-- numpy/typing/tests/data/reveal/ufuncs.pyi | 8 +- 18 files changed, 134 insertions(+), 134 deletions(-) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 759d521c8d2a..61d3705b1fe2 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -126,10 +126,10 @@ reveal_type(np.linspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]] reveal_type(np.linspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] reveal_type(np.linspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] reveal_type(np.linspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], floating[Any]] -reveal_type(np.linspace(0j, 10, retstep=True)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], complexfloating[Any, Any]] -reveal_type(np.linspace(0, 10, retstep=True, dtype=np.int64)) # E: Tuple[ndarray[Any, dtype[{int64}]], {int64}] -reveal_type(np.linspace(0j, 10, retstep=True, dtype=int)) # E: Tuple[ndarray[Any, dtype[Any]], Any] +reveal_type(np.linspace(0, 10, retstep=True)) # E: tuple[ndarray[Any, dtype[floating[Any]]], floating[Any]] +reveal_type(np.linspace(0j, 10, retstep=True)) # E: tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], complexfloating[Any, Any]] +reveal_type(np.linspace(0, 10, retstep=True, dtype=np.int64)) # E: tuple[ndarray[Any, dtype[{int64}]], {int64}] +reveal_type(np.linspace(0j, 10, retstep=True, dtype=int)) # E: tuple[ndarray[Any, dtype[Any]], Any] reveal_type(np.logspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]] reveal_type(np.logspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] diff --git a/numpy/typing/tests/data/reveal/arraysetops.pyi b/numpy/typing/tests/data/reveal/arraysetops.pyi index 9deff8a8ea29..68d1c068003f 100644 --- a/numpy/typing/tests/data/reveal/arraysetops.pyi +++ b/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -18,7 +18,7 @@ reveal_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 
1.5])) # E: ndarray[Any, dtype[ reveal_type(np.intersect1d(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{int64}]] reveal_type(np.intersect1d(AR_M, AR_M, assume_unique=True)) # E: ndarray[Any, dtype[datetime64]] reveal_type(np.intersect1d(AR_f8, AR_i8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.intersect1d(AR_f8, AR_f8, return_indices=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.intersect1d(AR_f8, AR_f8, return_indices=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] reveal_type(np.setxor1d(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{int64}]] reveal_type(np.setxor1d(AR_M, AR_M, assume_unique=True)) # E: ndarray[Any, dtype[datetime64]] @@ -44,17 +44,17 @@ reveal_type(np.setdiff1d(AR_f8, AR_i8)) # E: ndarray[Any, dtype[Any]] reveal_type(np.unique(AR_f8)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.unique(AR_LIKE_f8, axis=0)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.unique(AR_f8, return_index=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_index=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_f8, return_inverse=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_inverse=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_f8, return_counts=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_counts=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_f8, return_index=True, return_inverse=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_f8, return_index=True, return_counts=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_counts=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_f8, return_inverse=True, return_counts=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_inverse=True, return_counts=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_f8, return_index=True, return_inverse=True, return_counts=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True, return_counts=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_index=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_index=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_inverse=True)) # E: tuple[ndarray[Any, dtype[{float64}]], 
ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_inverse=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_counts=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_counts=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_index=True, return_inverse=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_index=True, return_counts=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_counts=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_inverse=True, return_counts=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_inverse=True, return_counts=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_f8, return_index=True, return_inverse=True, return_counts=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True, return_counts=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index ce6b803d6830..477877a71a1c 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -49,7 +49,7 @@ reveal_type(np.dtype(("U", 10))) # E: dtype[void] # Methods and attributes reveal_type(dtype_U.base) # E: dtype[Any] -reveal_type(dtype_U.subdtype) # E: Union[None, Tuple[dtype[Any], builtins.tuple[builtins.int, ...]]] +reveal_type(dtype_U.subdtype) # E: Union[None, tuple[dtype[Any], builtins.tuple[builtins.int, ...]]] reveal_type(dtype_U.newbyteorder()) # E: dtype[str_] reveal_type(dtype_U.type) # E: Type[str_] reveal_type(dtype_U.name) # E: str diff --git a/numpy/typing/tests/data/reveal/einsumfunc.pyi b/numpy/typing/tests/data/reveal/einsumfunc.pyi index 5f6415f275c3..e19ed233c569 100644 --- a/numpy/typing/tests/data/reveal/einsumfunc.pyi +++ b/numpy/typing/tests/data/reveal/einsumfunc.pyi @@ -26,13 +26,13 @@ reveal_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsaf reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16")) # E: Any reveal_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe")) # E: Any -reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: Tuple[builtins.list[Any], builtins.str] -reveal_type(np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u)) # E: Tuple[builtins.list[Any], builtins.str] -reveal_type(np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: Tuple[builtins.list[Any], builtins.str] -reveal_type(np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: Tuple[builtins.list[Any], builtins.str] -reveal_type(np.einsum_path("i,i->i", 
AR_LIKE_c, AR_LIKE_c)) # E: Tuple[builtins.list[Any], builtins.str] -reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i)) # E: Tuple[builtins.list[Any], builtins.str] -reveal_type(np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)) # E: Tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u)) # E: tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c)) # E: tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i)) # E: tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)) # E: tuple[builtins.list[Any], builtins.str] reveal_type(np.einsum([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i)) # E: Any -reveal_type(np.einsum_path([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i)) # E: Tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i)) # E: tuple[builtins.list[Any], builtins.str] diff --git a/numpy/typing/tests/data/reveal/histograms.pyi b/numpy/typing/tests/data/reveal/histograms.pyi index d96e44f096fd..69ffd26a3361 100644 --- a/numpy/typing/tests/data/reveal/histograms.pyi +++ b/numpy/typing/tests/data/reveal/histograms.pyi @@ -8,12 +8,12 @@ reveal_type(np.histogram_bin_edges(AR_i8, bins="auto")) # E: ndarray[Any, dtype reveal_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3))) # E: ndarray[Any, dtype[Any]] reveal_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.histogram(AR_i8, bins="auto")) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] -reveal_type(np.histogram(AR_i8, bins="rice", range=(0, 3))) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] -reveal_type(np.histogram(AR_i8, bins="scott", weights=AR_f8)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] -reveal_type(np.histogram(AR_f8, bins=1, density=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] +reveal_type(np.histogram(AR_i8, bins="auto")) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] +reveal_type(np.histogram(AR_i8, bins="rice", range=(0, 3))) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] +reveal_type(np.histogram(AR_i8, bins="scott", weights=AR_f8)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] +reveal_type(np.histogram(AR_f8, bins=1, density=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] -reveal_type(np.histogramdd(AR_i8, bins=[1])) # E: Tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]] -reveal_type(np.histogramdd(AR_i8, range=[(0, 3)])) # E: Tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]] -reveal_type(np.histogramdd(AR_i8, weights=AR_f8)) # E: Tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]] -reveal_type(np.histogramdd(AR_f8, density=True)) # E: Tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]] +reveal_type(np.histogramdd(AR_i8, bins=[1])) # E: tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]] +reveal_type(np.histogramdd(AR_i8, 
range=[(0, 3)])) # E: tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]] +reveal_type(np.histogramdd(AR_i8, weights=AR_f8)) # E: tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]] +reveal_type(np.histogramdd(AR_f8, density=True)) # E: tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]] diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi index 707d6f3d42f9..7165189592ca 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -16,9 +16,9 @@ reveal_type(np.ndenumerate(AR_i8).iter) # E: flatiter[ndarray[Any, dtype[{int64 reveal_type(np.ndenumerate(AR_LIKE_f).iter) # E: flatiter[ndarray[Any, dtype[{double}]]] reveal_type(np.ndenumerate(AR_LIKE_U).iter) # E: flatiter[ndarray[Any, dtype[str_]]] -reveal_type(next(np.ndenumerate(AR_i8))) # E: Tuple[builtins.tuple[builtins.int, ...], {int64}] -reveal_type(next(np.ndenumerate(AR_LIKE_f))) # E: Tuple[builtins.tuple[builtins.int, ...], {double}] -reveal_type(next(np.ndenumerate(AR_LIKE_U))) # E: Tuple[builtins.tuple[builtins.int, ...], str_] +reveal_type(next(np.ndenumerate(AR_i8))) # E: tuple[builtins.tuple[builtins.int, ...], {int64}] +reveal_type(next(np.ndenumerate(AR_LIKE_f))) # E: tuple[builtins.tuple[builtins.int, ...], {double}] +reveal_type(next(np.ndenumerate(AR_LIKE_U))) # E: tuple[builtins.tuple[builtins.int, ...], str_] reveal_type(iter(np.ndenumerate(AR_i8))) # E: ndenumerate[{int64}] reveal_type(iter(np.ndenumerate(AR_LIKE_f))) # E: ndenumerate[{double}] @@ -46,13 +46,13 @@ reveal_type(np.mgrid[1:1:2, None:10]) # E: ndarray[Any, dtype[Any]] reveal_type(np.ogrid[1:1:2]) # E: list[ndarray[Any, dtype[Any]]] reveal_type(np.ogrid[1:1:2, None:10]) # E: list[ndarray[Any, dtype[Any]]] -reveal_type(np.index_exp[0:1]) # E: Tuple[builtins.slice] -reveal_type(np.index_exp[0:1, None:3]) # E: Tuple[builtins.slice, builtins.slice] -reveal_type(np.index_exp[0, 0:1, ..., [0, 1, 3]]) # E: Tuple[Literal[0]?, builtins.slice, builtins.ellipsis, builtins.list[builtins.int]] +reveal_type(np.index_exp[0:1]) # E: tuple[builtins.slice] +reveal_type(np.index_exp[0:1, None:3]) # E: tuple[builtins.slice, builtins.slice] +reveal_type(np.index_exp[0, 0:1, ..., [0, 1, 3]]) # E: tuple[Literal[0]?, builtins.slice, builtins.ellipsis, builtins.list[builtins.int]] reveal_type(np.s_[0:1]) # E: builtins.slice -reveal_type(np.s_[0:1, None:3]) # E: Tuple[builtins.slice, builtins.slice] -reveal_type(np.s_[0, 0:1, ..., [0, 1, 3]]) # E: Tuple[Literal[0]?, builtins.slice, builtins.ellipsis, builtins.list[builtins.int]] +reveal_type(np.s_[0:1, None:3]) # E: tuple[builtins.slice, builtins.slice] +reveal_type(np.s_[0, 0:1, ..., [0, 1, 3]]) # E: tuple[Literal[0]?, builtins.slice, builtins.ellipsis, builtins.list[builtins.int]] reveal_type(np.ix_(AR_LIKE_b)) # E: tuple[ndarray[Any, dtype[bool_]], ...] reveal_type(np.ix_(AR_LIKE_i, AR_LIKE_f)) # E: tuple[ndarray[Any, dtype[{double}]], ...] 
diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index a8b9b01ac934..bf285819e364 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -50,11 +50,11 @@ reveal_type(np.iterable([1])) # E: bool reveal_type(np.average(AR_f8)) # E: floating[Any] reveal_type(np.average(AR_f8, weights=AR_c16)) # E: complexfloating[Any, Any] reveal_type(np.average(AR_O)) # E: Any -reveal_type(np.average(AR_f8, returned=True)) # E: Tuple[floating[Any], floating[Any]] -reveal_type(np.average(AR_f8, weights=AR_c16, returned=True)) # E: Tuple[complexfloating[Any, Any], complexfloating[Any, Any]] -reveal_type(np.average(AR_O, returned=True)) # E: Tuple[Any, Any] +reveal_type(np.average(AR_f8, returned=True)) # E: tuple[floating[Any], floating[Any]] +reveal_type(np.average(AR_f8, weights=AR_c16, returned=True)) # E: tuple[complexfloating[Any, Any], complexfloating[Any, Any]] +reveal_type(np.average(AR_O, returned=True)) # E: tuple[Any, Any] reveal_type(np.average(AR_f8, axis=0)) # E: Any -reveal_type(np.average(AR_f8, axis=0, returned=True)) # E: Tuple[Any, Any] +reveal_type(np.average(AR_f8, axis=0, returned=True)) # E: tuple[Any, Any] reveal_type(np.asarray_chkfinite(AR_f8)) # E: ndarray[Any, dtype[{float64}]] reveal_type(np.asarray_chkfinite(AR_LIKE_f8)) # E: ndarray[Any, dtype[Any]] diff --git a/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/numpy/typing/tests/data/reveal/lib_polynomial.pyi index de8950724eb9..4d70c267865f 100644 --- a/numpy/typing/tests/data/reveal/lib_polynomial.pyi +++ b/numpy/typing/tests/data/reveal/lib_polynomial.pyi @@ -61,11 +61,11 @@ reveal_type(np.polyder(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, An reveal_type(np.polyder(AR_O, m=2)) # E: ndarray[Any, dtype[object_]] reveal_type(np.polyfit(AR_f8, AR_f8, 2)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.polyfit(AR_f8, AR_i8, 1, full=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[signedinteger[typing._32Bit]]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]] -reveal_type(np.polyfit(AR_u4, AR_f8, 1.0, cov="unscaled")) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]] +reveal_type(np.polyfit(AR_f8, AR_i8, 1, full=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[signedinteger[typing._32Bit]]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]] +reveal_type(np.polyfit(AR_u4, AR_f8, 1.0, cov="unscaled")) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]] reveal_type(np.polyfit(AR_c16, AR_f8, 2)) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(np.polyfit(AR_f8, AR_c16, 1, full=True)) # E: Tuple[ndarray[Any, dtype[{complex128}]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[signedinteger[typing._32Bit]]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]] -reveal_type(np.polyfit(AR_u4, AR_c16, 1.0, cov=True)) # E: Tuple[ndarray[Any, dtype[{complex128}]], ndarray[Any, dtype[{complex128}]]] +reveal_type(np.polyfit(AR_f8, AR_c16, 1, full=True)) # E: tuple[ndarray[Any, dtype[{complex128}]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[signedinteger[typing._32Bit]]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]] +reveal_type(np.polyfit(AR_u4, AR_c16, 1.0, cov=True)) # E: tuple[ndarray[Any, dtype[{complex128}]], ndarray[Any, dtype[{complex128}]]] 
reveal_type(np.polyval(AR_b, AR_b)) # E: ndarray[Any, dtype[{int64}]] reveal_type(np.polyval(AR_u4, AR_b)) # E: ndarray[Any, dtype[unsignedinteger[Any]]] @@ -103,9 +103,9 @@ reveal_type(np.polymul(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]] reveal_type(np.polydiv(poly_obj, AR_i8)) # E: poly1d reveal_type(np.polydiv(AR_f8, poly_obj)) # E: poly1d -reveal_type(np.polydiv(AR_b, AR_b)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.polydiv(AR_u4, AR_b)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.polydiv(AR_i8, AR_i8)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.polydiv(AR_f8, AR_i8)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.polydiv(AR_i8, AR_c16)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]] -reveal_type(np.polydiv(AR_O, AR_O)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] +reveal_type(np.polydiv(AR_b, AR_b)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] +reveal_type(np.polydiv(AR_u4, AR_b)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] +reveal_type(np.polydiv(AR_i8, AR_i8)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] +reveal_type(np.polydiv(AR_f8, AR_i8)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] +reveal_type(np.polydiv(AR_i8, AR_c16)) # E: tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]] +reveal_type(np.polydiv(AR_O, AR_O)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] diff --git a/numpy/typing/tests/data/reveal/lib_utils.pyi b/numpy/typing/tests/data/reveal/lib_utils.pyi index 9b1bf4123da7..3214043ee438 100644 --- a/numpy/typing/tests/data/reveal/lib_utils.pyi +++ b/numpy/typing/tests/data/reveal/lib_utils.pyi @@ -15,8 +15,8 @@ reveal_type(np.deprecate()) # E: _Deprecate reveal_type(np.deprecate_with_doc("test")) # E: _Deprecate reveal_type(np.deprecate_with_doc(None)) # E: _Deprecate -reveal_type(np.byte_bounds(AR)) # E: Tuple[builtins.int, builtins.int] -reveal_type(np.byte_bounds(np.float64())) # E: Tuple[builtins.int, builtins.int] +reveal_type(np.byte_bounds(AR)) # E: tuple[builtins.int, builtins.int] +reveal_type(np.byte_bounds(np.float64())) # E: tuple[builtins.int, builtins.int] reveal_type(np.who(None)) # E: None reveal_type(np.who(AR_DICT)) # E: None diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 130351864317..e264d176eb01 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -80,9 +80,9 @@ reveal_type(np.linalg.det(AR_i8)) # E: Any reveal_type(np.linalg.det(AR_f8)) # E: Any reveal_type(np.linalg.det(AR_c16)) # E: Any -reveal_type(np.linalg.lstsq(AR_i8, AR_i8)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]], {int32}, ndarray[Any, dtype[{float64}]]] -reveal_type(np.linalg.lstsq(AR_i8, AR_f8)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]], {int32}, ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.linalg.lstsq(AR_f8, AR_c16)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[floating[Any]]], {int32}, ndarray[Any, dtype[floating[Any]]]] 
+reveal_type(np.linalg.lstsq(AR_i8, AR_i8)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]], {int32}, ndarray[Any, dtype[{float64}]]] +reveal_type(np.linalg.lstsq(AR_i8, AR_f8)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]], {int32}, ndarray[Any, dtype[floating[Any]]]] +reveal_type(np.linalg.lstsq(AR_f8, AR_c16)) # E: tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[floating[Any]]], {int32}, ndarray[Any, dtype[floating[Any]]]] reveal_type(np.linalg.norm(AR_i8)) # E: floating[Any] reveal_type(np.linalg.norm(AR_f8)) # E: floating[Any] diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index d22f2df12af1..55cb6546f8e0 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -25,9 +25,9 @@ reveal_type(td % td) # E: timedelta64 reveal_type(AR_m % td) # E: Any reveal_type(td % AR_m) # E: Any -reveal_type(divmod(td, td)) # E: Tuple[{int64}, timedelta64] -reveal_type(divmod(AR_m, td)) # E: Tuple[ndarray[Any, dtype[signedinteger[typing._64Bit]]], ndarray[Any, dtype[timedelta64]]] -reveal_type(divmod(td, AR_m)) # E: Tuple[ndarray[Any, dtype[signedinteger[typing._64Bit]]], ndarray[Any, dtype[timedelta64]]] +reveal_type(divmod(td, td)) # E: tuple[{int64}, timedelta64] +reveal_type(divmod(AR_m, td)) # E: tuple[ndarray[Any, dtype[signedinteger[typing._64Bit]]], ndarray[Any, dtype[timedelta64]]] +reveal_type(divmod(td, AR_m)) # E: tuple[ndarray[Any, dtype[signedinteger[typing._64Bit]]], ndarray[Any, dtype[timedelta64]]] # Bool @@ -40,13 +40,13 @@ reveal_type(b_ % u8) # E: {uint64} reveal_type(b_ % f8) # E: {float64} reveal_type(b_ % AR_b) # E: ndarray[Any, dtype[{int8}]] -reveal_type(divmod(b_, b)) # E: Tuple[{int8}, {int8}] -reveal_type(divmod(b_, i)) # E: Tuple[{int_}, {int_}] -reveal_type(divmod(b_, f)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(b_, b_)) # E: Tuple[{int8}, {int8}] -reveal_type(divmod(b_, i8)) # E: Tuple[{int64}, {int64}] -reveal_type(divmod(b_, u8)) # E: Tuple[{uint64}, {uint64}] -reveal_type(divmod(b_, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(b_, b)) # E: tuple[{int8}, {int8}] +reveal_type(divmod(b_, i)) # E: tuple[{int_}, {int_}] +reveal_type(divmod(b_, f)) # E: tuple[{float64}, {float64}] +reveal_type(divmod(b_, b_)) # E: tuple[{int8}, {int8}] +reveal_type(divmod(b_, i8)) # E: tuple[{int64}, {int64}] +reveal_type(divmod(b_, u8)) # E: tuple[{uint64}, {uint64}] +reveal_type(divmod(b_, f8)) # E: tuple[{float64}, {float64}] reveal_type(divmod(b_, AR_b)) # E: ndarray[Any, dtype[{int8}]], ndarray[Any, dtype[{int8}]]] reveal_type(b % b_) # E: {int8} @@ -58,13 +58,13 @@ reveal_type(u8 % b_) # E: {uint64} reveal_type(f8 % b_) # E: {float64} reveal_type(AR_b % b_) # E: ndarray[Any, dtype[{int8}]] -reveal_type(divmod(b, b_)) # E: Tuple[{int8}, {int8}] -reveal_type(divmod(i, b_)) # E: Tuple[{int_}, {int_}] -reveal_type(divmod(f, b_)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(b_, b_)) # E: Tuple[{int8}, {int8}] -reveal_type(divmod(i8, b_)) # E: Tuple[{int64}, {int64}] -reveal_type(divmod(u8, b_)) # E: Tuple[{uint64}, {uint64}] -reveal_type(divmod(f8, b_)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(b, b_)) # E: tuple[{int8}, {int8}] +reveal_type(divmod(i, b_)) # E: tuple[{int_}, {int_}] +reveal_type(divmod(f, b_)) # E: tuple[{float64}, {float64}] +reveal_type(divmod(b_, b_)) # E: tuple[{int8}, {int8}] +reveal_type(divmod(i8, b_)) # E: tuple[{int64}, {int64}] 
+reveal_type(divmod(u8, b_)) # E: tuple[{uint64}, {uint64}] +reveal_type(divmod(f8, b_)) # E: tuple[{float64}, {float64}] reveal_type(divmod(AR_b, b_)) # E: ndarray[Any, dtype[{int8}]], ndarray[Any, dtype[{int8}]]] # int @@ -79,15 +79,15 @@ reveal_type(i4 % i4) # E: {int32} reveal_type(i4 % f4) # E: {float32} reveal_type(i8 % AR_b) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(divmod(i8, b)) # E: Tuple[{int64}, {int64}] -reveal_type(divmod(i8, f)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(i8, i8)) # E: Tuple[{int64}, {int64}] -reveal_type(divmod(i8, f8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(i8, i4)) # E: Tuple[signedinteger[Union[_64Bit, _32Bit]], signedinteger[Union[_64Bit, _32Bit]]] -reveal_type(divmod(i8, f4)) # E: Tuple[floating[Union[_32Bit, _64Bit]], floating[Union[_32Bit, _64Bit]]] -reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}] -reveal_type(divmod(i4, f4)) # E: Tuple[{float32}, {float32}] -reveal_type(divmod(i8, AR_b)) # E: Tuple[ndarray[Any, dtype[signedinteger[Any]]], ndarray[Any, dtype[signedinteger[Any]]]] +reveal_type(divmod(i8, b)) # E: tuple[{int64}, {int64}] +reveal_type(divmod(i8, f)) # E: tuple[{float64}, {float64}] +reveal_type(divmod(i8, i8)) # E: tuple[{int64}, {int64}] +reveal_type(divmod(i8, f8)) # E: tuple[{float64}, {float64}] +reveal_type(divmod(i8, i4)) # E: tuple[signedinteger[Union[_64Bit, _32Bit]], signedinteger[Union[_64Bit, _32Bit]]] +reveal_type(divmod(i8, f4)) # E: tuple[floating[Union[_32Bit, _64Bit]], floating[Union[_32Bit, _64Bit]]] +reveal_type(divmod(i4, i4)) # E: tuple[{int32}, {int32}] +reveal_type(divmod(i4, f4)) # E: tuple[{float32}, {float32}] +reveal_type(divmod(i8, AR_b)) # E: tuple[ndarray[Any, dtype[signedinteger[Any]]], ndarray[Any, dtype[signedinteger[Any]]]] reveal_type(b % i8) # E: {int64} reveal_type(f % i8) # E: {float64} @@ -99,15 +99,15 @@ reveal_type(i4 % i4) # E: {int32} reveal_type(f4 % i4) # E: {float32} reveal_type(AR_b % i8) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(divmod(b, i8)) # E: Tuple[{int64}, {int64}] -reveal_type(divmod(f, i8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(i8, i8)) # E: Tuple[{int64}, {int64}] -reveal_type(divmod(f8, i8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(i4, i8)) # E: Tuple[signedinteger[Union[_32Bit, _64Bit]], signedinteger[Union[_32Bit, _64Bit]]] -reveal_type(divmod(f4, i8)) # E: Tuple[floating[Union[_32Bit, _64Bit]], floating[Union[_32Bit, _64Bit]]] -reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}] -reveal_type(divmod(f4, i4)) # E: Tuple[{float32}, {float32}] -reveal_type(divmod(AR_b, i8)) # E: Tuple[ndarray[Any, dtype[signedinteger[Any]]], ndarray[Any, dtype[signedinteger[Any]]]] +reveal_type(divmod(b, i8)) # E: tuple[{int64}, {int64}] +reveal_type(divmod(f, i8)) # E: tuple[{float64}, {float64}] +reveal_type(divmod(i8, i8)) # E: tuple[{int64}, {int64}] +reveal_type(divmod(f8, i8)) # E: tuple[{float64}, {float64}] +reveal_type(divmod(i4, i8)) # E: tuple[signedinteger[Union[_32Bit, _64Bit]], signedinteger[Union[_32Bit, _64Bit]]] +reveal_type(divmod(f4, i8)) # E: tuple[floating[Union[_32Bit, _64Bit]], floating[Union[_32Bit, _64Bit]]] +reveal_type(divmod(i4, i4)) # E: tuple[{int32}, {int32}] +reveal_type(divmod(f4, i4)) # E: tuple[{float32}, {float32}] +reveal_type(divmod(AR_b, i8)) # E: tuple[ndarray[Any, dtype[signedinteger[Any]]], ndarray[Any, dtype[signedinteger[Any]]]] # float @@ -117,12 +117,12 @@ reveal_type(i8 % f4) # E: floating[Union[_32Bit, _64Bit]] reveal_type(f4 % f4) # E: {float32} 
reveal_type(f8 % AR_b) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(divmod(f8, b)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(f8, f)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(f8, f4)) # E: Tuple[floating[Union[_64Bit, _32Bit]], floating[Union[_64Bit, _32Bit]]] -reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}] -reveal_type(divmod(f8, AR_b)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] +reveal_type(divmod(f8, b)) # E: tuple[{float64}, {float64}] +reveal_type(divmod(f8, f)) # E: tuple[{float64}, {float64}] +reveal_type(divmod(f8, f8)) # E: tuple[{float64}, {float64}] +reveal_type(divmod(f8, f4)) # E: tuple[floating[Union[_64Bit, _32Bit]], floating[Union[_64Bit, _32Bit]]] +reveal_type(divmod(f4, f4)) # E: tuple[{float32}, {float32}] +reveal_type(divmod(f8, AR_b)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] reveal_type(b % f8) # E: {float64} reveal_type(f % f8) # E: {float64} @@ -131,9 +131,9 @@ reveal_type(f8 % f8) # E: {float64} reveal_type(f4 % f4) # E: {float32} reveal_type(AR_b % f8) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(divmod(b, f8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(f, f8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}] -reveal_type(divmod(f4, f8)) # E: Tuple[floating[Union[_32Bit, _64Bit]], floating[Union[_32Bit, _64Bit]]] -reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}] -reveal_type(divmod(AR_b, f8)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] +reveal_type(divmod(b, f8)) # E: tuple[{float64}, {float64}] +reveal_type(divmod(f, f8)) # E: tuple[{float64}, {float64}] +reveal_type(divmod(f8, f8)) # E: tuple[{float64}, {float64}] +reveal_type(divmod(f4, f8)) # E: tuple[floating[Union[_32Bit, _64Bit]], floating[Union[_32Bit, _64Bit]]] +reveal_type(divmod(f4, f4)) # E: tuple[{float32}, {float32}] +reveal_type(divmod(AR_b, f8)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 27a54f50d6e7..d8e0a956baeb 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -107,9 +107,9 @@ reveal_type(np.promote_types("f4", float)) # E: dtype[Any] reveal_type(np.frompyfunc(func, 1, 1, identity=None)) # ufunc -reveal_type(np.datetime_data("m8[D]")) # E: Tuple[builtins.str, builtins.int] -reveal_type(np.datetime_data(np.datetime64)) # E: Tuple[builtins.str, builtins.int] -reveal_type(np.datetime_data(np.dtype(np.timedelta64))) # E: Tuple[builtins.str, builtins.int] +reveal_type(np.datetime_data("m8[D]")) # E: tuple[builtins.str, builtins.int] +reveal_type(np.datetime_data(np.datetime64)) # E: tuple[builtins.str, builtins.int] +reveal_type(np.datetime_data(np.dtype(np.timedelta64))) # E: tuple[builtins.str, builtins.int] reveal_type(np.busday_count("2011-01", "2011-02")) # E: {int_} reveal_type(np.busday_count(["2011-01"], "2011-02")) # E: ndarray[Any, dtype[{int_}]] diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 03fea72dc2bd..4da87b662179 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -25,7 +25,7 @@ AR_V: NDArray[np.void] ctypes_obj = AR_f8.ctypes 
reveal_type(AR_f8.__dlpack__()) # E: Any -reveal_type(AR_f8.__dlpack_device__()) # E: Tuple[int, Literal[0]] +reveal_type(AR_f8.__dlpack_device__()) # E: tuple[int, Literal[0]] reveal_type(ctypes_obj.data) # E: int reveal_type(ctypes_obj.shape) # E: ctypes.Array[{c_intp}] diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index d4399e2b121a..95ec76e8ec7c 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -29,7 +29,7 @@ reveal_type(np.nbytes[int]) # E: int reveal_type(np.nbytes["i8"]) # E: int reveal_type(np.nbytes[np.int64]) # E: int -reveal_type(np.ScalarType) # E: Tuple +reveal_type(np.ScalarType) # E: tuple reveal_type(np.ScalarType[0]) # E: Type[builtins.int] reveal_type(np.ScalarType[3]) # E: Type[builtins.bool] reveal_type(np.ScalarType[8]) # E: Type[{csingle}] diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index 67a5d3e7aad8..ac287feb2c3c 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -1521,7 +1521,7 @@ reveal_type(random_st.seed([0, 1])) # E: None random_st_get_state = random_st.get_state() reveal_type(random_st_state) # E: builtins.dict[builtins.str, Any] random_st_get_state_legacy = random_st.get_state(legacy=True) -reveal_type(random_st_get_state_legacy) # E: Union[builtins.dict[builtins.str, Any], Tuple[builtins.str, ndarray[Any, dtype[unsignedinteger[typing._32Bit]]], builtins.int, builtins.int, builtins.float]] +reveal_type(random_st_get_state_legacy) # E: Union[builtins.dict[builtins.str, Any], tuple[builtins.str, ndarray[Any, dtype[unsignedinteger[typing._32Bit]]], builtins.int, builtins.int, builtins.float]] reveal_type(random_st.set_state(random_st_get_state)) # E: None reveal_type(random_st.rand()) # E: float diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index 965aa5ace449..88404d9d0215 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -18,8 +18,8 @@ reveal_type(c8.real.real) # E: {float32} reveal_type(c8.real.imag) # E: {float32} reveal_type(c8.itemsize) # E: int -reveal_type(c8.shape) # E: Tuple[] -reveal_type(c8.strides) # E: Tuple[] +reveal_type(c8.shape) # E: tuple[] +reveal_type(c8.strides) # E: tuple[] reveal_type(c8.ndim) # E: Literal[0] reveal_type(c8.size) # E: Literal[1] @@ -126,15 +126,15 @@ reveal_type(i8.getfield(float)) # E: Any reveal_type(i8.getfield(np.float64)) # E: {float64} reveal_type(i8.getfield(np.float64, 8)) # E: {float64} -reveal_type(f8.as_integer_ratio()) # E: Tuple[builtins.int, builtins.int] +reveal_type(f8.as_integer_ratio()) # E: tuple[builtins.int, builtins.int] reveal_type(f8.is_integer()) # E: bool reveal_type(f8.__trunc__()) # E: int reveal_type(f8.__getformat__("float")) # E: str reveal_type(f8.hex()) # E: str reveal_type(np.float64.fromhex("0x0.0p+0")) # E: {float64} -reveal_type(f8.__getnewargs__()) # E: Tuple[builtins.float] -reveal_type(c16.__getnewargs__()) # E: Tuple[builtins.float, builtins.float] +reveal_type(f8.__getnewargs__()) # E: tuple[builtins.float] +reveal_type(c16.__getnewargs__()) # E: tuple[builtins.float, builtins.float] reveal_type(i8.numerator) # E: {int64} reveal_type(i8.denominator) # E: Literal[1] diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 0dc58d43786c..659f00dfa208 100644 --- 
a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -56,17 +56,17 @@ reveal_type(np.vander(AR_f, increasing=True)) # E: ndarray[Any, dtype[floating[ reveal_type(np.vander(AR_c)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] reveal_type(np.vander(AR_O)) # E: ndarray[Any, dtype[object_]] -reveal_type(np.histogram2d(AR_i, AR_b)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.histogram2d(AR_f, AR_f)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]] +reveal_type(np.histogram2d(AR_i, AR_b)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] +reveal_type(np.histogram2d(AR_f, AR_f)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] +reveal_type(np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]] -reveal_type(np.mask_indices(10, func1)) # E: Tuple[ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.mask_indices(8, func2, "0")) # E: Tuple[ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.mask_indices(10, func1)) # E: tuple[ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] +reveal_type(np.mask_indices(8, func2, "0")) # E: tuple[ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.tril_indices(10)) # E: Tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]] +reveal_type(np.tril_indices(10)) # E: tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]] -reveal_type(np.tril_indices_from(AR_b)) # E: Tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]] +reveal_type(np.tril_indices_from(AR_b)) # E: tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]] -reveal_type(np.triu_indices(10)) # E: Tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]] +reveal_type(np.triu_indices(10)) # E: tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]] -reveal_type(np.triu_indices_from(AR_b)) # E: Tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]] +reveal_type(np.triu_indices_from(AR_b)) # E: tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]] diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index 3bf83c8207bf..d4d522988b4e 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -43,8 +43,8 @@ reveal_type(np.frexp.nin) # E: Literal[1] reveal_type(np.frexp.nout) # E: Literal[2] reveal_type(np.frexp.nargs) # E: Literal[3] reveal_type(np.frexp.signature) # E: None -reveal_type(np.frexp(f8)) # E: Tuple[Any, Any] -reveal_type(np.frexp(AR_f8)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] +reveal_type(np.frexp(f8)) # E: tuple[Any, Any] +reveal_type(np.frexp(AR_f8)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] reveal_type(np.divmod.__name__) # E: Literal['divmod'] reveal_type(np.divmod.ntypes) # E: Literal[15] @@ -53,8 +53,8 @@ reveal_type(np.divmod.nin) # E: Literal[2] 
reveal_type(np.divmod.nout) # E: Literal[2] reveal_type(np.divmod.nargs) # E: Literal[4] reveal_type(np.divmod.signature) # E: None -reveal_type(np.divmod(f8, f8)) # E: Tuple[Any, Any] -reveal_type(np.divmod(AR_f8, f8)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] +reveal_type(np.divmod(f8, f8)) # E: tuple[Any, Any] +reveal_type(np.divmod(AR_f8, f8)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] reveal_type(np.matmul.__name__) # E: Literal['matmul'] reveal_type(np.matmul.ntypes) # E: Literal[19] From 929039f7b9bf051d922bbc7ea0377d2990676065 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 24 Aug 2023 05:48:41 +0200 Subject: [PATCH 055/120] TYP: guard `typing_extensions` usage for =4.2.0 # needed for python < 3.10 - mypy=1.4.1 # For building docs - sphinx>=4.5.0 diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi index 98d3789a579e..eccfb237bbe9 100644 --- a/numpy/core/numeric.pyi +++ b/numpy/core/numeric.pyi @@ -8,7 +8,10 @@ from typing import ( SupportsIndex, NoReturn, ) -from typing_extensions import TypeGuard +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard from numpy import ( ComplexWarning as ComplexWarning, diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 6e051e914177..6baefd83bd0a 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -20,7 +20,10 @@ from typing import ( Final, SupportsIndex, ) -from typing_extensions import ParamSpec +if sys.version_info >= (3, 10): + from typing import ParamSpec +else: + from typing_extensions import ParamSpec from numpy import generic, dtype, number, object_, bool_, _FloatValue from numpy._typing import ( From 16ad79a4c8b59e91f3bb326f8cbeb099a36684e5 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Mon, 31 Jul 2023 06:12:00 +0000 Subject: [PATCH 056/120] BUG: Fix f2py F77 assumed length issues Closes gh-24008. Essentially, array and length setup needs to be done before the length defaults kick in. 
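For illustration, a minimal Python sketch (not part of the patch) of what the relocated block in `updatevars` does to the intermediate declaration dict; the sample input is hypothetical, loosely modelled on an assumed-length `CHARACTER NAME*(*)` declaration:

    d1 = {'len': '', 'array': '*'}   # hypothetical parse result for CHARACTER NAME*(*)
    if 'len' in d1 and 'array' in d1:
        if d1['len'] == '':
            d1['len'] = d1['array']  # empty length: the '*' is really the length spec
            del d1['array']
        else:
            d1['array'] = d1['array'] + ',' + d1['len']
            del d1['len']
    print(d1)                        # {'len': '*'}

Performing this merge before the default-length handling is what keeps the assumed length from being clobbered by the defaults.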
--- numpy/f2py/crackfortran.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 002a2edb1275..b67918802c18 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -1742,6 +1742,16 @@ def updatevars(typespec, selector, attrspec, entitydecl): else: del d1[k] + if 'len' in d1 and 'array' in d1: + if d1['len'] == '': + d1['len'] = d1['array'] + del d1['array'] + else: + d1['array'] = d1['array'] + ',' + d1['len'] + del d1['len'] + errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( + typespec, e, typespec, ename, d1['array'])) + if 'len' in d1: if typespec in ['complex', 'integer', 'logical', 'real']: if ('kindselector' not in edecl) or (not edecl['kindselector']): @@ -1763,16 +1773,6 @@ def updatevars(typespec, selector, attrspec, entitydecl): else: edecl['='] = d1['init'] - if 'len' in d1 and 'array' in d1: - if d1['len'] == '': - d1['len'] = d1['array'] - del d1['array'] - else: - d1['array'] = d1['array'] + ',' + d1['len'] - del d1['len'] - errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( - typespec, e, typespec, ename, d1['array'])) - if 'array' in d1: dm = 'dimension(%s)' % d1['array'] if 'attrspec' not in edecl or (not edecl['attrspec']): From 2903e1ce43ce074d1209a562fc05cc4e9bbea1b1 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Mon, 31 Jul 2023 06:31:55 +0000 Subject: [PATCH 057/120] TST: Add a test for assumed length F77 --- numpy/f2py/tests/src/string/gh24008.f | 8 ++++++++ numpy/f2py/tests/test_string.py | 6 ++++++ 2 files changed, 14 insertions(+) create mode 100644 numpy/f2py/tests/src/string/gh24008.f diff --git a/numpy/f2py/tests/src/string/gh24008.f b/numpy/f2py/tests/src/string/gh24008.f new file mode 100644 index 000000000000..ab64cf771f68 --- /dev/null +++ b/numpy/f2py/tests/src/string/gh24008.f @@ -0,0 +1,8 @@ + SUBROUTINE GREET(NAME, GREETING) + CHARACTER NAME*(*), GREETING*(*) + CHARACTER*(50) MESSAGE + + MESSAGE = 'Hello, ' // NAME // ', ' // GREETING +c$$$ PRINT *, MESSAGE + + END SUBROUTINE GREET diff --git a/numpy/f2py/tests/test_string.py b/numpy/f2py/tests/test_string.py index 9e937188c930..c097d1ae6112 100644 --- a/numpy/f2py/tests/test_string.py +++ b/numpy/f2py/tests/test_string.py @@ -19,6 +19,12 @@ def test_char(self): assert out == pytest.approx(expected) +class TestStringAssumedLength(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "gh24008.f")] + + def test_gh24008(self): + self.module.greet("joe", "bob") + class TestDocStringArguments(util.F2PyTest): sources = [util.getpath("tests", "src", "string", "string.f")] From 8726cd753c84e477b9701e5a8ffb570a6f668798 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 5 Aug 2023 20:35:59 +0000 Subject: [PATCH 058/120] MAINT: move gh24008 to test_character Since it belongs logically to the tests where character array shapes and types are determined --- numpy/f2py/tests/test_character.py | 7 +++++++ numpy/f2py/tests/test_string.py | 6 ------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/numpy/f2py/tests/test_character.py b/numpy/f2py/tests/test_character.py index 0bb0f4290272..1c5f8c2a905f 100644 --- a/numpy/f2py/tests/test_character.py +++ b/numpy/f2py/tests/test_character.py @@ -591,3 +591,10 @@ def test_char_arr(self): assert out.shape == expected expected = '|S12' assert out.dtype == expected + +class TestStringAssumedLength(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "gh24008.f")] + + @pytest.mark.slow + 
def test_gh24008(self): + self.module.greet("joe", "bob") diff --git a/numpy/f2py/tests/test_string.py b/numpy/f2py/tests/test_string.py index c097d1ae6112..9e937188c930 100644 --- a/numpy/f2py/tests/test_string.py +++ b/numpy/f2py/tests/test_string.py @@ -19,12 +19,6 @@ def test_char(self): assert out == pytest.approx(expected) -class TestStringAssumedLength(util.F2PyTest): - sources = [util.getpath("tests", "src", "string", "gh24008.f")] - - def test_gh24008(self): - self.module.greet("joe", "bob") - class TestDocStringArguments(util.F2PyTest): sources = [util.getpath("tests", "src", "string", "string.f")] From d29991da8a040d4f52adcd99ebfce750fd9961eb Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 5 Aug 2023 20:36:35 +0000 Subject: [PATCH 059/120] BUG: Rework character array assignments (gh-24008) --- numpy/f2py/crackfortran.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index b67918802c18..1b8672410e4d 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -1746,6 +1746,13 @@ def updatevars(typespec, selector, attrspec, entitydecl): if d1['len'] == '': d1['len'] = d1['array'] del d1['array'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + del d1['len'] else: d1['array'] = d1['array'] + ',' + d1['len'] del d1['len'] From 5b63b895b901b0f3e4a031376fee4df7dbda1500 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Thu, 24 Aug 2023 09:37:22 +0000 Subject: [PATCH 060/120] TST: Stop marking small f2py tests as slow --- numpy/f2py/tests/test_character.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/numpy/f2py/tests/test_character.py b/numpy/f2py/tests/test_character.py index 1c5f8c2a905f..373262bf96a6 100644 --- a/numpy/f2py/tests/test_character.py +++ b/numpy/f2py/tests/test_character.py @@ -574,7 +574,6 @@ def test_character_bc(self, state): class TestStringScalarArr(util.F2PyTest): sources = [util.getpath("tests", "src", "string", "scalar_string.f90")] - @pytest.mark.slow def test_char(self): for out in (self.module.string_test.string, self.module.string_test.string77): @@ -583,7 +582,6 @@ def test_char(self): expected = '|S8' assert out.dtype == expected - @pytest.mark.slow def test_char_arr(self): for out in (self.module.string_test.strarr, self.module.string_test.strarr77): @@ -595,6 +593,5 @@ def test_char_arr(self): class TestStringAssumedLength(util.F2PyTest): sources = [util.getpath("tests", "src", "string", "gh24008.f")] - @pytest.mark.slow def test_gh24008(self): self.module.greet("joe", "bob") From a49ae3474d1a33cccf12a59ef29c028bf703839f Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Fri, 25 Aug 2023 11:11:46 +0000 Subject: [PATCH 061/120] MAINT: Harmonize fortranobject (#24517) As noted in the discussion around C99 here: https://github.com/numpy/numpy/issues/22572#issuecomment-1691301573 [skip ci] --- numpy/f2py/src/fortranobject.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index add6e8b6ef29..072392bb6651 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -726,7 +726,8 @@ static int check_and_fix_dimensions(const PyArrayObject* arr, static int find_first_negative_dimension(const int rank, const npy_intp *dims) { - for (int i = 0; i < rank; ++i) { + int i; + 
for (i = 0; i < rank; ++i) { if (dims[i] < 0) { return i; } From 603311d45b23b0c99a3dd431e412f10f53efb748 Mon Sep 17 00:00:00 2001 From: Jake Vanderplas Date: Fri, 25 Aug 2023 04:13:26 -0700 Subject: [PATCH 062/120] TYP: add kind argument to numpy.isin type specification (#24527) Fixes #24491 [skip ci] --- numpy/lib/arraysetops.pyi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/lib/arraysetops.pyi b/numpy/lib/arraysetops.pyi index aa1310a3210c..7075c334ea7d 100644 --- a/numpy/lib/arraysetops.pyi +++ b/numpy/lib/arraysetops.pyi @@ -333,6 +333,8 @@ def isin( test_elements: ArrayLike, assume_unique: bool = ..., invert: bool = ..., + *, + kind: None | str = ..., ) -> NDArray[bool_]: ... @overload From 73f90838548e445fa404e2505f310bd167f9e065 Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Sat, 26 Aug 2023 16:59:45 +0200 Subject: [PATCH 063/120] BUG: fix comparisons between masked and unmasked structured arrays --- numpy/ma/core.py | 3 +++ numpy/ma/tests/test_core.py | 23 ++++++++++++++++++++--- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 2fe326885295..907b240349d3 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -4127,6 +4127,9 @@ def _comparison(self, other, compare): # Now take care of the mask; the merged mask should have an item # masked if all fields were masked (in one and/or other). mask = (mask == np.ones((), mask.dtype)) + # Ensure we can compare masks below if other was not masked. + if omask is np.False_: + omask = np.zeros((), smask.dtype) else: # For regular arrays, just use the data as they come. diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 6ab1d7e4f1a6..e07bdc90663f 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1310,8 +1310,8 @@ def test_minmax_dtypes(self): m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] xm = masked_array(x, mask=m1) xm.set_fill_value(1e+20) - float_dtypes = [np.half, np.single, np.double, - np.longdouble, np.cfloat, np.cdouble, np.clongdouble] + float_dtypes = [np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble] for float_dtype in float_dtypes: assert_equal(masked_array(x, mask=m1, dtype=float_dtype).max(), float_dtype(a10)) @@ -1614,6 +1614,23 @@ def test_ne_on_structured(self): assert_equal(test.mask, [[False, False], [False, True]]) assert_(test.fill_value == True) + def test_eq_ne_structured_with_non_masked(self): + a = array([(1, 1), (2, 2), (3, 4)], + mask=[(0, 1), (0, 0), (1, 1)], dtype='i4,i4') + eq = a == a.data + ne = a.data != a + # Test the obvious. + assert_(np.all(eq)) + assert_(not np.any(ne)) + # Expect the mask set only for items with all fields masked. + expected_mask = a.mask == np.ones((), a.mask.dtype) + assert_array_equal(eq.mask, expected_mask) + assert_array_equal(ne.mask, expected_mask) + # The masked element will indicated not equal, because the + # masks did not match. + assert_equal(eq.data, [True, True, False]) + assert_array_equal(eq.data, ~ne.data) + def test_eq_ne_structured_extra(self): # ensure simple examples are symmetric and make sense. 
# from https://github.com/numpy/numpy/pull/8590#discussion_r101126465 @@ -3444,7 +3461,7 @@ def test_ravel_order(self, order, data_order): raveled = x.ravel(order) assert (raveled.filled(0) == 0).all() - # NOTE: Can be wrong if arr order is neither C nor F and `order="K"` + # NOTE: Can be wrong if arr order is neither C nor F and `order="K"` assert_array_equal(arr.ravel(order), x.ravel(order)._data) def test_reshape(self): From 806c8293ac84c15b12fd95661c2057a8248e8c01 Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Sat, 26 Aug 2023 17:02:18 +0200 Subject: [PATCH 064/120] BUG: ensure mask on comparison results always has properly broadcast shape --- numpy/ma/core.py | 9 +++++---- numpy/ma/tests/test_core.py | 8 ++++++++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 907b240349d3..ebdf41682436 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -4146,10 +4146,11 @@ def _comparison(self, other, compare): # Note that this works automatically for structured arrays too. # Ignore this for operations other than `==` and `!=` check = np.where(mask, compare(smask, omask), check) - if mask.shape != check.shape: - # Guarantee consistency of the shape, making a copy since the - # the mask may need to get written to later. - mask = np.broadcast_to(mask, check.shape).copy() + + if mask.shape != check.shape: + # Guarantee consistency of the shape, making a copy since the + # the mask may need to get written to later. + mask = np.broadcast_to(mask, check.shape).copy() check = check.view(type(self)) check._update_from(self) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index e07bdc90663f..cc17cc2ad782 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1762,6 +1762,14 @@ def test_eq_for_numeric(self, dt1, dt2, fill): assert_equal(test.mask, [True, False]) assert_(test.fill_value == True) + @pytest.mark.parametrize("op", [operator.eq, operator.lt]) + def test_eq_broadcast_with_unmasked(self, op): + a = array([0, 1], mask=[0, 1]) + b = np.arange(10).reshape(5, 2) + result = op(a, b) + assert_(result.mask.shape == b.shape) + assert_equal(result.mask, np.zeros(b.shape, bool) | a.mask) + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) @pytest.mark.parametrize('fill', [None, 1]) From 3499c943fc7309c3a3a6bc5627cb0553e33ebc31 Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Sun, 27 Aug 2023 12:46:44 +0200 Subject: [PATCH 065/120] BUG: ensure nomask in comparison result is not broadcast --- numpy/ma/core.py | 23 ++++++++++++----------- numpy/ma/tests/test_core.py | 9 +++++++++ 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index ebdf41682436..16f74e89e902 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -4140,17 +4140,18 @@ def _comparison(self, other, compare): if isinstance(check, (np.bool_, bool)): return masked if mask else check - if mask is not nomask and compare in (operator.eq, operator.ne): - # Adjust elements that were masked, which should be treated - # as equal if masked in both, unequal if masked in one. - # Note that this works automatically for structured arrays too. - # Ignore this for operations other than `==` and `!=` - check = np.where(mask, compare(smask, omask), check) - - if mask.shape != check.shape: - # Guarantee consistency of the shape, making a copy since the - # the mask may need to get written to later. 
- mask = np.broadcast_to(mask, check.shape).copy() + if mask is not nomask: + if compare in (operator.eq, operator.ne): + # Adjust elements that were masked, which should be treated + # as equal if masked in both, unequal if masked in one. + # Note that this works automatically for structured arrays too. + # Ignore this for operations other than `==` and `!=` + check = np.where(mask, compare(smask, omask), check) + + if mask.shape != check.shape: + # Guarantee consistency of the shape, making a copy since the + # the mask may need to get written to later. + mask = np.broadcast_to(mask, check.shape).copy() check = check.view(type(self)) check._update_from(self) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index cc17cc2ad782..08ddc46c2498 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1770,6 +1770,15 @@ def test_eq_broadcast_with_unmasked(self, op): assert_(result.mask.shape == b.shape) assert_equal(result.mask, np.zeros(b.shape, bool) | a.mask) + @pytest.mark.parametrize("op", [operator.eq, operator.gt]) + def test_comp_no_mask_not_broadcast(self, op): + # Regression test for failing doctest in MaskedArray.nonzero + # after gh-24556. + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + result = op(a, 3) + assert_(not result.mask.shape) + assert_(result.mask is nomask) + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) @pytest.mark.parametrize('fill', [None, 1]) From 1a0073c9b36449e29b78fdb30a24547f72845930 Mon Sep 17 00:00:00 2001 From: DWesl <22566757+DWesl@users.noreply.github.com> Date: Mon, 28 Aug 2023 17:12:51 -0400 Subject: [PATCH 066/120] CI: Exclude import libraries from list of DLLs on Cygwin. There are import libraries in numpy.random that are intended for distribution. This script should not include those, only actual DLLs. --- tools/list_numpy_dlls.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/list_numpy_dlls.sh b/tools/list_numpy_dlls.sh index fedd2097ba67..39aaccf6ed2c 100644 --- a/tools/list_numpy_dlls.sh +++ b/tools/list_numpy_dlls.sh @@ -5,5 +5,5 @@ py_ver=${1} site_packages=`python${py_ver} -m pip show numpy | \ grep Location | cut -d " " -f 2 -`; dll_list=`for name in $(python${py_ver} -m pip show -f numpy | \ - grep -F .dll); do echo ${site_packages}/${name}; done` + grep -E -e '\.dll$'); do echo ${site_packages}/${name}; done` echo ${dll_list} From 48390607a20a2beda03c9aee58c4cd6a6c83f8f2 Mon Sep 17 00:00:00 2001 From: Albert Steppi Date: Wed, 30 Aug 2023 08:20:43 -0400 Subject: [PATCH 067/120] BLD: fix `_umath_linalg` dependencies (#24584) Closes gh-24512, where `linalg.eigvalsh` was observed to be non-thread safe. This was due to the non-thread safe `lapack_lite` being called instead of the installed BLAS/LAPACK. Co-authored-by: Ralf Gommers --- numpy/linalg/meson.build | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/numpy/linalg/meson.build b/numpy/linalg/meson.build index d290e5b3932d..da1e8bad8068 100644 --- a/numpy/linalg/meson.build +++ b/numpy/linalg/meson.build @@ -1,35 +1,37 @@ +# Note that `python_xerbla.c` was excluded on Windows in setup.py; +# unclear why and it seems needed, so unconditionally used here. 
lapack_lite_sources = [ - 'lapack_lite/f2c.c', - 'lapack_lite/f2c_c_lapack.c', - 'lapack_lite/f2c_d_lapack.c', - 'lapack_lite/f2c_s_lapack.c', - 'lapack_lite/f2c_z_lapack.c', - 'lapack_lite/f2c_blas.c', - 'lapack_lite/f2c_config.c', - 'lapack_lite/f2c_lapack.c', 'lapack_lite/python_xerbla.c', ] - -# TODO: ILP64 support - -lapack_lite_module_src = ['lapack_litemodule.c'] if not have_lapack - warning('LAPACK was not found, NumPy is using an unoptimized, naive build from sources!') - lapack_lite_module_src += lapack_lite_sources + lapack_lite_sources += [ + 'lapack_lite/f2c.c', + 'lapack_lite/f2c_c_lapack.c', + 'lapack_lite/f2c_d_lapack.c', + 'lapack_lite/f2c_s_lapack.c', + 'lapack_lite/f2c_z_lapack.c', + 'lapack_lite/f2c_blas.c', + 'lapack_lite/f2c_config.c', + 'lapack_lite/f2c_lapack.c', + ] endif py.extension_module('lapack_lite', - lapack_lite_module_src, + [ + 'lapack_litemodule.c', + lapack_lite_sources, + ], dependencies: [np_core_dep, blas_dep, lapack_dep], install: true, subdir: 'numpy/linalg', ) -_umath_linalg_src = ['umath_linalg.cpp'] + lapack_lite_sources - py.extension_module('_umath_linalg', - _umath_linalg_src, - dependencies: np_core_dep, + [ + 'umath_linalg.cpp', + lapack_lite_sources, + ], + dependencies: [np_core_dep, blas_dep, lapack_dep], link_with: npymath_lib, install: true, subdir: 'numpy/linalg', From 24937c7dc0143ef4241e31ac35031e7eefef5c1b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 21 Aug 2023 20:44:00 -0600 Subject: [PATCH 068/120] MAINT: Stop testing on ppc64le. The machine is usually missing, so tests are often reporting a failure. I'm tired of seeing tests without a green checkmark just because of that. --- .travis.yml | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/.travis.yml b/.travis.yml index c10e20483606..72845eefac09 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,16 +26,18 @@ cache: jobs: include: - - python: "3.9" - os: linux - arch: ppc64le - env: - # use OpenBLAS build, not system ATLAS - - DOWNLOAD_OPENBLAS=1 - # - NPY_USE_BLAS_ILP64=1 # the openblas build fails - - ATLAS=None - # VSX4 still not supported by ubuntu/gcc-11 - - EXPECT_CPU_FEATURES="VSX VSX2 VSX3" +# The ppc64le for these tests is usually missing, resulting in +# test failure most of the time. Let's not do that. +# - python: "3.9" +# os: linux +# arch: ppc64le +# env: +# # use OpenBLAS build, not system ATLAS +# - DOWNLOAD_OPENBLAS=1 +# # - NPY_USE_BLAS_ILP64=1 # the openblas build fails +# - ATLAS=None +# # VSX4 still not supported by ubuntu/gcc-11 +# - EXPECT_CPU_FEATURES="VSX VSX2 VSX3" - python: "3.9" os: linux From 676b63be33f8a8cb5e0cc57df5e877d3be81ca32 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 31 Aug 2023 14:29:33 +0200 Subject: [PATCH 069/120] BLD: meson-cpu: fix SIMD support on platforms with no features This code returned an empty list rather than an empty dict on platforms that don't have a key in `max_features_dict`. That would break the build on the next `foreach` statement, which needs a dict. 
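The failure mode can be mimicked in plain Python (an analogy only; Meson's dict/list `get()` fallback behaves the same way here, and the architecture name is just an example):

    arch_features = {'x86': {'SSE2': (), 'AVX2': ()}}      # stand-in for the per-arch feature tables
    max_features_dict = arch_features.get('riscv64', [])   # old fallback: an empty *list*
    try:
        for name, obj in max_features_dict.items():        # the foreach needs a mapping
            pass
    except AttributeError as exc:
        print(exc)   # 'list' object has no attribute 'items' -- same class of failure
    max_features_dict = arch_features.get('riscv64', {})   # fixed fallback: an empty dict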
[skip cirrus] [skip circle] [skip azp] --- meson_cpu/meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build index b99638bfc24f..97b33c461b95 100644 --- a/meson_cpu/meson.build +++ b/meson_cpu/meson.build @@ -106,7 +106,7 @@ max_features_dict = { 's390x': S390X_FEATURES, 'arm': ARM_FEATURES, 'aarch64': ARM_FEATURES, -}.get(cpu_family, []) +}.get(cpu_family, {}) max_features = [] foreach fet_name, fet_obj : max_features_dict max_features += [fet_obj] From 54fb19670e1dad130deae6e8a0dd5a8462dc2bcf Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 31 Aug 2023 15:07:19 -0600 Subject: [PATCH 070/120] BUG: Change Cython binding directive to "False". The Cython 2 default was "False", in Cython 3 it was changed to "True". Because NumPy 1.26.x supports both Cython versions, make it "False" for compatibility. Closes #24490. --- numpy/random/_generator.pyx | 2 +- numpy/random/_mt19937.pyx | 2 +- numpy/random/_pcg64.pyx | 2 +- numpy/random/_philox.pyx | 2 +- numpy/random/_sfc64.pyx | 2 +- numpy/random/bit_generator.pyx | 2 +- numpy/random/mtrand.pyx | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 9bd09a2bca33..05061aa2dc3b 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -1,5 +1,5 @@ #!python -#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3, binding=True +#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3, binding=False import operator import warnings from collections.abc import Sequence diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx index 50bc0084c6c2..c3f9206a1e2f 100644 --- a/numpy/random/_mt19937.pyx +++ b/numpy/random/_mt19937.pyx @@ -1,4 +1,4 @@ -#cython: binding=True +#cython: binding=False import operator diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index a924d75fdbf3..75ffdddb7e44 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -1,4 +1,4 @@ -#cython: binding=True +#cython: binding=False import numpy as np cimport numpy as np diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index d90da6a9b657..291f8224ac3e 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -1,4 +1,4 @@ -#cython: binding=True +#cython: binding=False from cpython.pycapsule cimport PyCapsule_New diff --git a/numpy/random/_sfc64.pyx b/numpy/random/_sfc64.pyx index 9b38dff84122..2393979df6db 100644 --- a/numpy/random/_sfc64.pyx +++ b/numpy/random/_sfc64.pyx @@ -1,4 +1,4 @@ -#cython: binding=True +#cython: binding=False import numpy as np cimport numpy as np diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index bcc9e50a1804..9028cb65bfcb 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -1,4 +1,4 @@ -#cython: binding=True +#cython: binding=False """ BitGenerator base class and SeedSequence used to seed the BitGenerators. 
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 752d9beaefd2..eb9e3ddd1dd0 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -1,5 +1,5 @@ #!python -#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3, binding=True +#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3, binding=False import operator import warnings from collections.abc import Sequence From 784842a87dc90cb59ebb7f142a51c652165b0089 Mon Sep 17 00:00:00 2001 From: Developer-Ecosystem-Engineering <65677710+Developer-Ecosystem-Engineering@users.noreply.github.com> Date: Thu, 31 Aug 2023 01:47:59 -0700 Subject: [PATCH 071/120] ENH: Adopt new macOS Accelerate BLAS/LAPACK Interfaces, including ILP64 (#24053) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit macOS 13.3 shipped with an updated Accelerate framework that provides BLAS / LAPACK. The new version is aligned with Netlib's v3.9.1 and also supports ILP64. The changes here adopt those new interfaces when available. - New interfaces are used when ACCELERATE_NEW_LAPACK is defined. - ILP64 interfaces are used when both ACCELERATE_NEW_LAPACK and ACCELERATE_LAPACK_ILP64 are defined. macOS 13.3 now ships with 3 different sets of BLAS / LAPACK interfaces: - LP64 / LAPACK v3.2.1 - legacy interfaces kept for compatibility - LP64 / LAPACK v3.9.1 - new interfaces - ILP64 / LAPACK v3.9.1 - new interfaces with ILP64 support For LP64, we want to support building against macOS 13.3+ SDK, but having it work on pre-13.3 systems. To that end, we created wrappers for each API that do a runtime check on which set of API is available and should be used. However, these were deemed potentially too complex to include during review of gh-24053, and left out in this commit. Please see gh-24053 for those. ILP64 is only supported on macOS 13.3+ and does not use additional wrappers. We've included support for both distutils and Meson builds. All tests pass on Apple silicon and Intel based Macs. A new CI job for Accelerate ILP64 on x86-64 was added as well. 
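A quick way to confirm which BLAS/LAPACK a given build actually linked against is `np.show_config()` (long-standing NumPy API, though the exact output fields vary by release); on macOS 13.3+ it should report an Accelerate entry when the new interfaces are picked up:

    import numpy as np
    np.show_config()   # lists the BLAS/LAPACK libraries detected at build time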
Benchmarks ILP64 Accelerate vs OpenBLAS before after ratio [73f0cf4f] [d1572653] n/a n/a n/a bench_linalg.Linalg.time_op('det', 'float16') n/a n/a n/a bench_linalg.Linalg.time_op('pinv', 'float16') n/a n/a n/a bench_linalg.Linalg.time_op('svd', 'float16') failed failed n/a bench_linalg.LinalgSmallArrays.time_det_small_array + 3.96±0.1μs 5.04±0.4μs 1.27 bench_linalg.Linalg.time_op('norm', 'float32') 1.43±0.04ms 1.43±0ms 1.00 bench_linalg.Einsum.time_einsum_outer() 12.7±0.4μs 12.7±0.3μs 1.00 bench_linalg.Einsum.time_einsum_sum_mul2() 24.1±0.8μs 24.1±0.04μs 1.00 bench_linalg.Linalg.time_op('norm', 'float16') 9.48±0.2ms 9.48±0.3ms 1.00 bench_linalg.Einsum.time_einsum_outer() 609±20μs 609±2μs 1.00 bench_linalg.Einsum.time_einsum_noncon_outer() 64.9±2μs 64.7±0.07μs 1.00 bench_linalg.Einsum.time_einsum_contig_outstride0() 1.24±0.03ms 1.24±0.01ms 1.00 bench_linalg.Einsum.time_einsum_noncon_outer() 102±3μs 102±0.2μs 1.00 bench_linalg.Einsum.time_einsum_contig_contig() 21.9±0.8μs 21.8±0.02μs 1.00 bench_linalg.Einsum.time_einsum_multiply() 22.8±0.2ms 22.7±0.3ms 0.99 bench_linalg.Eindot.time_einsum_ijk_jil_kl 13.3±0.4μs 13.3±0.02μs 0.99 bench_linalg.Einsum.time_einsum_sum_mul2() 9.56±0.3μs 9.49±0.2μs 0.99 bench_linalg.Einsum.time_einsum_noncon_contig_contig() 7.31±0.2μs 7.26±0.08μs 0.99 bench_linalg.Einsum.time_einsum_noncon_contig_outstride0() 5.60±0.2ms 5.55±0.02ms 0.99 bench_linalg.Eindot.time_einsum_ij_jk_a_b 37.1±1μs 36.7±0.1μs 0.99 bench_linalg.Einsum.time_einsum_contig_outstride0() 13.5±0.4μs 13.4±0.05μs 0.99 bench_linalg.Einsum.time_einsum_sum_mul() 1.03±0.03μs 1.02±0μs 0.99 bench_linalg.LinalgSmallArrays.time_norm_small_array 51.6±2μs 51.0±0.09μs 0.99 bench_linalg.Einsum.time_einsum_contig_contig() 15.2±0.5μs 15.0±0.04μs 0.99 bench_linalg.Einsum.time_einsum_noncon_sum_mul2() 13.9±0.4μs 13.7±0.02μs 0.99 bench_linalg.Einsum.time_einsum_noncon_sum_mul2() 415±10μs 409±0.4μs 0.99 bench_linalg.Eindot.time_einsum_i_ij_j 9.29±0.3μs 9.01±0.03μs 0.97 bench_linalg.Einsum.time_einsum_noncon_mul() 18.2±0.6μs 17.6±0.04μs 0.97 bench_linalg.Einsum.time_einsum_multiply() 509±40μs 492±10μs 0.97 bench_linalg.Einsum.time_einsum_mul() 9.63±0.3μs 9.28±0.09μs 0.96 bench_linalg.Einsum.time_einsum_noncon_contig_contig() 9.08±0.2μs 8.73±0.02μs 0.96 bench_linalg.Einsum.time_einsum_noncon_mul() 15.6±0.5μs 15.0±0.04μs 0.96 bench_linalg.Einsum.time_einsum_noncon_sum_mul() 7.74±0.2μs 7.39±0.04μs 0.95 bench_linalg.Einsum.time_einsum_noncon_contig_outstride0() 18.6±0.6μs 17.7±0.03μs 0.95 bench_linalg.Einsum.time_einsum_noncon_multiply() 14.5±0.4μs 13.7±0.03μs 0.95 bench_linalg.Einsum.time_einsum_noncon_sum_mul() 13.3±0.6μs 12.5±0.3μs 0.94 bench_linalg.Einsum.time_einsum_sum_mul() 23.5±0.5μs 21.9±0.05μs 0.93 bench_linalg.Einsum.time_einsum_noncon_multiply() 264±20μs 243±4μs 0.92 bench_linalg.Einsum.time_einsum_mul() - 177±50μs 132±0.6μs 0.75 bench_linalg.Eindot.time_dot_trans_at_a - 10.7±0.3μs 7.13±0.01μs 0.67 bench_linalg.Linalg.time_op('norm', 'int16') - 97.5±2μs 64.7±0.1μs 0.66 bench_linalg.Eindot.time_matmul_trans_a_at - 8.87±0.3μs 5.76±0μs 0.65 bench_linalg.Linalg.time_op('norm', 'longfloat') - 8.90±0.3μs 5.77±0.01μs 0.65 bench_linalg.Linalg.time_op('norm', 'float64') - 8.48±0.3μs 5.40±0.01μs 0.64 bench_linalg.Linalg.time_op('norm', 'int64') - 106±2μs 66.5±8μs 0.63 bench_linalg.Eindot.time_inner_trans_a_a - 8.25±0.3μs 5.16±0μs 0.62 bench_linalg.Linalg.time_op('norm', 'int32') - 103±5ms 64.6±0.5ms 0.62 bench_import.Import.time_linalg - 106±3μs 66.0±0.1μs 0.62 bench_linalg.Eindot.time_dot_trans_a_at - 202±20μs 124±0.6μs 
0.61 bench_linalg.Eindot.time_matmul_trans_at_a - 31.5±10μs 19.3±0.02μs 0.61 bench_linalg.Eindot.time_dot_d_dot_b_c - 32.4±20μs 19.7±0.03μs 0.61 bench_linalg.Eindot.time_matmul_d_matmul_b_c - 5.05±1ms 3.06±0.09ms 0.61 bench_linalg.Linalg.time_op('svd', 'complex128') - 5.35±0.9ms 3.09±0.09ms 0.58 bench_linalg.Linalg.time_op('svd', 'complex64') - 6.37±3ms 3.27±0.1ms 0.51 bench_linalg.Linalg.time_op('pinv', 'complex128') - 7.26±8ms 3.24±0.1ms 0.45 bench_linalg.Linalg.time_op('pinv', 'complex64') - 519±100μs 219±0.8μs 0.42 bench_linalg.Linalg.time_op('det', 'complex64') - 31.3±0.9μs 12.8±0.1μs 0.41 bench_linalg.Linalg.time_op('norm', 'complex128') - 2.44±0.7ms 924±1μs 0.38 bench_linalg.Linalg.time_op('pinv', 'float64') - 29.9±0.8μs 10.8±0.01μs 0.36 bench_linalg.Linalg.time_op('norm', 'complex64') - 2.56±0.5ms 924±1μs 0.36 bench_linalg.Linalg.time_op('pinv', 'float32') - 2.63±0.5ms 924±0.6μs 0.35 bench_linalg.Linalg.time_op('pinv', 'int64') - 2.68±0.7ms 927±10μs 0.35 bench_linalg.Linalg.time_op('pinv', 'int32') - 2.68±0.5ms 927±10μs 0.35 bench_linalg.Linalg.time_op('pinv', 'int16') - 2.93±0.6ms 925±2μs 0.32 bench_linalg.Linalg.time_op('pinv', 'longfloat') - 809±500μs 215±0.2μs 0.27 bench_linalg.Linalg.time_op('det', 'complex128') - 3.67±0.9ms 895±20μs 0.24 bench_linalg.Eindot.time_tensordot_a_b_axes_1_0_0_1 - 489±100μs 114±20μs 0.23 bench_linalg.Eindot.time_inner_trans_a_ac - 3.64±0.7ms 777±0.3μs 0.21 bench_linalg.Lstsq.time_numpy_linalg_lstsq_a__b_float64 - 755±90μs 157±10μs 0.21 bench_linalg.Eindot.time_dot_a_b - 4.63±1ms 899±9μs 0.19 bench_linalg.Linalg.time_op('svd', 'longfloat') - 5.19±1ms 922±10μs 0.18 bench_linalg.Linalg.time_op('svd', 'float64') - 599±200μs 89.4±2μs 0.15 bench_linalg.Eindot.time_matmul_trans_atc_a - 956±200μs 140±10μs 0.15 bench_linalg.Eindot.time_matmul_a_b - 6.45±3ms 903±10μs 0.14 bench_linalg.Linalg.time_op('svd', 'float32') - 6.42±3ms 896±0.7μs 0.14 bench_linalg.Linalg.time_op('svd', 'int32') - 6.47±4ms 902±5μs 0.14 bench_linalg.Linalg.time_op('svd', 'int64') - 6.52±1ms 899±2μs 0.14 bench_linalg.Linalg.time_op('svd', 'int16') - 799±300μs 109±2μs 0.14 bench_linalg.Eindot.time_dot_trans_atc_a - 502±100μs 65.0±0.2μs 0.13 bench_linalg.Eindot.time_dot_trans_a_atc - 542±300μs 64.2±0.05μs 0.12 bench_linalg.Eindot.time_matmul_trans_a_atc - 458±300μs 41.6±0.09μs 0.09 bench_linalg.Linalg.time_op('det', 'int32') - 471±100μs 41.9±0.03μs 0.09 bench_linalg.Linalg.time_op('det', 'float32') - 510±100μs 43.6±0.06μs 0.09 bench_linalg.Linalg.time_op('det', 'int16') - 478±200μs 39.6±0.05μs 0.08 bench_linalg.Linalg.time_op('det', 'longfloat') - 599±200μs 39.6±0.09μs 0.07 bench_linalg.Linalg.time_op('det', 'float64') - 758±300μs 41.6±0.1μs 0.05 bench_linalg.Linalg.time_op('det', 'int64') Co-authored-by: Ralf Gommers --- .cirrus.star | 5 +- .github/workflows/macos.yml | 135 ++++++++++++++++++ build_requirements.txt | 1 + .../upcoming_changes/24053.new_feature.rst | 5 + environment.yml | 7 +- numpy/core/src/common/npy_cblas.h | 15 ++ numpy/distutils/system_info.py | 27 +++- numpy/linalg/meson.build | 10 +- numpy/meson.build | 59 +++++++- ...cirrus_macosx_arm64.yml => cirrus_arm.yml} | 60 +++++++- 10 files changed, 307 insertions(+), 17 deletions(-) create mode 100644 .github/workflows/macos.yml create mode 100644 doc/release/upcoming_changes/24053.new_feature.rst rename tools/ci/{cirrus_macosx_arm64.yml => cirrus_arm.yml} (59%) diff --git a/.cirrus.star b/.cirrus.star index 6f331a7c5b66..c503f25720a7 100644 --- a/.cirrus.star +++ b/.cirrus.star @@ -48,4 +48,7 @@ def main(ctx): if wheel: 
return fs.read("tools/ci/cirrus_wheels.yml") - return fs.read("tools/ci/cirrus_macosx_arm64.yml") + if int(pr_number) < 0: + return [] + + return fs.read("tools/ci/cirrus_arm.yml") diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml new file mode 100644 index 000000000000..ee445220607c --- /dev/null +++ b/.github/workflows/macos.yml @@ -0,0 +1,135 @@ +name: macOS tests (meson) + +on: + pull_request: + branches: + - main + - maintenance/** + +permissions: + contents: read # to fetch code (actions/checkout) + +env: + CCACHE_DIR: "${{ github.workspace }}/.ccache" + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + x86_conda: + name: macOS x86-64 conda + if: "github.repository == 'numpy/numpy'" + runs-on: macos-latest + strategy: + matrix: + python-version: ["3.11"] + + steps: + - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + submodules: recursive + fetch-depth: 0 + + - name: Prepare cache dirs and timestamps + id: prep-ccache + shell: bash -l {0} + run: | + mkdir -p "${CCACHE_DIR}" + echo "dir=$CCACHE_DIR" >> $GITHUB_OUTPUT + NOW=$(date -u +"%F-%T") + echo "timestamp=${NOW}" >> $GITHUB_OUTPUT + echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT + + - name: Setup compiler cache + uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 + id: cache-ccache + with: + path: ${{ steps.prep-ccache.outputs.dir }} + key: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos-${{ steps.prep-ccache.outputs.timestamp }} + restore-keys: | + ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- + + - name: Setup Mambaforge + uses: conda-incubator/setup-miniconda@3b0f2504dd76ef23b6d31f291f4913fb60ab5ff3 # v2.2.0 + with: + python-version: ${{ matrix.python-version }} + channels: conda-forge + channel-priority: true + activate-environment: numpy-dev + use-only-tar-bz2: false + miniforge-variant: Mambaforge + miniforge-version: latest + use-mamba: true + + # Updates if `environment.yml` or the date changes. The latter is needed to + # ensure we re-solve once a day (since we don't lock versions). Could be + # replaced by a conda-lock based approach in the future. 
+ - name: Cache conda environment + uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 + env: + # Increase this value to reset cache if environment.yml has not changed + CACHE_NUMBER: 1 + with: + path: ${{ env.CONDA }}/envs/numpy-dev + key: + ${{ runner.os }}--${{ steps.prep-ccache.outputs.today }}-conda-${{ env.CACHE_NUMBER }}-${{ hashFiles('environment.yml') }} + id: envcache + + - name: Update Conda Environment + run: mamba env update -n numpy-dev -f environment.yml + if: steps.envcache.outputs.cache-hit != 'true' + + - name: Build and Install NumPy + shell: bash -l {0} + run: | + conda activate numpy-dev + CC="ccache $CC" spin build -j2 + + - name: Run test suite (full) + shell: bash -l {0} + run: | + conda activate numpy-dev + export OMP_NUM_THREADS=2 + spin test -j2 -m full + + - name: Ccache statistics + shell: bash -l {0} + run: | + conda activate numpy-dev + ccache -s + + accelerate: + name: Accelerate ILP64 + if: "github.repository == 'numpy/numpy'" + runs-on: macos-13 + steps: + - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + submodules: recursive + fetch-depth: 0 + + - uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0 + with: + python-version: '3.10' + + - uses: maxim-lobanov/setup-xcode@9a697e2b393340c3cacd97468baa318e4c883d98 # v1.5.1 + with: + xcode-version: '14.3' + + - name: Install dependencies + run: | + pip install -r build_requirements.txt + pip install pytest pytest-xdist hypothesis + + - name: Build NumPy against Accelerate (ILP64) + run: | + spin build -- -Dblas=accelerate -Dlapack=accelerate -Duse-ilp64=true + + - name: Show meson-log.txt + if: always() + run: 'cat build/meson-logs/meson-log.txt' + + - name: Test + run: | + spin test -j2 diff --git a/build_requirements.txt b/build_requirements.txt index e12ac1cf4e2c..7f598f623252 100644 --- a/build_requirements.txt +++ b/build_requirements.txt @@ -3,3 +3,4 @@ Cython>=3.0 wheel==0.38.1 ninja spin==0.5 +build diff --git a/doc/release/upcoming_changes/24053.new_feature.rst b/doc/release/upcoming_changes/24053.new_feature.rst new file mode 100644 index 000000000000..f32eeef293c1 --- /dev/null +++ b/doc/release/upcoming_changes/24053.new_feature.rst @@ -0,0 +1,5 @@ +Support for the updated Accelerate BLAS/LAPACK library, including ILP64 (64-bit +integer) support, in macOS 13.3 has been added. This brings arm64 support, and +significant performance improvements of up to 10x for commonly used linear +algebra operations. When Accelerate is selected at build time, the 13.3+ +version will automatically be used if available. 
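[Editorial aside, not part of the patch: as an illustrative follow-up to the release note above, the BLAS/LAPACK implementation that a finished build actually detected and linked can be checked from the installed package, e.g.::

    # Sketch: inspect the build configuration of an installed NumPy;
    # a build made with -Dblas=accelerate should report Accelerate here.
    python -c "import numpy as np; np.show_config()"
]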
diff --git a/environment.yml b/environment.yml index ccde1f73e212..63f80b745b0d 100644 --- a/environment.yml +++ b/environment.yml @@ -8,16 +8,17 @@ channels: - conda-forge dependencies: - python=3.9 #need to pin to avoid issues with builds - - cython>=0.29.30 + - cython>=3.0 - compilers - openblas - nomkl - setuptools=59.2.0 - - meson >= 0.64.0 - ninja - pkg-config - meson-python - - pip # so you can use pip to install spin + - pip + - spin + - ccache # For testing - pytest - pytest-cov diff --git a/numpy/core/src/common/npy_cblas.h b/numpy/core/src/common/npy_cblas.h index 751854b6eb0a..dad9599f605e 100644 --- a/numpy/core/src/common/npy_cblas.h +++ b/numpy/core/src/common/npy_cblas.h @@ -25,6 +25,21 @@ enum CBLAS_SIDE {CblasLeft=141, CblasRight=142}; #define CBLAS_INDEX size_t /* this may vary between platforms */ +#ifdef ACCELERATE_NEW_LAPACK + #if __MAC_OS_X_VERSION_MAX_ALLOWED < 130300 + #ifdef HAVE_BLAS_ILP64 + #error "Accelerate ILP64 support is only available with macOS 13.3 SDK or later" + #endif + #else + #define NO_APPEND_FORTRAN + #ifdef HAVE_BLAS_ILP64 + #define BLAS_SYMBOL_SUFFIX $NEWLAPACK$ILP64 + #else + #define BLAS_SYMBOL_SUFFIX $NEWLAPACK + #endif + #endif +#endif + #ifdef NO_APPEND_FORTRAN #define BLAS_FORTRAN_SUFFIX #else diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 3dca7fb5a77d..edf56909ab5d 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -47,6 +47,7 @@ _numpy_info:Numeric _pkg_config_info:None accelerate_info:accelerate + accelerate_lapack_info:accelerate agg2_info:agg2 amd_info:amd atlas_3_10_blas_info:atlas @@ -534,6 +535,7 @@ def get_info(name, notfound_action=0): 'lapack_ssl2': lapack_ssl2_info, 'blas_ssl2': blas_ssl2_info, 'accelerate': accelerate_info, # use blas_opt instead + 'accelerate_lapack': accelerate_lapack_info, 'openblas64_': openblas64__info, 'openblas64__lapack': openblas64__lapack_info, 'openblas_ilp64': openblas_ilp64_info, @@ -2015,14 +2017,17 @@ def _check_info(self, info): class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin): notfounderror = LapackILP64NotFoundError - lapack_order = ['openblas64_', 'openblas_ilp64'] + lapack_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] order_env_var_name = 'NPY_LAPACK_ILP64_ORDER' def _calc_info(self, name): + print('lapack_ilp64_opt_info._calc_info(name=%s)' % (name)) info = get_info(name + '_lapack') if self._check_info(info): self.set_info(**info) return True + else: + print('%s_lapack does not exist' % (name)) return False @@ -2163,7 +2168,7 @@ def calc_info(self): class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin): notfounderror = BlasILP64NotFoundError - blas_order = ['openblas64_', 'openblas_ilp64'] + blas_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] order_env_var_name = 'NPY_BLAS_ILP64_ORDER' def _calc_info(self, name): @@ -2625,13 +2630,27 @@ def calc_info(self): link_args.extend(['-Wl,-framework', '-Wl,vecLib']) if args: + macros = [ + ('NO_ATLAS_INFO', 3), + ('HAVE_CBLAS', None), + ('ACCELERATE_NEW_LAPACK', None), + ] + if(os.getenv('NPY_USE_BLAS_ILP64', None)): + print('Setting HAVE_BLAS_ILP64') + macros += [ + ('HAVE_BLAS_ILP64', None), + ('ACCELERATE_LAPACK_ILP64', None), + ] self.set_info(extra_compile_args=args, extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO', 3), - ('HAVE_CBLAS', None)]) + define_macros=macros) return +class accelerate_lapack_info(accelerate_info): + def _calc_info(self): + return super()._calc_info() + class blas_src_info(system_info): # 
BLAS_SRC is deprecated, please do not use this! # Build or install a BLAS library via your package manager or from diff --git a/numpy/linalg/meson.build b/numpy/linalg/meson.build index da1e8bad8068..ec0afaab030f 100644 --- a/numpy/linalg/meson.build +++ b/numpy/linalg/meson.build @@ -1,10 +1,10 @@ # Note that `python_xerbla.c` was excluded on Windows in setup.py; # unclear why and it seems needed, so unconditionally used here. -lapack_lite_sources = [ - 'lapack_lite/python_xerbla.c', -] +python_xerbla_sources = ['lapack_lite/python_xerbla.c'] + +lapack_lite_sources = [] if not have_lapack - lapack_lite_sources += [ + lapack_lite_sources = [ 'lapack_lite/f2c.c', 'lapack_lite/f2c_c_lapack.c', 'lapack_lite/f2c_d_lapack.c', @@ -19,6 +19,7 @@ endif py.extension_module('lapack_lite', [ 'lapack_litemodule.c', + python_xerbla_sources, lapack_lite_sources, ], dependencies: [np_core_dep, blas_dep, lapack_dep], @@ -29,6 +30,7 @@ py.extension_module('lapack_lite', py.extension_module('_umath_linalg', [ 'umath_linalg.cpp', + python_xerbla_sources, lapack_lite_sources, ], dependencies: [np_core_dep, blas_dep, lapack_dep], diff --git a/numpy/meson.build b/numpy/meson.build index 40766081d140..150257d5b8b3 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -48,6 +48,13 @@ else ] endif +macOS13_3_or_later = false +if host_machine.system() == 'darwin' + r = run_command('xcrun', '-sdk', 'macosx', '--show-sdk-version', check: true) + sdkVersion = r.stdout().strip() + + macOS13_3_or_later = sdkVersion.version_compare('>=13.3') +endif # This is currently injected directly into CFLAGS/CXXFLAGS for wheel builds # (see cibuildwheel settings in pyproject.toml), but used by CI jobs already @@ -81,6 +88,7 @@ endif # https://github.com/mesonbuild/meson/issues/2835 blas_name = get_option('blas') lapack_name = get_option('lapack') + # pkg-config uses a lower-case name while CMake uses a capitalized name, so try # that too to make the fallback detection with CMake work if blas_name == 'openblas' @@ -90,6 +98,23 @@ if blas_name == 'openblas' _openblas_names = ['openblas', 'OpenBLAS'] endif blas = dependency(_openblas_names, required: false) +elif blas_name.to_lower() == 'accelerate' + # macOS 13.3+ has updated interfaces aligned with BLAS/LAPACK 3.9.1. Use them if available. + if macOS13_3_or_later + accelerate_compile_args = ['-DACCELERATE_NEW_LAPACK'] + if(use_ilp64) + accelerate_compile_args += '-DACCELERATE_LAPACK_ILP64' + endif + blas = declare_dependency( + compile_args: accelerate_compile_args, + dependencies: dependency('Accelerate') + ) + else + if(use_ilp64) + error('macOS SDK 13.3+ is required for ILP64 support.') + endif + blas = dependency('Accelerate') + endif else blas = dependency(blas_name, required: false) endif @@ -112,14 +137,22 @@ if have_blas # `dependency('blas', modules: cblas)` # see https://github.com/mesonbuild/meson/pull/10921. 
have_cblas = false - if cc.links(''' + if blas_name.to_lower() == 'accelerate' + _cblas_header = '' + elif blas_name.to_lower().startswith('mkl') + _cblas_header = '' + else + _cblas_header = '' + endif + if cc.links(f''' #ifndef BLAS_SYMBOL_SUFFIX # define BLAS_SYMBOL_SUFFIX #endif #define EXPAND(suffix) cblas_ddot ## suffix #define DDOT(suffix) EXPAND(suffix) - #include + #include @_cblas_header@ + int main(int argc, const char *argv[]) { double a[4] = {1,2,3,4}; @@ -178,9 +211,27 @@ else endif if lapack_name == 'openblas' - lapack_name = ['openblas', 'OpenBLAS'] + lapack_dep = dependency(['openblas', 'OpenBLAS'], required: false) +elif lapack_name.to_lower() == 'accelerate' + # macOS 13.3+ has updated interfaces aligned with BLAS/LAPACK 3.9.1. Use them if available. + if macOS13_3_or_later + accelerate_compile_args = ['-DACCELERATE_NEW_LAPACK'] + if(use_ilp64) + accelerate_compile_args += '-DACCELERATE_LAPACK_ILP64' + endif + lapack_dep = declare_dependency( + compile_args: accelerate_compile_args, + dependencies: dependency('Accelerate') + ) + else + if(use_ilp64) + error('macOS SDK 13.3+ is required for ILP64 support.') + endif + lapack_dep = dependency('Accelerate') + endif +else + lapack_dep = dependency(lapack_name, required: false) endif -lapack_dep = dependency(lapack_name, required: false) have_lapack = lapack_dep.found() if not have_lapack and not allow_noblas error('No LAPACK library detected! Install one, or use the ' + \ diff --git a/tools/ci/cirrus_macosx_arm64.yml b/tools/ci/cirrus_arm.yml similarity index 59% rename from tools/ci/cirrus_macosx_arm64.yml rename to tools/ci/cirrus_arm.yml index 0dc97763e954..e4e127d2af4e 100644 --- a/tools/ci/cirrus_macosx_arm64.yml +++ b/tools/ci/cirrus_arm.yml @@ -21,19 +21,75 @@ modified_clone: &MODIFIED_CLONE fi +linux_aarch64_test_task: + compute_engine_instance: + image_project: cirrus-images + image: family/docker-builder-arm64 + architecture: arm64 + platform: linux + cpu: 1 + memory: 4G + + <<: *MODIFIED_CLONE + + ccache_cache: + folder: .ccache + populate_script: + - mkdir -p .ccache + fingerprint_key: ccache-linux_aarch64 + + prepare_env_script: | + apt-get update + apt-get install -y --no-install-recommends software-properties-common gcc g++ gfortran pkg-config ccache + apt-get install -y --no-install-recommends python3.10 python3.10-venv libopenblas-dev libatlas-base-dev liblapack-dev + + # python3.10 -m ensurepip --default-pip --user + ln -s $(which python3.10) python + + # put ccache and python on PATH + export PATH=/usr/lib/ccache:$PWD:$PATH + echo "PATH=$PATH" >> $CIRRUS_ENV + echo "CCACHE_DIR=$PWD/.ccache" >> $CIRRUS_ENV + + # required for figuring out the system tags in openblas_support + pip install packaging + + pip install -r build_requirements.txt + pip install -r test_requirements.txt + + build_script: | + spin build -- -Dallow-noblas=true + + test_script: | + spin test -j 1 + ccache -s + + macos_arm64_test_task: + depends_on: + - linux_aarch64_test macos_instance: image: ghcr.io/cirruslabs/macos-monterey-xcode:14 <<: *MODIFIED_CLONE + ccache_cache: + folder: .ccache + populate_script: + - mkdir -p .ccache + fingerprint_key: ccache-macosx_arm64 + pip_cache: folder: ~/.cache/pip test_script: | - brew install python@3.10 + brew install python@3.10 ccache export PATH=/opt/homebrew/opt/python@3.10/libexec/bin:$PATH + export PATH=/opt/homebrew/opt/ccache/libexec:$PATH + export CCACHE_DIR=$PWD/.ccache + echo "PATH=$PATH" >> $CIRRUS_ENV + python --version RUNNER_OS="macOS" @@ -55,3 +111,5 @@ macos_arm64_test_task: spin 
build -- -Dallow-noblas=true spin test -j auto + + ccache -s From 5e63c8690a14cd09220462ddce556bbeaced80b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Fri, 1 Sep 2023 09:25:21 -0300 Subject: [PATCH 072/120] DOC: Update building docs to use Meson (#24573) Removes mentions of runtests.py when appropriate and replaces them with equivalent spin commands (or pytest in a few cases). [skip azp] Co-authored-by: Ralf Gommers --- INSTALL.rst | 55 ++-- benchmarks/README.rst | 26 +- building_with_meson.md | 9 +- doc/RELEASE_WALKTHROUGH.rst | 2 +- doc/TESTS.rst | 8 +- doc/source/dev/development_environment.rst | 189 ++++-------- doc/source/dev/howto_build_docs.rst | 36 +-- doc/source/dev/index.rst | 17 +- doc/source/user/building.rst | 329 +++------------------ 9 files changed, 169 insertions(+), 502 deletions(-) diff --git a/INSTALL.rst b/INSTALL.rst index 9ac3aa526f2c..93c1a15dea66 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -38,7 +38,7 @@ Hypothesis__ https://hypothesis.readthedocs.io/en/latest/ .. note:: If you want to build NumPy in order to work on NumPy itself, use - ``runtests.py``. For more details, see + ``spin``. For more details, see https://numpy.org/devdocs/dev/development_environment.html .. note:: @@ -52,37 +52,36 @@ Basic Installation To install NumPy, run:: - python setup.py build -j 4 install --prefix $HOME/.local + pip install . -This will compile numpy on 4 CPUs and install it into the specified prefix. -To perform an inplace build that can be run from the source folder run:: +This will compile NumPy on all available CPUs and install it into the active +environment. - python setup.py build_ext --inplace -j 4 +To run the build from the source folder for development purposes, use the +``spin`` development CLI:: + + spin build # installs in-tree under `build-install/` + spin ipython # drop into an interpreter where `import numpy` picks up the local build + +Alternatively, use an editable install with:: + + pip install -e . --no-build-isolation See `Requirements for Installing Packages `_ for more details. -The number of build jobs can also be specified via the environment variable -NPY_NUM_BUILD_JOBS. - Choosing compilers ================== -NumPy needs a C compiler, and for development versions also needs Cython. A Fortran -compiler isn't needed to build NumPy itself; the ``numpy.f2py`` tests will be -skipped when running the test suite if no Fortran compiler is available. For -building Scipy a Fortran compiler is needed though, so we include some details -on Fortran compilers in the rest of this section. - -On OS X and Linux, all common compilers will work. The minimum supported GCC -version is 6.5. - -For Fortran, ``gfortran`` works, ``g77`` does not. In case ``g77`` is -installed then ``g77`` will be detected and used first. To explicitly select -``gfortran`` in that case, do:: +NumPy needs C and C++ compilers, and for development versions also needs +Cython. A Fortran compiler isn't needed to build NumPy itself; the +``numpy.f2py`` tests will be skipped when running the test suite if no Fortran +compiler is available. - python setup.py build --fcompiler=gnu95 +For more options including selecting compilers, setting custom compiler flags +and controlling parallelism, see +https://scipy.github.io/devdocs/building/compilers_and_options.html Windows ------- @@ -104,10 +103,11 @@ for more details. 
Building with optimized BLAS support ==================================== -Configuring which BLAS/LAPACK is used if you have multiple libraries installed, -or you have only one installed but in a non-standard location, is done via a -``site.cfg`` file. See the ``site.cfg.example`` shipped with NumPy for more -details. +Configuring which BLAS/LAPACK is used if you have multiple libraries installed +is done via a ``--config-settings`` CLI flag - if not given, the default choice +is OpenBLAS. If your installed library is in a non-standard location, selecting +that location is done via a pkg-config ``.pc`` file. +See http://scipy.github.io/devdocs/building/blas_lapack.html for more details. Windows ------- @@ -120,9 +120,8 @@ For an overview of the state of BLAS/LAPACK libraries on Windows, see macOS ----- -You will need to install a BLAS/LAPACK library. We recommend using OpenBLAS or -Intel MKL. Apple's Accelerate also still works, however it has bugs and we are -likely to drop support for it in the near future. +On macOS >= 13.3, you can use Apple's Accelerate library. On older macOS versions, +Accelerate has bugs and we recommend using OpenBLAS or (on x86-64) Intel MKL. Ubuntu/Debian ------------- diff --git a/benchmarks/README.rst b/benchmarks/README.rst index ef841a81872d..e44f8fe02f1e 100644 --- a/benchmarks/README.rst +++ b/benchmarks/README.rst @@ -11,9 +11,7 @@ Usage ----- Airspeed Velocity manages building and Python virtualenvs by itself, -unless told otherwise. Some of the benchmarking features in -``runtests.py`` also tell ASV to use the NumPy compiled by -``runtests.py``. To run the benchmarks, you do not need to install a +unless told otherwise. To run the benchmarks, you do not need to install a development version of NumPy to your current Python environment. Before beginning, ensure that *airspeed velocity* is installed. @@ -28,10 +26,9 @@ submitting a pull request. To run all benchmarks, navigate to the root NumPy directory at the command line and execute:: - python runtests.py --bench + spin bench -where ``--bench`` activates the benchmark suite instead of the -test suite. This builds NumPy and runs all available benchmarks +This builds NumPy and runs all available benchmarks defined in ``benchmarks/``. (Note: this could take a while. Each benchmark is run multiple times to measure the distribution in execution times.) @@ -49,18 +46,19 @@ and `--quick` is used to avoid repetitions. 
To run benchmarks from a particular benchmark module, such as ``bench_core.py``, simply append the filename without the extension:: - python runtests.py --bench bench_core + spin bench -t bench_core -To run a benchmark defined in a class, such as ``Mandelbrot`` -from ``bench_avx.py``:: +To run a benchmark defined in a class, such as ``MeshGrid`` +from ``bench_creation.py``:: - python runtests.py --bench bench_avx.Mandelbrot + spin bench -t bench_creation.MeshGrid -Compare change in benchmark results to another version/commit/branch:: +Compare changes in benchmark results to another version/commit/branch, use the +``--compare`` option (or the equivalent ``-c``):: - python runtests.py --bench-compare v1.6.2 bench_core - python runtests.py --bench-compare 8bf4e9b bench_core - python runtests.py --bench-compare main bench_core + spin bench --compare v1.6.2 -t bench_core + spin bench --compare 20d03bcfd -t bench_core + spin bench -c main -t bench_core All of the commands above display the results in plain text in the console, and the results are not saved for comparison with diff --git a/building_with_meson.md b/building_with_meson.md index 59269877176e..2319c0c748cc 100644 --- a/building_with_meson.md +++ b/building_with_meson.md @@ -37,12 +37,9 @@ pytest --pyargs numpy ### pip install -Note that `pip` will use the default build system, which is (as of now) still -`numpy.distutils`. In order to switch that default to Meson, uncomment the -`build-backend = "mesonpy"` line at the top of `pyproject.toml`. - -After that is done, `pip install .` or `pip install --no-build-isolation .` -will work as expected. As does building an sdist or wheel with `python -m build`, +Note that `pip` will use the default build system, which is now Meson. +Commands such as `pip install .` or `pip install --no-build-isolation .` +will work as expected, as does building an sdist or wheel with `python -m build`, or `pip install -e . --no-build-isolation` for an editable install. For a more complete developer experience than editable installs, consider using `spin` instead though (see above). diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index 855181c9f3a7..f064b3405b34 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -122,7 +122,7 @@ repository:: Sanity check:: - $ python3 runtests.py -m "full" + $ python3 -m spin test -m full Tag the release and push the tag. This requires write permission for the numpy repository:: diff --git a/doc/TESTS.rst b/doc/TESTS.rst index 9c5e8571ff64..c2a697b581f5 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -63,14 +63,14 @@ example, the ``core`` module, use the following:: Running tests from the command line ----------------------------------- -If you want to build NumPy in order to work on NumPy itself, use -``runtests.py``.To run NumPy's full test suite:: +If you want to build NumPy in order to work on NumPy itself, use the ``spin`` +utility. 
To run NumPy's full test suite:: - $ python runtests.py + $ spin test -m full Testing a subset of NumPy:: - $python runtests.py -t numpy/core/tests + $ spin test -t numpy/core/tests For detailed info on testing, see :ref:`testing-builds` diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index 8c3227083cb2..a1bce4f9d194 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -38,6 +38,34 @@ of this chapter we assume that you have set up your git repo as described in relevant parts of the NumPy documentation to build, test, develop, write docs, and contribute to NumPy. +Using virtual environments +-------------------------- + +A frequently asked question is "How do I set up a development version of NumPy +in parallel to a released version that I use to do my job/research?". + +One simple way to achieve this is to install the released version in +site-packages, by using pip or conda for example, and set +up the development version in a virtual environment. + +If you use conda, we recommend creating a separate virtual environment for +numpy development using the ``environment.yml`` file in the root of the repo +(this will create the environment and install all development dependencies at +once):: + + $ conda env create -f environment.yml # `mamba` works too for this command + $ conda activate numpy-dev + +If you installed Python some other way than conda, first install +`virtualenv`_ (optionally use `virtualenvwrapper`_), then create your +virtualenv (named ``numpy-dev`` here) with:: + + $ virtualenv numpy-dev + +Now, whenever you want to switch to the virtual environment, you can use the +command ``source numpy-dev/bin/activate``, and ``deactivate`` to exit from the +virtual environment and back to your previous shell. + .. _testing-builds: @@ -47,153 +75,65 @@ Testing builds Before running the tests, first install the test dependencies:: $ python -m pip install -r test_requirements.txt + $ python -m pip install asv # only for running benchmarks To build the development version of NumPy and run tests, spawn -interactive shells with the Python import paths properly set up etc., -do one of:: +interactive shells with the Python import paths properly set up etc., use the +`spin `_ utility. To run tests, do +one of:: - $ python runtests.py -v - $ python runtests.py -v -s random - $ python runtests.py -v -t numpy/core/tests/test_nditer.py::test_iter_c_order - $ python runtests.py --ipython - $ python runtests.py --python somescript.py - $ python runtests.py --bench - $ python runtests.py -g -m full + $ spin test -v + $ spin test numpy/random # to run the tests in a specific module + $ spin test -v -t numpy/core/tests/test_nditer.py::test_iter_c_order -This builds NumPy first, so the first time it may take a few minutes. If -you specify ``-n``, the tests are run against the version of NumPy (if -any) found on current PYTHONPATH. +This builds NumPy first, so the first time it may take a few minutes. + +You can also use ``spin bench`` for benchmarking. See ``spin --help`` for more +command line options. .. note:: If the above commands result in ``RuntimeError: Cannot parse version 0+untagged.xxxxx``, run ``git pull upstream main --tags``. -When specifying a target using ``-s``, ``-t``, or ``--python``, additional -arguments may be forwarded to the target embedded by ``runtests.py`` by passing -the extra arguments after a bare ``--``. 
For example, to run a test method with -the ``--pdb`` flag forwarded to the target, run the following:: +Additional arguments may be forwarded to ``pytest`` by passing the extra +arguments after a bare ``--``. For example, to run a test method with the +``--pdb`` flag forwarded to the target, run the following:: - $ python runtests.py -t numpy/tests/test_scripts.py::test_f2py -- --pdb + $ spin test -t numpy/tests/test_scripts.py::test_f2py -- --pdb -When using pytest as a target (the default), you can -`match test names using python operators`_ by passing the ``-k`` argument to pytest:: +You can also `match test names using python operators`_ by passing the ``-k`` +argument to pytest:: - $ python runtests.py -v -t numpy/core/tests/test_multiarray.py -- -k "MatMul and not vector" + $ spin test -v -t numpy/core/tests/test_multiarray.py -- -k "MatMul and not vector" .. note:: Remember that all tests of NumPy should pass before committing your changes. -Using ``runtests.py`` is the recommended approach to running tests. -There are also a number of alternatives to it, for example in-place -build or installing to a virtualenv or a conda environment. See the FAQ below -for details. - .. note:: Some of the tests in the test suite require a large amount of memory, and are skipped if your system does not have enough. +.. To override the automatic detection of available memory, set the environment variable ``NPY_AVAILABLE_MEM``, for example ``NPY_AVAILABLE_MEM=32GB``, or using pytest ``--available-memory=32GB`` target option. - -Building in-place ------------------ - -For development, you can set up an in-place build so that changes made to -``.py`` files have effect without rebuild. First, run:: - - $ python setup.py build_ext -i - -This allows you to import the in-place built NumPy *from the repo base -directory only*. If you want the in-place build to be visible outside that -base dir, you need to point your ``PYTHONPATH`` environment variable to this -directory. Some IDEs (`Spyder`_ for example) have utilities to manage -``PYTHONPATH``. On Linux and OSX, you can run the command:: - - $ export PYTHONPATH=$PWD - -and on Windows:: - - $ set PYTHONPATH=/path/to/numpy - -Now editing a Python source file in NumPy allows you to immediately -test and use your changes (in ``.py`` files), by simply restarting the -interpreter. - -Note that another way to do an inplace build visible outside the repo base dir -is with ``python setup.py develop``. Instead of adjusting ``PYTHONPATH``, this -installs a ``.egg-link`` file into your site-packages as well as adjusts the -``easy-install.pth`` there, so its a more permanent (and magical) operation. - - -.. _Spyder: https://www.spyder-ide.org/ - Other build options ------------------- -Build options can be discovered by running any of:: - - $ python setup.py --help - $ python setup.py --help-commands - -It's possible to do a parallel build with ``numpy.distutils`` with the ``-j`` option; -see :ref:`parallel-builds` for more details. - -A similar approach to in-place builds and use of ``PYTHONPATH`` but outside the -source tree is to use:: - - $ pip install . --prefix /some/owned/folder - $ export PYTHONPATH=/some/owned/folder/lib/python3.4/site-packages - - -NumPy uses a series of tests to probe the compiler and libc libraries for -functions. The results are stored in ``_numpyconfig.h`` and ``config.h`` files -using ``HAVE_XXX`` definitions. 
These tests are run during the ``build_src`` -phase of the ``_multiarray_umath`` module in the ``generate_config_h`` and -``generate_numpyconfig_h`` functions. Since the output of these calls includes -many compiler warnings and errors, by default it is run quietly. If you wish -to see this output, you can run the ``build_src`` stage verbosely:: - - $ python build build_src -v - -Using virtual environments --------------------------- - -A frequently asked question is "How do I set up a development version of NumPy -in parallel to a released version that I use to do my job/research?". - -One simple way to achieve this is to install the released version in -site-packages, by using pip or conda for example, and set -up the development version in a virtual environment. - -If you use conda, we recommend creating a separate virtual environment for -numpy development using the ``environment.yml`` file in the root of the repo -(this will create the environment and install all development dependencies at -once):: - - $ conda env create -f environment.yml # `mamba` works too for this command - $ conda activate numpy-dev - -If you installed Python some other way than conda, first install -`virtualenv`_ (optionally use `virtualenvwrapper`_), then create your -virtualenv (named ``numpy-dev`` here) with:: - - $ virtualenv numpy-dev - -Now, whenever you want to switch to the virtual environment, you can use the -command ``source numpy-dev/bin/activate``, and ``deactivate`` to exit from the -virtual environment and back to your previous shell. +For more options including selecting compilers, setting custom compiler flags +and controlling parallelism, see :doc:`scipy:building/compilers_and_options` +(from the SciPy documentation.) Running tests ------------- -Besides using ``runtests.py``, there are various ways to run the tests. Inside +Besides using ``spin``, there are various ways to run the tests. Inside the interpreter, tests can be run like this:: >>> np.test() # doctest: +SKIPBLOCK @@ -208,7 +148,7 @@ Or a similar way from the command line:: $ python -c "import numpy as np; np.test()" Tests can also be run with ``pytest numpy``, however then the NumPy-specific -plugin is not found which causes strange side effects +plugin is not found which causes strange side effects. Running individual test files can be useful; it's much faster than running the whole test suite or that of a whole module (example: ``np.random.test()``). @@ -224,10 +164,10 @@ run the test suite with Python 3.9, use:: $ tox -e py39 -For more extensive information, see :ref:`testing-guidelines` +For more extensive information, see :ref:`testing-guidelines`. -*Note: do not run the tests from the root directory of your numpy git repo without ``runtests.py``, -that will result in strange test errors.* +Note: do not run the tests from the root directory of your numpy git repo without ``spin``, +that will result in strange test errors. Running Linting --------------- @@ -239,15 +179,16 @@ Install all dependent packages using pip:: To run lint checks before committing new code, run:: - $ python runtests.py --lint uncommitted + $ python tools/linter.py To check all changes in newly added Python code of current branch with target branch, run:: - $ python runtests.py --lint main + $ python tools/linter.py --branch main -If there are no errors, the script exits with no message. In case of errors:: +If there are no errors, the script exits with no message. 
In case of errors, +check the error message for details:: - $ python runtests.py --lint main + $ python tools/linter.py --branch main ./numpy/core/tests/test_scalarmath.py:34:5: E303 too many blank lines (3) 1 E303 too many blank lines (3) @@ -256,8 +197,8 @@ since the linter runs as part of the CI pipeline. For more details on Style Guidelines: - - `Python Style Guide`_ - - `C Style Guide`_ +- `Python Style Guide`_ +- `C Style Guide`_ Rebuilding & cleaning the workspace ----------------------------------- @@ -306,7 +247,7 @@ you want to debug. For instance ``mytest.py``:: Now, you can run:: - $ gdb --args python runtests.py -g --python mytest.py + $ spin gdb mytest.py And then in the debugger:: @@ -337,10 +278,6 @@ needs a ``.gdbinit`` file with the following contents: add-auto-load-safe-path ~/.pyenv -Instead of plain ``gdb`` you can of course use your favourite -alternative debugger; run it on the python binary with arguments -``runtests.py -g --python mytest.py``. - Building NumPy with a Python built with debug support (on Linux distributions typically packaged as ``python-dbg``) is highly recommended. diff --git a/doc/source/dev/howto_build_docs.rst b/doc/source/dev/howto_build_docs.rst index b3d2e3055a28..0af9bd7132fc 100644 --- a/doc/source/dev/howto_build_docs.rst +++ b/doc/source/dev/howto_build_docs.rst @@ -7,7 +7,7 @@ Building the NumPy API and reference docs If you only want to get the documentation, note that pre-built versions can be found at - https://numpy.org/doc/ +https://numpy.org/doc/ in several different formats. @@ -89,37 +89,11 @@ Instructions Now you are ready to generate the docs, so write:: - cd doc - make html + spin docs -If all goes well, this will generate a -``build/html`` subdirectory in the ``/doc`` directory, containing the built documentation. If -you get a message about ``installed numpy != current repo git version``, you must -either override the check by setting ``GITVER`` or re-install NumPy. - -If you have built NumPy into a virtual environment and get an error -that says ``numpy not found, cannot build documentation without...``, -you need to override the makefile ``PYTHON`` variable at the command -line, so instead of writing ``make html`` write:: - - make PYTHON=python html - -To build the PDF documentation, do instead:: - - make latex - make -C build/latex all-pdf - -You will need to have LaTeX_ installed for this, inclusive of support for -Greek letters. For example, on Ubuntu xenial ``texlive-lang-greek`` and -``cm-super`` are needed. Also, ``latexmk`` is needed on non-Windows systems. - -Instead of the above, you can also do:: - - make dist - -which will rebuild NumPy, install it to a temporary location, and -build the documentation in all formats. This will most likely again -only work on Unix platforms. +This will build NumPy from source if you haven't already, and run Sphinx to +build the ``html`` docs. If all goes well, this will generate a ``build/html`` +subdirectory in the ``/doc`` directory, containing the built documentation. The documentation for NumPy distributed at https://numpy.org/doc in html and pdf format is also built with ``make dist``. See `HOWTO RELEASE`_ for details diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index b4479fa0d925..3ecaff91d3a6 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -219,13 +219,12 @@ be installed with:: Tests for a module should ideally cover all code in that module, i.e., statement coverage should be at 100%. 
-To measure the test coverage, install -`pytest-cov `__ -and then run:: +To measure the test coverage, run:: - $ python runtests.py --coverage + $ spin test --coverage -This will create a report in ``build/coverage``, which can be viewed with:: +This will create a report in ``html`` format at ``build/coverage``, which can be +viewed with your browser, e.g.:: $ firefox build/coverage/index.html @@ -234,10 +233,12 @@ This will create a report in ``build/coverage``, which can be viewed with:: Building docs ------------- -To build docs, run ``make`` from the ``doc`` directory. ``make help`` lists -all targets. For example, to build the HTML documentation, you can run:: +To build the HTML documentation, use:: - make html + spin docs + +You can also run ``make`` from the ``doc`` directory. ``make help`` lists +all targets. To get the appropriate dependencies and other requirements, see :ref:`howto-build-docs`. diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst index 2ee310b42c79..c3fa350a74e4 100644 --- a/doc/source/user/building.rst +++ b/doc/source/user/building.rst @@ -50,11 +50,8 @@ Building NumPy requires the following software installed: them and use them for building. A number of different LAPACK library setups can be used, including optimized LAPACK libraries such as OpenBLAS or MKL. The choice and location of these libraries as well as include paths and - other such build options can be specified in a ``site.cfg`` file located in - the NumPy root repository or a ``.numpy-site.cfg`` file in your home - directory. See the ``site.cfg.example`` example file included in the NumPy - repository or sdist for documentation, and below for specifying search - priority from environmental variables. + other such build options can be specified in a ``.pc`` file, as documented in + :ref:`scipy:using-pkg-config-to-detect-libraries-in-a-nonstandard-location`. 4) Cython @@ -64,20 +61,51 @@ Building NumPy requires the following software installed: Clone the repository following the instructions in :doc:`/dev/index`. -Basic Installation +.. note:: + + Starting on version 1.26, NumPy will adopt Meson as its build system (see + :ref:`distutils-status-migration` and + :doc:`scipy:building/understanding_meson` for more details.) + +Basic installation ------------------ -To install NumPy, run:: +To build and install NumPy from a local copy of the source code, run:: pip install . +This will install all build dependencies and use Meson to compile and install +the NumPy C-extensions and Python modules. If you need more control of build +options and commands, see the following sections. + To perform an in-place build that can be run from the source folder run:: - python setup.py build_ext --inplace + pip install -r build_requirements.txt + pip install -e . --no-build-isolation *Note: for build instructions to do development work on NumPy itself, see* :ref:`development-environment`. + +Advanced building with Meson +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Meson supports the standard environment variables ``CC``, ``CXX`` and ``FC`` to +select specific C, C++ and/or Fortran compilers. These environment variables are +documented in `the reference tables in the Meson docs +`_. + +Note that environment variables only get applied from a clean build, because +they affect the configure stage (i.e., meson setup). An incremental rebuild does +not react to changes in environment variables - you have to run +``git clean -xdf`` and do a full rebuild, or run ``meson setup --reconfigure``. 
+ +For more options including selecting compilers, setting custom compiler flags +and controlling parallelism, see :doc:`scipy:building/compilers_and_options` +(from the SciPy documentation) and `the Meson FAQ +`_. + + Testing ------- @@ -87,53 +115,15 @@ all tests pass. The test suite requires additional dependencies, which can easily be installed with:: - $ python -m pip install -r test_requirements.txt + python -m pip install -r test_requirements.txt -Run tests:: +Run the full test suite with:: - $ python runtests.py -v -m full + cd .. # avoid picking up the source tree + pytest --pyargs numpy For detailed info on testing, see :ref:`testing-builds`. -.. _parallel-builds: - -Parallel builds -~~~~~~~~~~~~~~~ - -It's possible to do a parallel build with:: - - python setup.py build -j 4 install --prefix $HOME/.local - -This will compile numpy on 4 CPUs and install it into the specified prefix. -to perform a parallel in-place build, run:: - - python setup.py build_ext --inplace -j 4 - -The number of build jobs can also be specified via the environment variable -``NPY_NUM_BUILD_JOBS``. - -Choosing the fortran compiler -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Compilers are auto-detected; building with a particular compiler can be done -with ``--fcompiler``. E.g. to select gfortran:: - - python setup.py build --fcompiler=gnu95 - -For more information see:: - - python setup.py build --help-fcompiler - -How to check the ABI of BLAS/LAPACK libraries -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -One relatively simple and reliable way to check for the compiler used to -build a library is to use ldd on the library. If libg2c.so is a dependency, -this means that g77 has been used (note: g77 is no longer supported for -building NumPy). If libgfortran.so is a dependency, gfortran has been used. -If both are dependencies, this means both have been used, which is almost -always a very bad idea. - .. _accelerated-blas-lapack-libraries: Accelerated BLAS/LAPACK libraries @@ -141,243 +131,14 @@ Accelerated BLAS/LAPACK libraries NumPy searches for optimized linear algebra libraries such as BLAS and LAPACK. There are specific orders for searching these libraries, as described below and -in the ``site.cfg.example`` file. - -BLAS -~~~~ - -Note that both BLAS and CBLAS interfaces are needed for a properly -optimized build of NumPy. - -The default order for the libraries are: - -1. MKL -2. BLIS -3. OpenBLAS -4. ATLAS -5. BLAS (NetLIB) - -The detection of BLAS libraries may be bypassed by defining the environment -variable ``NPY_BLAS_LIBS`` , which should contain the exact linker flags you -want to use (interface is assumed to be Fortran 77). Also define -``NPY_CBLAS_LIBS`` (even empty if CBLAS is contained in your BLAS library) to -trigger use of CBLAS and avoid slow fallback code for matrix calculations. - -If you wish to build against OpenBLAS but you also have BLIS available one -may predefine the order of searching via the environment variable -``NPY_BLAS_ORDER`` which is a comma-separated list of the above names which -is used to determine what to search for, for instance:: - - NPY_BLAS_ORDER=ATLAS,blis,openblas,MKL python setup.py build - -will prefer to use ATLAS, then BLIS, then OpenBLAS and as a last resort MKL. -If neither of these exists the build will fail (names are compared -lower case). 
- -Alternatively one may use ``!`` or ``^`` to negate all items:: - - NPY_BLAS_ORDER='^blas,atlas' python setup.py build - -will allow using anything **but** NetLIB BLAS and ATLAS libraries, the order -of the above list is retained. - -One cannot mix negation and positives, nor have multiple negations, such -cases will raise an error. - -LAPACK -~~~~~~ - -The default order for the libraries are: - -1. MKL -2. OpenBLAS -3. libFLAME -4. ATLAS -5. LAPACK (NetLIB) - -The detection of LAPACK libraries may be bypassed by defining the environment -variable ``NPY_LAPACK_LIBS``, which should contain the exact linker flags you -want to use (language is assumed to be Fortran 77). - -If you wish to build against OpenBLAS but you also have MKL available one -may predefine the order of searching via the environment variable -``NPY_LAPACK_ORDER`` which is a comma-separated list of the above names, -for instance:: - - NPY_LAPACK_ORDER=ATLAS,openblas,MKL python setup.py build - -will prefer to use ATLAS, then OpenBLAS and as a last resort MKL. -If neither of these exists the build will fail (names are compared -lower case). - -Alternatively one may use ``!`` or ``^`` to negate all items:: - - NPY_LAPACK_ORDER='^lapack' python setup.py build - -will allow using anything **but** the NetLIB LAPACK library, the order of -the above list is retained. - -One cannot mix negation and positives, nor have multiple negations, such -cases will raise an error. - -.. deprecated:: 1.20 - The native libraries on macOS, provided by Accelerate, are not fit for use - in NumPy since they have bugs that cause wrong output under easily - reproducible conditions. If the vendor fixes those bugs, the library could - be reinstated, but until then users compiling for themselves should use - another linear algebra library or use the built-in (but slower) default, - see the next section. - - -Disabling ATLAS and other accelerated libraries -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Usage of ATLAS and other accelerated libraries in NumPy can be disabled -via:: - - NPY_BLAS_ORDER= NPY_LAPACK_ORDER= python setup.py build - -or:: - - BLAS=None LAPACK=None ATLAS=None python setup.py build - - -64-bit BLAS and LAPACK -~~~~~~~~~~~~~~~~~~~~~~ - -You can tell Numpy to use 64-bit BLAS/LAPACK libraries by setting the -environment variable:: - - NPY_USE_BLAS_ILP64=1 - -when building Numpy. The following 64-bit BLAS/LAPACK libraries are -supported: - -1. OpenBLAS ILP64 with ``64_`` symbol suffix (``openblas64_``) -2. OpenBLAS ILP64 without symbol suffix (``openblas_ilp64``) - -The order in which they are preferred is determined by -``NPY_BLAS_ILP64_ORDER`` and ``NPY_LAPACK_ILP64_ORDER`` environment -variables. The default value is ``openblas64_,openblas_ilp64``. - -.. note:: - - Using non-symbol-suffixed 64-bit BLAS/LAPACK in a program that also - uses 32-bit BLAS/LAPACK can cause crashes under certain conditions - (e.g. with embedded Python interpreters on Linux). - - The 64-bit OpenBLAS with ``64_`` symbol suffix is obtained by - compiling OpenBLAS with settings:: - - make INTERFACE64=1 SYMBOLSUFFIX=64_ - - The symbol suffix avoids the symbol name clashes between 32-bit and - 64-bit BLAS/LAPACK libraries. - - -Supplying additional compiler flags ------------------------------------ - -Additional compiler flags can be supplied by setting the ``OPT``, -``FOPT`` (for Fortran), and ``CC`` environment variables. 
-When providing options that should improve the performance of the code -ensure that you also set ``-DNDEBUG`` so that debugging code is not -executed. +in the +`meson_options.txt `_ +file. Cross compilation ----------------- -Although ``numpy.distutils`` and ``setuptools`` do not directly support cross -compilation, it is possible to build NumPy on one system for different -architectures with minor modifications to the build environment. This may be -desirable, for example, to use the power of a high-performance desktop to -create a NumPy package for a low-power, single-board computer. Because the -``setup.py`` scripts are unaware of cross-compilation environments and tend to -make decisions based on the environment detected on the build system, it is -best to compile for the same type of operating system that runs on the builder. -Attempting to compile a Mac version of NumPy on Windows, for example, is likely -to be met with challenges not considered here. - -For the purpose of this discussion, the nomenclature adopted by `meson`_ will -be used: the "build" system is that which will be running the NumPy build -process, while the "host" is the platform on which the compiled package will be -run. A native Python interpreter, the setuptools and Cython packages and the -desired cross compiler must be available for the build system. In addition, a -Python interpreter and its development headers as well as any external linear -algebra libraries must be available for the host platform. For convenience, it -is assumed that all host software is available under a separate prefix -directory, here called ``$CROSS_PREFIX``. +For cross compilation instructions, see :doc:`scipy:cross_compilation` and the +`Meson documentation `_. .. _meson: https://mesonbuild.com/Cross-compilation.html#cross-compilation - -When building and installing NumPy for a host system, the ``CC`` environment -variable must provide the path the cross compiler that will be used to build -NumPy C extensions. It may also be necessary to set the ``LDSHARED`` -environment variable to the path to the linker that can link compiled objects -for the host system. The compiler must be told where it can find Python -libraries and development headers. On Unix-like systems, this generally -requires adding, *e.g.*, the following parameters to the ``CFLAGS`` environment -variable:: - - -I${CROSS_PREFIX}/usr/include - -I${CROSS_PREFIX}/usr/include/python3.y - -for Python version 3.y. (Replace the "y" in this path with the actual minor -number of the installed Python runtime.) Likewise, the linker should be told -where to find host libraries by adding a parameter to the ``LDFLAGS`` -environment variable:: - - -L${CROSS_PREFIX}/usr/lib - -To make sure Python-specific system configuration options are provided for the -intended host and not the build system, set:: - - _PYTHON_SYSCONFIGDATA_NAME=_sysconfigdata_${ARCH_TRIPLET} - -where ``${ARCH_TRIPLET}`` is an architecture-dependent suffix appropriate for -the host architecture. (This should be the name of a ``_sysconfigdata`` file, -without the ``.py`` extension, found in the host Python library directory.) - -When using external linear algebra libraries, include and library directories -should be provided for the desired libraries in ``site.cfg`` as described -above and in the comments of the ``site.cfg.example`` file included in the -NumPy repository or sdist. 
In this example, set:: - - include_dirs = ${CROSS_PREFIX}/usr/include - library_dirs = ${CROSS_PREFIX}/usr/lib - -under appropriate sections of the file to allow ``numpy.distutils`` to find the -libraries. - -As of NumPy 1.22.0, a vendored copy of SVML will be built on ``x86_64`` Linux -hosts to provide AVX-512 acceleration of floating-point operations. When using -an ``x86_64`` Linux build system to cross compile NumPy for hosts other than -``x86_64`` Linux, set the environment variable ``NPY_DISABLE_SVML`` to prevent -the NumPy build script from incorrectly attempting to cross-compile this -platform-specific library:: - - NPY_DISABLE_SVML=1 - -With the environment configured, NumPy may be built as it is natively:: - - python setup.py build - -When the ``wheel`` package is available, the cross-compiled package may be -packed into a wheel for installation on the host with:: - - python setup.py bdist_wheel - -It may be possible to use ``pip`` to build a wheel, but ``pip`` configures its -own environment; adapting the ``pip`` environment to cross-compilation is -beyond the scope of this guide. - -The cross-compiled package may also be installed into the host prefix for -cross-compilation of other packages using, *e.g.*, the command:: - - python setup.py install --prefix=${CROSS_PREFIX} - -When cross compiling other packages that depend on NumPy, the host -npy-pkg-config file must be made available. For further discussion, refer to -`numpy distutils documentation`_. - -.. _numpy distutils documentation: https://numpy.org/devdocs/reference/distutils.html#numpy.distutils.misc_util.Configuration.add_npy_pkg_config From 7404e37619c058117f0b520ecfebc6a9318ba244 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 1 Sep 2023 15:16:16 +0200 Subject: [PATCH 073/120] TYP: Add the missing `casting` keyword to `np.clip` --- numpy/core/fromnumeric.pyi | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi index 43d1785578da..4fda86c0104a 100644 --- a/numpy/core/fromnumeric.pyi +++ b/numpy/core/fromnumeric.pyi @@ -21,6 +21,7 @@ from numpy import ( _PartitionKind, _SortKind, _SortSide, + _CastingKind, ) from numpy._typing import ( DTypeLike, @@ -392,7 +393,7 @@ def clip( order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., - extobj: list[Any] = ..., + casting: _CastingKind = ..., ) -> _SCT: ... @overload def clip( @@ -406,7 +407,7 @@ def clip( order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., - extobj: list[Any] = ..., + casting: _CastingKind = ..., ) -> Any: ... @overload def clip( @@ -420,7 +421,7 @@ def clip( order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., - extobj: list[Any] = ..., + casting: _CastingKind = ..., ) -> NDArray[_SCT]: ... @overload def clip( @@ -434,7 +435,7 @@ def clip( order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., - extobj: list[Any] = ..., + casting: _CastingKind = ..., ) -> NDArray[Any]: ... @overload def clip( @@ -448,7 +449,7 @@ def clip( order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., - extobj: list[Any] = ..., + casting: _CastingKind = ..., ) -> Any: ... @overload def clip( @@ -462,7 +463,7 @@ def clip( order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., - extobj: list[Any] = ..., + casting: _CastingKind = ..., ) -> _ArrayType: ... 
@overload From ed0ba4da66b99244978ac1142cd49df44eeda3cf Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 31 Aug 2023 12:12:50 +0300 Subject: [PATCH 074/120] TST: convert cython test from setup.py to meson (#24206) The limited-api test has to wait for a new Meson version (see gh-24206). This converts the regular Cython test for `numpy.core`. [skip ci] --- numpy/core/tests/examples/cython/meson.build | 27 ++++++++++ numpy/core/tests/test_cython.py | 55 ++++++++------------ 2 files changed, 48 insertions(+), 34 deletions(-) create mode 100644 numpy/core/tests/examples/cython/meson.build diff --git a/numpy/core/tests/examples/cython/meson.build b/numpy/core/tests/examples/cython/meson.build new file mode 100644 index 000000000000..12fc640b88b4 --- /dev/null +++ b/numpy/core/tests/examples/cython/meson.build @@ -0,0 +1,27 @@ +project('checks', 'c', 'cython') + +py = import('python').find_installation(pure: false) + +cc = meson.get_compiler('c') +cy = meson.get_compiler('cython') + +if not cy.version().version_compare('>=0.29.35') + error('tests requires Cython >= 0.29.35') +endif + +npy_include_path = run_command(py, [ + '-c', + 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))' + ], check: true).stdout().strip() + +py.extension_module( + 'checks', + 'checks.pyx', + install: false, + c_args: [ + '-DNPY_NO_DEPRECATED_API=0', # Cython still uses old NumPy C API + # Require 1.25+ to test datetime additions + '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION', + ], + include_directories: [npy_include_path], +) diff --git a/numpy/core/tests/test_cython.py b/numpy/core/tests/test_cython.py index e916adceb114..29473f5ba424 100644 --- a/numpy/core/tests/test_cython.py +++ b/numpy/core/tests/test_cython.py @@ -29,44 +29,31 @@ @pytest.fixture -def install_temp(request, tmp_path): +def install_temp(tmp_path): # Based in part on test_cython from random.tests.test_extending if IS_WASM: pytest.skip("No subprocess") - here = os.path.dirname(__file__) - ext_dir = os.path.join(here, "examples", "cython") - - cytest = str(tmp_path / "cytest") - - shutil.copytree(ext_dir, cytest) - # build the examples and "install" them into a temporary directory - - install_log = str(tmp_path / "tmp_install_log.txt") - subprocess.check_output( - [ - sys.executable, - "setup.py", - "build", - "install", - "--prefix", str(tmp_path / "installdir"), - "--single-version-externally-managed", - "--record", - install_log, - ], - cwd=cytest, - ) - - # In order to import the built module, we need its path to sys.path - # so parse that out of the record - with open(install_log) as fid: - for line in fid: - if "checks" in line: - sys.path.append(os.path.dirname(line)) - break - else: - raise RuntimeError(f'could not parse "{install_log}"') - + srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'cython') + build_dir = tmp_path / "build" + os.makedirs(build_dir, exist_ok=True) + try: + subprocess.check_call(["meson", "--version"]) + except FileNotFoundError: + pytest.skip("No usable 'meson' found") + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", str(srcdir)], + cwd=build_dir, + ) + else: + subprocess.check_call(["meson", "setup", str(srcdir)], + cwd=build_dir + ) + subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir) + + sys.path.append(str(build_dir)) def test_is_timedelta64_object(install_temp): import checks From cd3bb38de960dc0f5c27217cc08ca0851d58b3dd Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 1 Sep 2023 
16:20:55 -0600 Subject: [PATCH 075/120] MAINT: Fixup ``fromnumeric.pyi`` The line ``` extobj: list[Any] = ..., ``` was cut when backporting #24611. We want to keep it in 1.26.x. --- numpy/core/fromnumeric.pyi | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi index 4fda86c0104a..5438b2700bd5 100644 --- a/numpy/core/fromnumeric.pyi +++ b/numpy/core/fromnumeric.pyi @@ -393,6 +393,7 @@ def clip( order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., + extobj: list[Any] = ..., casting: _CastingKind = ..., ) -> _SCT: ... @overload @@ -407,6 +408,7 @@ def clip( order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., + extobj: list[Any] = ..., casting: _CastingKind = ..., ) -> Any: ... @overload @@ -421,6 +423,7 @@ def clip( order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., + extobj: list[Any] = ..., casting: _CastingKind = ..., ) -> NDArray[_SCT]: ... @overload @@ -435,6 +438,7 @@ def clip( order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., + extobj: list[Any] = ..., casting: _CastingKind = ..., ) -> NDArray[Any]: ... @overload @@ -449,6 +453,7 @@ def clip( order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., + extobj: list[Any] = ..., casting: _CastingKind = ..., ) -> Any: ... @overload @@ -463,6 +468,7 @@ def clip( order: _OrderKACF = ..., subok: bool = ..., signature: str | tuple[None | str, ...] = ..., + extobj: list[Any] = ..., casting: _CastingKind = ..., ) -> _ArrayType: ... From 091e89510200a84c16799154bdf79a0b9694c362 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 12:39:34 +0000 Subject: [PATCH 076/120] MAINT: fix ISO_C type maps in f2py --- numpy/f2py/auxfuncs.py | 26 +++++++++++++++++++++++- numpy/f2py/capi_maps.py | 44 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 3f9b0ceafa21..db89f949ff6a 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -18,6 +18,7 @@ import sys import types from functools import reduce +from copy import deepcopy from . import __version__ from . import cfuncs @@ -47,7 +48,8 @@ 'isunsigned_chararray', 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', - 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value' + 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', + 'deep_merge' ] @@ -888,3 +890,25 @@ def applyrules(rules, d, var={}): if ret[k] == []: del ret[k] return ret + +def deep_merge(dict1, dict2): + """Deep merge two dictionaries and return a new dictionary. + + Parameters: + - dict1: The base dictionary. + - dict2: The dictionary to merge into a copy of dict1. + If a key exists in both, the dict2 value will take precedence. + + Returns: + - A new merged dictionary. 
+ """ + merged_dict = deepcopy(dict1) + for key, value in dict2.items(): + if key in merged_dict: + if isinstance(merged_dict[key], dict) and isinstance(value, dict): + merged_dict[key] = deep_merge(merged_dict[key], value) + else: + merged_dict[key] = value + else: + merged_dict[key] = value + return merged_dict diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index c7efe87e82ba..c14febeebeb4 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -130,7 +130,49 @@ 'byte': {'': 'char'}, } -f2cmap_default = copy.deepcopy(f2cmap_all) +iso_c_binding_map = { + 'integer': { + 'c_int': 'int', + 'c_short': 'short int', + 'c_long': 'long int', + 'c_long_long': 'long long int', + 'c_signed_char': 'signed char', + 'c_size_t': 'size_t', + 'c_int8_t': 'int8_t', + 'c_int16_t': 'int16_t', + 'c_int32_t': 'int32_t', + 'c_int64_t': 'int64_t', + 'c_int_least8_t': 'int_least8_t', + 'c_int_least16_t': 'int_least16_t', + 'c_int_least32_t': 'int_least32_t', + 'c_int_least64_t': 'int_least64_t', + 'c_int_fast8_t': 'int_fast8_t', + 'c_int_fast16_t': 'int_fast16_t', + 'c_int_fast32_t': 'int_fast32_t', + 'c_int_fast64_t': 'int_fast64_t', + 'c_intmax_t': 'intmax_t', + 'c_intptr_t': 'intptr_t', + 'c_ptrdiff_t': 'intptr_t', + }, + 'real': { + 'c_float': 'float', + 'c_double': 'double', + 'c_long_double': 'long double' + }, + 'complex': { + 'c_float_complex': 'float _Complex', + 'c_double_complex': 'double _Complex', + 'c_long_double_complex': 'long double _Complex' + }, + 'logical': { + 'c_bool': '_Bool' + }, + 'character': { + 'c_char': 'char' + } +} + +f2cmap_default = deep_merge(f2cmap_all, iso_c_binding_map) f2cmap_mapped = [] From d3cd40938aa63d6844576045e6c5f88385b9dde8 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 12:44:16 +0000 Subject: [PATCH 077/120] TST: Add a test for gh-24553 --- numpy/f2py/tests/src/f2cmap/iso_c_oddity.f90 | 10 ++++++++++ numpy/f2py/tests/test_f2cmap.py | 11 +++++++++++ 2 files changed, 21 insertions(+) create mode 100644 numpy/f2py/tests/src/f2cmap/iso_c_oddity.f90 diff --git a/numpy/f2py/tests/src/f2cmap/iso_c_oddity.f90 b/numpy/f2py/tests/src/f2cmap/iso_c_oddity.f90 new file mode 100644 index 000000000000..69aaf8342258 --- /dev/null +++ b/numpy/f2py/tests/src/f2cmap/iso_c_oddity.f90 @@ -0,0 +1,10 @@ + module coddity + use iso_c_binding, only: c_double + implicit none + contains + subroutine c_add(a, b, c) bind(c, name="c_add") + real(c_double), intent(in) :: a, b + real(c_double), intent(out) :: c + c = a + b + end subroutine c_add + end module coddity diff --git a/numpy/f2py/tests/test_f2cmap.py b/numpy/f2py/tests/test_f2cmap.py index d2967e4f73d7..5a34e7d4d1e3 100644 --- a/numpy/f2py/tests/test_f2cmap.py +++ b/numpy/f2py/tests/test_f2cmap.py @@ -13,3 +13,14 @@ def test_long_long_map(self): out = self.module.func1(inp) exp_out = 3 assert out == exp_out + +class TestISOCmap(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "f2cmap", "iso_c_oddity.f90"), + ] + + # gh-24553 + def test_c_double(self): + out = self.module.coddity.c_add(1, 2) + exp_out = 3 + assert out == exp_out From a765be17fe24afe482e34ae36a9d0241b503a99c Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 13:06:53 +0000 Subject: [PATCH 078/120] DOC: Add a news fragment for iso_c_binding [f2py] --- doc/release/upcoming_changes/24555.improvement.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/24555.improvement.rst diff --git a/doc/release/upcoming_changes/24555.improvement.rst 
b/doc/release/upcoming_changes/24555.improvement.rst new file mode 100644 index 000000000000..65fae76088ee --- /dev/null +++ b/doc/release/upcoming_changes/24555.improvement.rst @@ -0,0 +1,5 @@ +``iso_c_binding`` support for ``f2py`` +-------------------------------------- +Previously, users would have to define their own custom ``f2cmap`` file to use +type mappings defined by the Fortran2003 ``iso_c_binding`` intrinsic module. +These type maps are now natively supported by ``f2py`` From 319abdb6f4a06508d3ca7cc9203e1ed92cf0cfbc Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 13:26:37 +0000 Subject: [PATCH 079/120] MAINT: Rework to keep older logic --- numpy/f2py/capi_maps.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index c14febeebeb4..f168e9bb0cba 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -172,7 +172,8 @@ } } -f2cmap_default = deep_merge(f2cmap_all, iso_c_binding_map) +f2cmap_all = deep_merge(f2cmap_all, iso_c_binding_map) +f2cmap_default = copy.deepcopy(f2cmap_all) f2cmap_mapped = [] From 37cb4757d1e307a295538886cb269ec9238a48d1 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 14:17:09 +0000 Subject: [PATCH 080/120] MAINT: Move into private module --- numpy/f2py/_isocbind.py | 41 +++++++++++++++++++++++++++++++++++++++ numpy/f2py/capi_maps.py | 43 +---------------------------------------- 2 files changed, 42 insertions(+), 42 deletions(-) create mode 100644 numpy/f2py/_isocbind.py diff --git a/numpy/f2py/_isocbind.py b/numpy/f2py/_isocbind.py new file mode 100644 index 000000000000..3acf8c714f91 --- /dev/null +++ b/numpy/f2py/_isocbind.py @@ -0,0 +1,41 @@ +iso_c_binding_map = { + 'integer': { + 'c_int': 'int', + 'c_short': 'short int', + 'c_long': 'long int', + 'c_long_long': 'long long int', + 'c_signed_char': 'signed char', + 'c_size_t': 'size_t', + 'c_int8_t': 'int8_t', + 'c_int16_t': 'int16_t', + 'c_int32_t': 'int32_t', + 'c_int64_t': 'int64_t', + 'c_int_least8_t': 'int_least8_t', + 'c_int_least16_t': 'int_least16_t', + 'c_int_least32_t': 'int_least32_t', + 'c_int_least64_t': 'int_least64_t', + 'c_int_fast8_t': 'int_fast8_t', + 'c_int_fast16_t': 'int_fast16_t', + 'c_int_fast32_t': 'int_fast32_t', + 'c_int_fast64_t': 'int_fast64_t', + 'c_intmax_t': 'intmax_t', + 'c_intptr_t': 'intptr_t', + 'c_ptrdiff_t': 'intptr_t', + }, + 'real': { + 'c_float': 'float', + 'c_double': 'double', + 'c_long_double': 'long double' + }, + 'complex': { + 'c_float_complex': 'float _Complex', + 'c_double_complex': 'double _Complex', + 'c_long_double_complex': 'long double _Complex' + }, + 'logical': { + 'c_bool': '_Bool' + }, + 'character': { + 'c_char': 'char' + } +} diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index f168e9bb0cba..32b6db5c5935 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -19,6 +19,7 @@ import os from .crackfortran import markoutercomma from . import cb_rules +from ._isocbind import iso_c_binding_map # The environment provided by auxfuncs.py is needed for some calls to eval. 
# As the needed functions cannot be determined by static inspection of the @@ -130,48 +131,6 @@ 'byte': {'': 'char'}, } -iso_c_binding_map = { - 'integer': { - 'c_int': 'int', - 'c_short': 'short int', - 'c_long': 'long int', - 'c_long_long': 'long long int', - 'c_signed_char': 'signed char', - 'c_size_t': 'size_t', - 'c_int8_t': 'int8_t', - 'c_int16_t': 'int16_t', - 'c_int32_t': 'int32_t', - 'c_int64_t': 'int64_t', - 'c_int_least8_t': 'int_least8_t', - 'c_int_least16_t': 'int_least16_t', - 'c_int_least32_t': 'int_least32_t', - 'c_int_least64_t': 'int_least64_t', - 'c_int_fast8_t': 'int_fast8_t', - 'c_int_fast16_t': 'int_fast16_t', - 'c_int_fast32_t': 'int_fast32_t', - 'c_int_fast64_t': 'int_fast64_t', - 'c_intmax_t': 'intmax_t', - 'c_intptr_t': 'intptr_t', - 'c_ptrdiff_t': 'intptr_t', - }, - 'real': { - 'c_float': 'float', - 'c_double': 'double', - 'c_long_double': 'long double' - }, - 'complex': { - 'c_float_complex': 'float _Complex', - 'c_double_complex': 'double _Complex', - 'c_long_double_complex': 'long double _Complex' - }, - 'logical': { - 'c_bool': '_Bool' - }, - 'character': { - 'c_char': 'char' - } -} - f2cmap_all = deep_merge(f2cmap_all, iso_c_binding_map) f2cmap_default = copy.deepcopy(f2cmap_all) From 2d57c1bff3f914020fa49d2ed8794a995e5e9190 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 16:03:41 +0000 Subject: [PATCH 081/120] MAINT: Add a kind_map for function use --- numpy/f2py/_isocbind.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/f2py/_isocbind.py b/numpy/f2py/_isocbind.py index 3acf8c714f91..81f52fb4dece 100644 --- a/numpy/f2py/_isocbind.py +++ b/numpy/f2py/_isocbind.py @@ -39,3 +39,8 @@ 'c_char': 'char' } } + +isoc_kindmap = {} +for fortran_type, c_type_dict in iso_c_binding_map.items(): + for c_type in c_type_dict.keys(): + isoc_kindmap[c_type] = fortran_type From 6ddbacedb0599fbebeb6187de6b321c8b3340d37 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 18:15:05 +0000 Subject: [PATCH 082/120] ENH: Rework bind(c) detection Also prevents terrible name mangling / general function binding failures --- numpy/f2py/crackfortran.py | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 1b8672410e4d..be16314889b3 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -696,7 +696,8 @@ def _simplifyargs(argsline): return ','.join(a) crackline_re_1 = re.compile(r'\s*(?P\b[a-z]+\w*\b)\s*=.*', re.I) - +crackline_bind_1 = re.compile(r'\s*(?P\b[a-z]+\w*\b)\s*=.*', re.I) +crackline_bindlang = re.compile(r'\s*bind\(\s*(?P[^,]+)\s*,\s*name\s*=\s*"(?P[^"]+)"\s*\)', re.I) def crackline(line, reset=0): """ @@ -967,12 +968,22 @@ def _resolvetypedefpattern(line): return m1.group('name'), attrs, m1.group('params') return None, [], None +def parse_name_for_bind(line): + pattern = re.compile(r'bind\(\s*(?P[^,]+)\s*,\s*name\s*=\s*"(?P[^"]+)"\s*\)', re.I) + match = pattern.search(line) + bind_statement = None + if match: + bind_statement = match.group(0) + # Remove the 'bind' construct from the line. 
+ line = line[:match.start()] + line[match.end():] + return line, bind_statement def _resolvenameargspattern(line): + line, bind_cname = parse_name_for_bind(line) line = markouterparen(line) m1 = nameargspattern.match(line) if m1: - return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind') + return m1.group('name'), m1.group('args'), m1.group('result'), bind_cname m1 = operatorpattern.match(line) if m1: name = m1.group('scheme') + '(' + m1.group('name') + ')' @@ -1022,7 +1033,7 @@ def analyzeline(m, case, line): args = [] result = None else: - name, args, result, _ = _resolvenameargspattern(m.group('after')) + name, args, result, bindcline = _resolvenameargspattern(m.group('after')) if name is None: if block == 'block data': name = '_BLOCK_DATA_' @@ -1140,6 +1151,13 @@ def analyzeline(m, case, line): except Exception: pass if block in ['function', 'subroutine']: # set global attributes + # name is fortran name + bindcdat = re.search(crackline_bindlang, bindcline) + if bindcdat: + groupcache[groupcounter]['bindlang'] = {name : {}} + groupcache[groupcounter]['bindlang'][name]["lang"] = bindcdat.group('lang') + if bindcdat.group('lang_name'): + groupcache[groupcounter]['bindlang'][name]["name"] = bindcdat.group('lang_name') try: groupcache[groupcounter]['vars'][name] = appenddecl( groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) @@ -1173,7 +1191,7 @@ def analyzeline(m, case, line): groupcounter = groupcounter - 1 # end interface elif case == 'entry': - name, args, result, bind = _resolvenameargspattern(m.group('after')) + name, args, result, _= _resolvenameargspattern(m.group('after')) if name is not None: if args: args = rmbadname([x.strip() From 6f2cababc5b3cda1fdf75f57bf7974ac0466c6fd Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 18:35:38 +0000 Subject: [PATCH 083/120] ENH: Add bind(c) to function subroutine bindings --- numpy/f2py/func2subr.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index cc3cdc5b4f90..f30915b0121f 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -118,6 +118,13 @@ def add(line, ret=ret): rl = None sargs = ', '.join(args) + bindline = "" + if 'bindlang' in rout and not signature: + if rout['bindlang'][fortranname]: + bindline = f"bind({rout['bindlang'][fortranname]['lang']}" + if rout['bindlang'][fortranname]['name']: + bindline += f", name='f2py_{rout['bindlang'][fortranname]['name']}'" + bindline += ")" if f90mode: # gh-23598 fix warning # Essentially, this gets called again with modules where the name of the @@ -125,12 +132,16 @@ def add(line, ret=ret): sargs = sargs.replace(f"{name}, ", '') args = [arg for arg in args if arg != name] rout['args'] = args - add('subroutine f2pywrap_%s_%s (%s)' % - (rout['modulename'], name, sargs)) + add('subroutine f2pywrap_%s_%s (%s) %s' % + (rout['modulename'], name, sargs, bindline)) if not signature: add('use %s, only : %s' % (rout['modulename'], fortranname)) + if 'bindlang' in rout: + add('use iso_c_binding') else: - add('subroutine f2pywrap%s (%s)' % (name, sargs)) + add('subroutine f2pywrap%s (%s) %s' % (name, sargs, bindline)) + if 'bindlang' in rout: + add('use iso_c_binding') if not need_interface: add('external %s' % (fortranname)) rl = l_tmpl.replace('@@@NAME@@@', '') + ' ' + fortranname From 8c83f84f3dbf2743310b2b70fa8dd00da53df12d Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 18:36:23 +0000 Subject: [PATCH 
084/120] MAINT: Do not use bind(c) for subroutine wrappers --- numpy/f2py/func2subr.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index f30915b0121f..f5061889dddb 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -118,13 +118,6 @@ def add(line, ret=ret): rl = None sargs = ', '.join(args) - bindline = "" - if 'bindlang' in rout and not signature: - if rout['bindlang'][fortranname]: - bindline = f"bind({rout['bindlang'][fortranname]['lang']}" - if rout['bindlang'][fortranname]['name']: - bindline += f", name='f2py_{rout['bindlang'][fortranname]['name']}'" - bindline += ")" if f90mode: # gh-23598 fix warning # Essentially, this gets called again with modules where the name of the @@ -132,14 +125,14 @@ def add(line, ret=ret): sargs = sargs.replace(f"{name}, ", '') args = [arg for arg in args if arg != name] rout['args'] = args - add('subroutine f2pywrap_%s_%s (%s) %s' % - (rout['modulename'], name, sargs, bindline)) + add('subroutine f2pywrap_%s_%s (%s)' % + (rout['modulename'], name, sargs)) if not signature: add('use %s, only : %s' % (rout['modulename'], fortranname)) if 'bindlang' in rout: add('use iso_c_binding') else: - add('subroutine f2pywrap%s (%s) %s' % (name, sargs, bindline)) + add('subroutine f2pywrap%s (%s)' % (name, sargs)) if 'bindlang' in rout: add('use iso_c_binding') if not need_interface: From 840cae20759ffdca6016f46f587a20032f41ed85 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 18:37:03 +0000 Subject: [PATCH 085/120] MAINT: Use iso_c_binding where necessary --- numpy/f2py/func2subr.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index f5061889dddb..53c4901e0710 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -226,10 +226,14 @@ def add(line, ret=ret): if f90mode: add('subroutine f2pywrap_%s_%s (%s)' % (rout['modulename'], name, sargs)) + if 'bindlang' in rout: + add('use iso_c_binding') if not signature: add('use %s, only : %s' % (rout['modulename'], fortranname)) else: add('subroutine f2pywrap%s (%s)' % (name, sargs)) + if 'bindlang' in rout: + add('use iso_c_binding') if not need_interface: add('external %s' % (fortranname)) From 8fad79373c81ea80fffdf563bcdf4102bf3c50ea Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 18:48:27 +0000 Subject: [PATCH 086/120] ENH: Load iso_c_binding module only when needed --- numpy/f2py/func2subr.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index 53c4901e0710..490a90ef8efc 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -70,6 +70,13 @@ def var2fixfortran(vars, a, fa=None, f90mode=None): vardef = '%s(%s)' % (vardef, ','.join(vars[a]['dimension'])) return vardef +def useiso_c_binding(rout): + useisoc = False + fortranname = getfortranname(rout) + if 'bindlang' in rout: + if rout['bindlang'][fortranname]: + useisoc = True + return useisoc def createfuncwrapper(rout, signature=0): assert isfunction(rout) @@ -117,6 +124,7 @@ def add(line, ret=ret): l1 = l_tmpl.replace('@@@NAME@@@', newname) rl = None + useisoc = useiso_c_binding(rout) sargs = ', '.join(args) if f90mode: # gh-23598 fix warning @@ -129,11 +137,11 @@ def add(line, ret=ret): (rout['modulename'], name, sargs)) if not signature: add('use %s, only : %s' % (rout['modulename'], fortranname)) - if 'bindlang' in rout: + if useisoc: add('use 
iso_c_binding') else: add('subroutine f2pywrap%s (%s)' % (name, sargs)) - if 'bindlang' in rout: + if useisoc: add('use iso_c_binding') if not need_interface: add('external %s' % (fortranname)) @@ -222,17 +230,18 @@ def add(line, ret=ret): args = rout['args'] + useisoc = useiso_c_binding(rout) sargs = ', '.join(args) if f90mode: add('subroutine f2pywrap_%s_%s (%s)' % (rout['modulename'], name, sargs)) - if 'bindlang' in rout: + if useisoc: add('use iso_c_binding') if not signature: add('use %s, only : %s' % (rout['modulename'], fortranname)) else: add('subroutine f2pywrap%s (%s)' % (name, sargs)) - if 'bindlang' in rout: + if useisoc: add('use iso_c_binding') if not need_interface: add('external %s' % (fortranname)) From ff652f2b0a04ec7022057beb168eb2a9dce7d89a Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 19:44:29 +0000 Subject: [PATCH 087/120] MAINT: Fix regep for bind matches Make name optional --- numpy/f2py/crackfortran.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index be16314889b3..b8a48e5a04bb 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -969,7 +969,7 @@ def _resolvetypedefpattern(line): return None, [], None def parse_name_for_bind(line): - pattern = re.compile(r'bind\(\s*(?P[^,]+)\s*,\s*name\s*=\s*"(?P[^"]+)"\s*\)', re.I) + pattern = re.compile(r'bind\(\s*(?P[^,]+)(?:\s*,\s*name\s*=\s*["\'](?P[^"\']+)["\']\s*)?\)', re.I) match = pattern.search(line) bind_statement = None if match: From aaa98e09b25f88873d8a88c48ea81d1853d114c6 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 19:49:11 +0000 Subject: [PATCH 088/120] MAINT: More robust test for using iso_c_binding --- numpy/f2py/func2subr.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index 490a90ef8efc..2eedc0ade85e 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -21,6 +21,7 @@ issubroutine, issubroutine_wrap, outmess, show ) +from ._isocbind import isoc_kindmap def var2fixfortran(vars, a, fa=None, f90mode=None): if fa is None: @@ -72,10 +73,10 @@ def var2fixfortran(vars, a, fa=None, f90mode=None): def useiso_c_binding(rout): useisoc = False - fortranname = getfortranname(rout) - if 'bindlang' in rout: - if rout['bindlang'][fortranname]: - useisoc = True + for key, value in rout['vars'].items(): + kind_value = value.get('kindselector', {}).get('kind') + if kind_value in isoc_kindmap: + return True return useisoc def createfuncwrapper(rout, signature=0): From e5080f84bffb8a8d0c60ed6a7e98505a9653ba7b Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 19:54:26 +0000 Subject: [PATCH 089/120] MAINT: Be more forgiving with bindc matches --- numpy/f2py/crackfortran.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index b8a48e5a04bb..2f1686f64c3e 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -1152,12 +1152,13 @@ def analyzeline(m, case, line): pass if block in ['function', 'subroutine']: # set global attributes # name is fortran name - bindcdat = re.search(crackline_bindlang, bindcline) - if bindcdat: - groupcache[groupcounter]['bindlang'] = {name : {}} - groupcache[groupcounter]['bindlang'][name]["lang"] = bindcdat.group('lang') - if bindcdat.group('lang_name'): - groupcache[groupcounter]['bindlang'][name]["name"] = bindcdat.group('lang_name') + if 
bindcline: + bindcdat = re.search(crackline_bindlang, bindcline) + if bindcdat: + groupcache[groupcounter]['bindlang'] = {name : {}} + groupcache[groupcounter]['bindlang'][name]["lang"] = bindcdat.group('lang') + if bindcdat.group('lang_name'): + groupcache[groupcounter]['bindlang'][name]["name"] = bindcdat.group('lang_name') try: groupcache[groupcounter]['vars'][name] = appenddecl( groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) From 3755a4b6496c06bbc5c785df5cb8034b81ab6f63 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 20:09:21 +0000 Subject: [PATCH 090/120] TST: Add more tests for iso_c_binding Functions and subroutines --- .../isoCtests.f90} | 9 ++++++++- numpy/f2py/tests/test_f2cmap.py | 11 ----------- numpy/f2py/tests/test_isoc.py | 19 +++++++++++++++++++ 3 files changed, 27 insertions(+), 12 deletions(-) rename numpy/f2py/tests/src/{f2cmap/iso_c_oddity.f90 => isocintrin/isoCtests.f90} (51%) create mode 100644 numpy/f2py/tests/test_isoc.py diff --git a/numpy/f2py/tests/src/f2cmap/iso_c_oddity.f90 b/numpy/f2py/tests/src/isocintrin/isoCtests.f90 similarity index 51% rename from numpy/f2py/tests/src/f2cmap/iso_c_oddity.f90 rename to numpy/f2py/tests/src/isocintrin/isoCtests.f90 index 69aaf8342258..4d53ef5c7d28 100644 --- a/numpy/f2py/tests/src/f2cmap/iso_c_oddity.f90 +++ b/numpy/f2py/tests/src/isocintrin/isoCtests.f90 @@ -1,5 +1,5 @@ module coddity - use iso_c_binding, only: c_double + use iso_c_binding, only: c_double, c_int implicit none contains subroutine c_add(a, b, c) bind(c, name="c_add") @@ -7,4 +7,11 @@ subroutine c_add(a, b, c) bind(c, name="c_add") real(c_double), intent(out) :: c c = a + b end subroutine c_add + ! gh-9693 + function wat(x, y) result(z) bind(c, name='wat') + integer(c_int), intent(in) :: x, y + integer(c_int) :: z + + z = x + 7 + end function wat end module coddity diff --git a/numpy/f2py/tests/test_f2cmap.py b/numpy/f2py/tests/test_f2cmap.py index 5a34e7d4d1e3..d2967e4f73d7 100644 --- a/numpy/f2py/tests/test_f2cmap.py +++ b/numpy/f2py/tests/test_f2cmap.py @@ -13,14 +13,3 @@ def test_long_long_map(self): out = self.module.func1(inp) exp_out = 3 assert out == exp_out - -class TestISOCmap(util.F2PyTest): - sources = [ - util.getpath("tests", "src", "f2cmap", "iso_c_oddity.f90"), - ] - - # gh-24553 - def test_c_double(self): - out = self.module.coddity.c_add(1, 2) - exp_out = 3 - assert out == exp_out diff --git a/numpy/f2py/tests/test_isoc.py b/numpy/f2py/tests/test_isoc.py new file mode 100644 index 000000000000..7e189bd7b830 --- /dev/null +++ b/numpy/f2py/tests/test_isoc.py @@ -0,0 +1,19 @@ +from . 
import util +import numpy as np + +class TestISOC(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "isocintrin", "isoCtests.f90"), + ] + + # gh-24553 + def test_c_double(self): + out = self.module.coddity.c_add(1, 2) + exp_out = 3 + assert out == exp_out + + # gh-9693 + def test_bindc_function(self): + out = self.module.coddity.wat(1, 20) + exp_out = 8 + assert out == exp_out From 19c359163a862f4d95579c468743efe3386e94fd Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 26 Aug 2023 20:31:03 +0000 Subject: [PATCH 091/120] DOC: Add a release note --- doc/release/upcoming_changes/24555.new_feature.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 doc/release/upcoming_changes/24555.new_feature.rst diff --git a/doc/release/upcoming_changes/24555.new_feature.rst b/doc/release/upcoming_changes/24555.new_feature.rst new file mode 100644 index 000000000000..770754f4f29f --- /dev/null +++ b/doc/release/upcoming_changes/24555.new_feature.rst @@ -0,0 +1,10 @@ +``bind(c)`` support for ``f2py`` +-------------------------------- +Both functions and subroutines can be annotated with ``bind(c)``. ``f2py`` will +handle both the correct type mapping, and preserve the unique label for other +``C`` interfaces. + +**Note:** ``bind(c, name = 'routine_name_other_than_fortran_routine')`` is not + honored by the ``f2py`` bindings by design, since ``bind(c)`` with the ``name`` + is meant to guarantee only the same name in ``C`` and ``Fortran``, not in + ``Python`` and ``Fortran``. From 71c1a52967490c64a7e85e69eacaf13be559056e Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 27 Aug 2023 12:21:22 +0000 Subject: [PATCH 092/120] MAINT: Rename docstring, modify tests Co-authored-by: mattip --- numpy/f2py/auxfuncs.py | 2 +- numpy/f2py/tests/src/isocintrin/isoCtests.f90 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index db89f949ff6a..c0864b5bc613 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -892,7 +892,7 @@ def applyrules(rules, d, var={}): return ret def deep_merge(dict1, dict2): - """Deep merge two dictionaries and return a new dictionary. + """Recursively merge two dictionaries into a new dictionary. Parameters: - dict1: The base dictionary. diff --git a/numpy/f2py/tests/src/isocintrin/isoCtests.f90 b/numpy/f2py/tests/src/isocintrin/isoCtests.f90 index 4d53ef5c7d28..42db6cccc14d 100644 --- a/numpy/f2py/tests/src/isocintrin/isoCtests.f90 +++ b/numpy/f2py/tests/src/isocintrin/isoCtests.f90 @@ -8,7 +8,7 @@ subroutine c_add(a, b, c) bind(c, name="c_add") c = a + b end subroutine c_add ! gh-9693 - function wat(x, y) result(z) bind(c, name='wat') + function wat(x, y) result(z) bind(c) integer(c_int), intent(in) :: x, y integer(c_int) :: z From 832f2620399c10a21957e703bae7786f1208fb81 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sun, 3 Sep 2023 17:30:27 +0200 Subject: [PATCH 093/120] TYP: Allow `binary_repr` to accept any object implementing `__index__` --- numpy/core/numeric.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi index eccfb237bbe9..fc10bb88f54a 100644 --- a/numpy/core/numeric.pyi +++ b/numpy/core/numeric.pyi @@ -600,7 +600,7 @@ def isscalar(element: object) -> TypeGuard[ generic | bool | int | float | complex | str | bytes | memoryview ]: ... -def binary_repr(num: int, width: None | int = ...) -> str: ... +def binary_repr(num: SupportsIndex, width: None | int = ...) -> str: ... 
def base_repr( number: SupportsAbs[float], From 21e9b6c943cf439be5fdfba15e9d485dc8fde9dc Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sun, 3 Sep 2023 00:13:51 +0200 Subject: [PATCH 094/120] TYP: Explicitly declare `dtype` and `generic` hashable --- numpy/__init__.pyi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6f6acd893aea..32d084b6e137 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -682,6 +682,7 @@ _ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"] @final class dtype(Generic[_DTypeScalar_co]): names: None | tuple[builtins.str, ...] + def __hash__(self) -> int: ... # Overload for subclass of generic @overload def __new__( @@ -2554,6 +2555,7 @@ class generic(_ArrayOrScalarCommon): def __array__(self: _ScalarType, dtype: None = ..., /) -> ndarray[Any, _dtype[_ScalarType]]: ... @overload def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... + def __hash__(self) -> int: ... @property def base(self) -> None: ... @property From 76900632b84c27753d0322cd3122e57f773e0dbc Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 31 Aug 2023 20:13:57 +0200 Subject: [PATCH 095/120] TYP: Refactor the typing reveal-test cases using `typing.assert_type` --- numpy/random/mtrand.pyi | 3 +- numpy/typing/tests/data/reveal/arithmetic.pyi | 906 +++--- .../tests/data/reveal/array_constructors.pyi | 380 +-- numpy/typing/tests/data/reveal/arraypad.pyi | 14 +- numpy/typing/tests/data/reveal/arrayprint.pyi | 30 +- .../typing/tests/data/reveal/arraysetops.pyi | 106 +- .../typing/tests/data/reveal/arrayterator.pyi | 37 +- .../typing/tests/data/reveal/bitwise_ops.pyi | 226 +- numpy/typing/tests/data/reveal/char.pyi | 213 +- numpy/typing/tests/data/reveal/chararray.pyi | 190 +- .../typing/tests/data/reveal/comparisons.pyi | 437 +-- numpy/typing/tests/data/reveal/constants.pyi | 101 +- numpy/typing/tests/data/reveal/ctypeslib.pyi | 127 +- numpy/typing/tests/data/reveal/datasource.pyi | 26 +- numpy/typing/tests/data/reveal/dtype.pyi | 116 +- numpy/typing/tests/data/reveal/einsumfunc.pyi | 55 +- numpy/typing/tests/data/reveal/emath.pyi | 96 +- .../tests/data/reveal/false_positives.pyi | 12 +- numpy/typing/tests/data/reveal/fft.pyi | 52 +- numpy/typing/tests/data/reveal/flatiter.pyi | 38 +- .../typing/tests/data/reveal/fromnumeric.pyi | 554 ++-- numpy/typing/tests/data/reveal/getlimits.pyi | 79 +- numpy/typing/tests/data/reveal/histograms.pyi | 30 +- .../typing/tests/data/reveal/index_tricks.pyi | 94 +- .../tests/data/reveal/lib_function_base.pyi | 316 +- .../tests/data/reveal/lib_polynomial.pyi | 235 +- numpy/typing/tests/data/reveal/lib_utils.pyi | 41 +- .../typing/tests/data/reveal/lib_version.pyi | 33 +- numpy/typing/tests/data/reveal/linalg.pyi | 183 +- numpy/typing/tests/data/reveal/matrix.pyi | 113 +- numpy/typing/tests/data/reveal/memmap.pyi | 29 +- numpy/typing/tests/data/reveal/mod.pyi | 225 +- numpy/typing/tests/data/reveal/modules.pyi | 90 +- numpy/typing/tests/data/reveal/multiarray.pyi | 164 +- .../tests/data/reveal/nbit_base_example.pyi | 18 +- .../tests/data/reveal/ndarray_conversion.pyi | 58 +- .../typing/tests/data/reveal/ndarray_misc.pyi | 402 +-- .../reveal/ndarray_shape_manipulation.pyi | 45 +- numpy/typing/tests/data/reveal/nditer.pyi | 81 +- .../tests/data/reveal/nested_sequence.pyi | 24 +- numpy/typing/tests/data/reveal/npyio.pyi | 142 +- numpy/typing/tests/data/reveal/numeric.pyi | 222 +- .../typing/tests/data/reveal/numerictypes.pyi | 104 +- numpy/typing/tests/data/reveal/random.pyi | 2887 +++++++++-------- 
numpy/typing/tests/data/reveal/rec.pyi | 249 +- numpy/typing/tests/data/reveal/scalars.pyi | 270 +- numpy/typing/tests/data/reveal/shape_base.pyi | 78 +- .../tests/data/reveal/stride_tricks.pyi | 38 +- numpy/typing/tests/data/reveal/testing.pyi | 241 +- .../typing/tests/data/reveal/twodim_base.pyi | 121 +- numpy/typing/tests/data/reveal/type_check.pyi | 114 +- .../typing/tests/data/reveal/ufunc_config.pyi | 42 +- numpy/typing/tests/data/reveal/ufunclike.pyi | 42 +- numpy/typing/tests/data/reveal/ufuncs.pyi | 120 +- .../tests/data/reveal/warnings_and_errors.pyi | 21 +- 55 files changed, 5625 insertions(+), 5045 deletions(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 271cb9787470..b5f600652b54 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -1,3 +1,4 @@ +import builtins from collections.abc import Callable from typing import Any, Union, overload, Literal @@ -224,7 +225,7 @@ class RandomState: size: None | _ShapeLike = ..., dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., ) -> ndarray[Any, dtype[uint]]: ... - def bytes(self, length: int) -> bytes: ... + def bytes(self, length: int) -> builtins.bytes: ... @overload def choice( self, diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index fe983cca1f2b..5725e5c4da73 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -1,7 +1,14 @@ +import sys from typing import Any import numpy as np -from numpy._typing import NDArray, _128Bit +import numpy.typing as npt +from numpy._typing import _32Bit,_64Bit, _128Bit + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type # Can't directly import `np.float128` as it is not available on all platforms f16: np.floating[_128Bit] @@ -26,15 +33,15 @@ c = complex() f = float() i = int() -AR_b: np.ndarray[Any, np.dtype[np.bool_]] -AR_u: np.ndarray[Any, np.dtype[np.uint32]] -AR_i: np.ndarray[Any, np.dtype[np.int64]] -AR_f: np.ndarray[Any, np.dtype[np.float64]] -AR_c: np.ndarray[Any, np.dtype[np.complex128]] -AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] -AR_M: np.ndarray[Any, np.dtype[np.datetime64]] -AR_O: np.ndarray[Any, np.dtype[np.object_]] -AR_number: NDArray[np.number[Any]] +AR_b: npt.NDArray[np.bool_] +AR_u: npt.NDArray[np.uint32] +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.float64] +AR_c: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] +AR_number: npt.NDArray[np.number[Any]] AR_LIKE_b: list[bool] AR_LIKE_u: list[np.uint32] @@ -47,464 +54,463 @@ AR_LIKE_O: list[np.object_] # Array subtraction -reveal_type(AR_number - AR_number) # E: ndarray[Any, dtype[number[Any]]] - -reveal_type(AR_b - AR_LIKE_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(AR_b - AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_b - AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_b - AR_LIKE_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_b - AR_LIKE_m) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_b - AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_u - AR_b) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(AR_LIKE_i - AR_b) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_LIKE_f - AR_b) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_c - AR_b) # E: ndarray[Any, 
dtype[complexfloating[Any, Any]]] -reveal_type(AR_LIKE_m - AR_b) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_LIKE_M - AR_b) # E: ndarray[Any, dtype[datetime64]] -reveal_type(AR_LIKE_O - AR_b) # E: Any - -reveal_type(AR_u - AR_LIKE_b) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(AR_u - AR_LIKE_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(AR_u - AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_u - AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_u - AR_LIKE_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_u - AR_LIKE_m) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_u - AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_b - AR_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(AR_LIKE_u - AR_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(AR_LIKE_i - AR_u) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_LIKE_f - AR_u) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_c - AR_u) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_LIKE_m - AR_u) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_LIKE_M - AR_u) # E: ndarray[Any, dtype[datetime64]] -reveal_type(AR_LIKE_O - AR_u) # E: Any - -reveal_type(AR_i - AR_LIKE_b) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_i - AR_LIKE_u) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_i - AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_i - AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_i - AR_LIKE_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_i - AR_LIKE_m) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_i - AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_b - AR_i) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_LIKE_u - AR_i) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_LIKE_i - AR_i) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_LIKE_f - AR_i) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_c - AR_i) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_LIKE_m - AR_i) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_LIKE_M - AR_i) # E: ndarray[Any, dtype[datetime64]] -reveal_type(AR_LIKE_O - AR_i) # E: Any - -reveal_type(AR_f - AR_LIKE_b) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_f - AR_LIKE_u) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_f - AR_LIKE_i) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_f - AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_f - AR_LIKE_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_f - AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_b - AR_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_u - AR_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_i - AR_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_f - AR_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_c - AR_f) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_LIKE_O - AR_f) # E: Any - -reveal_type(AR_c - AR_LIKE_b) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_c - AR_LIKE_u) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_c - AR_LIKE_i) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_c - AR_LIKE_f) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_c - AR_LIKE_c) # E: ndarray[Any, dtype[complexfloating[Any, 
Any]]] -reveal_type(AR_c - AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_b - AR_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_LIKE_u - AR_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_LIKE_i - AR_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_LIKE_f - AR_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_LIKE_c - AR_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(AR_LIKE_O - AR_c) # E: Any - -reveal_type(AR_m - AR_LIKE_b) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_m - AR_LIKE_u) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_m - AR_LIKE_i) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_m - AR_LIKE_m) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_m - AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_b - AR_m) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_LIKE_u - AR_m) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_LIKE_i - AR_m) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_LIKE_m - AR_m) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_LIKE_M - AR_m) # E: ndarray[Any, dtype[datetime64]] -reveal_type(AR_LIKE_O - AR_m) # E: Any - -reveal_type(AR_M - AR_LIKE_b) # E: ndarray[Any, dtype[datetime64]] -reveal_type(AR_M - AR_LIKE_u) # E: ndarray[Any, dtype[datetime64]] -reveal_type(AR_M - AR_LIKE_i) # E: ndarray[Any, dtype[datetime64]] -reveal_type(AR_M - AR_LIKE_m) # E: ndarray[Any, dtype[datetime64]] -reveal_type(AR_M - AR_LIKE_M) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_M - AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_M - AR_M) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_LIKE_O - AR_M) # E: Any - -reveal_type(AR_O - AR_LIKE_b) # E: Any -reveal_type(AR_O - AR_LIKE_u) # E: Any -reveal_type(AR_O - AR_LIKE_i) # E: Any -reveal_type(AR_O - AR_LIKE_f) # E: Any -reveal_type(AR_O - AR_LIKE_c) # E: Any -reveal_type(AR_O - AR_LIKE_m) # E: Any -reveal_type(AR_O - AR_LIKE_M) # E: Any -reveal_type(AR_O - AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_b - AR_O) # E: Any -reveal_type(AR_LIKE_u - AR_O) # E: Any -reveal_type(AR_LIKE_i - AR_O) # E: Any -reveal_type(AR_LIKE_f - AR_O) # E: Any -reveal_type(AR_LIKE_c - AR_O) # E: Any -reveal_type(AR_LIKE_m - AR_O) # E: Any -reveal_type(AR_LIKE_M - AR_O) # E: Any -reveal_type(AR_LIKE_O - AR_O) # E: Any +assert_type(AR_number - AR_number, npt.NDArray[np.number[Any]]) + +assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_b - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_b - AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_b - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_b - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_b - AR_LIKE_O, Any) + +assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_i - AR_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_f - AR_b, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_c - AR_b, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_m - AR_b, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_b, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_b, Any) + +assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_u - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_u - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_u - AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_u - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) 
+assert_type(AR_u - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_u - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_u - AR_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_i - AR_u, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_f - AR_u, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_c - AR_u, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_m - AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_u, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_u, Any) + +assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_i - AR_LIKE_u, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_i - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_i - AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_i - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_i - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_i - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_u - AR_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_i - AR_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_f - AR_i, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_c - AR_i, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_m - AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_i, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_i, Any) + +assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.floating[Any]]) +assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.floating[Any]]) +assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.floating[Any]]) +assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_f - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_f - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_c - AR_f, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_O - AR_f, Any) + +assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_c - AR_LIKE_f, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_c - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_O - AR_c, Any) + +assert_type(AR_m - AR_LIKE_b, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_u - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_i - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_m - AR_m, 
npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_m, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_m, Any) + +assert_type(AR_M - AR_LIKE_b, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_u, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_i, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_m, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_M, npt.NDArray[np.timedelta64]) +assert_type(AR_M - AR_LIKE_O, Any) + +assert_type(AR_LIKE_M - AR_M, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O - AR_M, Any) + +assert_type(AR_O - AR_LIKE_b, Any) +assert_type(AR_O - AR_LIKE_u, Any) +assert_type(AR_O - AR_LIKE_i, Any) +assert_type(AR_O - AR_LIKE_f, Any) +assert_type(AR_O - AR_LIKE_c, Any) +assert_type(AR_O - AR_LIKE_m, Any) +assert_type(AR_O - AR_LIKE_M, Any) +assert_type(AR_O - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_O, Any) +assert_type(AR_LIKE_u - AR_O, Any) +assert_type(AR_LIKE_i - AR_O, Any) +assert_type(AR_LIKE_f - AR_O, Any) +assert_type(AR_LIKE_c - AR_O, Any) +assert_type(AR_LIKE_m - AR_O, Any) +assert_type(AR_LIKE_M - AR_O, Any) +assert_type(AR_LIKE_O - AR_O, Any) # Array floor division -reveal_type(AR_b // AR_LIKE_b) # E: ndarray[Any, dtype[{int8}]] -reveal_type(AR_b // AR_LIKE_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(AR_b // AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_b // AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_b // AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_b // AR_b) # E: ndarray[Any, dtype[{int8}]] -reveal_type(AR_LIKE_u // AR_b) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(AR_LIKE_i // AR_b) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_LIKE_f // AR_b) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_O // AR_b) # E: Any - -reveal_type(AR_u // AR_LIKE_b) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(AR_u // AR_LIKE_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(AR_u // AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_u // AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_u // AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_b // AR_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(AR_LIKE_u // AR_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(AR_LIKE_i // AR_u) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_LIKE_f // AR_u) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_m // AR_u) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_LIKE_O // AR_u) # E: Any - -reveal_type(AR_i // AR_LIKE_b) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_i // AR_LIKE_u) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_i // AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_i // AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_i // AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_b // AR_i) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_LIKE_u // AR_i) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_LIKE_i // AR_i) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(AR_LIKE_f // AR_i) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_m // AR_i) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_LIKE_O // AR_i) # E: Any - -reveal_type(AR_f // AR_LIKE_b) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_f // AR_LIKE_u) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_f // AR_LIKE_i) # E: 
ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_f // AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_f // AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_b // AR_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_u // AR_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_i // AR_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_f // AR_f) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(AR_LIKE_m // AR_f) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_LIKE_O // AR_f) # E: Any - -reveal_type(AR_m // AR_LIKE_u) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_m // AR_LIKE_i) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_m // AR_LIKE_f) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(AR_m // AR_LIKE_m) # E: ndarray[Any, dtype[{int64}]] -reveal_type(AR_m // AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_m // AR_m) # E: ndarray[Any, dtype[{int64}]] -reveal_type(AR_LIKE_O // AR_m) # E: Any - -reveal_type(AR_O // AR_LIKE_b) # E: Any -reveal_type(AR_O // AR_LIKE_u) # E: Any -reveal_type(AR_O // AR_LIKE_i) # E: Any -reveal_type(AR_O // AR_LIKE_f) # E: Any -reveal_type(AR_O // AR_LIKE_m) # E: Any -reveal_type(AR_O // AR_LIKE_M) # E: Any -reveal_type(AR_O // AR_LIKE_O) # E: Any - -reveal_type(AR_LIKE_b // AR_O) # E: Any -reveal_type(AR_LIKE_u // AR_O) # E: Any -reveal_type(AR_LIKE_i // AR_O) # E: Any -reveal_type(AR_LIKE_f // AR_O) # E: Any -reveal_type(AR_LIKE_m // AR_O) # E: Any -reveal_type(AR_LIKE_M // AR_O) # E: Any -reveal_type(AR_LIKE_O // AR_O) # E: Any +assert_type(AR_b // AR_LIKE_b, npt.NDArray[np.int8]) +assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_b // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_b // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_b // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_b, npt.NDArray[np.int8]) +assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_i // AR_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_f // AR_b, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_O // AR_b, Any) + +assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_u // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_u // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_u // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_u // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_u // AR_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_i // AR_u, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_f // AR_u, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_m // AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O // AR_u, Any) + +assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_i // AR_LIKE_u, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_i // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_i // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_i // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_u // AR_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_i // AR_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_f // AR_i, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_m // AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O // AR_i, Any) + +assert_type(AR_f // AR_LIKE_b, 
npt.NDArray[np.floating[Any]]) +assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.floating[Any]]) +assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.floating[Any]]) +assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_f // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_f // AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_m // AR_f, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O // AR_f, Any) + +assert_type(AR_m // AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m // AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m // AR_LIKE_f, npt.NDArray[np.timedelta64]) +assert_type(AR_m // AR_LIKE_m, npt.NDArray[np.int64]) +assert_type(AR_m // AR_LIKE_O, Any) + +assert_type(AR_LIKE_m // AR_m, npt.NDArray[np.int64]) +assert_type(AR_LIKE_O // AR_m, Any) + +assert_type(AR_O // AR_LIKE_b, Any) +assert_type(AR_O // AR_LIKE_u, Any) +assert_type(AR_O // AR_LIKE_i, Any) +assert_type(AR_O // AR_LIKE_f, Any) +assert_type(AR_O // AR_LIKE_m, Any) +assert_type(AR_O // AR_LIKE_M, Any) +assert_type(AR_O // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_O, Any) +assert_type(AR_LIKE_u // AR_O, Any) +assert_type(AR_LIKE_i // AR_O, Any) +assert_type(AR_LIKE_f // AR_O, Any) +assert_type(AR_LIKE_m // AR_O, Any) +assert_type(AR_LIKE_M // AR_O, Any) +assert_type(AR_LIKE_O // AR_O, Any) # unary ops -reveal_type(-f16) # E: {float128} -reveal_type(-c16) # E: {complex128} -reveal_type(-c8) # E: {complex64} -reveal_type(-f8) # E: {float64} -reveal_type(-f4) # E: {float32} -reveal_type(-i8) # E: {int64} -reveal_type(-i4) # E: {int32} -reveal_type(-u8) # E: {uint64} -reveal_type(-u4) # E: {uint32} -reveal_type(-td) # E: timedelta64 -reveal_type(-AR_f) # E: Any - -reveal_type(+f16) # E: {float128} -reveal_type(+c16) # E: {complex128} -reveal_type(+c8) # E: {complex64} -reveal_type(+f8) # E: {float64} -reveal_type(+f4) # E: {float32} -reveal_type(+i8) # E: {int64} -reveal_type(+i4) # E: {int32} -reveal_type(+u8) # E: {uint64} -reveal_type(+u4) # E: {uint32} -reveal_type(+td) # E: timedelta64 -reveal_type(+AR_f) # E: Any - -reveal_type(abs(f16)) # E: {float128} -reveal_type(abs(c16)) # E: {float64} -reveal_type(abs(c8)) # E: {float32} -reveal_type(abs(f8)) # E: {float64} -reveal_type(abs(f4)) # E: {float32} -reveal_type(abs(i8)) # E: {int64} -reveal_type(abs(i4)) # E: {int32} -reveal_type(abs(u8)) # E: {uint64} -reveal_type(abs(u4)) # E: {uint32} -reveal_type(abs(td)) # E: timedelta64 -reveal_type(abs(b_)) # E: bool_ -reveal_type(abs(AR_f)) # E: Any +assert_type(-f16, np.floating[_128Bit]) +assert_type(-c16, np.complex128) +assert_type(-c8, np.complex64) +assert_type(-f8, np.float64) +assert_type(-f4, np.float32) +assert_type(-i8, np.int64) +assert_type(-i4, np.int32) +assert_type(-u8, np.uint64) +assert_type(-u4, np.uint32) +assert_type(-td, np.timedelta64) +assert_type(-AR_f, npt.NDArray[np.float64]) + +assert_type(+f16, np.floating[_128Bit]) +assert_type(+c16, np.complex128) +assert_type(+c8, np.complex64) +assert_type(+f8, np.float64) +assert_type(+f4, np.float32) +assert_type(+i8, np.int64) +assert_type(+i4, np.int32) +assert_type(+u8, np.uint64) +assert_type(+u4, np.uint32) +assert_type(+td, np.timedelta64) +assert_type(+AR_f, npt.NDArray[np.float64]) + +assert_type(abs(f16), np.floating[_128Bit]) +assert_type(abs(c16), np.float64) +assert_type(abs(c8), np.float32) +assert_type(abs(f8), np.float64) 
+assert_type(abs(f4), np.float32) +assert_type(abs(i8), np.int64) +assert_type(abs(i4), np.int32) +assert_type(abs(u8), np.uint64) +assert_type(abs(u4), np.uint32) +assert_type(abs(td), np.timedelta64) +assert_type(abs(b_), np.bool_) # Time structures -reveal_type(dt + td) # E: datetime64 -reveal_type(dt + i) # E: datetime64 -reveal_type(dt + i4) # E: datetime64 -reveal_type(dt + i8) # E: datetime64 -reveal_type(dt - dt) # E: timedelta64 -reveal_type(dt - i) # E: datetime64 -reveal_type(dt - i4) # E: datetime64 -reveal_type(dt - i8) # E: datetime64 - -reveal_type(td + td) # E: timedelta64 -reveal_type(td + i) # E: timedelta64 -reveal_type(td + i4) # E: timedelta64 -reveal_type(td + i8) # E: timedelta64 -reveal_type(td - td) # E: timedelta64 -reveal_type(td - i) # E: timedelta64 -reveal_type(td - i4) # E: timedelta64 -reveal_type(td - i8) # E: timedelta64 -reveal_type(td / f) # E: timedelta64 -reveal_type(td / f4) # E: timedelta64 -reveal_type(td / f8) # E: timedelta64 -reveal_type(td / td) # E: {float64} -reveal_type(td // td) # E: {int64} +assert_type(dt + td, np.datetime64) +assert_type(dt + i, np.datetime64) +assert_type(dt + i4, np.datetime64) +assert_type(dt + i8, np.datetime64) +assert_type(dt - dt, np.timedelta64) +assert_type(dt - i, np.datetime64) +assert_type(dt - i4, np.datetime64) +assert_type(dt - i8, np.datetime64) + +assert_type(td + td, np.timedelta64) +assert_type(td + i, np.timedelta64) +assert_type(td + i4, np.timedelta64) +assert_type(td + i8, np.timedelta64) +assert_type(td - td, np.timedelta64) +assert_type(td - i, np.timedelta64) +assert_type(td - i4, np.timedelta64) +assert_type(td - i8, np.timedelta64) +assert_type(td / f, np.timedelta64) +assert_type(td / f4, np.timedelta64) +assert_type(td / f8, np.timedelta64) +assert_type(td / td, np.float64) +assert_type(td // td, np.int64) # boolean -reveal_type(b_ / b) # E: {float64} -reveal_type(b_ / b_) # E: {float64} -reveal_type(b_ / i) # E: {float64} -reveal_type(b_ / i8) # E: {float64} -reveal_type(b_ / i4) # E: {float64} -reveal_type(b_ / u8) # E: {float64} -reveal_type(b_ / u4) # E: {float64} -reveal_type(b_ / f) # E: {float64} -reveal_type(b_ / f16) # E: {float128} -reveal_type(b_ / f8) # E: {float64} -reveal_type(b_ / f4) # E: {float32} -reveal_type(b_ / c) # E: {complex128} -reveal_type(b_ / c16) # E: {complex128} -reveal_type(b_ / c8) # E: {complex64} - -reveal_type(b / b_) # E: {float64} -reveal_type(b_ / b_) # E: {float64} -reveal_type(i / b_) # E: {float64} -reveal_type(i8 / b_) # E: {float64} -reveal_type(i4 / b_) # E: {float64} -reveal_type(u8 / b_) # E: {float64} -reveal_type(u4 / b_) # E: {float64} -reveal_type(f / b_) # E: {float64} -reveal_type(f16 / b_) # E: {float128} -reveal_type(f8 / b_) # E: {float64} -reveal_type(f4 / b_) # E: {float32} -reveal_type(c / b_) # E: {complex128} -reveal_type(c16 / b_) # E: {complex128} -reveal_type(c8 / b_) # E: {complex64} +assert_type(b_ / b, np.float64) +assert_type(b_ / b_, np.float64) +assert_type(b_ / i, np.float64) +assert_type(b_ / i8, np.float64) +assert_type(b_ / i4, np.float64) +assert_type(b_ / u8, np.float64) +assert_type(b_ / u4, np.float64) +assert_type(b_ / f, np.float64) +assert_type(b_ / f16, np.floating[_128Bit]) +assert_type(b_ / f8, np.float64) +assert_type(b_ / f4, np.float32) +assert_type(b_ / c, np.complex128) +assert_type(b_ / c16, np.complex128) +assert_type(b_ / c8, np.complex64) + +assert_type(b / b_, np.float64) +assert_type(b_ / b_, np.float64) +assert_type(i / b_, np.float64) +assert_type(i8 / b_, np.float64) +assert_type(i4 / b_, 
np.float64) +assert_type(u8 / b_, np.float64) +assert_type(u4 / b_, np.float64) +assert_type(f / b_, np.float64) +assert_type(f16 / b_, np.floating[_128Bit]) +assert_type(f8 / b_, np.float64) +assert_type(f4 / b_, np.float32) +assert_type(c / b_, np.complex128) +assert_type(c16 / b_, np.complex128) +assert_type(c8 / b_, np.complex64) # Complex -reveal_type(c16 + f16) # E: complexfloating[Union[_64Bit, _128Bit], Union[_64Bit, _128Bit]] -reveal_type(c16 + c16) # E: {complex128} -reveal_type(c16 + f8) # E: {complex128} -reveal_type(c16 + i8) # E: {complex128} -reveal_type(c16 + c8) # E: complexfloating[Union[_64Bit, _32Bit], Union[_64Bit, _32Bit]] -reveal_type(c16 + f4) # E: complexfloating[Union[_64Bit, _32Bit], Union[_64Bit, _32Bit]] -reveal_type(c16 + i4) # E: complexfloating[Union[_64Bit, _32Bit], Union[_64Bit, _32Bit]] -reveal_type(c16 + b_) # E: {complex128} -reveal_type(c16 + b) # E: {complex128} -reveal_type(c16 + c) # E: {complex128} -reveal_type(c16 + f) # E: {complex128} -reveal_type(c16 + AR_f) # E: Any - -reveal_type(f16 + c16) # E: complexfloating[Union[_64Bit, _128Bit], Union[_64Bit, _128Bit]] -reveal_type(c16 + c16) # E: {complex128} -reveal_type(f8 + c16) # E: {complex128} -reveal_type(i8 + c16) # E: {complex128} -reveal_type(c8 + c16) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] -reveal_type(f4 + c16) # E: complexfloating[Union[_64Bit, _32Bit], Union[_64Bit, _32Bit]] -reveal_type(i4 + c16) # E: complexfloating[Union[_64Bit, _32Bit], Union[_64Bit, _32Bit]] -reveal_type(b_ + c16) # E: {complex128} -reveal_type(b + c16) # E: {complex128} -reveal_type(c + c16) # E: {complex128} -reveal_type(f + c16) # E: {complex128} -reveal_type(AR_f + c16) # E: Any - -reveal_type(c8 + f16) # E: complexfloating[Union[_32Bit, _128Bit], Union[_32Bit, _128Bit]] -reveal_type(c8 + c16) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] -reveal_type(c8 + f8) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] -reveal_type(c8 + i8) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] -reveal_type(c8 + c8) # E: {complex64} -reveal_type(c8 + f4) # E: {complex64} -reveal_type(c8 + i4) # E: {complex64} -reveal_type(c8 + b_) # E: {complex64} -reveal_type(c8 + b) # E: {complex64} -reveal_type(c8 + c) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] -reveal_type(c8 + f) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] -reveal_type(c8 + AR_f) # E: Any - -reveal_type(f16 + c8) # E: complexfloating[Union[_32Bit, _128Bit], Union[_32Bit, _128Bit]] -reveal_type(c16 + c8) # E: complexfloating[Union[_64Bit, _32Bit], Union[_64Bit, _32Bit]] -reveal_type(f8 + c8) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] -reveal_type(i8 + c8) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] -reveal_type(c8 + c8) # E: {complex64} -reveal_type(f4 + c8) # E: {complex64} -reveal_type(i4 + c8) # E: {complex64} -reveal_type(b_ + c8) # E: {complex64} -reveal_type(b + c8) # E: {complex64} -reveal_type(c + c8) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] -reveal_type(f + c8) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] -reveal_type(AR_f + c8) # E: Any +assert_type(c16 + f16, np.complexfloating[_64Bit | _128Bit, _64Bit | _128Bit]) +assert_type(c16 + c16, np.complex128) +assert_type(c16 + f8, np.complex128) +assert_type(c16 + i8, np.complex128) +assert_type(c16 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c16 + f4, 
np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c16 + i4, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c16 + b_, np.complex128) +assert_type(c16 + b, np.complex128) +assert_type(c16 + c, np.complex128) +assert_type(c16 + f, np.complex128) +assert_type(c16 + AR_f, npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(f16 + c16, np.complexfloating[_64Bit | _128Bit, _64Bit | _128Bit]) +assert_type(c16 + c16, np.complex128) +assert_type(f8 + c16, np.complex128) +assert_type(i8 + c16, np.complex128) +assert_type(c8 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(f4 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(i4 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(b_ + c16, np.complex128) +assert_type(b + c16, np.complex128) +assert_type(c + c16, np.complex128) +assert_type(f + c16, np.complex128) +assert_type(AR_f + c16, npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(c8 + f16, np.complexfloating[_32Bit | _128Bit, _32Bit | _128Bit]) +assert_type(c8 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c8 + f8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c8 + i8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c8 + c8, np.complex64) +assert_type(c8 + f4, np.complex64) +assert_type(c8 + i4, np.complex64) +assert_type(c8 + b_, np.complex64) +assert_type(c8 + b, np.complex64) +assert_type(c8 + c, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c8 + f, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c8 + AR_f, npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(f16 + c8, np.complexfloating[_32Bit | _128Bit, _32Bit | _128Bit]) +assert_type(c16 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(f8 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(i8 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c8 + c8, np.complex64) +assert_type(f4 + c8, np.complex64) +assert_type(i4 + c8, np.complex64) +assert_type(b_ + c8, np.complex64) +assert_type(b + c8, np.complex64) +assert_type(c + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(f + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(AR_f + c8, npt.NDArray[np.complexfloating[Any, Any]]) # Float -reveal_type(f8 + f16) # E: floating[Union[_64Bit, _128Bit]] -reveal_type(f8 + f8) # E: {float64} -reveal_type(f8 + i8) # E: {float64} -reveal_type(f8 + f4) # E: floating[Union[_64Bit, _32Bit]] -reveal_type(f8 + i4) # E: floating[Union[_64Bit, _32Bit]] -reveal_type(f8 + b_) # E: {float64} -reveal_type(f8 + b) # E: {float64} -reveal_type(f8 + c) # E: {complex128} -reveal_type(f8 + f) # E: {float64} -reveal_type(f8 + AR_f) # E: Any - -reveal_type(f16 + f8) # E: floating[Union[_128Bit, _64Bit]] -reveal_type(f8 + f8) # E: {float64} -reveal_type(i8 + f8) # E: {float64} -reveal_type(f4 + f8) # E: floating[Union[_32Bit, _64Bit]] -reveal_type(i4 + f8) # E: floating[Union[_64Bit, _32Bit]] -reveal_type(b_ + f8) # E: {float64} -reveal_type(b + f8) # E: {float64} -reveal_type(c + f8) # E: {complex128} -reveal_type(f + f8) # E: {float64} -reveal_type(AR_f + f8) # E: Any - -reveal_type(f4 + f16) # E: floating[Union[_32Bit, _128Bit]] -reveal_type(f4 + f8) # E: floating[Union[_32Bit, _64Bit]] -reveal_type(f4 + i8) # E: floating[Union[_32Bit, _64Bit]] -reveal_type(f4 + f4) # E: {float32} -reveal_type(f4 + i4) # E: {float32} -reveal_type(f4 
+ b_) # E: {float32} -reveal_type(f4 + b) # E: {float32} -reveal_type(f4 + c) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] -reveal_type(f4 + f) # E: floating[Union[_32Bit, _64Bit]] -reveal_type(f4 + AR_f) # E: Any - -reveal_type(f16 + f4) # E: floating[Union[_128Bit, _32Bit]] -reveal_type(f8 + f4) # E: floating[Union[_64Bit, _32Bit]] -reveal_type(i8 + f4) # E: floating[Union[_32Bit, _64Bit]] -reveal_type(f4 + f4) # E: {float32} -reveal_type(i4 + f4) # E: {float32} -reveal_type(b_ + f4) # E: {float32} -reveal_type(b + f4) # E: {float32} -reveal_type(c + f4) # E: complexfloating[Union[_32Bit, _64Bit], Union[_32Bit, _64Bit]] -reveal_type(f + f4) # E: floating[Union[_32Bit, _64Bit]] -reveal_type(AR_f + f4) # E: Any +assert_type(f8 + f16, np.floating[_64Bit | _128Bit]) +assert_type(f8 + f8, np.float64) +assert_type(f8 + i8, np.float64) +assert_type(f8 + f4, np.floating[_32Bit | _64Bit]) +assert_type(f8 + i4, np.floating[_32Bit | _64Bit]) +assert_type(f8 + b_, np.float64) +assert_type(f8 + b, np.float64) +assert_type(f8 + c, np.complex128) +assert_type(f8 + f, np.float64) +assert_type(f8 + AR_f, npt.NDArray[np.floating[Any]]) + +assert_type(f16 + f8, np.floating[_64Bit | _128Bit]) +assert_type(f8 + f8, np.float64) +assert_type(i8 + f8, np.float64) +assert_type(f4 + f8, np.floating[_32Bit | _64Bit]) +assert_type(i4 + f8, np.floating[_32Bit | _64Bit]) +assert_type(b_ + f8, np.float64) +assert_type(b + f8, np.float64) +assert_type(c + f8, np.complex128) +assert_type(f + f8, np.float64) +assert_type(AR_f + f8, npt.NDArray[np.floating[Any]]) + +assert_type(f4 + f16, np.floating[_32Bit | _128Bit]) +assert_type(f4 + f8, np.floating[_32Bit | _64Bit]) +assert_type(f4 + i8, np.floating[_32Bit | _64Bit]) +assert_type(f4 + f4, np.float32) +assert_type(f4 + i4, np.float32) +assert_type(f4 + b_, np.float32) +assert_type(f4 + b, np.float32) +assert_type(f4 + c, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(f4 + f, np.floating[_32Bit | _64Bit]) +assert_type(f4 + AR_f, npt.NDArray[np.floating[Any]]) + +assert_type(f16 + f4, np.floating[_32Bit | _128Bit]) +assert_type(f8 + f4, np.floating[_32Bit | _64Bit]) +assert_type(i8 + f4, np.floating[_32Bit | _64Bit]) +assert_type(f4 + f4, np.float32) +assert_type(i4 + f4, np.float32) +assert_type(b_ + f4, np.float32) +assert_type(b + f4, np.float32) +assert_type(c + f4, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(f + f4, np.floating[_32Bit | _64Bit]) +assert_type(AR_f + f4, npt.NDArray[np.floating[Any]]) # Int -reveal_type(i8 + i8) # E: {int64} -reveal_type(i8 + u8) # E: Any -reveal_type(i8 + i4) # E: signedinteger[Union[_64Bit, _32Bit]] -reveal_type(i8 + u4) # E: Any -reveal_type(i8 + b_) # E: {int64} -reveal_type(i8 + b) # E: {int64} -reveal_type(i8 + c) # E: {complex128} -reveal_type(i8 + f) # E: {float64} -reveal_type(i8 + AR_f) # E: Any - -reveal_type(u8 + u8) # E: {uint64} -reveal_type(u8 + i4) # E: Any -reveal_type(u8 + u4) # E: signedinteger[Union[_64Bit, _32Bit]] -reveal_type(u8 + b_) # E: {uint64} -reveal_type(u8 + b) # E: {uint64} -reveal_type(u8 + c) # E: {complex128} -reveal_type(u8 + f) # E: {float64} -reveal_type(u8 + AR_f) # E: Any - -reveal_type(i8 + i8) # E: {int64} -reveal_type(u8 + i8) # E: Any -reveal_type(i4 + i8) # E: signedinteger[Union[_32Bit, _64Bit]] -reveal_type(u4 + i8) # E: Any -reveal_type(b_ + i8) # E: {int64} -reveal_type(b + i8) # E: {int64} -reveal_type(c + i8) # E: {complex128} -reveal_type(f + i8) # E: {float64} -reveal_type(AR_f + i8) # E: Any - -reveal_type(u8 + u8) # 
E: {uint64} -reveal_type(i4 + u8) # E: Any -reveal_type(u4 + u8) # E: unsignedinteger[Union[_32Bit, _64Bit]] -reveal_type(b_ + u8) # E: {uint64} -reveal_type(b + u8) # E: {uint64} -reveal_type(c + u8) # E: {complex128} -reveal_type(f + u8) # E: {float64} -reveal_type(AR_f + u8) # E: Any - -reveal_type(i4 + i8) # E: signedinteger[Union[_32Bit, _64Bit]] -reveal_type(i4 + i4) # E: {int32} -reveal_type(i4 + b_) # E: {int32} -reveal_type(i4 + b) # E: {int32} -reveal_type(i4 + AR_f) # E: Any - -reveal_type(u4 + i8) # E: Any -reveal_type(u4 + i4) # E: Any -reveal_type(u4 + u8) # E: unsignedinteger[Union[_32Bit, _64Bit]] -reveal_type(u4 + u4) # E: {uint32} -reveal_type(u4 + b_) # E: {uint32} -reveal_type(u4 + b) # E: {uint32} -reveal_type(u4 + AR_f) # E: Any - -reveal_type(i8 + i4) # E: signedinteger[Union[_64Bit, _32Bit]] -reveal_type(i4 + i4) # E: {int32} -reveal_type(b_ + i4) # E: {int32} -reveal_type(b + i4) # E: {int32} -reveal_type(AR_f + i4) # E: Any - -reveal_type(i8 + u4) # E: Any -reveal_type(i4 + u4) # E: Any -reveal_type(u8 + u4) # E: unsignedinteger[Union[_64Bit, _32Bit]] -reveal_type(u4 + u4) # E: {uint32} -reveal_type(b_ + u4) # E: {uint32} -reveal_type(b + u4) # E: {uint32} -reveal_type(AR_f + u4) # E: Any +assert_type(i8 + i8, np.int64) +assert_type(i8 + u8, Any) +assert_type(i8 + i4, np.signedinteger[_32Bit | _64Bit]) +assert_type(i8 + u4, Any) +assert_type(i8 + b_, np.int64) +assert_type(i8 + b, np.int64) +assert_type(i8 + c, np.complex128) +assert_type(i8 + f, np.float64) +assert_type(i8 + AR_f, npt.NDArray[np.floating[Any]]) + +assert_type(u8 + u8, np.uint64) +assert_type(u8 + i4, Any) +assert_type(u8 + u4, np.signedinteger[_32Bit | _64Bit]) +assert_type(u8 + b_, np.uint64) +assert_type(u8 + b, np.uint64) +assert_type(u8 + c, np.complex128) +assert_type(u8 + f, np.float64) +assert_type(u8 + AR_f, npt.NDArray[np.floating[Any]]) + +assert_type(i8 + i8, np.int64) +assert_type(u8 + i8, Any) +assert_type(i4 + i8, np.signedinteger[_32Bit | _64Bit]) +assert_type(u4 + i8, Any) +assert_type(b_ + i8, np.int64) +assert_type(b + i8, np.int64) +assert_type(c + i8, np.complex128) +assert_type(f + i8, np.float64) +assert_type(AR_f + i8, npt.NDArray[np.floating[Any]]) + +assert_type(u8 + u8, np.uint64) +assert_type(i4 + u8, Any) +assert_type(u4 + u8, np.unsignedinteger[_32Bit | _64Bit]) +assert_type(b_ + u8, np.uint64) +assert_type(b + u8, np.uint64) +assert_type(c + u8, np.complex128) +assert_type(f + u8, np.float64) +assert_type(AR_f + u8, npt.NDArray[np.floating[Any]]) + +assert_type(i4 + i8, np.signedinteger[_32Bit | _64Bit]) +assert_type(i4 + i4, np.int32) +assert_type(i4 + b_, np.int32) +assert_type(i4 + b, np.int32) +assert_type(i4 + AR_f, npt.NDArray[np.floating[Any]]) + +assert_type(u4 + i8, Any) +assert_type(u4 + i4, Any) +assert_type(u4 + u8, np.unsignedinteger[_32Bit | _64Bit]) +assert_type(u4 + u4, np.uint32) +assert_type(u4 + b_, np.uint32) +assert_type(u4 + b, np.uint32) +assert_type(u4 + AR_f, npt.NDArray[np.floating[Any]]) + +assert_type(i8 + i4, np.signedinteger[_32Bit | _64Bit]) +assert_type(i4 + i4, np.int32) +assert_type(b_ + i4, np.int32) +assert_type(b + i4, np.int32) +assert_type(AR_f + i4, npt.NDArray[np.floating[Any]]) + +assert_type(i8 + u4, Any) +assert_type(i4 + u4, Any) +assert_type(u8 + u4, np.unsignedinteger[_32Bit | _64Bit]) +assert_type(u4 + u4, np.uint32) +assert_type(b_ + u4, np.uint32) +assert_type(b + u4, np.uint32) +assert_type(AR_f + u4, npt.NDArray[np.floating[Any]]) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi 
b/numpy/typing/tests/data/reveal/array_constructors.pyi index 61d3705b1fe2..3eb560aafd9e 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,3 +1,4 @@ +import sys from typing import Any, TypeVar from pathlib import Path from collections import deque @@ -5,6 +6,11 @@ from collections import deque import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + _SCT = TypeVar("_SCT", bound=np.generic, covariant=True) class SubClass(np.ndarray[Any, np.dtype[_SCT]]): ... @@ -17,191 +23,191 @@ C: list[int] def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ... -reveal_type(np.empty_like(A)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.empty_like(B)) # E: SubClass[{float64}] -reveal_type(np.empty_like([1, 1.0])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.empty_like(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.empty_like(A, dtype='c16')) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.array(A)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.array(B)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.array(B, subok=True)) # E: SubClass[{float64}] -reveal_type(np.array([1, 1.0])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.array(deque([1, 2, 3]))) # E: ndarray[Any, dtype[Any]] -reveal_type(np.array(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.array(A, dtype='c16')) # E: ndarray[Any, dtype[Any]] -reveal_type(np.array(A, like=A)) # E: ndarray[Any, dtype[{float64}]] - -reveal_type(np.zeros([1, 5, 6])) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.zeros([1, 5, 6], dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.zeros([1, 5, 6], dtype='c16')) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.empty([1, 5, 6])) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.empty([1, 5, 6], dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.empty([1, 5, 6], dtype='c16')) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.concatenate(A)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.concatenate([A, A])) # E: Any -reveal_type(np.concatenate([[1], A])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.concatenate([[1], [1]])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.concatenate((A, A))) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.concatenate(([1], [1]))) # E: ndarray[Any, dtype[Any]] -reveal_type(np.concatenate([1, 1.0])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.concatenate(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.concatenate(A, dtype='c16')) # E: ndarray[Any, dtype[Any]] -reveal_type(np.concatenate([1, 1.0], out=A)) # E: ndarray[Any, dtype[{float64}]] - -reveal_type(np.asarray(A)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.asarray(B)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.asarray([1, 1.0])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.asarray(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.asarray(A, dtype='c16')) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.asanyarray(A)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.asanyarray(B)) # E: SubClass[{float64}] -reveal_type(np.asanyarray([1, 1.0])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.asanyarray(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.asanyarray(A, dtype='c16')) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.ascontiguousarray(A)) # E: 
ndarray[Any, dtype[{float64}]] -reveal_type(np.ascontiguousarray(B)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.ascontiguousarray([1, 1.0])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.ascontiguousarray(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.ascontiguousarray(A, dtype='c16')) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.asfortranarray(A)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.asfortranarray(B)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.asfortranarray([1, 1.0])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.asfortranarray(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.asfortranarray(A, dtype='c16')) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.fromstring("1 1 1", sep=" ")) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.fromstring(b"1 1 1", sep=" ")) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.fromstring("1 1 1", dtype=np.int64, sep=" ")) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" ")) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.fromstring("1 1 1", dtype="c16", sep=" ")) # E: ndarray[Any, dtype[Any]] -reveal_type(np.fromstring(b"1 1 1", dtype="c16", sep=" ")) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.fromfile("test.txt", sep=" ")) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.fromfile("test.txt", dtype=np.int64, sep=" ")) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.fromfile("test.txt", dtype="c16", sep=" ")) # E: ndarray[Any, dtype[Any]] +assert_type(np.empty_like(A), npt.NDArray[np.float64]) +assert_type(np.empty_like(B), SubClass[np.float64]) +assert_type(np.empty_like([1, 1.0]), npt.NDArray[Any]) +assert_type(np.empty_like(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.empty_like(A, dtype='c16'), npt.NDArray[Any]) + +assert_type(np.array(A), npt.NDArray[np.float64]) +assert_type(np.array(B), npt.NDArray[np.float64]) +assert_type(np.array(B, subok=True), SubClass[np.float64]) +assert_type(np.array([1, 1.0]), npt.NDArray[Any]) +assert_type(np.array(deque([1, 2, 3])), npt.NDArray[Any]) +assert_type(np.array(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.array(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.array(A, like=A), npt.NDArray[np.float64]) + +assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) +assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.zeros([1, 5, 6], dtype='c16'), npt.NDArray[Any]) + +assert_type(np.empty([1, 5, 6]), npt.NDArray[np.float64]) +assert_type(np.empty([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any]) + +assert_type(np.concatenate(A), npt.NDArray[np.float64]) +assert_type(np.concatenate([A, A]), Any) +assert_type(np.concatenate([[1], A]), npt.NDArray[Any]) +assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any]) +assert_type(np.concatenate((A, A)), npt.NDArray[np.float64]) +assert_type(np.concatenate(([1], [1])), npt.NDArray[Any]) +assert_type(np.concatenate([1, 1.0]), npt.NDArray[Any]) +assert_type(np.concatenate(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.concatenate(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.concatenate([1, 1.0], out=A), npt.NDArray[np.float64]) + +assert_type(np.asarray(A), npt.NDArray[np.float64]) +assert_type(np.asarray(B), npt.NDArray[np.float64]) +assert_type(np.asarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.asarray(A, 
dtype='c16'), npt.NDArray[Any]) + +assert_type(np.asanyarray(A), npt.NDArray[np.float64]) +assert_type(np.asanyarray(B), SubClass[np.float64]) +assert_type(np.asanyarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asanyarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.asanyarray(A, dtype='c16'), npt.NDArray[Any]) + +assert_type(np.ascontiguousarray(A), npt.NDArray[np.float64]) +assert_type(np.ascontiguousarray(B), npt.NDArray[np.float64]) +assert_type(np.ascontiguousarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.ascontiguousarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.ascontiguousarray(A, dtype='c16'), npt.NDArray[Any]) + +assert_type(np.asfortranarray(A), npt.NDArray[np.float64]) +assert_type(np.asfortranarray(B), npt.NDArray[np.float64]) +assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.asfortranarray(A, dtype='c16'), npt.NDArray[Any]) + +assert_type(np.fromstring("1 1 1", sep=" "), npt.NDArray[np.float64]) +assert_type(np.fromstring(b"1 1 1", sep=" "), npt.NDArray[np.float64]) +assert_type(np.fromstring("1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) +assert_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) +assert_type(np.fromstring("1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) +assert_type(np.fromstring(b"1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) + +assert_type(np.fromfile("test.txt", sep=" "), npt.NDArray[np.float64]) +assert_type(np.fromfile("test.txt", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) +assert_type(np.fromfile("test.txt", dtype="c16", sep=" "), npt.NDArray[Any]) with open("test.txt") as f: - reveal_type(np.fromfile(f, sep=" ")) # E: ndarray[Any, dtype[{float64}]] - reveal_type(np.fromfile(b"test.txt", sep=" ")) # E: ndarray[Any, dtype[{float64}]] - reveal_type(np.fromfile(Path("test.txt"), sep=" ")) # E: ndarray[Any, dtype[{float64}]] - -reveal_type(np.fromiter("12345", np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.fromiter("12345", float)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.frombuffer(A)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.frombuffer(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.frombuffer(A, dtype="c16")) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.arange(False, True)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.arange(10)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.arange(0, 10, step=2)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.arange(10.0)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.arange(start=0, stop=10.0)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.arange(np.timedelta64(0))) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(np.arange(0, np.timedelta64(10))) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(np.arange(np.datetime64("0"), np.datetime64("10"))) # E: ndarray[Any, dtype[datetime64]] -reveal_type(np.arange(10, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.arange(0, 10, step=2, dtype=np.int16)) # E: ndarray[Any, dtype[{int16}]] -reveal_type(np.arange(10, dtype=int)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.arange(0, 10, dtype="f8")) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.require(A)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.require(B)) # E: SubClass[{float64}] -reveal_type(np.require(B, requirements=None)) # E: SubClass[{float64}] -reveal_type(np.require(B, 
dtype=int)) # E: ndarray[Any, Any] -reveal_type(np.require(B, requirements="E")) # E: ndarray[Any, Any] -reveal_type(np.require(B, requirements=["ENSUREARRAY"])) # E: ndarray[Any, Any] -reveal_type(np.require(B, requirements={"F", "E"})) # E: ndarray[Any, Any] -reveal_type(np.require(B, requirements=["C", "OWNDATA"])) # E: SubClass[{float64}] -reveal_type(np.require(B, requirements="W")) # E: SubClass[{float64}] -reveal_type(np.require(B, requirements="A")) # E: SubClass[{float64}] -reveal_type(np.require(C)) # E: ndarray[Any, Any] - -reveal_type(np.linspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.linspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.linspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.linspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.linspace(0, 10, retstep=True)) # E: tuple[ndarray[Any, dtype[floating[Any]]], floating[Any]] -reveal_type(np.linspace(0j, 10, retstep=True)) # E: tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], complexfloating[Any, Any]] -reveal_type(np.linspace(0, 10, retstep=True, dtype=np.int64)) # E: tuple[ndarray[Any, dtype[{int64}]], {int64}] -reveal_type(np.linspace(0j, 10, retstep=True, dtype=int)) # E: tuple[ndarray[Any, dtype[Any]], Any] - -reveal_type(np.logspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.logspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.logspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.logspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.geomspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.geomspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.geomspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.geomspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.zeros_like(A)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.zeros_like(C)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.zeros_like(A, dtype=float)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.zeros_like(B)) # E: SubClass[{float64}] -reveal_type(np.zeros_like(B, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] - -reveal_type(np.ones_like(A)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.ones_like(C)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.ones_like(A, dtype=float)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.ones_like(B)) # E: SubClass[{float64}] -reveal_type(np.ones_like(B, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] - -reveal_type(np.full_like(A, i8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.full_like(C, i8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.full_like(A, i8, dtype=int)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.full_like(B, i8)) # E: SubClass[{float64}] -reveal_type(np.full_like(B, i8, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] - -reveal_type(np.ones(1)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.ones([1, 1, 1])) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.ones(5, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.ones(5, dtype=int)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.full(1, i8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.full([1, 1, 1], i8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.full(1, i8, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.full(1, i8, dtype=float)) # E: ndarray[Any, dtype[Any]] - 
-reveal_type(np.indices([1, 2, 3])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(np.indices([1, 2, 3], sparse=True)) # E: tuple[ndarray[Any, dtype[{int_}]], ...] - -reveal_type(np.fromfunction(func, (3, 5))) # E: SubClass[{float64}] - -reveal_type(np.identity(10)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.identity(10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.identity(10, dtype=int)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.atleast_1d(A)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.atleast_1d(C)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.atleast_1d(A, A)) # E: list[ndarray[Any, dtype[Any]]] -reveal_type(np.atleast_1d(A, C)) # E: list[ndarray[Any, dtype[Any]]] -reveal_type(np.atleast_1d(C, C)) # E: list[ndarray[Any, dtype[Any]]] - -reveal_type(np.atleast_2d(A)) # E: ndarray[Any, dtype[{float64}]] - -reveal_type(np.atleast_3d(A)) # E: ndarray[Any, dtype[{float64}]] - -reveal_type(np.vstack([A, A])) # E: ndarray[Any, Any] -reveal_type(np.vstack([A, A], dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.vstack([A, C])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.vstack([C, C])) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.hstack([A, A])) # E: ndarray[Any, Any] -reveal_type(np.hstack([A, A], dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]] - -reveal_type(np.stack([A, A])) # E: Any -reveal_type(np.stack([A, A], dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.stack([A, C])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.stack([C, C])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.stack([A, A], axis=0)) # E: Any -reveal_type(np.stack([A, A], out=B)) # E: SubClass[{float64}] - -reveal_type(np.block([[A, A], [A, A]])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.block(C)) # E: ndarray[Any, dtype[Any]] + assert_type(np.fromfile(f, sep=" "), npt.NDArray[np.float64]) + assert_type(np.fromfile(b"test.txt", sep=" "), npt.NDArray[np.float64]) + assert_type(np.fromfile(Path("test.txt"), sep=" "), npt.NDArray[np.float64]) + +assert_type(np.fromiter("12345", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromiter("12345", float), npt.NDArray[Any]) + +assert_type(np.frombuffer(A), npt.NDArray[np.float64]) +assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) + +assert_type(np.arange(False, True), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.arange(10), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.arange(0, 10, step=2), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.arange(10.0), npt.NDArray[np.floating[Any]]) +assert_type(np.arange(start=0, stop=10.0), npt.NDArray[np.floating[Any]]) +assert_type(np.arange(np.timedelta64(0)), npt.NDArray[np.timedelta64]) +assert_type(np.arange(0, np.timedelta64(10)), npt.NDArray[np.timedelta64]) +assert_type(np.arange(np.datetime64("0"), np.datetime64("10")), npt.NDArray[np.datetime64]) +assert_type(np.arange(10, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.arange(0, 10, step=2, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(np.arange(10, dtype=int), npt.NDArray[Any]) +assert_type(np.arange(0, 10, dtype="f8"), npt.NDArray[Any]) + +assert_type(np.require(A), npt.NDArray[np.float64]) +assert_type(np.require(B), SubClass[np.float64]) +assert_type(np.require(B, requirements=None), SubClass[np.float64]) +assert_type(np.require(B, dtype=int), np.ndarray[Any, Any]) +assert_type(np.require(B, requirements="E"), np.ndarray[Any, Any]) 
+assert_type(np.require(B, requirements=["ENSUREARRAY"]), np.ndarray[Any, Any]) +assert_type(np.require(B, requirements={"F", "E"}), np.ndarray[Any, Any]) +assert_type(np.require(B, requirements=["C", "OWNDATA"]), SubClass[np.float64]) +assert_type(np.require(B, requirements="W"), SubClass[np.float64]) +assert_type(np.require(B, requirements="A"), SubClass[np.float64]) +assert_type(np.require(C), np.ndarray[Any, Any]) + +assert_type(np.linspace(0, 10), npt.NDArray[np.floating[Any]]) +assert_type(np.linspace(0, 10j), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.linspace(0, 10, dtype=int), npt.NDArray[Any]) +assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.floating[Any]], np.floating[Any]]) +assert_type(np.linspace(0j, 10, retstep=True), tuple[npt.NDArray[np.complexfloating[Any, Any]], np.complexfloating[Any, Any]]) +assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[npt.NDArray[np.int64], np.int64]) +assert_type(np.linspace(0j, 10, retstep=True, dtype=int), tuple[npt.NDArray[Any], Any]) + +assert_type(np.logspace(0, 10), npt.NDArray[np.floating[Any]]) +assert_type(np.logspace(0, 10j), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.logspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.logspace(0, 10, dtype=int), npt.NDArray[Any]) + +assert_type(np.geomspace(0, 10), npt.NDArray[np.floating[Any]]) +assert_type(np.geomspace(0, 10j), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.geomspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.geomspace(0, 10, dtype=int), npt.NDArray[Any]) + +assert_type(np.zeros_like(A), npt.NDArray[np.float64]) +assert_type(np.zeros_like(C), npt.NDArray[Any]) +assert_type(np.zeros_like(A, dtype=float), npt.NDArray[Any]) +assert_type(np.zeros_like(B), SubClass[np.float64]) +assert_type(np.zeros_like(B, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(np.ones_like(A), npt.NDArray[np.float64]) +assert_type(np.ones_like(C), npt.NDArray[Any]) +assert_type(np.ones_like(A, dtype=float), npt.NDArray[Any]) +assert_type(np.ones_like(B), SubClass[np.float64]) +assert_type(np.ones_like(B, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(np.full_like(A, i8), npt.NDArray[np.float64]) +assert_type(np.full_like(C, i8), npt.NDArray[Any]) +assert_type(np.full_like(A, i8, dtype=int), npt.NDArray[Any]) +assert_type(np.full_like(B, i8), SubClass[np.float64]) +assert_type(np.full_like(B, i8, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(np.ones(1), npt.NDArray[np.float64]) +assert_type(np.ones([1, 1, 1]), npt.NDArray[np.float64]) +assert_type(np.ones(5, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.ones(5, dtype=int), npt.NDArray[Any]) + +assert_type(np.full(1, i8), npt.NDArray[Any]) +assert_type(np.full([1, 1, 1], i8), npt.NDArray[Any]) +assert_type(np.full(1, i8, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.full(1, i8, dtype=float), npt.NDArray[Any]) + +assert_type(np.indices([1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.indices([1, 2, 3], sparse=True), tuple[npt.NDArray[np.int_], ...]) + +assert_type(np.fromfunction(func, (3, 5)), SubClass[np.float64]) + +assert_type(np.identity(10), npt.NDArray[np.float64]) +assert_type(np.identity(10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.identity(10, dtype=int), npt.NDArray[Any]) + +assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) +assert_type(np.atleast_1d(C), npt.NDArray[Any]) 
+assert_type(np.atleast_1d(A, A), list[npt.NDArray[Any]]) +assert_type(np.atleast_1d(A, C), list[npt.NDArray[Any]]) +assert_type(np.atleast_1d(C, C), list[npt.NDArray[Any]]) + +assert_type(np.atleast_2d(A), npt.NDArray[np.float64]) + +assert_type(np.atleast_3d(A), npt.NDArray[np.float64]) + +assert_type(np.vstack([A, A]), np.ndarray[Any, Any]) +assert_type(np.vstack([A, A], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.vstack([A, C]), npt.NDArray[Any]) +assert_type(np.vstack([C, C]), npt.NDArray[Any]) + +assert_type(np.hstack([A, A]), np.ndarray[Any, Any]) +assert_type(np.hstack([A, A], dtype=np.float64), npt.NDArray[np.float64]) + +assert_type(np.stack([A, A]), Any) +assert_type(np.stack([A, A], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.stack([A, C]), npt.NDArray[Any]) +assert_type(np.stack([C, C]), npt.NDArray[Any]) +assert_type(np.stack([A, A], axis=0), Any) +assert_type(np.stack([A, A], out=B), SubClass[np.float64]) + +assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) +assert_type(np.block(C), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/arraypad.pyi b/numpy/typing/tests/data/reveal/arraypad.pyi index a05d44034644..f53613ba2fd4 100644 --- a/numpy/typing/tests/data/reveal/arraypad.pyi +++ b/numpy/typing/tests/data/reveal/arraypad.pyi @@ -1,9 +1,15 @@ +import sys from collections.abc import Mapping from typing import Any, SupportsIndex import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + def mode_func( ar: npt.NDArray[np.number[Any]], width: tuple[int, int], @@ -15,8 +21,8 @@ AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] AR_LIKE: list[int] -reveal_type(np.pad(AR_i8, (2, 3), "constant")) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.pad(AR_LIKE, (2, 3), "constant")) # E: ndarray[Any, dtype[Any]] +assert_type(np.pad(AR_i8, (2, 3), "constant"), npt.NDArray[np.int64]) +assert_type(np.pad(AR_LIKE, (2, 3), "constant"), npt.NDArray[Any]) -reveal_type(np.pad(AR_f8, (2, 3), mode_func)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2)) # E: ndarray[Any, dtype[{float64}]] +assert_type(np.pad(AR_f8, (2, 3), mode_func), npt.NDArray[np.float64]) +assert_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2), npt.NDArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/arrayprint.pyi b/numpy/typing/tests/data/reveal/arrayprint.pyi index 6e65a8d8ad24..8f41bd2fe8be 100644 --- a/numpy/typing/tests/data/reveal/arrayprint.pyi +++ b/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -1,20 +1,30 @@ +import sys +import contextlib from collections.abc import Callable from typing import Any + import numpy as np +from numpy.core.arrayprint import _FormatOptions + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type AR: np.ndarray[Any, Any] func_float: Callable[[np.floating[Any]], str] func_int: Callable[[np.integer[Any]], str] -reveal_type(np.get_printoptions()) # E: TypedDict -reveal_type(np.array2string( # E: str - AR, formatter={'float_kind': func_float, 'int_kind': func_int} -)) -reveal_type(np.format_float_scientific(1.0)) # E: str -reveal_type(np.format_float_positional(1)) # E: str -reveal_type(np.array_repr(AR)) # E: str -reveal_type(np.array_str(AR)) # E: str +assert_type(np.get_printoptions(), _FormatOptions) +assert_type( + np.array2string(AR, formatter={'float_kind': func_float, 'int_kind': func_int}), + str, +) 
+assert_type(np.format_float_scientific(1.0), str) +assert_type(np.format_float_positional(1), str) +assert_type(np.array_repr(AR), str) +assert_type(np.array_str(AR), str) -reveal_type(np.printoptions()) # E: contextlib._GeneratorContextManager +assert_type(np.printoptions(), contextlib._GeneratorContextManager[_FormatOptions]) with np.printoptions() as dct: - reveal_type(dct) # E: TypedDict + assert_type(dct, _FormatOptions) diff --git a/numpy/typing/tests/data/reveal/arraysetops.pyi b/numpy/typing/tests/data/reveal/arraysetops.pyi index 68d1c068003f..877ea667d520 100644 --- a/numpy/typing/tests/data/reveal/arraysetops.pyi +++ b/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -1,6 +1,14 @@ +import sys +from typing import Any + import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + AR_b: npt.NDArray[np.bool_] AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] @@ -9,52 +17,52 @@ AR_O: npt.NDArray[np.object_] AR_LIKE_f8: list[float] -reveal_type(np.ediff1d(AR_b)) # E: ndarray[Any, dtype[{int8}]] -reveal_type(np.ediff1d(AR_i8, to_end=[1, 2, 3])) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.ediff1d(AR_M)) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(np.ediff1d(AR_O)) # E: ndarray[Any, dtype[object_]] -reveal_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5])) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.intersect1d(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.intersect1d(AR_M, AR_M, assume_unique=True)) # E: ndarray[Any, dtype[datetime64]] -reveal_type(np.intersect1d(AR_f8, AR_i8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.intersect1d(AR_f8, AR_f8, return_indices=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] - -reveal_type(np.setxor1d(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.setxor1d(AR_M, AR_M, assume_unique=True)) # E: ndarray[Any, dtype[datetime64]] -reveal_type(np.setxor1d(AR_f8, AR_i8)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.in1d(AR_i8, AR_i8)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.in1d(AR_M, AR_M, assume_unique=True)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.in1d(AR_f8, AR_i8)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.in1d(AR_f8, AR_LIKE_f8, invert=True)) # E: ndarray[Any, dtype[bool_]] - -reveal_type(np.isin(AR_i8, AR_i8)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isin(AR_M, AR_M, assume_unique=True)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isin(AR_f8, AR_i8)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isin(AR_f8, AR_LIKE_f8, invert=True)) # E: ndarray[Any, dtype[bool_]] - -reveal_type(np.union1d(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.union1d(AR_M, AR_M)) # E: ndarray[Any, dtype[datetime64]] -reveal_type(np.union1d(AR_f8, AR_i8)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.setdiff1d(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.setdiff1d(AR_M, AR_M, assume_unique=True)) # E: ndarray[Any, dtype[datetime64]] -reveal_type(np.setdiff1d(AR_f8, AR_i8)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.unique(AR_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.unique(AR_LIKE_f8, axis=0)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.unique(AR_f8, return_index=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_index=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, 
dtype[{intp}]]] -reveal_type(np.unique(AR_f8, return_inverse=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_inverse=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_f8, return_counts=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_counts=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_f8, return_index=True, return_inverse=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_f8, return_index=True, return_counts=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_counts=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_f8, return_inverse=True, return_counts=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_inverse=True, return_counts=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_f8, return_index=True, return_inverse=True, return_counts=True)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True, return_counts=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] +assert_type(np.ediff1d(AR_b), npt.NDArray[np.int8]) +assert_type(np.ediff1d(AR_i8, to_end=[1, 2, 3]), npt.NDArray[np.int64]) +assert_type(np.ediff1d(AR_M), npt.NDArray[np.timedelta64]) +assert_type(np.ediff1d(AR_O), npt.NDArray[np.object_]) +assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), npt.NDArray[Any]) + +assert_type(np.intersect1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.intersect1d(AR_f8, AR_f8, return_indices=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) + +assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +assert_type(np.setxor1d(AR_f8, AR_i8), npt.NDArray[Any]) + +assert_type(np.in1d(AR_i8, AR_i8), npt.NDArray[np.bool_]) +assert_type(np.in1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.bool_]) +assert_type(np.in1d(AR_f8, AR_i8), npt.NDArray[np.bool_]) +assert_type(np.in1d(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool_]) + +assert_type(np.isin(AR_i8, AR_i8), npt.NDArray[np.bool_]) +assert_type(np.isin(AR_M, AR_M, assume_unique=True), npt.NDArray[np.bool_]) +assert_type(np.isin(AR_f8, AR_i8), npt.NDArray[np.bool_]) +assert_type(np.isin(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool_]) + +assert_type(np.union1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) 
+assert_type(np.union1d(AR_f8, AR_i8), npt.NDArray[Any]) + +assert_type(np.setdiff1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +assert_type(np.setdiff1d(AR_f8, AR_i8), npt.NDArray[Any]) + +assert_type(np.unique(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unique(AR_LIKE_f8, axis=0), npt.NDArray[Any]) +assert_type(np.unique(AR_f8, return_index=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_inverse=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_inverse=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_index=True, return_inverse=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_index=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_inverse=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_inverse=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_index=True, return_inverse=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp], npt.NDArray[np.intp]]) diff --git a/numpy/typing/tests/data/reveal/arrayterator.pyi b/numpy/typing/tests/data/reveal/arrayterator.pyi index b6c26ddb70e1..7988b5c0c767 100644 --- a/numpy/typing/tests/data/reveal/arrayterator.pyi +++ b/numpy/typing/tests/data/reveal/arrayterator.pyi @@ -1,24 +1,33 @@ +import sys from typing import Any +from collections.abc import Generator + import numpy as np +import numpy.typing as npt + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type AR_i8: np.ndarray[Any, np.dtype[np.int64]] ar_iter = np.lib.Arrayterator(AR_i8) -reveal_type(ar_iter.var) # E: ndarray[Any, dtype[{int64}]] -reveal_type(ar_iter.buf_size) # E: Union[None, builtins.int] -reveal_type(ar_iter.start) # E: builtins.list[builtins.int] -reveal_type(ar_iter.stop) # E: builtins.list[builtins.int] -reveal_type(ar_iter.step) # E: builtins.list[builtins.int] -reveal_type(ar_iter.shape) # E: builtins.tuple[builtins.int, ...] 
-reveal_type(ar_iter.flat) # E: typing.Generator[{int64}, None, None] +assert_type(ar_iter.var, npt.NDArray[np.int64]) +assert_type(ar_iter.buf_size, None | int) +assert_type(ar_iter.start, list[int]) +assert_type(ar_iter.stop, list[int]) +assert_type(ar_iter.step, list[int]) +assert_type(ar_iter.shape, tuple[int, ...]) +assert_type(ar_iter.flat, Generator[np.int64, None, None]) -reveal_type(ar_iter.__array__()) # E: ndarray[Any, dtype[{int64}]] +assert_type(ar_iter.__array__(), npt.NDArray[np.int64]) for i in ar_iter: - reveal_type(i) # E: ndarray[Any, dtype[{int64}]] + assert_type(i, npt.NDArray[np.int64]) -reveal_type(ar_iter[0]) # E: lib.arrayterator.Arrayterator[Any, dtype[{int64}]] -reveal_type(ar_iter[...]) # E: lib.arrayterator.Arrayterator[Any, dtype[{int64}]] -reveal_type(ar_iter[:]) # E: lib.arrayterator.Arrayterator[Any, dtype[{int64}]] -reveal_type(ar_iter[0, 0, 0]) # E: lib.arrayterator.Arrayterator[Any, dtype[{int64}]] -reveal_type(ar_iter[..., 0, :]) # E: lib.arrayterator.Arrayterator[Any, dtype[{int64}]] +assert_type(ar_iter[0], np.lib.Arrayterator[Any, np.dtype[np.int64]]) +assert_type(ar_iter[...], np.lib.Arrayterator[Any, np.dtype[np.int64]]) +assert_type(ar_iter[:], np.lib.Arrayterator[Any, np.dtype[np.int64]]) +assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[Any, np.dtype[np.int64]]) +assert_type(ar_iter[..., 0, :], np.lib.Arrayterator[Any, np.dtype[np.int64]]) diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index 3273f8226776..4c51ab7154bd 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -1,4 +1,14 @@ +import sys +from typing import Any + import numpy as np +import numpy.typing as npt +from numpy._typing import _64Bit, _32Bit + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type i8 = np.int64(1) u8 = np.uint64(1) @@ -15,111 +25,111 @@ AR = np.array([0, 1, 2], dtype=np.int32) AR.setflags(write=False) -reveal_type(i8 << i8) # E: {int64} -reveal_type(i8 >> i8) # E: {int64} -reveal_type(i8 | i8) # E: {int64} -reveal_type(i8 ^ i8) # E: {int64} -reveal_type(i8 & i8) # E: {int64} - -reveal_type(i8 << AR) # E: Any -reveal_type(i8 >> AR) # E: Any -reveal_type(i8 | AR) # E: Any -reveal_type(i8 ^ AR) # E: Any -reveal_type(i8 & AR) # E: Any - -reveal_type(i4 << i4) # E: {int32} -reveal_type(i4 >> i4) # E: {int32} -reveal_type(i4 | i4) # E: {int32} -reveal_type(i4 ^ i4) # E: {int32} -reveal_type(i4 & i4) # E: {int32} - -reveal_type(i8 << i4) # E: signedinteger[Union[_64Bit, _32Bit]] -reveal_type(i8 >> i4) # E: signedinteger[Union[_64Bit, _32Bit]] -reveal_type(i8 | i4) # E: signedinteger[Union[_64Bit, _32Bit]] -reveal_type(i8 ^ i4) # E: signedinteger[Union[_64Bit, _32Bit]] -reveal_type(i8 & i4) # E: signedinteger[Union[_64Bit, _32Bit]] - -reveal_type(i8 << b_) # E: {int64} -reveal_type(i8 >> b_) # E: {int64} -reveal_type(i8 | b_) # E: {int64} -reveal_type(i8 ^ b_) # E: {int64} -reveal_type(i8 & b_) # E: {int64} - -reveal_type(i8 << b) # E: {int64} -reveal_type(i8 >> b) # E: {int64} -reveal_type(i8 | b) # E: {int64} -reveal_type(i8 ^ b) # E: {int64} -reveal_type(i8 & b) # E: {int64} - -reveal_type(u8 << u8) # E: {uint64} -reveal_type(u8 >> u8) # E: {uint64} -reveal_type(u8 | u8) # E: {uint64} -reveal_type(u8 ^ u8) # E: {uint64} -reveal_type(u8 & u8) # E: {uint64} - -reveal_type(u8 << AR) # E: Any -reveal_type(u8 >> AR) # E: Any -reveal_type(u8 | AR) # E: Any -reveal_type(u8 ^ AR) # E: Any 
-reveal_type(u8 & AR) # E: Any - -reveal_type(u4 << u4) # E: {uint32} -reveal_type(u4 >> u4) # E: {uint32} -reveal_type(u4 | u4) # E: {uint32} -reveal_type(u4 ^ u4) # E: {uint32} -reveal_type(u4 & u4) # E: {uint32} - -reveal_type(u4 << i4) # E: signedinteger[Any] -reveal_type(u4 >> i4) # E: signedinteger[Any] -reveal_type(u4 | i4) # E: signedinteger[Any] -reveal_type(u4 ^ i4) # E: signedinteger[Any] -reveal_type(u4 & i4) # E: signedinteger[Any] - -reveal_type(u4 << i) # E: signedinteger[Any] -reveal_type(u4 >> i) # E: signedinteger[Any] -reveal_type(u4 | i) # E: signedinteger[Any] -reveal_type(u4 ^ i) # E: signedinteger[Any] -reveal_type(u4 & i) # E: signedinteger[Any] - -reveal_type(u8 << b_) # E: {uint64} -reveal_type(u8 >> b_) # E: {uint64} -reveal_type(u8 | b_) # E: {uint64} -reveal_type(u8 ^ b_) # E: {uint64} -reveal_type(u8 & b_) # E: {uint64} - -reveal_type(u8 << b) # E: {uint64} -reveal_type(u8 >> b) # E: {uint64} -reveal_type(u8 | b) # E: {uint64} -reveal_type(u8 ^ b) # E: {uint64} -reveal_type(u8 & b) # E: {uint64} - -reveal_type(b_ << b_) # E: {int8} -reveal_type(b_ >> b_) # E: {int8} -reveal_type(b_ | b_) # E: bool_ -reveal_type(b_ ^ b_) # E: bool_ -reveal_type(b_ & b_) # E: bool_ - -reveal_type(b_ << AR) # E: Any -reveal_type(b_ >> AR) # E: Any -reveal_type(b_ | AR) # E: Any -reveal_type(b_ ^ AR) # E: Any -reveal_type(b_ & AR) # E: Any - -reveal_type(b_ << b) # E: {int8} -reveal_type(b_ >> b) # E: {int8} -reveal_type(b_ | b) # E: bool_ -reveal_type(b_ ^ b) # E: bool_ -reveal_type(b_ & b) # E: bool_ - -reveal_type(b_ << i) # E: {int_} -reveal_type(b_ >> i) # E: {int_} -reveal_type(b_ | i) # E: {int_} -reveal_type(b_ ^ i) # E: {int_} -reveal_type(b_ & i) # E: {int_} - -reveal_type(~i8) # E: {int64} -reveal_type(~i4) # E: {int32} -reveal_type(~u8) # E: {uint64} -reveal_type(~u4) # E: {uint32} -reveal_type(~b_) # E: bool_ -reveal_type(~AR) # E: Any +assert_type(i8 << i8, np.int64) +assert_type(i8 >> i8, np.int64) +assert_type(i8 | i8, np.int64) +assert_type(i8 ^ i8, np.int64) +assert_type(i8 & i8, np.int64) + +assert_type(i8 << AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(i8 >> AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(i8 | AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(i8 ^ AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(i8 & AR, npt.NDArray[np.signedinteger[Any]]) + +assert_type(i4 << i4, np.int32) +assert_type(i4 >> i4, np.int32) +assert_type(i4 | i4, np.int32) +assert_type(i4 ^ i4, np.int32) +assert_type(i4 & i4, np.int32) + +assert_type(i8 << i4, np.signedinteger[_32Bit | _64Bit]) +assert_type(i8 >> i4, np.signedinteger[_32Bit | _64Bit]) +assert_type(i8 | i4, np.signedinteger[_32Bit | _64Bit]) +assert_type(i8 ^ i4, np.signedinteger[_32Bit | _64Bit]) +assert_type(i8 & i4, np.signedinteger[_32Bit | _64Bit]) + +assert_type(i8 << b_, np.int64) +assert_type(i8 >> b_, np.int64) +assert_type(i8 | b_, np.int64) +assert_type(i8 ^ b_, np.int64) +assert_type(i8 & b_, np.int64) + +assert_type(i8 << b, np.int64) +assert_type(i8 >> b, np.int64) +assert_type(i8 | b, np.int64) +assert_type(i8 ^ b, np.int64) +assert_type(i8 & b, np.int64) + +assert_type(u8 << u8, np.uint64) +assert_type(u8 >> u8, np.uint64) +assert_type(u8 | u8, np.uint64) +assert_type(u8 ^ u8, np.uint64) +assert_type(u8 & u8, np.uint64) + +assert_type(u8 << AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(u8 >> AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(u8 | AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(u8 ^ AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(u8 & AR, 
npt.NDArray[np.signedinteger[Any]]) + +assert_type(u4 << u4, np.uint32) +assert_type(u4 >> u4, np.uint32) +assert_type(u4 | u4, np.uint32) +assert_type(u4 ^ u4, np.uint32) +assert_type(u4 & u4, np.uint32) + +assert_type(u4 << i4, np.signedinteger[Any]) +assert_type(u4 >> i4, np.signedinteger[Any]) +assert_type(u4 | i4, np.signedinteger[Any]) +assert_type(u4 ^ i4, np.signedinteger[Any]) +assert_type(u4 & i4, np.signedinteger[Any]) + +assert_type(u4 << i, np.signedinteger[Any]) +assert_type(u4 >> i, np.signedinteger[Any]) +assert_type(u4 | i, np.signedinteger[Any]) +assert_type(u4 ^ i, np.signedinteger[Any]) +assert_type(u4 & i, np.signedinteger[Any]) + +assert_type(u8 << b_, np.uint64) +assert_type(u8 >> b_, np.uint64) +assert_type(u8 | b_, np.uint64) +assert_type(u8 ^ b_, np.uint64) +assert_type(u8 & b_, np.uint64) + +assert_type(u8 << b, np.uint64) +assert_type(u8 >> b, np.uint64) +assert_type(u8 | b, np.uint64) +assert_type(u8 ^ b, np.uint64) +assert_type(u8 & b, np.uint64) + +assert_type(b_ << b_, np.int8) +assert_type(b_ >> b_, np.int8) +assert_type(b_ | b_, np.bool_) +assert_type(b_ ^ b_, np.bool_) +assert_type(b_ & b_, np.bool_) + +assert_type(b_ << AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(b_ >> AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(b_ | AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(b_ ^ AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(b_ & AR, npt.NDArray[np.signedinteger[Any]]) + +assert_type(b_ << b, np.int8) +assert_type(b_ >> b, np.int8) +assert_type(b_ | b, np.bool_) +assert_type(b_ ^ b, np.bool_) +assert_type(b_ & b, np.bool_) + +assert_type(b_ << i, np.int_) +assert_type(b_ >> i, np.int_) +assert_type(b_ | i, np.int_) +assert_type(b_ ^ i, np.int_) +assert_type(b_ & i, np.int_) + +assert_type(~i8, np.int64) +assert_type(~i4, np.int32) +assert_type(~u8, np.uint64) +assert_type(~u4, np.uint32) +assert_type(~b_, np.bool_) +assert_type(~AR, npt.NDArray[np.int32]) diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index 0563b34727e4..e15ed0801a0d 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -1,147 +1,154 @@ +import sys +from typing import Any + import numpy as np import numpy.typing as npt -from collections.abc import Sequence + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] -reveal_type(np.char.equal(AR_U, AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.equal(AR_S, AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.equal(AR_U, AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.equal(AR_S, AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.not_equal(AR_U, AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.not_equal(AR_S, AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.not_equal(AR_U, AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.not_equal(AR_S, AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.greater_equal(AR_U, AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.greater_equal(AR_S, AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.greater_equal(AR_U, AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.greater_equal(AR_S, AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.less_equal(AR_U, AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.less_equal(AR_S, AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.less_equal(AR_U, AR_U), 
npt.NDArray[np.bool_]) +assert_type(np.char.less_equal(AR_S, AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.greater(AR_U, AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.greater(AR_S, AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.greater(AR_U, AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.greater(AR_S, AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.less(AR_U, AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.less(AR_S, AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.less(AR_U, AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.less(AR_S, AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.multiply(AR_U, 5)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.multiply(AR_S, [5, 4, 3])) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.multiply(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.multiply(AR_S, [5, 4, 3]), npt.NDArray[np.bytes_]) -reveal_type(np.char.mod(AR_U, "test")) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.mod(AR_S, "test")) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.mod(AR_U, "test"), npt.NDArray[np.str_]) +assert_type(np.char.mod(AR_S, "test"), npt.NDArray[np.bytes_]) -reveal_type(np.char.capitalize(AR_U)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.capitalize(AR_S)) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.capitalize(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.capitalize(AR_S), npt.NDArray[np.bytes_]) -reveal_type(np.char.center(AR_U, 5)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.center(AR_S, [2, 3, 4], b"a")) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.center(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.center(AR_S, [2, 3, 4], b"a"), npt.NDArray[np.bytes_]) -reveal_type(np.char.encode(AR_U)) # E: ndarray[Any, dtype[bytes_]] -reveal_type(np.char.decode(AR_S)) # E: ndarray[Any, dtype[str_]] +assert_type(np.char.encode(AR_U), npt.NDArray[np.bytes_]) +assert_type(np.char.decode(AR_S), npt.NDArray[np.str_]) -reveal_type(np.char.expandtabs(AR_U)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.expandtabs(AR_S, tabsize=4)) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.expandtabs(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.expandtabs(AR_S, tabsize=4), npt.NDArray[np.bytes_]) -reveal_type(np.char.join(AR_U, "_")) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.join(AR_S, [b"_", b""])) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.join(AR_U, "_"), npt.NDArray[np.str_]) +assert_type(np.char.join(AR_S, [b"_", b""]), npt.NDArray[np.bytes_]) -reveal_type(np.char.ljust(AR_U, 5)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: ndarray[Any, dtype[bytes_]] -reveal_type(np.char.rjust(AR_U, 5)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.ljust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.rjust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) -reveal_type(np.char.lstrip(AR_U)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.lstrip(AR_S, chars=b"_")) # E: ndarray[Any, dtype[bytes_]] -reveal_type(np.char.rstrip(AR_U)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.rstrip(AR_S, chars=b"_")) # E: ndarray[Any, dtype[bytes_]] -reveal_type(np.char.strip(AR_U)) # 
E: ndarray[Any, dtype[str_]] -reveal_type(np.char.strip(AR_S, chars=b"_")) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.lstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.lstrip(AR_S, chars=b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.rstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.rstrip(AR_S, chars=b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.strip(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.strip(AR_S, chars=b"_"), npt.NDArray[np.bytes_]) -reveal_type(np.char.partition(AR_U, "\n")) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.partition(AR_S, [b"a", b"b", b"c"])) # E: ndarray[Any, dtype[bytes_]] -reveal_type(np.char.rpartition(AR_U, "\n")) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.rpartition(AR_S, [b"a", b"b", b"c"])) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.partition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.char.partition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.rpartition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.char.rpartition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) -reveal_type(np.char.replace(AR_U, "_", "-")) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.replace(AR_S, [b"_", b""], [b"a", b"b"])) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.replace(AR_U, "_", "-"), npt.NDArray[np.str_]) +assert_type(np.char.replace(AR_S, [b"_", b""], [b"a", b"b"]), npt.NDArray[np.bytes_]) -reveal_type(np.char.split(AR_U, "_")) # E: ndarray[Any, dtype[object_]] -reveal_type(np.char.split(AR_S, maxsplit=[1, 2, 3])) # E: ndarray[Any, dtype[object_]] -reveal_type(np.char.rsplit(AR_U, "_")) # E: ndarray[Any, dtype[object_]] -reveal_type(np.char.rsplit(AR_S, maxsplit=[1, 2, 3])) # E: ndarray[Any, dtype[object_]] +assert_type(np.char.split(AR_U, "_"), npt.NDArray[np.object_]) +assert_type(np.char.split(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(np.char.rsplit(AR_U, "_"), npt.NDArray[np.object_]) +assert_type(np.char.rsplit(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) -reveal_type(np.char.splitlines(AR_U)) # E: ndarray[Any, dtype[object_]] -reveal_type(np.char.splitlines(AR_S, keepends=[True, True, False])) # E: ndarray[Any, dtype[object_]] +assert_type(np.char.splitlines(AR_U), npt.NDArray[np.object_]) +assert_type(np.char.splitlines(AR_S, keepends=[True, True, False]), npt.NDArray[np.object_]) -reveal_type(np.char.swapcase(AR_U)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.swapcase(AR_S)) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.swapcase(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.swapcase(AR_S), npt.NDArray[np.bytes_]) -reveal_type(np.char.title(AR_U)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.title(AR_S)) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.title(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.title(AR_S), npt.NDArray[np.bytes_]) -reveal_type(np.char.upper(AR_U)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.upper(AR_S)) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.upper(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.upper(AR_S), npt.NDArray[np.bytes_]) -reveal_type(np.char.zfill(AR_U, 5)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.char.zfill(AR_S, [2, 3, 4])) # E: ndarray[Any, dtype[bytes_]] +assert_type(np.char.zfill(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.zfill(AR_S, [2, 3, 4]), npt.NDArray[np.bytes_]) -reveal_type(np.char.count(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(np.char.count(AR_S, [b"a", b"b", 
b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]] +assert_type(np.char.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) -reveal_type(np.char.endswith(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.endswith(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.startswith(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.startswith(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.endswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool_]) +assert_type(np.char.endswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool_]) +assert_type(np.char.startswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool_]) +assert_type(np.char.startswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool_]) -reveal_type(np.char.find(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(np.char.find(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(np.char.rfind(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(np.char.rfind(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]] +assert_type(np.char.find(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.find(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.rfind(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.rfind(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) -reveal_type(np.char.index(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(np.char.index(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(np.char.rindex(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(np.char.rindex(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]] +assert_type(np.char.index(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.index(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) -reveal_type(np.char.isalpha(AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.isalpha(AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.isalpha(AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.isalpha(AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.isalnum(AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.isalnum(AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.isalnum(AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.isalnum(AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.isdecimal(AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.isdecimal(AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.isdecimal(AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.isdecimal(AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.isdigit(AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.isdigit(AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.isdigit(AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.isdigit(AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.islower(AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.islower(AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.islower(AR_U), npt.NDArray[np.bool_]) 
+assert_type(np.char.islower(AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.isnumeric(AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.isnumeric(AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.isnumeric(AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.isnumeric(AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.isspace(AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.isspace(AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.isspace(AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.isspace(AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.istitle(AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.istitle(AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.istitle(AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.istitle(AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.isupper(AR_U)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.char.isupper(AR_S)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.char.isupper(AR_U), npt.NDArray[np.bool_]) +assert_type(np.char.isupper(AR_S), npt.NDArray[np.bool_]) -reveal_type(np.char.str_len(AR_U)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(np.char.str_len(AR_S)) # E: ndarray[Any, dtype[{int_}]] +assert_type(np.char.str_len(AR_U), npt.NDArray[np.int_]) +assert_type(np.char.str_len(AR_S), npt.NDArray[np.int_]) -reveal_type(np.char.array(AR_U)) # E: chararray[Any, dtype[str_]] -reveal_type(np.char.array(AR_S, order="K")) # E: chararray[Any, dtype[bytes_]] -reveal_type(np.char.array("bob", copy=True)) # E: chararray[Any, dtype[str_]] -reveal_type(np.char.array(b"bob", itemsize=5)) # E: chararray[Any, dtype[bytes_]] -reveal_type(np.char.array(1, unicode=False)) # E: chararray[Any, dtype[bytes_]] -reveal_type(np.char.array(1, unicode=True)) # E: chararray[Any, dtype[str_]] +assert_type(np.char.array(AR_U), np.chararray[Any, np.dtype[np.str_]]) +assert_type(np.char.array(AR_S, order="K"), np.chararray[Any, np.dtype[np.bytes_]]) +assert_type(np.char.array("bob", copy=True), np.chararray[Any, np.dtype[np.str_]]) +assert_type(np.char.array(b"bob", itemsize=5), np.chararray[Any, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=False), np.chararray[Any, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=True), np.chararray[Any, np.dtype[np.str_]]) -reveal_type(np.char.asarray(AR_U)) # E: chararray[Any, dtype[str_]] -reveal_type(np.char.asarray(AR_S, order="K")) # E: chararray[Any, dtype[bytes_]] -reveal_type(np.char.asarray("bob")) # E: chararray[Any, dtype[str_]] -reveal_type(np.char.asarray(b"bob", itemsize=5)) # E: chararray[Any, dtype[bytes_]] -reveal_type(np.char.asarray(1, unicode=False)) # E: chararray[Any, dtype[bytes_]] -reveal_type(np.char.asarray(1, unicode=True)) # E: chararray[Any, dtype[str_]] +assert_type(np.char.asarray(AR_U), np.chararray[Any, np.dtype[np.str_]]) +assert_type(np.char.asarray(AR_S, order="K"), np.chararray[Any, np.dtype[np.bytes_]]) +assert_type(np.char.asarray("bob"), np.chararray[Any, np.dtype[np.str_]]) +assert_type(np.char.asarray(b"bob", itemsize=5), np.chararray[Any, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=False), np.chararray[Any, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=True), np.chararray[Any, np.dtype[np.str_]]) diff --git a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi index 61906c860675..4bcbeda2e6ad 100644 --- a/numpy/typing/tests/data/reveal/chararray.pyi +++ b/numpy/typing/tests/data/reveal/chararray.pyi @@ -1,132 +1,140 @@ -import numpy as 
np +import sys from typing import Any +import numpy as np +import numpy.typing as npt + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + AR_U: np.chararray[Any, np.dtype[np.str_]] AR_S: np.chararray[Any, np.dtype[np.bytes_]] -reveal_type(AR_U == AR_U) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S == AR_S) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U == AR_U, npt.NDArray[np.bool_]) +assert_type(AR_S == AR_S, npt.NDArray[np.bool_]) -reveal_type(AR_U != AR_U) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S != AR_S) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U != AR_U, npt.NDArray[np.bool_]) +assert_type(AR_S != AR_S, npt.NDArray[np.bool_]) -reveal_type(AR_U >= AR_U) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S >= AR_S) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U >= AR_U, npt.NDArray[np.bool_]) +assert_type(AR_S >= AR_S, npt.NDArray[np.bool_]) -reveal_type(AR_U <= AR_U) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S <= AR_S) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U <= AR_U, npt.NDArray[np.bool_]) +assert_type(AR_S <= AR_S, npt.NDArray[np.bool_]) -reveal_type(AR_U > AR_U) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S > AR_S) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U > AR_U, npt.NDArray[np.bool_]) +assert_type(AR_S > AR_S, npt.NDArray[np.bool_]) -reveal_type(AR_U < AR_U) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S < AR_S) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U < AR_U, npt.NDArray[np.bool_]) +assert_type(AR_S < AR_S, npt.NDArray[np.bool_]) -reveal_type(AR_U * 5) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S * [5]) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U * 5, np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S * [5], np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U % "test") # E: chararray[Any, dtype[str_]] -reveal_type(AR_S % b"test") # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U % "test", np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S % b"test", np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.capitalize()) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.capitalize()) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U.capitalize(), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.capitalize(), np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.center(5)) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.center([2, 3, 4], b"a")) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U.center(5), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.center([2, 3, 4], b"a"), np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.encode()) # E: chararray[Any, dtype[bytes_]] -reveal_type(AR_S.decode()) # E: chararray[Any, dtype[str_]] +assert_type(AR_U.encode(), np.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_S.decode(), np.chararray[Any, np.dtype[np.str_]]) -reveal_type(AR_U.expandtabs()) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.expandtabs(tabsize=4)) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U.expandtabs(), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.expandtabs(tabsize=4), np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.join("_")) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.join([b"_", b""])) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U.join("_"), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.join([b"_", b""]), np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.ljust(5)) # E: chararray[Any, dtype[str_]] 
-reveal_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: chararray[Any, dtype[bytes_]] -reveal_type(AR_U.rjust(5)) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U.ljust(5), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.rjust(5), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.lstrip()) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.lstrip(chars=b"_")) # E: chararray[Any, dtype[bytes_]] -reveal_type(AR_U.rstrip()) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.rstrip(chars=b"_")) # E: chararray[Any, dtype[bytes_]] -reveal_type(AR_U.strip()) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.strip(chars=b"_")) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U.lstrip(), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.lstrip(chars=b"_"), np.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.rstrip(), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.rstrip(chars=b"_"), np.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.strip(), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.strip(chars=b"_"), np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.partition("\n")) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.partition([b"a", b"b", b"c"])) # E: chararray[Any, dtype[bytes_]] -reveal_type(AR_U.rpartition("\n")) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.rpartition([b"a", b"b", b"c"])) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U.partition("\n"), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.partition([b"a", b"b", b"c"]), np.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.rpartition("\n"), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.rpartition([b"a", b"b", b"c"]), np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.replace("_", "-")) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.replace([b"_", b""], [b"a", b"b"])) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U.replace("_", "-"), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.split("_")) # E: ndarray[Any, dtype[object_]] -reveal_type(AR_S.split(maxsplit=[1, 2, 3])) # E: ndarray[Any, dtype[object_]] -reveal_type(AR_U.rsplit("_")) # E: ndarray[Any, dtype[object_]] -reveal_type(AR_S.rsplit(maxsplit=[1, 2, 3])) # E: ndarray[Any, dtype[object_]] +assert_type(AR_U.split("_"), npt.NDArray[np.object_]) +assert_type(AR_S.split(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(AR_U.rsplit("_"), npt.NDArray[np.object_]) +assert_type(AR_S.rsplit(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) -reveal_type(AR_U.splitlines()) # E: ndarray[Any, dtype[object_]] -reveal_type(AR_S.splitlines(keepends=[True, True, False])) # E: ndarray[Any, dtype[object_]] +assert_type(AR_U.splitlines(), npt.NDArray[np.object_]) +assert_type(AR_S.splitlines(keepends=[True, True, False]), npt.NDArray[np.object_]) -reveal_type(AR_U.swapcase()) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.swapcase()) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U.swapcase(), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.swapcase(), np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.title()) # E: chararray[Any, 
dtype[str_]] -reveal_type(AR_S.title()) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U.title(), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.title(), np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.upper()) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.upper()) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U.upper(), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.upper(), np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.zfill(5)) # E: chararray[Any, dtype[str_]] -reveal_type(AR_S.zfill([2, 3, 4])) # E: chararray[Any, dtype[bytes_]] +assert_type(AR_U.zfill(5), np.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_S.zfill([2, 3, 4]), np.chararray[Any, np.dtype[np.bytes_]]) -reveal_type(AR_U.count("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(AR_S.count([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]] +assert_type(AR_U.count("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.count([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) -reveal_type(AR_U.endswith("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S.endswith([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_U.startswith("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S.startswith([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U.endswith("a", start=[1, 2, 3]), npt.NDArray[np.bool_]) +assert_type(AR_S.endswith([b"a", b"b", b"c"], end=9), npt.NDArray[np.bool_]) +assert_type(AR_U.startswith("a", start=[1, 2, 3]), npt.NDArray[np.bool_]) +assert_type(AR_S.startswith([b"a", b"b", b"c"], end=9), npt.NDArray[np.bool_]) -reveal_type(AR_U.find("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(AR_S.find([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(AR_U.rfind("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(AR_S.rfind([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]] +assert_type(AR_U.find("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.find([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(AR_U.rfind("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.rfind([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) -reveal_type(AR_U.index("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(AR_S.index([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(AR_U.rindex("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(AR_S.rindex([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]] +assert_type(AR_U.index("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.index([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(AR_U.rindex("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.rindex([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) -reveal_type(AR_U.isalpha()) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S.isalpha()) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U.isalpha(), npt.NDArray[np.bool_]) +assert_type(AR_S.isalpha(), npt.NDArray[np.bool_]) -reveal_type(AR_U.isalnum()) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S.isalnum()) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U.isalnum(), npt.NDArray[np.bool_]) +assert_type(AR_S.isalnum(), npt.NDArray[np.bool_]) -reveal_type(AR_U.isdecimal()) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S.isdecimal()) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U.isdecimal(), npt.NDArray[np.bool_]) 
+assert_type(AR_S.isdecimal(), npt.NDArray[np.bool_]) -reveal_type(AR_U.isdigit()) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S.isdigit()) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U.isdigit(), npt.NDArray[np.bool_]) +assert_type(AR_S.isdigit(), npt.NDArray[np.bool_]) -reveal_type(AR_U.islower()) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S.islower()) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U.islower(), npt.NDArray[np.bool_]) +assert_type(AR_S.islower(), npt.NDArray[np.bool_]) -reveal_type(AR_U.isnumeric()) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S.isnumeric()) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U.isnumeric(), npt.NDArray[np.bool_]) +assert_type(AR_S.isnumeric(), npt.NDArray[np.bool_]) -reveal_type(AR_U.isspace()) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S.isspace()) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U.isspace(), npt.NDArray[np.bool_]) +assert_type(AR_S.isspace(), npt.NDArray[np.bool_]) -reveal_type(AR_U.istitle()) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S.istitle()) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U.istitle(), npt.NDArray[np.bool_]) +assert_type(AR_S.istitle(), npt.NDArray[np.bool_]) -reveal_type(AR_U.isupper()) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR_S.isupper()) # E: ndarray[Any, dtype[bool_]] +assert_type(AR_U.isupper(), npt.NDArray[np.bool_]) +assert_type(AR_S.isupper(), npt.NDArray[np.bool_]) -reveal_type(AR_U.__array_finalize__(object())) # E: None -reveal_type(AR_S.__array_finalize__(object())) # E: None +assert_type(AR_U.__array_finalize__(object()), None) +assert_type(AR_S.__array_finalize__(object()), None) diff --git a/numpy/typing/tests/data/reveal/comparisons.pyi b/numpy/typing/tests/data/reveal/comparisons.pyi index 9b32f40576b3..5765302a02f8 100644 --- a/numpy/typing/tests/data/reveal/comparisons.pyi +++ b/numpy/typing/tests/data/reveal/comparisons.pyi @@ -1,6 +1,15 @@ -import numpy as np +import sys import fractions import decimal +from typing import Any + +import numpy as np +import numpy.typing as npt + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type c16 = np.complex128() f8 = np.float64() @@ -29,233 +38,233 @@ SEQ = (0, 1, 2, 3, 4) # object-like comparisons -reveal_type(i8 > fractions.Fraction(1, 5)) # E: Any -reveal_type(i8 > [fractions.Fraction(1, 5)]) # E: Any -reveal_type(i8 > decimal.Decimal("1.5")) # E: Any -reveal_type(i8 > [decimal.Decimal("1.5")]) # E: Any +assert_type(i8 > fractions.Fraction(1, 5), Any) +assert_type(i8 > [fractions.Fraction(1, 5)], Any) +assert_type(i8 > decimal.Decimal("1.5"), Any) +assert_type(i8 > [decimal.Decimal("1.5")], Any) # Time structures -reveal_type(dt > dt) # E: bool_ +assert_type(dt > dt, np.bool_) -reveal_type(td > td) # E: bool_ -reveal_type(td > i) # E: bool_ -reveal_type(td > i4) # E: bool_ -reveal_type(td > i8) # E: bool_ +assert_type(td > td, np.bool_) +assert_type(td > i, np.bool_) +assert_type(td > i4, np.bool_) +assert_type(td > i8, np.bool_) -reveal_type(td > AR) # E: ndarray[Any, dtype[bool_]] -reveal_type(td > SEQ) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR > SEQ) # E: ndarray[Any, dtype[bool_]] -reveal_type(AR > td) # E: ndarray[Any, dtype[bool_]] -reveal_type(SEQ > td) # E: ndarray[Any, dtype[bool_]] -reveal_type(SEQ > AR) # E: ndarray[Any, dtype[bool_]] +assert_type(td > AR, npt.NDArray[np.bool_]) +assert_type(td > SEQ, npt.NDArray[np.bool_]) +assert_type(AR > SEQ, npt.NDArray[np.bool_]) +assert_type(AR > td, npt.NDArray[np.bool_]) 
+assert_type(SEQ > td, npt.NDArray[np.bool_]) +assert_type(SEQ > AR, npt.NDArray[np.bool_]) # boolean -reveal_type(b_ > b) # E: bool_ -reveal_type(b_ > b_) # E: bool_ -reveal_type(b_ > i) # E: bool_ -reveal_type(b_ > i8) # E: bool_ -reveal_type(b_ > i4) # E: bool_ -reveal_type(b_ > u8) # E: bool_ -reveal_type(b_ > u4) # E: bool_ -reveal_type(b_ > f) # E: bool_ -reveal_type(b_ > f8) # E: bool_ -reveal_type(b_ > f4) # E: bool_ -reveal_type(b_ > c) # E: bool_ -reveal_type(b_ > c16) # E: bool_ -reveal_type(b_ > c8) # E: bool_ -reveal_type(b_ > AR) # E: ndarray[Any, dtype[bool_]] -reveal_type(b_ > SEQ) # E: ndarray[Any, dtype[bool_]] +assert_type(b_ > b, np.bool_) +assert_type(b_ > b_, np.bool_) +assert_type(b_ > i, np.bool_) +assert_type(b_ > i8, np.bool_) +assert_type(b_ > i4, np.bool_) +assert_type(b_ > u8, np.bool_) +assert_type(b_ > u4, np.bool_) +assert_type(b_ > f, np.bool_) +assert_type(b_ > f8, np.bool_) +assert_type(b_ > f4, np.bool_) +assert_type(b_ > c, np.bool_) +assert_type(b_ > c16, np.bool_) +assert_type(b_ > c8, np.bool_) +assert_type(b_ > AR, npt.NDArray[np.bool_]) +assert_type(b_ > SEQ, npt.NDArray[np.bool_]) # Complex -reveal_type(c16 > c16) # E: bool_ -reveal_type(c16 > f8) # E: bool_ -reveal_type(c16 > i8) # E: bool_ -reveal_type(c16 > c8) # E: bool_ -reveal_type(c16 > f4) # E: bool_ -reveal_type(c16 > i4) # E: bool_ -reveal_type(c16 > b_) # E: bool_ -reveal_type(c16 > b) # E: bool_ -reveal_type(c16 > c) # E: bool_ -reveal_type(c16 > f) # E: bool_ -reveal_type(c16 > i) # E: bool_ -reveal_type(c16 > AR) # E: ndarray[Any, dtype[bool_]] -reveal_type(c16 > SEQ) # E: ndarray[Any, dtype[bool_]] - -reveal_type(c16 > c16) # E: bool_ -reveal_type(f8 > c16) # E: bool_ -reveal_type(i8 > c16) # E: bool_ -reveal_type(c8 > c16) # E: bool_ -reveal_type(f4 > c16) # E: bool_ -reveal_type(i4 > c16) # E: bool_ -reveal_type(b_ > c16) # E: bool_ -reveal_type(b > c16) # E: bool_ -reveal_type(c > c16) # E: bool_ -reveal_type(f > c16) # E: bool_ -reveal_type(i > c16) # E: bool_ -reveal_type(AR > c16) # E: ndarray[Any, dtype[bool_]] -reveal_type(SEQ > c16) # E: ndarray[Any, dtype[bool_]] - -reveal_type(c8 > c16) # E: bool_ -reveal_type(c8 > f8) # E: bool_ -reveal_type(c8 > i8) # E: bool_ -reveal_type(c8 > c8) # E: bool_ -reveal_type(c8 > f4) # E: bool_ -reveal_type(c8 > i4) # E: bool_ -reveal_type(c8 > b_) # E: bool_ -reveal_type(c8 > b) # E: bool_ -reveal_type(c8 > c) # E: bool_ -reveal_type(c8 > f) # E: bool_ -reveal_type(c8 > i) # E: bool_ -reveal_type(c8 > AR) # E: ndarray[Any, dtype[bool_]] -reveal_type(c8 > SEQ) # E: ndarray[Any, dtype[bool_]] - -reveal_type(c16 > c8) # E: bool_ -reveal_type(f8 > c8) # E: bool_ -reveal_type(i8 > c8) # E: bool_ -reveal_type(c8 > c8) # E: bool_ -reveal_type(f4 > c8) # E: bool_ -reveal_type(i4 > c8) # E: bool_ -reveal_type(b_ > c8) # E: bool_ -reveal_type(b > c8) # E: bool_ -reveal_type(c > c8) # E: bool_ -reveal_type(f > c8) # E: bool_ -reveal_type(i > c8) # E: bool_ -reveal_type(AR > c8) # E: ndarray[Any, dtype[bool_]] -reveal_type(SEQ > c8) # E: ndarray[Any, dtype[bool_]] +assert_type(c16 > c16, np.bool_) +assert_type(c16 > f8, np.bool_) +assert_type(c16 > i8, np.bool_) +assert_type(c16 > c8, np.bool_) +assert_type(c16 > f4, np.bool_) +assert_type(c16 > i4, np.bool_) +assert_type(c16 > b_, np.bool_) +assert_type(c16 > b, np.bool_) +assert_type(c16 > c, np.bool_) +assert_type(c16 > f, np.bool_) +assert_type(c16 > i, np.bool_) +assert_type(c16 > AR, npt.NDArray[np.bool_]) +assert_type(c16 > SEQ, npt.NDArray[np.bool_]) + +assert_type(c16 > c16, np.bool_) 
+assert_type(f8 > c16, np.bool_) +assert_type(i8 > c16, np.bool_) +assert_type(c8 > c16, np.bool_) +assert_type(f4 > c16, np.bool_) +assert_type(i4 > c16, np.bool_) +assert_type(b_ > c16, np.bool_) +assert_type(b > c16, np.bool_) +assert_type(c > c16, np.bool_) +assert_type(f > c16, np.bool_) +assert_type(i > c16, np.bool_) +assert_type(AR > c16, npt.NDArray[np.bool_]) +assert_type(SEQ > c16, npt.NDArray[np.bool_]) + +assert_type(c8 > c16, np.bool_) +assert_type(c8 > f8, np.bool_) +assert_type(c8 > i8, np.bool_) +assert_type(c8 > c8, np.bool_) +assert_type(c8 > f4, np.bool_) +assert_type(c8 > i4, np.bool_) +assert_type(c8 > b_, np.bool_) +assert_type(c8 > b, np.bool_) +assert_type(c8 > c, np.bool_) +assert_type(c8 > f, np.bool_) +assert_type(c8 > i, np.bool_) +assert_type(c8 > AR, npt.NDArray[np.bool_]) +assert_type(c8 > SEQ, npt.NDArray[np.bool_]) + +assert_type(c16 > c8, np.bool_) +assert_type(f8 > c8, np.bool_) +assert_type(i8 > c8, np.bool_) +assert_type(c8 > c8, np.bool_) +assert_type(f4 > c8, np.bool_) +assert_type(i4 > c8, np.bool_) +assert_type(b_ > c8, np.bool_) +assert_type(b > c8, np.bool_) +assert_type(c > c8, np.bool_) +assert_type(f > c8, np.bool_) +assert_type(i > c8, np.bool_) +assert_type(AR > c8, npt.NDArray[np.bool_]) +assert_type(SEQ > c8, npt.NDArray[np.bool_]) # Float -reveal_type(f8 > f8) # E: bool_ -reveal_type(f8 > i8) # E: bool_ -reveal_type(f8 > f4) # E: bool_ -reveal_type(f8 > i4) # E: bool_ -reveal_type(f8 > b_) # E: bool_ -reveal_type(f8 > b) # E: bool_ -reveal_type(f8 > c) # E: bool_ -reveal_type(f8 > f) # E: bool_ -reveal_type(f8 > i) # E: bool_ -reveal_type(f8 > AR) # E: ndarray[Any, dtype[bool_]] -reveal_type(f8 > SEQ) # E: ndarray[Any, dtype[bool_]] - -reveal_type(f8 > f8) # E: bool_ -reveal_type(i8 > f8) # E: bool_ -reveal_type(f4 > f8) # E: bool_ -reveal_type(i4 > f8) # E: bool_ -reveal_type(b_ > f8) # E: bool_ -reveal_type(b > f8) # E: bool_ -reveal_type(c > f8) # E: bool_ -reveal_type(f > f8) # E: bool_ -reveal_type(i > f8) # E: bool_ -reveal_type(AR > f8) # E: ndarray[Any, dtype[bool_]] -reveal_type(SEQ > f8) # E: ndarray[Any, dtype[bool_]] - -reveal_type(f4 > f8) # E: bool_ -reveal_type(f4 > i8) # E: bool_ -reveal_type(f4 > f4) # E: bool_ -reveal_type(f4 > i4) # E: bool_ -reveal_type(f4 > b_) # E: bool_ -reveal_type(f4 > b) # E: bool_ -reveal_type(f4 > c) # E: bool_ -reveal_type(f4 > f) # E: bool_ -reveal_type(f4 > i) # E: bool_ -reveal_type(f4 > AR) # E: ndarray[Any, dtype[bool_]] -reveal_type(f4 > SEQ) # E: ndarray[Any, dtype[bool_]] - -reveal_type(f8 > f4) # E: bool_ -reveal_type(i8 > f4) # E: bool_ -reveal_type(f4 > f4) # E: bool_ -reveal_type(i4 > f4) # E: bool_ -reveal_type(b_ > f4) # E: bool_ -reveal_type(b > f4) # E: bool_ -reveal_type(c > f4) # E: bool_ -reveal_type(f > f4) # E: bool_ -reveal_type(i > f4) # E: bool_ -reveal_type(AR > f4) # E: ndarray[Any, dtype[bool_]] -reveal_type(SEQ > f4) # E: ndarray[Any, dtype[bool_]] +assert_type(f8 > f8, np.bool_) +assert_type(f8 > i8, np.bool_) +assert_type(f8 > f4, np.bool_) +assert_type(f8 > i4, np.bool_) +assert_type(f8 > b_, np.bool_) +assert_type(f8 > b, np.bool_) +assert_type(f8 > c, np.bool_) +assert_type(f8 > f, np.bool_) +assert_type(f8 > i, np.bool_) +assert_type(f8 > AR, npt.NDArray[np.bool_]) +assert_type(f8 > SEQ, npt.NDArray[np.bool_]) + +assert_type(f8 > f8, np.bool_) +assert_type(i8 > f8, np.bool_) +assert_type(f4 > f8, np.bool_) +assert_type(i4 > f8, np.bool_) +assert_type(b_ > f8, np.bool_) +assert_type(b > f8, np.bool_) +assert_type(c > f8, np.bool_) +assert_type(f > f8, np.bool_) 
+assert_type(i > f8, np.bool_) +assert_type(AR > f8, npt.NDArray[np.bool_]) +assert_type(SEQ > f8, npt.NDArray[np.bool_]) + +assert_type(f4 > f8, np.bool_) +assert_type(f4 > i8, np.bool_) +assert_type(f4 > f4, np.bool_) +assert_type(f4 > i4, np.bool_) +assert_type(f4 > b_, np.bool_) +assert_type(f4 > b, np.bool_) +assert_type(f4 > c, np.bool_) +assert_type(f4 > f, np.bool_) +assert_type(f4 > i, np.bool_) +assert_type(f4 > AR, npt.NDArray[np.bool_]) +assert_type(f4 > SEQ, npt.NDArray[np.bool_]) + +assert_type(f8 > f4, np.bool_) +assert_type(i8 > f4, np.bool_) +assert_type(f4 > f4, np.bool_) +assert_type(i4 > f4, np.bool_) +assert_type(b_ > f4, np.bool_) +assert_type(b > f4, np.bool_) +assert_type(c > f4, np.bool_) +assert_type(f > f4, np.bool_) +assert_type(i > f4, np.bool_) +assert_type(AR > f4, npt.NDArray[np.bool_]) +assert_type(SEQ > f4, npt.NDArray[np.bool_]) # Int -reveal_type(i8 > i8) # E: bool_ -reveal_type(i8 > u8) # E: bool_ -reveal_type(i8 > i4) # E: bool_ -reveal_type(i8 > u4) # E: bool_ -reveal_type(i8 > b_) # E: bool_ -reveal_type(i8 > b) # E: bool_ -reveal_type(i8 > c) # E: bool_ -reveal_type(i8 > f) # E: bool_ -reveal_type(i8 > i) # E: bool_ -reveal_type(i8 > AR) # E: ndarray[Any, dtype[bool_]] -reveal_type(i8 > SEQ) # E: ndarray[Any, dtype[bool_]] - -reveal_type(u8 > u8) # E: bool_ -reveal_type(u8 > i4) # E: bool_ -reveal_type(u8 > u4) # E: bool_ -reveal_type(u8 > b_) # E: bool_ -reveal_type(u8 > b) # E: bool_ -reveal_type(u8 > c) # E: bool_ -reveal_type(u8 > f) # E: bool_ -reveal_type(u8 > i) # E: bool_ -reveal_type(u8 > AR) # E: ndarray[Any, dtype[bool_]] -reveal_type(u8 > SEQ) # E: ndarray[Any, dtype[bool_]] - -reveal_type(i8 > i8) # E: bool_ -reveal_type(u8 > i8) # E: bool_ -reveal_type(i4 > i8) # E: bool_ -reveal_type(u4 > i8) # E: bool_ -reveal_type(b_ > i8) # E: bool_ -reveal_type(b > i8) # E: bool_ -reveal_type(c > i8) # E: bool_ -reveal_type(f > i8) # E: bool_ -reveal_type(i > i8) # E: bool_ -reveal_type(AR > i8) # E: ndarray[Any, dtype[bool_]] -reveal_type(SEQ > i8) # E: ndarray[Any, dtype[bool_]] - -reveal_type(u8 > u8) # E: bool_ -reveal_type(i4 > u8) # E: bool_ -reveal_type(u4 > u8) # E: bool_ -reveal_type(b_ > u8) # E: bool_ -reveal_type(b > u8) # E: bool_ -reveal_type(c > u8) # E: bool_ -reveal_type(f > u8) # E: bool_ -reveal_type(i > u8) # E: bool_ -reveal_type(AR > u8) # E: ndarray[Any, dtype[bool_]] -reveal_type(SEQ > u8) # E: ndarray[Any, dtype[bool_]] - -reveal_type(i4 > i8) # E: bool_ -reveal_type(i4 > i4) # E: bool_ -reveal_type(i4 > i) # E: bool_ -reveal_type(i4 > b_) # E: bool_ -reveal_type(i4 > b) # E: bool_ -reveal_type(i4 > AR) # E: ndarray[Any, dtype[bool_]] -reveal_type(i4 > SEQ) # E: ndarray[Any, dtype[bool_]] - -reveal_type(u4 > i8) # E: bool_ -reveal_type(u4 > i4) # E: bool_ -reveal_type(u4 > u8) # E: bool_ -reveal_type(u4 > u4) # E: bool_ -reveal_type(u4 > i) # E: bool_ -reveal_type(u4 > b_) # E: bool_ -reveal_type(u4 > b) # E: bool_ -reveal_type(u4 > AR) # E: ndarray[Any, dtype[bool_]] -reveal_type(u4 > SEQ) # E: ndarray[Any, dtype[bool_]] - -reveal_type(i8 > i4) # E: bool_ -reveal_type(i4 > i4) # E: bool_ -reveal_type(i > i4) # E: bool_ -reveal_type(b_ > i4) # E: bool_ -reveal_type(b > i4) # E: bool_ -reveal_type(AR > i4) # E: ndarray[Any, dtype[bool_]] -reveal_type(SEQ > i4) # E: ndarray[Any, dtype[bool_]] - -reveal_type(i8 > u4) # E: bool_ -reveal_type(i4 > u4) # E: bool_ -reveal_type(u8 > u4) # E: bool_ -reveal_type(u4 > u4) # E: bool_ -reveal_type(b_ > u4) # E: bool_ -reveal_type(b > u4) # E: bool_ -reveal_type(i > u4) # E: bool_ 
-reveal_type(AR > u4) # E: ndarray[Any, dtype[bool_]] -reveal_type(SEQ > u4) # E: ndarray[Any, dtype[bool_]] +assert_type(i8 > i8, np.bool_) +assert_type(i8 > u8, np.bool_) +assert_type(i8 > i4, np.bool_) +assert_type(i8 > u4, np.bool_) +assert_type(i8 > b_, np.bool_) +assert_type(i8 > b, np.bool_) +assert_type(i8 > c, np.bool_) +assert_type(i8 > f, np.bool_) +assert_type(i8 > i, np.bool_) +assert_type(i8 > AR, npt.NDArray[np.bool_]) +assert_type(i8 > SEQ, npt.NDArray[np.bool_]) + +assert_type(u8 > u8, np.bool_) +assert_type(u8 > i4, np.bool_) +assert_type(u8 > u4, np.bool_) +assert_type(u8 > b_, np.bool_) +assert_type(u8 > b, np.bool_) +assert_type(u8 > c, np.bool_) +assert_type(u8 > f, np.bool_) +assert_type(u8 > i, np.bool_) +assert_type(u8 > AR, npt.NDArray[np.bool_]) +assert_type(u8 > SEQ, npt.NDArray[np.bool_]) + +assert_type(i8 > i8, np.bool_) +assert_type(u8 > i8, np.bool_) +assert_type(i4 > i8, np.bool_) +assert_type(u4 > i8, np.bool_) +assert_type(b_ > i8, np.bool_) +assert_type(b > i8, np.bool_) +assert_type(c > i8, np.bool_) +assert_type(f > i8, np.bool_) +assert_type(i > i8, np.bool_) +assert_type(AR > i8, npt.NDArray[np.bool_]) +assert_type(SEQ > i8, npt.NDArray[np.bool_]) + +assert_type(u8 > u8, np.bool_) +assert_type(i4 > u8, np.bool_) +assert_type(u4 > u8, np.bool_) +assert_type(b_ > u8, np.bool_) +assert_type(b > u8, np.bool_) +assert_type(c > u8, np.bool_) +assert_type(f > u8, np.bool_) +assert_type(i > u8, np.bool_) +assert_type(AR > u8, npt.NDArray[np.bool_]) +assert_type(SEQ > u8, npt.NDArray[np.bool_]) + +assert_type(i4 > i8, np.bool_) +assert_type(i4 > i4, np.bool_) +assert_type(i4 > i, np.bool_) +assert_type(i4 > b_, np.bool_) +assert_type(i4 > b, np.bool_) +assert_type(i4 > AR, npt.NDArray[np.bool_]) +assert_type(i4 > SEQ, npt.NDArray[np.bool_]) + +assert_type(u4 > i8, np.bool_) +assert_type(u4 > i4, np.bool_) +assert_type(u4 > u8, np.bool_) +assert_type(u4 > u4, np.bool_) +assert_type(u4 > i, np.bool_) +assert_type(u4 > b_, np.bool_) +assert_type(u4 > b, np.bool_) +assert_type(u4 > AR, npt.NDArray[np.bool_]) +assert_type(u4 > SEQ, npt.NDArray[np.bool_]) + +assert_type(i8 > i4, np.bool_) +assert_type(i4 > i4, np.bool_) +assert_type(i > i4, np.bool_) +assert_type(b_ > i4, np.bool_) +assert_type(b > i4, np.bool_) +assert_type(AR > i4, npt.NDArray[np.bool_]) +assert_type(SEQ > i4, npt.NDArray[np.bool_]) + +assert_type(i8 > u4, np.bool_) +assert_type(i4 > u4, np.bool_) +assert_type(u8 > u4, np.bool_) +assert_type(u4 > u4, np.bool_) +assert_type(b_ > u4, np.bool_) +assert_type(b > u4, np.bool_) +assert_type(i > u4, np.bool_) +assert_type(AR > u4, npt.NDArray[np.bool_]) +assert_type(SEQ > u4, npt.NDArray[np.bool_]) diff --git a/numpy/typing/tests/data/reveal/constants.pyi b/numpy/typing/tests/data/reveal/constants.pyi index 37f54ccdaba3..ce2fcef1e2fc 100644 --- a/numpy/typing/tests/data/reveal/constants.pyi +++ b/numpy/typing/tests/data/reveal/constants.pyi @@ -1,52 +1,61 @@ +import sys +from typing import Literal + import numpy as np +from numpy.core._type_aliases import _SCTypes + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type -reveal_type(np.Inf) # E: float -reveal_type(np.Infinity) # E: float -reveal_type(np.NAN) # E: float -reveal_type(np.NINF) # E: float -reveal_type(np.NZERO) # E: float -reveal_type(np.NaN) # E: float -reveal_type(np.PINF) # E: float -reveal_type(np.PZERO) # E: float -reveal_type(np.e) # E: float -reveal_type(np.euler_gamma) # E: float -reveal_type(np.inf) # E: float 
-reveal_type(np.infty) # E: float -reveal_type(np.nan) # E: float -reveal_type(np.pi) # E: float +assert_type(np.Inf, float) +assert_type(np.Infinity, float) +assert_type(np.NAN, float) +assert_type(np.NINF, float) +assert_type(np.NZERO, float) +assert_type(np.NaN, float) +assert_type(np.PINF, float) +assert_type(np.PZERO, float) +assert_type(np.e, float) +assert_type(np.euler_gamma, float) +assert_type(np.inf, float) +assert_type(np.infty, float) +assert_type(np.nan, float) +assert_type(np.pi, float) -reveal_type(np.ALLOW_THREADS) # E: int -reveal_type(np.BUFSIZE) # E: Literal[8192] -reveal_type(np.CLIP) # E: Literal[0] -reveal_type(np.ERR_CALL) # E: Literal[3] -reveal_type(np.ERR_DEFAULT) # E: Literal[521] -reveal_type(np.ERR_IGNORE) # E: Literal[0] -reveal_type(np.ERR_LOG) # E: Literal[5] -reveal_type(np.ERR_PRINT) # E: Literal[4] -reveal_type(np.ERR_RAISE) # E: Literal[2] -reveal_type(np.ERR_WARN) # E: Literal[1] -reveal_type(np.FLOATING_POINT_SUPPORT) # E: Literal[1] -reveal_type(np.FPE_DIVIDEBYZERO) # E: Literal[1] -reveal_type(np.FPE_INVALID) # E: Literal[8] -reveal_type(np.FPE_OVERFLOW) # E: Literal[2] -reveal_type(np.FPE_UNDERFLOW) # E: Literal[4] -reveal_type(np.MAXDIMS) # E: Literal[32] -reveal_type(np.MAY_SHARE_BOUNDS) # E: Literal[0] -reveal_type(np.MAY_SHARE_EXACT) # E: Literal[-1] -reveal_type(np.RAISE) # E: Literal[2] -reveal_type(np.SHIFT_DIVIDEBYZERO) # E: Literal[0] -reveal_type(np.SHIFT_INVALID) # E: Literal[9] -reveal_type(np.SHIFT_OVERFLOW) # E: Literal[3] -reveal_type(np.SHIFT_UNDERFLOW) # E: Literal[6] -reveal_type(np.UFUNC_BUFSIZE_DEFAULT) # E: Literal[8192] -reveal_type(np.WRAP) # E: Literal[1] -reveal_type(np.tracemalloc_domain) # E: Literal[389047] +assert_type(np.ALLOW_THREADS, int) +assert_type(np.BUFSIZE, Literal[8192]) +assert_type(np.CLIP, Literal[0]) +assert_type(np.ERR_CALL, Literal[3]) +assert_type(np.ERR_DEFAULT, Literal[521]) +assert_type(np.ERR_IGNORE, Literal[0]) +assert_type(np.ERR_LOG, Literal[5]) +assert_type(np.ERR_PRINT, Literal[4]) +assert_type(np.ERR_RAISE, Literal[2]) +assert_type(np.ERR_WARN, Literal[1]) +assert_type(np.FLOATING_POINT_SUPPORT, Literal[1]) +assert_type(np.FPE_DIVIDEBYZERO, Literal[1]) +assert_type(np.FPE_INVALID, Literal[8]) +assert_type(np.FPE_OVERFLOW, Literal[2]) +assert_type(np.FPE_UNDERFLOW, Literal[4]) +assert_type(np.MAXDIMS, Literal[32]) +assert_type(np.MAY_SHARE_BOUNDS, Literal[0]) +assert_type(np.MAY_SHARE_EXACT, Literal[-1]) +assert_type(np.RAISE, Literal[2]) +assert_type(np.SHIFT_DIVIDEBYZERO, Literal[0]) +assert_type(np.SHIFT_INVALID, Literal[9]) +assert_type(np.SHIFT_OVERFLOW, Literal[3]) +assert_type(np.SHIFT_UNDERFLOW, Literal[6]) +assert_type(np.UFUNC_BUFSIZE_DEFAULT, Literal[8192]) +assert_type(np.WRAP, Literal[1]) +assert_type(np.tracemalloc_domain, Literal[389047]) -reveal_type(np.little_endian) # E: bool -reveal_type(np.True_) # E: bool_ -reveal_type(np.False_) # E: bool_ +assert_type(np.little_endian, bool) +assert_type(np.True_, np.bool_) +assert_type(np.False_, np.bool_) -reveal_type(np.UFUNC_PYVALS_NAME) # E: Literal['UFUNC_PYVALS'] +assert_type(np.UFUNC_PYVALS_NAME, Literal["UFUNC_PYVALS"]) -reveal_type(np.sctypeDict) # E: dict -reveal_type(np.sctypes) # E: TypedDict +assert_type(np.sctypeDict, dict[int | str, type[np.generic]]) +assert_type(np.sctypes, _SCTypes) diff --git a/numpy/typing/tests/data/reveal/ctypeslib.pyi b/numpy/typing/tests/data/reveal/ctypeslib.pyi index 2d30de3d15fe..f306bd9c1d0f 100644 --- a/numpy/typing/tests/data/reveal/ctypeslib.pyi +++ 
b/numpy/typing/tests/data/reveal/ctypeslib.pyi @@ -1,8 +1,15 @@ -import ctypes +import sys +import ctypes as ct from typing import Any import numpy as np import numpy.typing as npt +from numpy import ctypeslib + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type AR_bool: npt.NDArray[np.bool_] AR_ubyte: npt.NDArray[np.ubyte] @@ -20,68 +27,66 @@ AR_double: npt.NDArray[np.double] AR_longdouble: npt.NDArray[np.longdouble] AR_void: npt.NDArray[np.void] -pointer: ctypes._Pointer[Any] +pointer: ct._Pointer[Any] + +assert_type(np.ctypeslib.c_intp(), ctypeslib.c_intp) -reveal_type(np.ctypeslib.c_intp()) # E: {c_intp} +assert_type(np.ctypeslib.ndpointer(), type[ctypeslib._ndptr[None]]) +assert_type(np.ctypeslib.ndpointer(dtype=np.float64), type[ctypeslib._ndptr[np.dtype[np.float64]]]) +assert_type(np.ctypeslib.ndpointer(dtype=float), type[ctypeslib._ndptr[np.dtype[Any]]]) +assert_type(np.ctypeslib.ndpointer(shape=(10, 3)), type[ctypeslib._ndptr[None]]) +assert_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3)), type[ctypeslib._concrete_ndptr[np.dtype[np.int64]]]) +assert_type(np.ctypeslib.ndpointer(int, shape=(1,)), type[np.ctypeslib._concrete_ndptr[np.dtype[Any]]]) -reveal_type(np.ctypeslib.ndpointer()) # E: Type[ctypeslib._ndptr[None]] -reveal_type(np.ctypeslib.ndpointer(dtype=np.float64)) # E: Type[ctypeslib._ndptr[dtype[{float64}]]] -reveal_type(np.ctypeslib.ndpointer(dtype=float)) # E: Type[ctypeslib._ndptr[dtype[Any]]] -reveal_type(np.ctypeslib.ndpointer(shape=(10, 3))) # E: Type[ctypeslib._ndptr[None]] -reveal_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3))) # E: Type[ctypeslib._concrete_ndptr[dtype[{int64}]]] -reveal_type(np.ctypeslib.ndpointer(int, shape=(1,))) # E: Type[ctypeslib._concrete_ndptr[dtype[Any]]] +assert_type(np.ctypeslib.as_ctypes_type(np.bool_), type[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes_type(np.ubyte), type[ct.c_ubyte]) +assert_type(np.ctypeslib.as_ctypes_type(np.ushort), type[ct.c_ushort]) +assert_type(np.ctypeslib.as_ctypes_type(np.uintc), type[ct.c_uint]) +assert_type(np.ctypeslib.as_ctypes_type(np.byte), type[ct.c_byte]) +assert_type(np.ctypeslib.as_ctypes_type(np.short), type[ct.c_short]) +assert_type(np.ctypeslib.as_ctypes_type(np.intc), type[ct.c_int]) +assert_type(np.ctypeslib.as_ctypes_type(np.single), type[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes_type(np.double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(ct.c_double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type("q"), type[ct.c_longlong]) +assert_type(np.ctypeslib.as_ctypes_type([("i8", np.int64), ("f8", np.float64)]), type[Any]) +assert_type(np.ctypeslib.as_ctypes_type("i8"), type[Any]) +assert_type(np.ctypeslib.as_ctypes_type("f8"), type[Any]) -reveal_type(np.ctypeslib.as_ctypes_type(np.bool_)) # E: Type[ctypes.c_bool] -reveal_type(np.ctypeslib.as_ctypes_type(np.ubyte)) # E: Type[{c_ubyte}] -reveal_type(np.ctypeslib.as_ctypes_type(np.ushort)) # E: Type[{c_ushort}] -reveal_type(np.ctypeslib.as_ctypes_type(np.uintc)) # E: Type[{c_uint}] -reveal_type(np.ctypeslib.as_ctypes_type(np.uint)) # E: Type[{c_ulong}] -reveal_type(np.ctypeslib.as_ctypes_type(np.ulonglong)) # E: Type[{c_ulonglong}] -reveal_type(np.ctypeslib.as_ctypes_type(np.byte)) # E: Type[{c_byte}] -reveal_type(np.ctypeslib.as_ctypes_type(np.short)) # E: Type[{c_short}] -reveal_type(np.ctypeslib.as_ctypes_type(np.intc)) # E: Type[{c_int}] -reveal_type(np.ctypeslib.as_ctypes_type(np.int_)) # E: Type[{c_long}] 
-reveal_type(np.ctypeslib.as_ctypes_type(np.longlong)) # E: Type[{c_longlong}] -reveal_type(np.ctypeslib.as_ctypes_type(np.single)) # E: Type[{c_float}] -reveal_type(np.ctypeslib.as_ctypes_type(np.double)) # E: Type[{c_double}] -reveal_type(np.ctypeslib.as_ctypes_type(np.longdouble)) # E: Type[{c_longdouble}] -reveal_type(np.ctypeslib.as_ctypes_type(ctypes.c_double)) # E: Type[{c_double}] -reveal_type(np.ctypeslib.as_ctypes_type("q")) # E: Type[ctypes.c_longlong] -reveal_type(np.ctypeslib.as_ctypes_type([("i8", np.int64), ("f8", np.float64)])) # E: Type[Any] -reveal_type(np.ctypeslib.as_ctypes_type("i8")) # E: Type[Any] -reveal_type(np.ctypeslib.as_ctypes_type("f8")) # E: Type[Any] +assert_type(np.ctypeslib.as_ctypes(AR_bool.take(0)), ct.c_bool) +assert_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0)), ct.c_ubyte) +assert_type(np.ctypeslib.as_ctypes(AR_ushort.take(0)), ct.c_ushort) +assert_type(np.ctypeslib.as_ctypes(AR_uintc.take(0)), ct.c_uint) +assert_type(np.ctypeslib.as_ctypes(AR_uint.take(0)), ct.c_ulong) +assert_type(np.ctypeslib.as_ctypes(AR_byte.take(0)), ct.c_byte) +assert_type(np.ctypeslib.as_ctypes(AR_short.take(0)), ct.c_short) +assert_type(np.ctypeslib.as_ctypes(AR_intc.take(0)), ct.c_int) +assert_type(np.ctypeslib.as_ctypes(AR_int.take(0)), ct.c_long) +assert_type(np.ctypeslib.as_ctypes(AR_single.take(0)), ct.c_float) +assert_type(np.ctypeslib.as_ctypes(AR_double.take(0)), ct.c_double) +assert_type(np.ctypeslib.as_ctypes(AR_void.take(0)), Any) +assert_type(np.ctypeslib.as_ctypes(AR_bool), ct.Array[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes(AR_ubyte), ct.Array[ct.c_ubyte]) +assert_type(np.ctypeslib.as_ctypes(AR_ushort), ct.Array[ct.c_ushort]) +assert_type(np.ctypeslib.as_ctypes(AR_uintc), ct.Array[ct.c_uint]) +assert_type(np.ctypeslib.as_ctypes(AR_byte), ct.Array[ct.c_byte]) +assert_type(np.ctypeslib.as_ctypes(AR_short), ct.Array[ct.c_short]) +assert_type(np.ctypeslib.as_ctypes(AR_intc), ct.Array[ct.c_int]) +assert_type(np.ctypeslib.as_ctypes(AR_single), ct.Array[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes(AR_double), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_void), ct.Array[Any]) -reveal_type(np.ctypeslib.as_ctypes(AR_bool.take(0))) # E: ctypes.c_bool -reveal_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0))) # E: {c_ubyte} -reveal_type(np.ctypeslib.as_ctypes(AR_ushort.take(0))) # E: {c_ushort} -reveal_type(np.ctypeslib.as_ctypes(AR_uintc.take(0))) # E: {c_uint} -reveal_type(np.ctypeslib.as_ctypes(AR_uint.take(0))) # E: {c_ulong} -reveal_type(np.ctypeslib.as_ctypes(AR_ulonglong.take(0))) # E: {c_ulonglong} -reveal_type(np.ctypeslib.as_ctypes(AR_byte.take(0))) # E: {c_byte} -reveal_type(np.ctypeslib.as_ctypes(AR_short.take(0))) # E: {c_short} -reveal_type(np.ctypeslib.as_ctypes(AR_intc.take(0))) # E: {c_int} -reveal_type(np.ctypeslib.as_ctypes(AR_int.take(0))) # E: {c_long} -reveal_type(np.ctypeslib.as_ctypes(AR_longlong.take(0))) # E: {c_longlong} -reveal_type(np.ctypeslib.as_ctypes(AR_single.take(0))) # E: {c_float} -reveal_type(np.ctypeslib.as_ctypes(AR_double.take(0))) # E: {c_double} -reveal_type(np.ctypeslib.as_ctypes(AR_longdouble.take(0))) # E: {c_longdouble} -reveal_type(np.ctypeslib.as_ctypes(AR_void.take(0))) # E: Any -reveal_type(np.ctypeslib.as_ctypes(AR_bool)) # E: ctypes.Array[ctypes.c_bool] -reveal_type(np.ctypeslib.as_ctypes(AR_ubyte)) # E: ctypes.Array[{c_ubyte}] -reveal_type(np.ctypeslib.as_ctypes(AR_ushort)) # E: ctypes.Array[{c_ushort}] -reveal_type(np.ctypeslib.as_ctypes(AR_uintc)) # E: ctypes.Array[{c_uint}] 
-reveal_type(np.ctypeslib.as_ctypes(AR_uint)) # E: ctypes.Array[{c_ulong}] -reveal_type(np.ctypeslib.as_ctypes(AR_ulonglong)) # E: ctypes.Array[{c_ulonglong}] -reveal_type(np.ctypeslib.as_ctypes(AR_byte)) # E: ctypes.Array[{c_byte}] -reveal_type(np.ctypeslib.as_ctypes(AR_short)) # E: ctypes.Array[{c_short}] -reveal_type(np.ctypeslib.as_ctypes(AR_intc)) # E: ctypes.Array[{c_int}] -reveal_type(np.ctypeslib.as_ctypes(AR_int)) # E: ctypes.Array[{c_long}] -reveal_type(np.ctypeslib.as_ctypes(AR_longlong)) # E: ctypes.Array[{c_longlong}] -reveal_type(np.ctypeslib.as_ctypes(AR_single)) # E: ctypes.Array[{c_float}] -reveal_type(np.ctypeslib.as_ctypes(AR_double)) # E: ctypes.Array[{c_double}] -reveal_type(np.ctypeslib.as_ctypes(AR_longdouble)) # E: ctypes.Array[{c_longdouble}] -reveal_type(np.ctypeslib.as_ctypes(AR_void)) # E: ctypes.Array[Any] +assert_type(np.ctypeslib.as_array(AR_ubyte), npt.NDArray[np.ubyte]) +assert_type(np.ctypeslib.as_array(1), npt.NDArray[Any]) +assert_type(np.ctypeslib.as_array(pointer), npt.NDArray[Any]) -reveal_type(np.ctypeslib.as_array(AR_ubyte)) # E: ndarray[Any, dtype[{ubyte}]] -reveal_type(np.ctypeslib.as_array(1)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.ctypeslib.as_array(pointer)) # E: ndarray[Any, dtype[Any]] +if sys.platform == "win32": + assert_type(np.ctypeslib.as_ctypes_type(np.int_), type[ct.c_int]) + assert_type(np.ctypeslib.as_ctypes_type(np.uint), type[ct.c_uint]) + assert_type(np.ctypeslib.as_ctypes(AR_uint), ct.Array[ct.c_uint]) + assert_type(np.ctypeslib.as_ctypes(AR_int), ct.Array[ct.c_int]) +else: + assert_type(np.ctypeslib.as_ctypes_type(np.int_), type[ct.c_long]) + assert_type(np.ctypeslib.as_ctypes_type(np.uint), type[ct.c_ulong]) + assert_type(np.ctypeslib.as_ctypes(AR_uint), ct.Array[ct.c_ulong]) + assert_type(np.ctypeslib.as_ctypes(AR_int), ct.Array[ct.c_long]) diff --git a/numpy/typing/tests/data/reveal/datasource.pyi b/numpy/typing/tests/data/reveal/datasource.pyi index 245ac7649e96..865722d8c944 100644 --- a/numpy/typing/tests/data/reveal/datasource.pyi +++ b/numpy/typing/tests/data/reveal/datasource.pyi @@ -1,6 +1,14 @@ +import sys from pathlib import Path +from typing import IO, Any + import numpy as np +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + path1: Path path2: str @@ -8,14 +16,14 @@ d1 = np.DataSource(path1) d2 = np.DataSource(path2) d3 = np.DataSource(None) -reveal_type(d1.abspath("...")) # E: str -reveal_type(d2.abspath("...")) # E: str -reveal_type(d3.abspath("...")) # E: str +assert_type(d1.abspath("..."), str) +assert_type(d2.abspath("..."), str) +assert_type(d3.abspath("..."), str) -reveal_type(d1.exists("...")) # E: bool -reveal_type(d2.exists("...")) # E: bool -reveal_type(d3.exists("...")) # E: bool +assert_type(d1.exists("..."), bool) +assert_type(d2.exists("..."), bool) +assert_type(d3.exists("..."), bool) -reveal_type(d1.open("...", "r")) # E: IO[Any] -reveal_type(d2.open("...", encoding="utf8")) # E: IO[Any] -reveal_type(d3.open("...", newline="/n")) # E: IO[Any] +assert_type(d1.open("...", "r"), IO[Any]) +assert_type(d2.open("...", encoding="utf8"), IO[Any]) +assert_type(d3.open("...", newline="/n"), IO[Any]) diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 477877a71a1c..19713098bba3 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -1,77 +1,85 @@ +import sys import ctypes as ct +from typing import Any + import numpy as np +if 
sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + dtype_U: np.dtype[np.str_] dtype_V: np.dtype[np.void] dtype_i8: np.dtype[np.int64] -reveal_type(np.dtype(np.float64)) # E: dtype[{float64}] -reveal_type(np.dtype(np.float64, metadata={"test": "test"})) # E: dtype[{float64}] -reveal_type(np.dtype(np.int64)) # E: dtype[{int64}] +assert_type(np.dtype(np.float64), np.dtype[np.float64]) +assert_type(np.dtype(np.float64, metadata={"test": "test"}), np.dtype[np.float64]) +assert_type(np.dtype(np.int64), np.dtype[np.int64]) # String aliases -reveal_type(np.dtype("float64")) # E: dtype[{float64}] -reveal_type(np.dtype("float32")) # E: dtype[{float32}] -reveal_type(np.dtype("int64")) # E: dtype[{int64}] -reveal_type(np.dtype("int32")) # E: dtype[{int32}] -reveal_type(np.dtype("bool")) # E: dtype[bool_] -reveal_type(np.dtype("bytes")) # E: dtype[bytes_] -reveal_type(np.dtype("str")) # E: dtype[str_] +assert_type(np.dtype("float64"), np.dtype[np.float64]) +assert_type(np.dtype("float32"), np.dtype[np.float32]) +assert_type(np.dtype("int64"), np.dtype[np.int64]) +assert_type(np.dtype("int32"), np.dtype[np.int32]) +assert_type(np.dtype("bool"), np.dtype[np.bool_]) +assert_type(np.dtype("bytes"), np.dtype[np.bytes_]) +assert_type(np.dtype("str"), np.dtype[np.str_]) # Python types -reveal_type(np.dtype(complex)) # E: dtype[{cdouble}] -reveal_type(np.dtype(float)) # E: dtype[{double}] -reveal_type(np.dtype(int)) # E: dtype[{int_}] -reveal_type(np.dtype(bool)) # E: dtype[bool_] -reveal_type(np.dtype(str)) # E: dtype[str_] -reveal_type(np.dtype(bytes)) # E: dtype[bytes_] -reveal_type(np.dtype(object)) # E: dtype[object_] +assert_type(np.dtype(complex), np.dtype[np.cdouble]) +assert_type(np.dtype(float), np.dtype[np.double]) +assert_type(np.dtype(int), np.dtype[np.int_]) +assert_type(np.dtype(bool), np.dtype[np.bool_]) +assert_type(np.dtype(str), np.dtype[np.str_]) +assert_type(np.dtype(bytes), np.dtype[np.bytes_]) +assert_type(np.dtype(object), np.dtype[np.object_]) # ctypes -reveal_type(np.dtype(ct.c_double)) # E: dtype[{double}] -reveal_type(np.dtype(ct.c_longlong)) # E: dtype[{longlong}] -reveal_type(np.dtype(ct.c_uint32)) # E: dtype[{uint32}] -reveal_type(np.dtype(ct.c_bool)) # E: dtype[bool_] -reveal_type(np.dtype(ct.c_char)) # E: dtype[bytes_] -reveal_type(np.dtype(ct.py_object)) # E: dtype[object_] +assert_type(np.dtype(ct.c_double), np.dtype[np.double]) +assert_type(np.dtype(ct.c_longlong), np.dtype[np.longlong]) +assert_type(np.dtype(ct.c_uint32), np.dtype[np.uint32]) +assert_type(np.dtype(ct.c_bool), np.dtype[np.bool_]) +assert_type(np.dtype(ct.c_char), np.dtype[np.bytes_]) +assert_type(np.dtype(ct.py_object), np.dtype[np.object_]) # Special case for None -reveal_type(np.dtype(None)) # E: dtype[{double}] +assert_type(np.dtype(None), np.dtype[np.double]) # Dtypes of dtypes -reveal_type(np.dtype(np.dtype(np.float64))) # E: dtype[{float64}] +assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64]) # Parameterized dtypes -reveal_type(np.dtype("S8")) # E: dtype +assert_type(np.dtype("S8"), np.dtype) # Void -reveal_type(np.dtype(("U", 10))) # E: dtype[void] +assert_type(np.dtype(("U", 10)), np.dtype[np.void]) # Methods and attributes -reveal_type(dtype_U.base) # E: dtype[Any] -reveal_type(dtype_U.subdtype) # E: Union[None, tuple[dtype[Any], builtins.tuple[builtins.int, ...]]] -reveal_type(dtype_U.newbyteorder()) # E: dtype[str_] -reveal_type(dtype_U.type) # E: Type[str_] -reveal_type(dtype_U.name) # E: str 
-reveal_type(dtype_U.names) # E: Union[None, builtins.tuple[builtins.str, ...]] - -reveal_type(dtype_U * 0) # E: dtype[str_] -reveal_type(dtype_U * 1) # E: dtype[str_] -reveal_type(dtype_U * 2) # E: dtype[str_] - -reveal_type(dtype_i8 * 0) # E: dtype[void] -reveal_type(dtype_i8 * 1) # E: dtype[{int64}] -reveal_type(dtype_i8 * 2) # E: dtype[void] - -reveal_type(0 * dtype_U) # E: dtype[str_] -reveal_type(1 * dtype_U) # E: dtype[str_] -reveal_type(2 * dtype_U) # E: dtype[str_] - -reveal_type(0 * dtype_i8) # E: dtype[Any] -reveal_type(1 * dtype_i8) # E: dtype[Any] -reveal_type(2 * dtype_i8) # E: dtype[Any] - -reveal_type(dtype_V["f0"]) # E: dtype[Any] -reveal_type(dtype_V[0]) # E: dtype[Any] -reveal_type(dtype_V[["f0", "f1"]]) # E: dtype[void] -reveal_type(dtype_V[["f0"]]) # E: dtype[void] +assert_type(dtype_U.base, np.dtype[Any]) +assert_type(dtype_U.subdtype, None | tuple[np.dtype[Any], tuple[int, ...]]) +assert_type(dtype_U.newbyteorder(), np.dtype[np.str_]) +assert_type(dtype_U.type, type[np.str_]) +assert_type(dtype_U.name, str) +assert_type(dtype_U.names, None | tuple[str, ...]) + +assert_type(dtype_U * 0, np.dtype[np.str_]) +assert_type(dtype_U * 1, np.dtype[np.str_]) +assert_type(dtype_U * 2, np.dtype[np.str_]) + +assert_type(dtype_i8 * 0, np.dtype[np.void]) +assert_type(dtype_i8 * 1, np.dtype[np.int64]) +assert_type(dtype_i8 * 2, np.dtype[np.void]) + +assert_type(0 * dtype_U, np.dtype[np.str_]) +assert_type(1 * dtype_U, np.dtype[np.str_]) +assert_type(2 * dtype_U, np.dtype[np.str_]) + +assert_type(0 * dtype_i8, np.dtype[Any]) +assert_type(1 * dtype_i8, np.dtype[Any]) +assert_type(2 * dtype_i8, np.dtype[Any]) + +assert_type(dtype_V["f0"], np.dtype[Any]) +assert_type(dtype_V[0], np.dtype[Any]) +assert_type(dtype_V[["f0", "f1"]], np.dtype[np.void]) +assert_type(dtype_V[["f0"]], np.dtype[np.void]) diff --git a/numpy/typing/tests/data/reveal/einsumfunc.pyi b/numpy/typing/tests/data/reveal/einsumfunc.pyi index e19ed233c569..645aaad31cf1 100644 --- a/numpy/typing/tests/data/reveal/einsumfunc.pyi +++ b/numpy/typing/tests/data/reveal/einsumfunc.pyi @@ -1,7 +1,14 @@ +import sys from typing import Any + import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + AR_LIKE_b: list[bool] AR_LIKE_u: list[np.uint32] AR_LIKE_i: list[int] @@ -12,27 +19,27 @@ AR_o: npt.NDArray[np.object_] OUT_f: npt.NDArray[np.float64] -reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: Any -reveal_type(np.einsum("i,i->i", AR_o, AR_o)) # E: Any -reveal_type(np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u)) # E: Any -reveal_type(np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: Any -reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: Any -reveal_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c)) # E: Any -reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i)) # E: Any -reveal_type(np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)) # E: Any - -reveal_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c, out=OUT_f)) # E: ndarray[Any, dtype[{float64}] -reveal_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe", out=OUT_f)) # E: ndarray[Any, dtype[{float64}] -reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16")) # E: Any -reveal_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe")) # E: Any - -reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: tuple[builtins.list[Any], builtins.str] -reveal_type(np.einsum_path("i,i->i", AR_LIKE_u, 
AR_LIKE_u)) # E: tuple[builtins.list[Any], builtins.str] -reveal_type(np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: tuple[builtins.list[Any], builtins.str] -reveal_type(np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: tuple[builtins.list[Any], builtins.str] -reveal_type(np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c)) # E: tuple[builtins.list[Any], builtins.str] -reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i)) # E: tuple[builtins.list[Any], builtins.str] -reveal_type(np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)) # E: tuple[builtins.list[Any], builtins.str] - -reveal_type(np.einsum([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i)) # E: Any -reveal_type(np.einsum_path([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i)) # E: tuple[builtins.list[Any], builtins.str] +assert_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b), Any) +assert_type(np.einsum("i,i->i", AR_o, AR_o), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i), Any) +assert_type(np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c), Any) + +assert_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c, out=OUT_f), npt.NDArray[np.float64]) +assert_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe", out=OUT_f), npt.NDArray[np.float64]) +assert_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16"), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe"), Any) + +assert_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c), tuple[list[Any], str]) + +assert_type(np.einsum([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i), Any) +assert_type(np.einsum_path([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i), tuple[list[Any], str]) diff --git a/numpy/typing/tests/data/reveal/emath.pyi b/numpy/typing/tests/data/reveal/emath.pyi index 9ab2d72d2be2..d1027bf48d50 100644 --- a/numpy/typing/tests/data/reveal/emath.pyi +++ b/numpy/typing/tests/data/reveal/emath.pyi @@ -1,52 +1,60 @@ +import sys +from typing import Any + import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + AR_f8: npt.NDArray[np.float64] AR_c16: npt.NDArray[np.complex128] f8: np.float64 c16: np.complex128 -reveal_type(np.emath.sqrt(f8)) # E: Any -reveal_type(np.emath.sqrt(AR_f8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.emath.sqrt(c16)) # E: complexfloating[Any, Any] -reveal_type(np.emath.sqrt(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.emath.log(f8)) # E: Any -reveal_type(np.emath.log(AR_f8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.emath.log(c16)) # E: complexfloating[Any, Any] -reveal_type(np.emath.log(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - 
-reveal_type(np.emath.log10(f8)) # E: Any -reveal_type(np.emath.log10(AR_f8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.emath.log10(c16)) # E: complexfloating[Any, Any] -reveal_type(np.emath.log10(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.emath.log2(f8)) # E: Any -reveal_type(np.emath.log2(AR_f8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.emath.log2(c16)) # E: complexfloating[Any, Any] -reveal_type(np.emath.log2(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.emath.logn(f8, 2)) # E: Any -reveal_type(np.emath.logn(AR_f8, 4)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.emath.logn(f8, 1j)) # E: complexfloating[Any, Any] -reveal_type(np.emath.logn(AR_c16, 1.5)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.emath.power(f8, 2)) # E: Any -reveal_type(np.emath.power(AR_f8, 4)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.emath.power(f8, 2j)) # E: complexfloating[Any, Any] -reveal_type(np.emath.power(AR_c16, 1.5)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.emath.arccos(f8)) # E: Any -reveal_type(np.emath.arccos(AR_f8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.emath.arccos(c16)) # E: complexfloating[Any, Any] -reveal_type(np.emath.arccos(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.emath.arcsin(f8)) # E: Any -reveal_type(np.emath.arcsin(AR_f8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.emath.arcsin(c16)) # E: complexfloating[Any, Any] -reveal_type(np.emath.arcsin(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.emath.arctanh(f8)) # E: Any -reveal_type(np.emath.arctanh(AR_f8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.emath.arctanh(c16)) # E: complexfloating[Any, Any] -reveal_type(np.emath.arctanh(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] +assert_type(np.emath.sqrt(f8), Any) +assert_type(np.emath.sqrt(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.sqrt(c16), np.complexfloating[Any, Any]) +assert_type(np.emath.sqrt(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.emath.log(f8), Any) +assert_type(np.emath.log(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.log(c16), np.complexfloating[Any, Any]) +assert_type(np.emath.log(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.emath.log10(f8), Any) +assert_type(np.emath.log10(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.log10(c16), np.complexfloating[Any, Any]) +assert_type(np.emath.log10(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.emath.log2(f8), Any) +assert_type(np.emath.log2(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.log2(c16), np.complexfloating[Any, Any]) +assert_type(np.emath.log2(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.emath.logn(f8, 2), Any) +assert_type(np.emath.logn(AR_f8, 4), npt.NDArray[Any]) +assert_type(np.emath.logn(f8, 1j), np.complexfloating[Any, Any]) +assert_type(np.emath.logn(AR_c16, 1.5), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.emath.power(f8, 2), Any) +assert_type(np.emath.power(AR_f8, 4), npt.NDArray[Any]) +assert_type(np.emath.power(f8, 2j), np.complexfloating[Any, Any]) +assert_type(np.emath.power(AR_c16, 1.5), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.emath.arccos(f8), Any) +assert_type(np.emath.arccos(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.arccos(c16), np.complexfloating[Any, Any]) +assert_type(np.emath.arccos(AR_c16), npt.NDArray[np.complexfloating[Any, 
Any]]) + +assert_type(np.emath.arcsin(f8), Any) +assert_type(np.emath.arcsin(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.arcsin(c16), np.complexfloating[Any, Any]) +assert_type(np.emath.arcsin(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.emath.arctanh(f8), Any) +assert_type(np.emath.arctanh(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.arctanh(c16), np.complexfloating[Any, Any]) +assert_type(np.emath.arctanh(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) diff --git a/numpy/typing/tests/data/reveal/false_positives.pyi b/numpy/typing/tests/data/reveal/false_positives.pyi index 2d7156642ec2..7a2e016245a6 100644 --- a/numpy/typing/tests/data/reveal/false_positives.pyi +++ b/numpy/typing/tests/data/reveal/false_positives.pyi @@ -1,10 +1,18 @@ +import sys from typing import Any + +import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + AR_Any: npt.NDArray[Any] # Mypy bug where overload ambiguity is ignored for `Any`-parametrized types; # xref numpy/numpy#20099 and python/mypy#11347 # -# The expected output would be something akin to `ndarray[Any, dtype[Any]]` -reveal_type(AR_Any + 2) # E: ndarray[Any, dtype[signedinteger[Any]]] +# The expected output would be something akin to `npt.NDArray[Any]` +assert_type(AR_Any + 2, npt.NDArray[np.signedinteger[Any]]) diff --git a/numpy/typing/tests/data/reveal/fft.pyi b/numpy/typing/tests/data/reveal/fft.pyi index 0667938e4478..d6e9ba756d97 100644 --- a/numpy/typing/tests/data/reveal/fft.pyi +++ b/numpy/typing/tests/data/reveal/fft.pyi @@ -1,35 +1,43 @@ +import sys +from typing import Any + import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + AR_f8: npt.NDArray[np.float64] AR_c16: npt.NDArray[np.complex128] AR_LIKE_f8: list[float] -reveal_type(np.fft.fftshift(AR_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.fft.fftshift(AR_LIKE_f8, axes=0)) # E: ndarray[Any, dtype[Any]] +assert_type(np.fft.fftshift(AR_f8), npt.NDArray[np.float64]) +assert_type(np.fft.fftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any]) -reveal_type(np.fft.ifftshift(AR_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.fft.ifftshift(AR_LIKE_f8, axes=0)) # E: ndarray[Any, dtype[Any]] +assert_type(np.fft.ifftshift(AR_f8), npt.NDArray[np.float64]) +assert_type(np.fft.ifftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any]) -reveal_type(np.fft.fftfreq(5, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.fft.fftfreq(np.int64(), AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] +assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -reveal_type(np.fft.fftfreq(5, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.fft.fftfreq(np.int64(), AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] +assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -reveal_type(np.fft.fft(AR_f8)) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(np.fft.ifft(AR_f8, axis=1)) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(np.fft.rfft(AR_f8, n=None)) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(np.fft.irfft(AR_f8, norm="ortho")) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.fft.hfft(AR_f8, 
n=2)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.fft.ihfft(AR_f8)) # E: ndarray[Any, dtype[{complex128}]] +assert_type(np.fft.fft(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.ifft(AR_f8, axis=1), npt.NDArray[np.complex128]) +assert_type(np.fft.rfft(AR_f8, n=None), npt.NDArray[np.complex128]) +assert_type(np.fft.irfft(AR_f8, norm="ortho"), npt.NDArray[np.float64]) +assert_type(np.fft.hfft(AR_f8, n=2), npt.NDArray[np.float64]) +assert_type(np.fft.ihfft(AR_f8), npt.NDArray[np.complex128]) -reveal_type(np.fft.fftn(AR_f8)) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(np.fft.ifftn(AR_f8)) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(np.fft.rfftn(AR_f8)) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(np.fft.irfftn(AR_f8)) # E: ndarray[Any, dtype[{float64}]] +assert_type(np.fft.fftn(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.ifftn(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.rfftn(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.irfftn(AR_f8), npt.NDArray[np.float64]) -reveal_type(np.fft.rfft2(AR_f8)) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(np.fft.ifft2(AR_f8)) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(np.fft.fft2(AR_f8)) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(np.fft.irfft2(AR_f8)) # E: ndarray[Any, dtype[{float64}]] +assert_type(np.fft.rfft2(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.ifft2(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.fft2(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.irfft2(AR_f8), npt.NDArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/flatiter.pyi b/numpy/typing/tests/data/reveal/flatiter.pyi index 8d3e80632821..84d3b03b7d37 100644 --- a/numpy/typing/tests/data/reveal/flatiter.pyi +++ b/numpy/typing/tests/data/reveal/flatiter.pyi @@ -1,22 +1,30 @@ +import sys from typing import Any + import numpy as np +import numpy.typing as npt + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type -a: np.flatiter[np.ndarray[Any, np.dtype[np.str_]]] +a: np.flatiter[npt.NDArray[np.str_]] -reveal_type(a.base) # E: ndarray[Any, dtype[str_]] -reveal_type(a.copy()) # E: ndarray[Any, dtype[str_]] -reveal_type(a.coords) # E: tuple[builtins.int, ...] -reveal_type(a.index) # E: int -reveal_type(iter(a)) # E: Any -reveal_type(next(a)) # E: str_ -reveal_type(a[0]) # E: str_ -reveal_type(a[[0, 1, 2]]) # E: ndarray[Any, dtype[str_]] -reveal_type(a[...]) # E: ndarray[Any, dtype[str_]] -reveal_type(a[:]) # E: ndarray[Any, dtype[str_]] -reveal_type(a[(...,)]) # E: ndarray[Any, dtype[str_]] -reveal_type(a[(0,)]) # E: str_ -reveal_type(a.__array__()) # E: ndarray[Any, dtype[str_]] -reveal_type(a.__array__(np.dtype(np.float64))) # E: ndarray[Any, dtype[{float64}]] +assert_type(a.base, npt.NDArray[np.str_]) +assert_type(a.copy(), npt.NDArray[np.str_]) +assert_type(a.coords, tuple[int, ...]) +assert_type(a.index, int) +assert_type(iter(a), np.flatiter[npt.NDArray[np.str_]]) +assert_type(next(a), np.str_) +assert_type(a[0], np.str_) +assert_type(a[[0, 1, 2]], npt.NDArray[np.str_]) +assert_type(a[...], npt.NDArray[np.str_]) +assert_type(a[:], npt.NDArray[np.str_]) +assert_type(a[(...,)], npt.NDArray[np.str_]) +assert_type(a[(0,)], np.str_) +assert_type(a.__array__(), npt.NDArray[np.str_]) +assert_type(a.__array__(np.dtype(np.float64)), npt.NDArray[np.float64]) a[0] = "a" a[:5] = "a" a[...] 
= "a" diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index e769abcf5e52..aec21ec22c93 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -1,8 +1,16 @@ """Tests for :mod:`core.fromnumeric`.""" +import sys +from typing import Any + import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + class NDArraySubclass(npt.NDArray[np.complex128]): ... @@ -19,279 +27,279 @@ f4: np.float32 i8: np.int64 f: float -reveal_type(np.take(b, 0)) # E: bool_ -reveal_type(np.take(f4, 0)) # E: {float32} -reveal_type(np.take(f, 0)) # E: Any -reveal_type(np.take(AR_b, 0)) # E: bool_ -reveal_type(np.take(AR_f4, 0)) # E: {float32} -reveal_type(np.take(AR_b, [0])) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.take(AR_f4, [0])) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.take([1], [0])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.take(AR_f4, [0], out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.reshape(b, 1)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.reshape(f4, 1)) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.reshape(f, 1)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.reshape(AR_b, 1)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.reshape(AR_f4, 1)) # E: ndarray[Any, dtype[{float32}]] - -reveal_type(np.choose(1, [True, True])) # E: Any -reveal_type(np.choose([1], [True, True])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.choose([1], AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.choose([1], AR_b, out=AR_f4)) # E: ndarray[Any, dtype[{float32}]] - -reveal_type(np.repeat(b, 1)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.repeat(f4, 1)) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.repeat(f, 1)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.repeat(AR_b, 1)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.repeat(AR_f4, 1)) # E: ndarray[Any, dtype[{float32}]] +assert_type(np.take(b, 0), np.bool_) +assert_type(np.take(f4, 0), np.float32) +assert_type(np.take(f, 0), Any) +assert_type(np.take(AR_b, 0), np.bool_) +assert_type(np.take(AR_f4, 0), np.float32) +assert_type(np.take(AR_b, [0]), npt.NDArray[np.bool_]) +assert_type(np.take(AR_f4, [0]), npt.NDArray[np.float32]) +assert_type(np.take([1], [0]), npt.NDArray[Any]) +assert_type(np.take(AR_f4, [0], out=AR_subclass), NDArraySubclass) + +assert_type(np.reshape(b, 1), npt.NDArray[np.bool_]) +assert_type(np.reshape(f4, 1), npt.NDArray[np.float32]) +assert_type(np.reshape(f, 1), npt.NDArray[Any]) +assert_type(np.reshape(AR_b, 1), npt.NDArray[np.bool_]) +assert_type(np.reshape(AR_f4, 1), npt.NDArray[np.float32]) + +assert_type(np.choose(1, [True, True]), Any) +assert_type(np.choose([1], [True, True]), npt.NDArray[Any]) +assert_type(np.choose([1], AR_b), npt.NDArray[np.bool_]) +assert_type(np.choose([1], AR_b, out=AR_f4), npt.NDArray[np.float32]) + +assert_type(np.repeat(b, 1), npt.NDArray[np.bool_]) +assert_type(np.repeat(f4, 1), npt.NDArray[np.float32]) +assert_type(np.repeat(f, 1), npt.NDArray[Any]) +assert_type(np.repeat(AR_b, 1), npt.NDArray[np.bool_]) +assert_type(np.repeat(AR_f4, 1), npt.NDArray[np.float32]) # TODO: array_bdd tests for np.put() -reveal_type(np.swapaxes([[0, 1]], 0, 0)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.swapaxes(AR_b, 0, 0)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.swapaxes(AR_f4, 0, 0)) # E: ndarray[Any, dtype[{float32}]] - -reveal_type(np.transpose(b)) # 
E: ndarray[Any, dtype[bool_]] -reveal_type(np.transpose(f4)) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.transpose(f)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.transpose(AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.transpose(AR_f4)) # E: ndarray[Any, dtype[{float32}]] - -reveal_type(np.partition(b, 0, axis=None)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.partition(f4, 0, axis=None)) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.partition(f, 0, axis=None)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.partition(AR_b, 0)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.partition(AR_f4, 0)) # E: ndarray[Any, dtype[{float32}]] - -reveal_type(np.argpartition(b, 0)) # E: ndarray[Any, dtype[{intp}]] -reveal_type(np.argpartition(f4, 0)) # E: ndarray[Any, dtype[{intp}]] -reveal_type(np.argpartition(f, 0)) # E: ndarray[Any, dtype[{intp}]] -reveal_type(np.argpartition(AR_b, 0)) # E: ndarray[Any, dtype[{intp}]] -reveal_type(np.argpartition(AR_f4, 0)) # E: ndarray[Any, dtype[{intp}]] - -reveal_type(np.sort([2, 1], 0)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.sort(AR_b, 0)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.sort(AR_f4, 0)) # E: ndarray[Any, dtype[{float32}]] - -reveal_type(np.argsort(AR_b, 0)) # E: ndarray[Any, dtype[{intp}]] -reveal_type(np.argsort(AR_f4, 0)) # E: ndarray[Any, dtype[{intp}]] - -reveal_type(np.argmax(AR_b)) # E: {intp} -reveal_type(np.argmax(AR_f4)) # E: {intp} -reveal_type(np.argmax(AR_b, axis=0)) # E: Any -reveal_type(np.argmax(AR_f4, axis=0)) # E: Any -reveal_type(np.argmax(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.argmin(AR_b)) # E: {intp} -reveal_type(np.argmin(AR_f4)) # E: {intp} -reveal_type(np.argmin(AR_b, axis=0)) # E: Any -reveal_type(np.argmin(AR_f4, axis=0)) # E: Any -reveal_type(np.argmin(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.searchsorted(AR_b[0], 0)) # E: {intp} -reveal_type(np.searchsorted(AR_f4[0], 0)) # E: {intp} -reveal_type(np.searchsorted(AR_b[0], [0])) # E: ndarray[Any, dtype[{intp}]] -reveal_type(np.searchsorted(AR_f4[0], [0])) # E: ndarray[Any, dtype[{intp}]] - -reveal_type(np.resize(b, (5, 5))) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.resize(f4, (5, 5))) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.resize(f, (5, 5))) # E: ndarray[Any, dtype[Any]] -reveal_type(np.resize(AR_b, (5, 5))) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.resize(AR_f4, (5, 5))) # E: ndarray[Any, dtype[{float32}]] - -reveal_type(np.squeeze(b)) # E: bool_ -reveal_type(np.squeeze(f4)) # E: {float32} -reveal_type(np.squeeze(f)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.squeeze(AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.squeeze(AR_f4)) # E: ndarray[Any, dtype[{float32}]] - -reveal_type(np.diagonal(AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.diagonal(AR_f4)) # E: ndarray[Any, dtype[{float32}]] - -reveal_type(np.trace(AR_b)) # E: Any -reveal_type(np.trace(AR_f4)) # E: Any -reveal_type(np.trace(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.ravel(b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.ravel(f4)) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.ravel(f)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.ravel(AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.ravel(AR_f4)) # E: ndarray[Any, dtype[{float32}]] - -reveal_type(np.nonzero(b)) # E: tuple[ndarray[Any, dtype[{intp}]], ...] -reveal_type(np.nonzero(f4)) # E: tuple[ndarray[Any, dtype[{intp}]], ...] 
-reveal_type(np.nonzero(f)) # E: tuple[ndarray[Any, dtype[{intp}]], ...] -reveal_type(np.nonzero(AR_b)) # E: tuple[ndarray[Any, dtype[{intp}]], ...] -reveal_type(np.nonzero(AR_f4)) # E: tuple[ndarray[Any, dtype[{intp}]], ...] - -reveal_type(np.shape(b)) # E: tuple[builtins.int, ...] -reveal_type(np.shape(f4)) # E: tuple[builtins.int, ...] -reveal_type(np.shape(f)) # E: tuple[builtins.int, ...] -reveal_type(np.shape(AR_b)) # E: tuple[builtins.int, ...] -reveal_type(np.shape(AR_f4)) # E: tuple[builtins.int, ...] - -reveal_type(np.compress([True], b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.compress([True], f4)) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.compress([True], f)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.compress([True], AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.compress([True], AR_f4)) # E: ndarray[Any, dtype[{float32}]] - -reveal_type(np.clip(b, 0, 1.0)) # E: bool_ -reveal_type(np.clip(f4, -1, 1)) # E: {float32} -reveal_type(np.clip(f, 0, 1)) # E: Any -reveal_type(np.clip(AR_b, 0, 1)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.clip(AR_f4, 0, 1)) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.clip([0], 0, 1)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.clip(AR_b, 0, 1, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.sum(b)) # E: bool_ -reveal_type(np.sum(f4)) # E: {float32} -reveal_type(np.sum(f)) # E: Any -reveal_type(np.sum(AR_b)) # E: bool_ -reveal_type(np.sum(AR_f4)) # E: {float32} -reveal_type(np.sum(AR_b, axis=0)) # E: Any -reveal_type(np.sum(AR_f4, axis=0)) # E: Any -reveal_type(np.sum(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.all(b)) # E: bool_ -reveal_type(np.all(f4)) # E: bool_ -reveal_type(np.all(f)) # E: bool_ -reveal_type(np.all(AR_b)) # E: bool_ -reveal_type(np.all(AR_f4)) # E: bool_ -reveal_type(np.all(AR_b, axis=0)) # E: Any -reveal_type(np.all(AR_f4, axis=0)) # E: Any -reveal_type(np.all(AR_b, keepdims=True)) # E: Any -reveal_type(np.all(AR_f4, keepdims=True)) # E: Any -reveal_type(np.all(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.any(b)) # E: bool_ -reveal_type(np.any(f4)) # E: bool_ -reveal_type(np.any(f)) # E: bool_ -reveal_type(np.any(AR_b)) # E: bool_ -reveal_type(np.any(AR_f4)) # E: bool_ -reveal_type(np.any(AR_b, axis=0)) # E: Any -reveal_type(np.any(AR_f4, axis=0)) # E: Any -reveal_type(np.any(AR_b, keepdims=True)) # E: Any -reveal_type(np.any(AR_f4, keepdims=True)) # E: Any -reveal_type(np.any(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.cumsum(b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.cumsum(f4)) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.cumsum(f)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.cumsum(AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.cumsum(AR_f4)) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.cumsum(f, dtype=float)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.cumsum(f, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.cumsum(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.ptp(b)) # E: bool_ -reveal_type(np.ptp(f4)) # E: {float32} -reveal_type(np.ptp(f)) # E: Any -reveal_type(np.ptp(AR_b)) # E: bool_ -reveal_type(np.ptp(AR_f4)) # E: {float32} -reveal_type(np.ptp(AR_b, axis=0)) # E: Any -reveal_type(np.ptp(AR_f4, axis=0)) # E: Any -reveal_type(np.ptp(AR_b, keepdims=True)) # E: Any -reveal_type(np.ptp(AR_f4, keepdims=True)) # E: Any -reveal_type(np.ptp(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.amax(b)) # E: bool_ 
-reveal_type(np.amax(f4)) # E: {float32} -reveal_type(np.amax(f)) # E: Any -reveal_type(np.amax(AR_b)) # E: bool_ -reveal_type(np.amax(AR_f4)) # E: {float32} -reveal_type(np.amax(AR_b, axis=0)) # E: Any -reveal_type(np.amax(AR_f4, axis=0)) # E: Any -reveal_type(np.amax(AR_b, keepdims=True)) # E: Any -reveal_type(np.amax(AR_f4, keepdims=True)) # E: Any -reveal_type(np.amax(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.amin(b)) # E: bool_ -reveal_type(np.amin(f4)) # E: {float32} -reveal_type(np.amin(f)) # E: Any -reveal_type(np.amin(AR_b)) # E: bool_ -reveal_type(np.amin(AR_f4)) # E: {float32} -reveal_type(np.amin(AR_b, axis=0)) # E: Any -reveal_type(np.amin(AR_f4, axis=0)) # E: Any -reveal_type(np.amin(AR_b, keepdims=True)) # E: Any -reveal_type(np.amin(AR_f4, keepdims=True)) # E: Any -reveal_type(np.amin(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.prod(AR_b)) # E: {int_} -reveal_type(np.prod(AR_u8)) # E: {uint64} -reveal_type(np.prod(AR_i8)) # E: {int64} -reveal_type(np.prod(AR_f4)) # E: floating[Any] -reveal_type(np.prod(AR_c16)) # E: complexfloating[Any, Any] -reveal_type(np.prod(AR_O)) # E: Any -reveal_type(np.prod(AR_f4, axis=0)) # E: Any -reveal_type(np.prod(AR_f4, keepdims=True)) # E: Any -reveal_type(np.prod(AR_f4, dtype=np.float64)) # E: {float64} -reveal_type(np.prod(AR_f4, dtype=float)) # E: Any -reveal_type(np.prod(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.cumprod(AR_b)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(np.cumprod(AR_u8)) # E: ndarray[Any, dtype[{uint64}]] -reveal_type(np.cumprod(AR_i8)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.cumprod(AR_f4)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.cumprod(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.cumprod(AR_O)) # E: ndarray[Any, dtype[object_]] -reveal_type(np.cumprod(AR_f4, axis=0)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.cumprod(AR_f4, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.cumprod(AR_f4, dtype=float)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.cumprod(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.ndim(b)) # E: int -reveal_type(np.ndim(f4)) # E: int -reveal_type(np.ndim(f)) # E: int -reveal_type(np.ndim(AR_b)) # E: int -reveal_type(np.ndim(AR_f4)) # E: int - -reveal_type(np.size(b)) # E: int -reveal_type(np.size(f4)) # E: int -reveal_type(np.size(f)) # E: int -reveal_type(np.size(AR_b)) # E: int -reveal_type(np.size(AR_f4)) # E: int - -reveal_type(np.around(b)) # E: {float16} -reveal_type(np.around(f)) # E: Any -reveal_type(np.around(i8)) # E: {int64} -reveal_type(np.around(f4)) # E: {float32} -reveal_type(np.around(AR_b)) # E: ndarray[Any, dtype[{float16}]] -reveal_type(np.around(AR_i8)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.around(AR_f4)) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.around([1.5])) # E: ndarray[Any, dtype[Any]] -reveal_type(np.around(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.mean(AR_b)) # E: floating[Any] -reveal_type(np.mean(AR_i8)) # E: floating[Any] -reveal_type(np.mean(AR_f4)) # E: floating[Any] -reveal_type(np.mean(AR_c16)) # E: complexfloating[Any, Any] -reveal_type(np.mean(AR_O)) # E: Any -reveal_type(np.mean(AR_f4, axis=0)) # E: Any -reveal_type(np.mean(AR_f4, keepdims=True)) # E: Any -reveal_type(np.mean(AR_f4, dtype=float)) # E: Any -reveal_type(np.mean(AR_f4, dtype=np.float64)) # E: {float64} -reveal_type(np.mean(AR_f4, out=AR_subclass)) # E: NDArraySubclass - 
-reveal_type(np.std(AR_b)) # E: floating[Any] -reveal_type(np.std(AR_i8)) # E: floating[Any] -reveal_type(np.std(AR_f4)) # E: floating[Any] -reveal_type(np.std(AR_c16)) # E: floating[Any] -reveal_type(np.std(AR_O)) # E: Any -reveal_type(np.std(AR_f4, axis=0)) # E: Any -reveal_type(np.std(AR_f4, keepdims=True)) # E: Any -reveal_type(np.std(AR_f4, dtype=float)) # E: Any -reveal_type(np.std(AR_f4, dtype=np.float64)) # E: {float64} -reveal_type(np.std(AR_f4, out=AR_subclass)) # E: NDArraySubclass - -reveal_type(np.var(AR_b)) # E: floating[Any] -reveal_type(np.var(AR_i8)) # E: floating[Any] -reveal_type(np.var(AR_f4)) # E: floating[Any] -reveal_type(np.var(AR_c16)) # E: floating[Any] -reveal_type(np.var(AR_O)) # E: Any -reveal_type(np.var(AR_f4, axis=0)) # E: Any -reveal_type(np.var(AR_f4, keepdims=True)) # E: Any -reveal_type(np.var(AR_f4, dtype=float)) # E: Any -reveal_type(np.var(AR_f4, dtype=np.float64)) # E: {float64} -reveal_type(np.var(AR_f4, out=AR_subclass)) # E: NDArraySubclass +assert_type(np.swapaxes([[0, 1]], 0, 0), npt.NDArray[Any]) +assert_type(np.swapaxes(AR_b, 0, 0), npt.NDArray[np.bool_]) +assert_type(np.swapaxes(AR_f4, 0, 0), npt.NDArray[np.float32]) + +assert_type(np.transpose(b), npt.NDArray[np.bool_]) +assert_type(np.transpose(f4), npt.NDArray[np.float32]) +assert_type(np.transpose(f), npt.NDArray[Any]) +assert_type(np.transpose(AR_b), npt.NDArray[np.bool_]) +assert_type(np.transpose(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.partition(b, 0, axis=None), npt.NDArray[np.bool_]) +assert_type(np.partition(f4, 0, axis=None), npt.NDArray[np.float32]) +assert_type(np.partition(f, 0, axis=None), npt.NDArray[Any]) +assert_type(np.partition(AR_b, 0), npt.NDArray[np.bool_]) +assert_type(np.partition(AR_f4, 0), npt.NDArray[np.float32]) + +assert_type(np.argpartition(b, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(f4, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(f, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(AR_b, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(AR_f4, 0), npt.NDArray[np.intp]) + +assert_type(np.sort([2, 1], 0), npt.NDArray[Any]) +assert_type(np.sort(AR_b, 0), npt.NDArray[np.bool_]) +assert_type(np.sort(AR_f4, 0), npt.NDArray[np.float32]) + +assert_type(np.argsort(AR_b, 0), npt.NDArray[np.intp]) +assert_type(np.argsort(AR_f4, 0), npt.NDArray[np.intp]) + +assert_type(np.argmax(AR_b), np.intp) +assert_type(np.argmax(AR_f4), np.intp) +assert_type(np.argmax(AR_b, axis=0), Any) +assert_type(np.argmax(AR_f4, axis=0), Any) +assert_type(np.argmax(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.argmin(AR_b), np.intp) +assert_type(np.argmin(AR_f4), np.intp) +assert_type(np.argmin(AR_b, axis=0), Any) +assert_type(np.argmin(AR_f4, axis=0), Any) +assert_type(np.argmin(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.searchsorted(AR_b[0], 0), np.intp) +assert_type(np.searchsorted(AR_f4[0], 0), np.intp) +assert_type(np.searchsorted(AR_b[0], [0]), npt.NDArray[np.intp]) +assert_type(np.searchsorted(AR_f4[0], [0]), npt.NDArray[np.intp]) + +assert_type(np.resize(b, (5, 5)), npt.NDArray[np.bool_]) +assert_type(np.resize(f4, (5, 5)), npt.NDArray[np.float32]) +assert_type(np.resize(f, (5, 5)), npt.NDArray[Any]) +assert_type(np.resize(AR_b, (5, 5)), npt.NDArray[np.bool_]) +assert_type(np.resize(AR_f4, (5, 5)), npt.NDArray[np.float32]) + +assert_type(np.squeeze(b), np.bool_) +assert_type(np.squeeze(f4), np.float32) +assert_type(np.squeeze(f), npt.NDArray[Any]) +assert_type(np.squeeze(AR_b), npt.NDArray[np.bool_]) 
+assert_type(np.squeeze(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.diagonal(AR_b), npt.NDArray[np.bool_]) +assert_type(np.diagonal(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.trace(AR_b), Any) +assert_type(np.trace(AR_f4), Any) +assert_type(np.trace(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.ravel(b), npt.NDArray[np.bool_]) +assert_type(np.ravel(f4), npt.NDArray[np.float32]) +assert_type(np.ravel(f), npt.NDArray[Any]) +assert_type(np.ravel(AR_b), npt.NDArray[np.bool_]) +assert_type(np.ravel(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.nonzero(b), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(f4), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(f), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_b), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_f4), tuple[npt.NDArray[np.intp], ...]) + +assert_type(np.shape(b), tuple[int, ...]) +assert_type(np.shape(f4), tuple[int, ...]) +assert_type(np.shape(f), tuple[int, ...]) +assert_type(np.shape(AR_b), tuple[int, ...]) +assert_type(np.shape(AR_f4), tuple[int, ...]) + +assert_type(np.compress([True], b), npt.NDArray[np.bool_]) +assert_type(np.compress([True], f4), npt.NDArray[np.float32]) +assert_type(np.compress([True], f), npt.NDArray[Any]) +assert_type(np.compress([True], AR_b), npt.NDArray[np.bool_]) +assert_type(np.compress([True], AR_f4), npt.NDArray[np.float32]) + +assert_type(np.clip(b, 0, 1.0), np.bool_) +assert_type(np.clip(f4, -1, 1), np.float32) +assert_type(np.clip(f, 0, 1), Any) +assert_type(np.clip(AR_b, 0, 1), npt.NDArray[np.bool_]) +assert_type(np.clip(AR_f4, 0, 1), npt.NDArray[np.float32]) +assert_type(np.clip([0], 0, 1), npt.NDArray[Any]) +assert_type(np.clip(AR_b, 0, 1, out=AR_subclass), NDArraySubclass) + +assert_type(np.sum(b), np.bool_) +assert_type(np.sum(f4), np.float32) +assert_type(np.sum(f), Any) +assert_type(np.sum(AR_b), np.bool_) +assert_type(np.sum(AR_f4), np.float32) +assert_type(np.sum(AR_b, axis=0), Any) +assert_type(np.sum(AR_f4, axis=0), Any) +assert_type(np.sum(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.all(b), np.bool_) +assert_type(np.all(f4), np.bool_) +assert_type(np.all(f), np.bool_) +assert_type(np.all(AR_b), np.bool_) +assert_type(np.all(AR_f4), np.bool_) +assert_type(np.all(AR_b, axis=0), Any) +assert_type(np.all(AR_f4, axis=0), Any) +assert_type(np.all(AR_b, keepdims=True), Any) +assert_type(np.all(AR_f4, keepdims=True), Any) +assert_type(np.all(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.any(b), np.bool_) +assert_type(np.any(f4), np.bool_) +assert_type(np.any(f), np.bool_) +assert_type(np.any(AR_b), np.bool_) +assert_type(np.any(AR_f4), np.bool_) +assert_type(np.any(AR_b, axis=0), Any) +assert_type(np.any(AR_f4, axis=0), Any) +assert_type(np.any(AR_b, keepdims=True), Any) +assert_type(np.any(AR_f4, keepdims=True), Any) +assert_type(np.any(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.cumsum(b), npt.NDArray[np.bool_]) +assert_type(np.cumsum(f4), npt.NDArray[np.float32]) +assert_type(np.cumsum(f), npt.NDArray[Any]) +assert_type(np.cumsum(AR_b), npt.NDArray[np.bool_]) +assert_type(np.cumsum(AR_f4), npt.NDArray[np.float32]) +assert_type(np.cumsum(f, dtype=float), npt.NDArray[Any]) +assert_type(np.cumsum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumsum(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.ptp(b), np.bool_) +assert_type(np.ptp(f4), np.float32) +assert_type(np.ptp(f), Any) +assert_type(np.ptp(AR_b), np.bool_) +assert_type(np.ptp(AR_f4), 
np.float32) +assert_type(np.ptp(AR_b, axis=0), Any) +assert_type(np.ptp(AR_f4, axis=0), Any) +assert_type(np.ptp(AR_b, keepdims=True), Any) +assert_type(np.ptp(AR_f4, keepdims=True), Any) +assert_type(np.ptp(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.amax(b), np.bool_) +assert_type(np.amax(f4), np.float32) +assert_type(np.amax(f), Any) +assert_type(np.amax(AR_b), np.bool_) +assert_type(np.amax(AR_f4), np.float32) +assert_type(np.amax(AR_b, axis=0), Any) +assert_type(np.amax(AR_f4, axis=0), Any) +assert_type(np.amax(AR_b, keepdims=True), Any) +assert_type(np.amax(AR_f4, keepdims=True), Any) +assert_type(np.amax(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.amin(b), np.bool_) +assert_type(np.amin(f4), np.float32) +assert_type(np.amin(f), Any) +assert_type(np.amin(AR_b), np.bool_) +assert_type(np.amin(AR_f4), np.float32) +assert_type(np.amin(AR_b, axis=0), Any) +assert_type(np.amin(AR_f4, axis=0), Any) +assert_type(np.amin(AR_b, keepdims=True), Any) +assert_type(np.amin(AR_f4, keepdims=True), Any) +assert_type(np.amin(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.prod(AR_b), np.int_) +assert_type(np.prod(AR_u8), np.uint64) +assert_type(np.prod(AR_i8), np.int64) +assert_type(np.prod(AR_f4), np.floating[Any]) +assert_type(np.prod(AR_c16), np.complexfloating[Any, Any]) +assert_type(np.prod(AR_O), Any) +assert_type(np.prod(AR_f4, axis=0), Any) +assert_type(np.prod(AR_f4, keepdims=True), Any) +assert_type(np.prod(AR_f4, dtype=np.float64), np.float64) +assert_type(np.prod(AR_f4, dtype=float), Any) +assert_type(np.prod(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.cumprod(AR_b), npt.NDArray[np.int_]) +assert_type(np.cumprod(AR_u8), npt.NDArray[np.uint64]) +assert_type(np.cumprod(AR_i8), npt.NDArray[np.int64]) +assert_type(np.cumprod(AR_f4), npt.NDArray[np.floating[Any]]) +assert_type(np.cumprod(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.cumprod(AR_O), npt.NDArray[np.object_]) +assert_type(np.cumprod(AR_f4, axis=0), npt.NDArray[np.floating[Any]]) +assert_type(np.cumprod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumprod(AR_f4, dtype=float), npt.NDArray[Any]) +assert_type(np.cumprod(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.ndim(b), int) +assert_type(np.ndim(f4), int) +assert_type(np.ndim(f), int) +assert_type(np.ndim(AR_b), int) +assert_type(np.ndim(AR_f4), int) + +assert_type(np.size(b), int) +assert_type(np.size(f4), int) +assert_type(np.size(f), int) +assert_type(np.size(AR_b), int) +assert_type(np.size(AR_f4), int) + +assert_type(np.around(b), np.float16) +assert_type(np.around(f), Any) +assert_type(np.around(i8), np.int64) +assert_type(np.around(f4), np.float32) +assert_type(np.around(AR_b), npt.NDArray[np.float16]) +assert_type(np.around(AR_i8), npt.NDArray[np.int64]) +assert_type(np.around(AR_f4), npt.NDArray[np.float32]) +assert_type(np.around([1.5]), npt.NDArray[Any]) +assert_type(np.around(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.mean(AR_b), np.floating[Any]) +assert_type(np.mean(AR_i8), np.floating[Any]) +assert_type(np.mean(AR_f4), np.floating[Any]) +assert_type(np.mean(AR_c16), np.complexfloating[Any, Any]) +assert_type(np.mean(AR_O), Any) +assert_type(np.mean(AR_f4, axis=0), Any) +assert_type(np.mean(AR_f4, keepdims=True), Any) +assert_type(np.mean(AR_f4, dtype=float), Any) +assert_type(np.mean(AR_f4, dtype=np.float64), np.float64) +assert_type(np.mean(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.std(AR_b), np.floating[Any]) 
+assert_type(np.std(AR_i8), np.floating[Any]) +assert_type(np.std(AR_f4), np.floating[Any]) +assert_type(np.std(AR_c16), np.floating[Any]) +assert_type(np.std(AR_O), Any) +assert_type(np.std(AR_f4, axis=0), Any) +assert_type(np.std(AR_f4, keepdims=True), Any) +assert_type(np.std(AR_f4, dtype=float), Any) +assert_type(np.std(AR_f4, dtype=np.float64), np.float64) +assert_type(np.std(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.var(AR_b), np.floating[Any]) +assert_type(np.var(AR_i8), np.floating[Any]) +assert_type(np.var(AR_f4), np.floating[Any]) +assert_type(np.var(AR_c16), np.floating[Any]) +assert_type(np.var(AR_O), Any) +assert_type(np.var(AR_f4, axis=0), Any) +assert_type(np.var(AR_f4, keepdims=True), Any) +assert_type(np.var(AR_f4, dtype=float), Any) +assert_type(np.var(AR_f4, dtype=np.float64), np.float64) +assert_type(np.var(AR_f4, out=AR_subclass), NDArraySubclass) diff --git a/numpy/typing/tests/data/reveal/getlimits.pyi b/numpy/typing/tests/data/reveal/getlimits.pyi index 1614b577ee14..f53fdf48824e 100644 --- a/numpy/typing/tests/data/reveal/getlimits.pyi +++ b/numpy/typing/tests/data/reveal/getlimits.pyi @@ -1,4 +1,13 @@ +import sys +from typing import Any + import numpy as np + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + f: float f8: np.float64 c8: np.complex64 @@ -10,38 +19,38 @@ u4: np.uint32 finfo_f8: np.finfo[np.float64] iinfo_i8: np.iinfo[np.int64] -reveal_type(np.finfo(f)) # E: finfo[{double}] -reveal_type(np.finfo(f8)) # E: finfo[{float64}] -reveal_type(np.finfo(c8)) # E: finfo[{float32}] -reveal_type(np.finfo('f2')) # E: finfo[floating[Any]] - -reveal_type(finfo_f8.dtype) # E: dtype[{float64}] -reveal_type(finfo_f8.bits) # E: int -reveal_type(finfo_f8.eps) # E: {float64} -reveal_type(finfo_f8.epsneg) # E: {float64} -reveal_type(finfo_f8.iexp) # E: int -reveal_type(finfo_f8.machep) # E: int -reveal_type(finfo_f8.max) # E: {float64} -reveal_type(finfo_f8.maxexp) # E: int -reveal_type(finfo_f8.min) # E: {float64} -reveal_type(finfo_f8.minexp) # E: int -reveal_type(finfo_f8.negep) # E: int -reveal_type(finfo_f8.nexp) # E: int -reveal_type(finfo_f8.nmant) # E: int -reveal_type(finfo_f8.precision) # E: int -reveal_type(finfo_f8.resolution) # E: {float64} -reveal_type(finfo_f8.tiny) # E: {float64} -reveal_type(finfo_f8.smallest_normal) # E: {float64} -reveal_type(finfo_f8.smallest_subnormal) # E: {float64} - -reveal_type(np.iinfo(i)) # E: iinfo[{int_}] -reveal_type(np.iinfo(i8)) # E: iinfo[{int64}] -reveal_type(np.iinfo(u4)) # E: iinfo[{uint32}] -reveal_type(np.iinfo('i2')) # E: iinfo[Any] - -reveal_type(iinfo_i8.dtype) # E: dtype[{int64}] -reveal_type(iinfo_i8.kind) # E: str -reveal_type(iinfo_i8.bits) # E: int -reveal_type(iinfo_i8.key) # E: str -reveal_type(iinfo_i8.min) # E: int -reveal_type(iinfo_i8.max) # E: int +assert_type(np.finfo(f), np.finfo[np.double]) +assert_type(np.finfo(f8), np.finfo[np.float64]) +assert_type(np.finfo(c8), np.finfo[np.float32]) +assert_type(np.finfo('f2'), np.finfo[np.floating[Any]]) + +assert_type(finfo_f8.dtype, np.dtype[np.float64]) +assert_type(finfo_f8.bits, int) +assert_type(finfo_f8.eps, np.float64) +assert_type(finfo_f8.epsneg, np.float64) +assert_type(finfo_f8.iexp, int) +assert_type(finfo_f8.machep, int) +assert_type(finfo_f8.max, np.float64) +assert_type(finfo_f8.maxexp, int) +assert_type(finfo_f8.min, np.float64) +assert_type(finfo_f8.minexp, int) +assert_type(finfo_f8.negep, int) +assert_type(finfo_f8.nexp, int) +assert_type(finfo_f8.nmant, int) 
+assert_type(finfo_f8.precision, int) +assert_type(finfo_f8.resolution, np.float64) +assert_type(finfo_f8.tiny, np.float64) +assert_type(finfo_f8.smallest_normal, np.float64) +assert_type(finfo_f8.smallest_subnormal, np.float64) + +assert_type(np.iinfo(i), np.iinfo[np.int_]) +assert_type(np.iinfo(i8), np.iinfo[np.int64]) +assert_type(np.iinfo(u4), np.iinfo[np.uint32]) +assert_type(np.iinfo('i2'), np.iinfo[Any]) + +assert_type(iinfo_i8.dtype, np.dtype[np.int64]) +assert_type(iinfo_i8.kind, str) +assert_type(iinfo_i8.bits, int) +assert_type(iinfo_i8.key, str) +assert_type(iinfo_i8.min, int) +assert_type(iinfo_i8.max, int) diff --git a/numpy/typing/tests/data/reveal/histograms.pyi b/numpy/typing/tests/data/reveal/histograms.pyi index 69ffd26a3361..68df0b96f48d 100644 --- a/numpy/typing/tests/data/reveal/histograms.pyi +++ b/numpy/typing/tests/data/reveal/histograms.pyi @@ -1,19 +1,27 @@ +import sys +from typing import Any + import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] -reveal_type(np.histogram_bin_edges(AR_i8, bins="auto")) # E: ndarray[Any, dtype[Any]] -reveal_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3))) # E: ndarray[Any, dtype[Any]] -reveal_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8)) # E: ndarray[Any, dtype[Any]] +assert_type(np.histogram_bin_edges(AR_i8, bins="auto"), npt.NDArray[Any]) +assert_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3)), npt.NDArray[Any]) +assert_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8), npt.NDArray[Any]) -reveal_type(np.histogram(AR_i8, bins="auto")) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] -reveal_type(np.histogram(AR_i8, bins="rice", range=(0, 3))) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] -reveal_type(np.histogram(AR_i8, bins="scott", weights=AR_f8)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] -reveal_type(np.histogram(AR_f8, bins=1, density=True)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] +assert_type(np.histogram(AR_i8, bins="auto"), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_i8, bins="rice", range=(0, 3)), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_i8, bins="scott", weights=AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_f8, bins=1, density=True), tuple[npt.NDArray[Any], npt.NDArray[Any]]) -reveal_type(np.histogramdd(AR_i8, bins=[1])) # E: tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]] -reveal_type(np.histogramdd(AR_i8, range=[(0, 3)])) # E: tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]] -reveal_type(np.histogramdd(AR_i8, weights=AR_f8)) # E: tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]] -reveal_type(np.histogramdd(AR_f8, density=True)) # E: tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]] +assert_type(np.histogramdd(AR_i8, bins=[1]), tuple[npt.NDArray[Any], list[npt.NDArray[Any]]]) +assert_type(np.histogramdd(AR_i8, range=[(0, 3)]), tuple[npt.NDArray[Any], list[npt.NDArray[Any]]]) +assert_type(np.histogramdd(AR_i8, weights=AR_f8), tuple[npt.NDArray[Any], list[npt.NDArray[Any]]]) +assert_type(np.histogramdd(AR_f8, density=True), tuple[npt.NDArray[Any], list[npt.NDArray[Any]]]) diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi 
b/numpy/typing/tests/data/reveal/index_tricks.pyi index 7165189592ca..e74eb5676867 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -1,5 +1,13 @@ -from typing import Any +import sys +from typing import Any, Literal + import numpy as np +import numpy.typing as npt + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type AR_LIKE_b: list[bool] AR_LIKE_i: list[int] @@ -8,59 +16,59 @@ AR_LIKE_U: list[str] AR_i8: np.ndarray[Any, np.dtype[np.int64]] -reveal_type(np.ndenumerate(AR_i8)) # E: ndenumerate[{int64}] -reveal_type(np.ndenumerate(AR_LIKE_f)) # E: ndenumerate[{double}] -reveal_type(np.ndenumerate(AR_LIKE_U)) # E: ndenumerate[str_] +assert_type(np.ndenumerate(AR_i8), np.ndenumerate[np.int64]) +assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) +assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) -reveal_type(np.ndenumerate(AR_i8).iter) # E: flatiter[ndarray[Any, dtype[{int64}]]] -reveal_type(np.ndenumerate(AR_LIKE_f).iter) # E: flatiter[ndarray[Any, dtype[{double}]]] -reveal_type(np.ndenumerate(AR_LIKE_U).iter) # E: flatiter[ndarray[Any, dtype[str_]]] +assert_type(np.ndenumerate(AR_i8).iter, np.flatiter[npt.NDArray[np.int64]]) +assert_type(np.ndenumerate(AR_LIKE_f).iter, np.flatiter[npt.NDArray[np.float64]]) +assert_type(np.ndenumerate(AR_LIKE_U).iter, np.flatiter[npt.NDArray[np.str_]]) -reveal_type(next(np.ndenumerate(AR_i8))) # E: tuple[builtins.tuple[builtins.int, ...], {int64}] -reveal_type(next(np.ndenumerate(AR_LIKE_f))) # E: tuple[builtins.tuple[builtins.int, ...], {double}] -reveal_type(next(np.ndenumerate(AR_LIKE_U))) # E: tuple[builtins.tuple[builtins.int, ...], str_] +assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[int, ...], np.int64]) +assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[int, ...], np.float64]) +assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[int, ...], np.str_]) -reveal_type(iter(np.ndenumerate(AR_i8))) # E: ndenumerate[{int64}] -reveal_type(iter(np.ndenumerate(AR_LIKE_f))) # E: ndenumerate[{double}] -reveal_type(iter(np.ndenumerate(AR_LIKE_U))) # E: ndenumerate[str_] +assert_type(iter(np.ndenumerate(AR_i8)), np.ndenumerate[np.int64]) +assert_type(iter(np.ndenumerate(AR_LIKE_f)), np.ndenumerate[np.float64]) +assert_type(iter(np.ndenumerate(AR_LIKE_U)), np.ndenumerate[np.str_]) -reveal_type(np.ndindex(1, 2, 3)) # E: numpy.ndindex -reveal_type(np.ndindex((1, 2, 3))) # E: numpy.ndindex -reveal_type(iter(np.ndindex(1, 2, 3))) # E: ndindex -reveal_type(next(np.ndindex(1, 2, 3))) # E: builtins.tuple[builtins.int, ...] +assert_type(np.ndindex(1, 2, 3), np.ndindex) +assert_type(np.ndindex((1, 2, 3)), np.ndindex) +assert_type(iter(np.ndindex(1, 2, 3)), np.ndindex) +assert_type(next(np.ndindex(1, 2, 3)), tuple[int, ...]) -reveal_type(np.unravel_index([22, 41, 37], (7, 6))) # E: tuple[ndarray[Any, dtype[{intp}]], ...] -reveal_type(np.unravel_index([31, 41, 13], (7, 6), order="F")) # E: tuple[ndarray[Any, dtype[{intp}]], ...] -reveal_type(np.unravel_index(1621, (6, 7, 8, 9))) # E: tuple[{intp}, ...] 
+assert_type(np.unravel_index([22, 41, 37], (7, 6)), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.unravel_index([31, 41, 13], (7, 6), order="F"), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.unravel_index(1621, (6, 7, 8, 9)), tuple[np.intp, ...]) -reveal_type(np.ravel_multi_index([[1]], (7, 6))) # E: ndarray[Any, dtype[{intp}]] -reveal_type(np.ravel_multi_index(AR_LIKE_i, (7, 6))) # E: {intp} -reveal_type(np.ravel_multi_index(AR_LIKE_i, (7, 6), order="F")) # E: {intp} -reveal_type(np.ravel_multi_index(AR_LIKE_i, (4, 6), mode="clip")) # E: {intp} -reveal_type(np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=("clip", "wrap"))) # E: {intp} -reveal_type(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9))) # E: {intp} +assert_type(np.ravel_multi_index([[1]], (7, 6)), npt.NDArray[np.intp]) +assert_type(np.ravel_multi_index(AR_LIKE_i, (7, 6)), np.intp) +assert_type(np.ravel_multi_index(AR_LIKE_i, (7, 6), order="F"), np.intp) +assert_type(np.ravel_multi_index(AR_LIKE_i, (4, 6), mode="clip"), np.intp) +assert_type(np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=("clip", "wrap")), np.intp) +assert_type(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), np.intp) -reveal_type(np.mgrid[1:1:2]) # E: ndarray[Any, dtype[Any]] -reveal_type(np.mgrid[1:1:2, None:10]) # E: ndarray[Any, dtype[Any]] +assert_type(np.mgrid[1:1:2], npt.NDArray[Any]) +assert_type(np.mgrid[1:1:2, None:10], npt.NDArray[Any]) -reveal_type(np.ogrid[1:1:2]) # E: list[ndarray[Any, dtype[Any]]] -reveal_type(np.ogrid[1:1:2, None:10]) # E: list[ndarray[Any, dtype[Any]]] +assert_type(np.ogrid[1:1:2], list[npt.NDArray[Any]]) +assert_type(np.ogrid[1:1:2, None:10], list[npt.NDArray[Any]]) -reveal_type(np.index_exp[0:1]) # E: tuple[builtins.slice] -reveal_type(np.index_exp[0:1, None:3]) # E: tuple[builtins.slice, builtins.slice] -reveal_type(np.index_exp[0, 0:1, ..., [0, 1, 3]]) # E: tuple[Literal[0]?, builtins.slice, builtins.ellipsis, builtins.list[builtins.int]] +assert_type(np.index_exp[0:1], tuple[slice]) +assert_type(np.index_exp[0:1, None:3], tuple[slice, slice]) +assert_type(np.index_exp[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice, ellipsis, list[int]]) -reveal_type(np.s_[0:1]) # E: builtins.slice -reveal_type(np.s_[0:1, None:3]) # E: tuple[builtins.slice, builtins.slice] -reveal_type(np.s_[0, 0:1, ..., [0, 1, 3]]) # E: tuple[Literal[0]?, builtins.slice, builtins.ellipsis, builtins.list[builtins.int]] +assert_type(np.s_[0:1], slice) +assert_type(np.s_[0:1, None:3], tuple[slice, slice]) +assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice, ellipsis, list[int]]) -reveal_type(np.ix_(AR_LIKE_b)) # E: tuple[ndarray[Any, dtype[bool_]], ...] -reveal_type(np.ix_(AR_LIKE_i, AR_LIKE_f)) # E: tuple[ndarray[Any, dtype[{double}]], ...] -reveal_type(np.ix_(AR_i8)) # E: tuple[ndarray[Any, dtype[{int64}]], ...] +assert_type(np.ix_(AR_LIKE_b), tuple[npt.NDArray[np.bool_], ...]) +assert_type(np.ix_(AR_LIKE_i, AR_LIKE_f), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.ix_(AR_i8), tuple[npt.NDArray[np.int64], ...]) -reveal_type(np.fill_diagonal(AR_i8, 5)) # E: None +assert_type(np.fill_diagonal(AR_i8, 5), None) -reveal_type(np.diag_indices(4)) # E: tuple[ndarray[Any, dtype[{int_}]], ...] -reveal_type(np.diag_indices(2, 3)) # E: tuple[ndarray[Any, dtype[{int_}]], ...] +assert_type(np.diag_indices(4), tuple[npt.NDArray[np.int_], ...]) +assert_type(np.diag_indices(2, 3), tuple[npt.NDArray[np.int_], ...]) -reveal_type(np.diag_indices_from(AR_i8)) # E: tuple[ndarray[Any, dtype[{int_}]], ...] 
+assert_type(np.diag_indices_from(AR_i8), tuple[npt.NDArray[np.int_], ...]) diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index bf285819e364..0420511a7d72 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -1,8 +1,15 @@ +import sys from typing import Any +from collections.abc import Callable import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + vectorized_func: np.vectorize f8: np.float64 @@ -20,158 +27,159 @@ CHAR_AR_U: np.chararray[Any, np.dtype[np.str_]] def func(*args: Any, **kwargs: Any) -> Any: ... -reveal_type(vectorized_func.pyfunc) # E: def (*Any, **Any) -> Any -reveal_type(vectorized_func.cache) # E: bool -reveal_type(vectorized_func.signature) # E: Union[None, builtins.str] -reveal_type(vectorized_func.otypes) # E: Union[None, builtins.str] -reveal_type(vectorized_func.excluded) # E: set[Union[builtins.int, builtins.str]] -reveal_type(vectorized_func.__doc__) # E: Union[None, builtins.str] -reveal_type(vectorized_func([1])) # E: Any -reveal_type(np.vectorize(int)) # E: vectorize -reveal_type(np.vectorize( # E: vectorize - int, otypes="i", doc="doc", excluded=(), cache=True, signature=None -)) - -reveal_type(np.add_newdoc("__main__", "blabla", doc="test doc")) # E: None -reveal_type(np.add_newdoc("__main__", "blabla", doc=("meth", "test doc"))) # E: None -reveal_type(np.add_newdoc("__main__", "blabla", doc=[("meth", "test doc")])) # E: None - -reveal_type(np.rot90(AR_f8, k=2)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.rot90(AR_LIKE_f8, axes=(0, 1))) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.flip(f8)) # E: {float64} -reveal_type(np.flip(1.0)) # E: Any -reveal_type(np.flip(AR_f8, axis=(0, 1))) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.flip(AR_LIKE_f8, axis=0)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.iterable(1)) # E: bool -reveal_type(np.iterable([1])) # E: bool - -reveal_type(np.average(AR_f8)) # E: floating[Any] -reveal_type(np.average(AR_f8, weights=AR_c16)) # E: complexfloating[Any, Any] -reveal_type(np.average(AR_O)) # E: Any -reveal_type(np.average(AR_f8, returned=True)) # E: tuple[floating[Any], floating[Any]] -reveal_type(np.average(AR_f8, weights=AR_c16, returned=True)) # E: tuple[complexfloating[Any, Any], complexfloating[Any, Any]] -reveal_type(np.average(AR_O, returned=True)) # E: tuple[Any, Any] -reveal_type(np.average(AR_f8, axis=0)) # E: Any -reveal_type(np.average(AR_f8, axis=0, returned=True)) # E: tuple[Any, Any] - -reveal_type(np.asarray_chkfinite(AR_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.asarray_chkfinite(AR_LIKE_f8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.asarray_chkfinite(AR_f8, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.asarray_chkfinite(AR_f8, dtype=float)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.piecewise(AR_f8, AR_b, [func])) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.piecewise(AR_LIKE_f8, AR_b, [func])) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.select([AR_f8], [AR_f8])) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.copy(AR_LIKE_f8)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.copy(AR_U)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.copy(CHAR_AR_U)) # E: ndarray[Any, Any] -reveal_type(np.copy(CHAR_AR_U, "K", subok=True)) # E: chararray[Any, dtype[str_]] -reveal_type(np.copy(CHAR_AR_U, 
subok=True)) # E: chararray[Any, dtype[str_]] - -reveal_type(np.gradient(AR_f8, axis=None)) # E: Any -reveal_type(np.gradient(AR_LIKE_f8, edge_order=2)) # E: Any - -reveal_type(np.diff("bob", n=0)) # E: str -reveal_type(np.diff(AR_f8, axis=0)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.diff(AR_LIKE_f8, prepend=1.5)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.angle(f8)) # E: floating[Any] -reveal_type(np.angle(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.angle(AR_c16, deg=True)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.angle(AR_O)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.unwrap(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.unwrap(AR_O)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.sort_complex(AR_f8)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.trim_zeros(AR_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.trim_zeros(AR_LIKE_f8)) # E: list[builtins.float] - -reveal_type(np.extract(AR_i8, AR_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.extract(AR_i8, AR_LIKE_f8)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.place(AR_f8, mask=AR_i8, vals=5.0)) # E: None - -reveal_type(np.disp(1, linefeed=True)) # E: None +assert_type(vectorized_func.pyfunc, Callable[..., Any]) +assert_type(vectorized_func.cache, bool) +assert_type(vectorized_func.signature, None | str) +assert_type(vectorized_func.otypes, None | str) +assert_type(vectorized_func.excluded, set[int | str]) +assert_type(vectorized_func.__doc__, None | str) +assert_type(vectorized_func([1]), Any) +assert_type(np.vectorize(int), np.vectorize) +assert_type( + np.vectorize(int, otypes="i", doc="doc", excluded=(), cache=True, signature=None), + np.vectorize, +) + +assert_type(np.add_newdoc("__main__", "blabla", doc="test doc"), None) +assert_type(np.add_newdoc("__main__", "blabla", doc=("meth", "test doc")), None) +assert_type(np.add_newdoc("__main__", "blabla", doc=[("meth", "test doc")]), None) + +assert_type(np.rot90(AR_f8, k=2), npt.NDArray[np.float64]) +assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), npt.NDArray[Any]) + +assert_type(np.flip(f8), np.float64) +assert_type(np.flip(1.0), Any) +assert_type(np.flip(AR_f8, axis=(0, 1)), npt.NDArray[np.float64]) +assert_type(np.flip(AR_LIKE_f8, axis=0), npt.NDArray[Any]) + +assert_type(np.iterable(1), bool) +assert_type(np.iterable([1]), bool) + +assert_type(np.average(AR_f8), np.floating[Any]) +assert_type(np.average(AR_f8, weights=AR_c16), np.complexfloating[Any, Any]) +assert_type(np.average(AR_O), Any) +assert_type(np.average(AR_f8, returned=True), tuple[np.floating[Any], np.floating[Any]]) +assert_type(np.average(AR_f8, weights=AR_c16, returned=True), tuple[np.complexfloating[Any, Any], np.complexfloating[Any, Any]]) +assert_type(np.average(AR_O, returned=True), tuple[Any, Any]) +assert_type(np.average(AR_f8, axis=0), Any) +assert_type(np.average(AR_f8, axis=0, returned=True), tuple[Any, Any]) + +assert_type(np.asarray_chkfinite(AR_f8), npt.NDArray[np.float64]) +assert_type(np.asarray_chkfinite(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.asarray_chkfinite(AR_f8, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.asarray_chkfinite(AR_f8, dtype=float), npt.NDArray[Any]) + +assert_type(np.piecewise(AR_f8, AR_b, [func]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_LIKE_f8, AR_b, [func]), npt.NDArray[Any]) + +assert_type(np.select([AR_f8], [AR_f8]), npt.NDArray[Any]) + +assert_type(np.copy(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.copy(AR_U), 
npt.NDArray[np.str_]) +assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) +assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.chararray[Any, np.dtype[np.str_]]) +assert_type(np.copy(CHAR_AR_U, subok=True), np.chararray[Any, np.dtype[np.str_]]) + +assert_type(np.gradient(AR_f8, axis=None), Any) +assert_type(np.gradient(AR_LIKE_f8, edge_order=2), Any) + +assert_type(np.diff("bob", n=0), str) +assert_type(np.diff(AR_f8, axis=0), npt.NDArray[Any]) +assert_type(np.diff(AR_LIKE_f8, prepend=1.5), npt.NDArray[Any]) + +assert_type(np.angle(f8), np.floating[Any]) +assert_type(np.angle(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.angle(AR_c16, deg=True), npt.NDArray[np.floating[Any]]) +assert_type(np.angle(AR_O), npt.NDArray[np.object_]) + +assert_type(np.unwrap(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.unwrap(AR_O), npt.NDArray[np.object_]) + +assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.trim_zeros(AR_f8), npt.NDArray[np.float64]) +assert_type(np.trim_zeros(AR_LIKE_f8), list[float]) + +assert_type(np.extract(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.extract(AR_i8, AR_LIKE_f8), npt.NDArray[Any]) + +assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) + +assert_type(np.disp(1, linefeed=True), None) with open("test", "w") as f: - reveal_type(np.disp("message", device=f)) # E: None - -reveal_type(np.cov(AR_f8, bias=True)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.cov(AR_f8, AR_c16, ddof=1)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.cov(AR_f8, aweights=AR_f8, dtype=np.float32)) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.cov(AR_f8, fweights=AR_f8, dtype=float)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.corrcoef(AR_f8, rowvar=True)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.corrcoef(AR_f8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.corrcoef(AR_f8, dtype=np.float32)) # E: ndarray[Any, dtype[{float32}]] -reveal_type(np.corrcoef(AR_f8, dtype=float)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.blackman(5)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.bartlett(6)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.hanning(4.5)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.hamming(0)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.i0(AR_i8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.kaiser(4, 5.9)) # E: ndarray[Any, dtype[floating[Any]]] - -reveal_type(np.sinc(1.0)) # E: floating[Any] -reveal_type(np.sinc(1j)) # E: complexfloating[Any, Any] -reveal_type(np.sinc(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.sinc(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.median(AR_f8, keepdims=False)) # E: floating[Any] -reveal_type(np.median(AR_c16, overwrite_input=True)) # E: complexfloating[Any, Any] -reveal_type(np.median(AR_m)) # E: timedelta64 -reveal_type(np.median(AR_O)) # E: Any -reveal_type(np.median(AR_f8, keepdims=True)) # E: Any -reveal_type(np.median(AR_c16, axis=0)) # E: Any -reveal_type(np.median(AR_LIKE_f8, out=AR_c16)) # E: ndarray[Any, dtype[{complex128}]] - -reveal_type(np.add_newdoc_ufunc(np.add, "docstring")) # E: None - -reveal_type(np.percentile(AR_f8, 50)) # E: floating[Any] -reveal_type(np.percentile(AR_c16, 50)) # E: complexfloating[Any, Any] -reveal_type(np.percentile(AR_m, 50)) # E: timedelta64 -reveal_type(np.percentile(AR_M, 50, overwrite_input=True)) # E: datetime64 
-reveal_type(np.percentile(AR_O, 50)) # E: Any -reveal_type(np.percentile(AR_f8, [50])) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.percentile(AR_c16, [50])) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.percentile(AR_m, [50])) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(np.percentile(AR_M, [50], method="nearest")) # E: ndarray[Any, dtype[datetime64]] -reveal_type(np.percentile(AR_O, [50])) # E: ndarray[Any, dtype[object_]] -reveal_type(np.percentile(AR_f8, [50], keepdims=True)) # E: Any -reveal_type(np.percentile(AR_f8, [50], axis=[1])) # E: Any -reveal_type(np.percentile(AR_f8, [50], out=AR_c16)) # E: ndarray[Any, dtype[{complex128}]] - -reveal_type(np.quantile(AR_f8, 0.5)) # E: floating[Any] -reveal_type(np.quantile(AR_c16, 0.5)) # E: complexfloating[Any, Any] -reveal_type(np.quantile(AR_m, 0.5)) # E: timedelta64 -reveal_type(np.quantile(AR_M, 0.5, overwrite_input=True)) # E: datetime64 -reveal_type(np.quantile(AR_O, 0.5)) # E: Any -reveal_type(np.quantile(AR_f8, [0.5])) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.quantile(AR_c16, [0.5])) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.quantile(AR_m, [0.5])) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(np.quantile(AR_M, [0.5], method="nearest")) # E: ndarray[Any, dtype[datetime64]] -reveal_type(np.quantile(AR_O, [0.5])) # E: ndarray[Any, dtype[object_]] -reveal_type(np.quantile(AR_f8, [0.5], keepdims=True)) # E: Any -reveal_type(np.quantile(AR_f8, [0.5], axis=[1])) # E: Any -reveal_type(np.quantile(AR_f8, [0.5], out=AR_c16)) # E: ndarray[Any, dtype[{complex128}]] - -reveal_type(np.meshgrid(AR_f8, AR_i8, copy=False)) # E: list[ndarray[Any, dtype[Any]]] -reveal_type(np.meshgrid(AR_f8, AR_i8, AR_c16, indexing="ij")) # E: list[ndarray[Any, dtype[Any]]] - -reveal_type(np.delete(AR_f8, np.s_[:5])) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.insert(AR_f8, np.s_[:5], 5)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.append(AR_f8, 5)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.append(AR_LIKE_f8, 1j, axis=0)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.digitize(4.5, [1])) # E: {intp} -reveal_type(np.digitize(AR_f8, [1, 2, 3])) # E: ndarray[Any, dtype[{intp}]] + assert_type(np.disp("message", device=f), None) + +assert_type(np.cov(AR_f8, bias=True), npt.NDArray[np.floating[Any]]) +assert_type(np.cov(AR_f8, AR_c16, ddof=1), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.cov(AR_f8, aweights=AR_f8, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(np.cov(AR_f8, fweights=AR_f8, dtype=float), npt.NDArray[Any]) + +assert_type(np.corrcoef(AR_f8, rowvar=True), npt.NDArray[np.floating[Any]]) +assert_type(np.corrcoef(AR_f8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.corrcoef(AR_f8, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(np.corrcoef(AR_f8, dtype=float), npt.NDArray[Any]) + +assert_type(np.blackman(5), npt.NDArray[np.floating[Any]]) +assert_type(np.bartlett(6), npt.NDArray[np.floating[Any]]) +assert_type(np.hanning(4.5), npt.NDArray[np.floating[Any]]) +assert_type(np.hamming(0), npt.NDArray[np.floating[Any]]) +assert_type(np.i0(AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(np.kaiser(4, 5.9), npt.NDArray[np.floating[Any]]) + +assert_type(np.sinc(1.0), np.floating[Any]) +assert_type(np.sinc(1j), 
np.complexfloating[Any, Any]) +assert_type(np.sinc(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.sinc(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.median(AR_f8, keepdims=False), np.floating[Any]) +assert_type(np.median(AR_c16, overwrite_input=True), np.complexfloating[Any, Any]) +assert_type(np.median(AR_m), np.timedelta64) +assert_type(np.median(AR_O), Any) +assert_type(np.median(AR_f8, keepdims=True), Any) +assert_type(np.median(AR_c16, axis=0), Any) +assert_type(np.median(AR_LIKE_f8, out=AR_c16), npt.NDArray[np.complex128]) + +assert_type(np.add_newdoc_ufunc(np.add, "docstring"), None) + +assert_type(np.percentile(AR_f8, 50), np.floating[Any]) +assert_type(np.percentile(AR_c16, 50), np.complexfloating[Any, Any]) +assert_type(np.percentile(AR_m, 50), np.timedelta64) +assert_type(np.percentile(AR_M, 50, overwrite_input=True), np.datetime64) +assert_type(np.percentile(AR_O, 50), Any) +assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.floating[Any]]) +assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.percentile(AR_m, [50]), npt.NDArray[np.timedelta64]) +assert_type(np.percentile(AR_M, [50], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.percentile(AR_O, [50]), npt.NDArray[np.object_]) +assert_type(np.percentile(AR_f8, [50], keepdims=True), Any) +assert_type(np.percentile(AR_f8, [50], axis=[1]), Any) +assert_type(np.percentile(AR_f8, [50], out=AR_c16), npt.NDArray[np.complex128]) + +assert_type(np.quantile(AR_f8, 0.5), np.floating[Any]) +assert_type(np.quantile(AR_c16, 0.5), np.complexfloating[Any, Any]) +assert_type(np.quantile(AR_m, 0.5), np.timedelta64) +assert_type(np.quantile(AR_M, 0.5, overwrite_input=True), np.datetime64) +assert_type(np.quantile(AR_O, 0.5), Any) +assert_type(np.quantile(AR_f8, [0.5]), npt.NDArray[np.floating[Any]]) +assert_type(np.quantile(AR_c16, [0.5]), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.quantile(AR_m, [0.5]), npt.NDArray[np.timedelta64]) +assert_type(np.quantile(AR_M, [0.5], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.quantile(AR_O, [0.5]), npt.NDArray[np.object_]) +assert_type(np.quantile(AR_f8, [0.5], keepdims=True), Any) +assert_type(np.quantile(AR_f8, [0.5], axis=[1]), Any) +assert_type(np.quantile(AR_f8, [0.5], out=AR_c16), npt.NDArray[np.complex128]) + +assert_type(np.meshgrid(AR_f8, AR_i8, copy=False), list[npt.NDArray[Any]]) +assert_type(np.meshgrid(AR_f8, AR_i8, AR_c16, indexing="ij"), list[npt.NDArray[Any]]) + +assert_type(np.delete(AR_f8, np.s_[:5]), npt.NDArray[np.float64]) +assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), npt.NDArray[Any]) + +assert_type(np.insert(AR_f8, np.s_[:5], 5), npt.NDArray[np.float64]) +assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), npt.NDArray[Any]) + +assert_type(np.append(AR_f8, 5), npt.NDArray[Any]) +assert_type(np.append(AR_LIKE_f8, 1j, axis=0), npt.NDArray[Any]) + +assert_type(np.digitize(4.5, [1]), np.intp) +assert_type(np.digitize(AR_f8, [1, 2, 3]), npt.NDArray[np.intp]) diff --git a/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/numpy/typing/tests/data/reveal/lib_polynomial.pyi index 4d70c267865f..9d258ca3e0e7 100644 --- a/numpy/typing/tests/data/reveal/lib_polynomial.pyi +++ b/numpy/typing/tests/data/reveal/lib_polynomial.pyi @@ -1,6 +1,15 @@ +import sys +from typing import Any, NoReturn +from collections.abc import Iterator + import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type 
+else: + from typing_extensions import assert_type + AR_b: npt.NDArray[np.bool_] AR_u4: npt.NDArray[np.uint32] AR_i8: npt.NDArray[np.int64] @@ -10,102 +19,132 @@ AR_O: npt.NDArray[np.object_] poly_obj: np.poly1d -reveal_type(poly_obj.variable) # E: str -reveal_type(poly_obj.order) # E: int -reveal_type(poly_obj.o) # E: int -reveal_type(poly_obj.roots) # E: ndarray[Any, dtype[Any]] -reveal_type(poly_obj.r) # E: ndarray[Any, dtype[Any]] -reveal_type(poly_obj.coeffs) # E: ndarray[Any, dtype[Any]] -reveal_type(poly_obj.c) # E: ndarray[Any, dtype[Any]] -reveal_type(poly_obj.coef) # E: ndarray[Any, dtype[Any]] -reveal_type(poly_obj.coefficients) # E: ndarray[Any, dtype[Any]] -reveal_type(poly_obj.__hash__) # E: None - -reveal_type(poly_obj(1)) # E: Any -reveal_type(poly_obj([1])) # E: ndarray[Any, dtype[Any]] -reveal_type(poly_obj(poly_obj)) # E: poly1d - -reveal_type(len(poly_obj)) # E: int -reveal_type(-poly_obj) # E: poly1d -reveal_type(+poly_obj) # E: poly1d - -reveal_type(poly_obj * 5) # E: poly1d -reveal_type(5 * poly_obj) # E: poly1d -reveal_type(poly_obj + 5) # E: poly1d -reveal_type(5 + poly_obj) # E: poly1d -reveal_type(poly_obj - 5) # E: poly1d -reveal_type(5 - poly_obj) # E: poly1d -reveal_type(poly_obj**1) # E: poly1d -reveal_type(poly_obj**1.0) # E: poly1d -reveal_type(poly_obj / 5) # E: poly1d -reveal_type(5 / poly_obj) # E: poly1d - -reveal_type(poly_obj[0]) # E: Any +assert_type(poly_obj.variable, str) +assert_type(poly_obj.order, int) +assert_type(poly_obj.o, int) +assert_type(poly_obj.roots, npt.NDArray[Any]) +assert_type(poly_obj.r, npt.NDArray[Any]) +assert_type(poly_obj.coeffs, npt.NDArray[Any]) +assert_type(poly_obj.c, npt.NDArray[Any]) +assert_type(poly_obj.coef, npt.NDArray[Any]) +assert_type(poly_obj.coefficients, npt.NDArray[Any]) +assert_type(poly_obj.__hash__, None) + +assert_type(poly_obj(1), Any) +assert_type(poly_obj([1]), npt.NDArray[Any]) +assert_type(poly_obj(poly_obj), np.poly1d) + +assert_type(len(poly_obj), int) +assert_type(-poly_obj, np.poly1d) +assert_type(+poly_obj, np.poly1d) + +assert_type(poly_obj * 5, np.poly1d) +assert_type(5 * poly_obj, np.poly1d) +assert_type(poly_obj + 5, np.poly1d) +assert_type(5 + poly_obj, np.poly1d) +assert_type(poly_obj - 5, np.poly1d) +assert_type(5 - poly_obj, np.poly1d) +assert_type(poly_obj**1, np.poly1d) +assert_type(poly_obj**1.0, np.poly1d) +assert_type(poly_obj / 5, np.poly1d) +assert_type(5 / poly_obj, np.poly1d) + +assert_type(poly_obj[0], Any) poly_obj[0] = 5 -reveal_type(iter(poly_obj)) # E: Iterator[Any] -reveal_type(poly_obj.deriv()) # E: poly1d -reveal_type(poly_obj.integ()) # E: poly1d - -reveal_type(np.poly(poly_obj)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.poly(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.poly(AR_c16)) # E: ndarray[Any, dtype[floating[Any]]] - -reveal_type(np.polyint(poly_obj)) # E: poly1d -reveal_type(np.polyint(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.polyint(AR_f8, k=AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.polyint(AR_O, m=2)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.polyder(poly_obj)) # E: poly1d -reveal_type(np.polyder(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.polyder(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.polyder(AR_O, m=2)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.polyfit(AR_f8, AR_f8, 2)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.polyfit(AR_f8, AR_i8, 1, full=True)) # E: tuple[ndarray[Any, 
dtype[{float64}]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[signedinteger[typing._32Bit]]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]] -reveal_type(np.polyfit(AR_u4, AR_f8, 1.0, cov="unscaled")) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]] -reveal_type(np.polyfit(AR_c16, AR_f8, 2)) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(np.polyfit(AR_f8, AR_c16, 1, full=True)) # E: tuple[ndarray[Any, dtype[{complex128}]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[signedinteger[typing._32Bit]]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]] -reveal_type(np.polyfit(AR_u4, AR_c16, 1.0, cov=True)) # E: tuple[ndarray[Any, dtype[{complex128}]], ndarray[Any, dtype[{complex128}]]] - -reveal_type(np.polyval(AR_b, AR_b)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.polyval(AR_u4, AR_b)) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(np.polyval(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.polyval(AR_f8, AR_i8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.polyval(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.polyval(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.polyadd(poly_obj, AR_i8)) # E: poly1d -reveal_type(np.polyadd(AR_f8, poly_obj)) # E: poly1d -reveal_type(np.polyadd(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.polyadd(AR_u4, AR_b)) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(np.polyadd(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.polyadd(AR_f8, AR_i8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.polyadd(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.polyadd(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.polysub(poly_obj, AR_i8)) # E: poly1d -reveal_type(np.polysub(AR_f8, poly_obj)) # E: poly1d -reveal_type(np.polysub(AR_b, AR_b)) # E: -reveal_type(np.polysub(AR_u4, AR_b)) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(np.polysub(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.polysub(AR_f8, AR_i8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.polysub(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.polysub(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.polymul(poly_obj, AR_i8)) # E: poly1d -reveal_type(np.polymul(AR_f8, poly_obj)) # E: poly1d -reveal_type(np.polymul(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.polymul(AR_u4, AR_b)) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(np.polymul(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.polymul(AR_f8, AR_i8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.polymul(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.polymul(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.polydiv(poly_obj, AR_i8)) # E: poly1d -reveal_type(np.polydiv(AR_f8, poly_obj)) # E: poly1d -reveal_type(np.polydiv(AR_b, AR_b)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.polydiv(AR_u4, AR_b)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.polydiv(AR_i8, AR_i8)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.polydiv(AR_f8, AR_i8)) # E: tuple[ndarray[Any, dtype[floating[Any]]], 
ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.polydiv(AR_i8, AR_c16)) # E: tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]] -reveal_type(np.polydiv(AR_O, AR_O)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] +assert_type(iter(poly_obj), Iterator[Any]) +assert_type(poly_obj.deriv(), np.poly1d) +assert_type(poly_obj.integ(), np.poly1d) + +assert_type(np.poly(poly_obj), npt.NDArray[np.floating[Any]]) +assert_type(np.poly(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.poly(AR_c16), npt.NDArray[np.floating[Any]]) + +assert_type(np.polyint(poly_obj), np.poly1d) +assert_type(np.polyint(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polyint(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(np.polyder(poly_obj), np.poly1d) +assert_type(np.polyder(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.polyder(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polyder(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(np.polyfit(AR_f8, AR_f8, 2), npt.NDArray[np.float64]) +assert_type( + np.polyfit(AR_f8, AR_i8, 1, full=True), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.int32], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.polyfit(AR_u4, AR_f8, 1.0, cov="unscaled"), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type(np.polyfit(AR_c16, AR_f8, 2), npt.NDArray[np.complex128]) +assert_type( + np.polyfit(AR_f8, AR_c16, 1, full=True), + tuple[ + npt.NDArray[np.complex128], + npt.NDArray[np.float64], + npt.NDArray[np.int32], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.polyfit(AR_u4, AR_c16, 1.0, cov=True), + tuple[ + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) + +assert_type(np.polyval(AR_b, AR_b), npt.NDArray[np.int64]) +assert_type(np.polyval(AR_u4, AR_b), npt.NDArray[np.unsignedinteger[Any]]) +assert_type(np.polyval(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.polyval(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(np.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polyval(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polyadd(poly_obj, AR_i8), np.poly1d) +assert_type(np.polyadd(AR_f8, poly_obj), np.poly1d) +assert_type(np.polyadd(AR_b, AR_b), npt.NDArray[np.bool_]) +assert_type(np.polyadd(AR_u4, AR_b), npt.NDArray[np.unsignedinteger[Any]]) +assert_type(np.polyadd(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.polyadd(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(np.polyadd(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polyadd(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polysub(poly_obj, AR_i8), np.poly1d) +assert_type(np.polysub(AR_f8, poly_obj), np.poly1d) +assert_type(np.polysub(AR_b, AR_b), NoReturn) +assert_type(np.polysub(AR_u4, AR_b), npt.NDArray[np.unsignedinteger[Any]]) +assert_type(np.polysub(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.polysub(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(np.polysub(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polysub(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polymul(poly_obj, AR_i8), np.poly1d) +assert_type(np.polymul(AR_f8, poly_obj), np.poly1d) +assert_type(np.polymul(AR_b, AR_b), 
npt.NDArray[np.bool_]) +assert_type(np.polymul(AR_u4, AR_b), npt.NDArray[np.unsignedinteger[Any]]) +assert_type(np.polymul(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.polymul(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(np.polymul(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polymul(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polydiv(poly_obj, AR_i8), tuple[np.poly1d, np.poly1d]) +assert_type(np.polydiv(AR_f8, poly_obj), tuple[np.poly1d, np.poly1d]) +assert_type(np.polydiv(AR_b, AR_b), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) +assert_type(np.polydiv(AR_u4, AR_b), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) +assert_type(np.polydiv(AR_i8, AR_i8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) +assert_type(np.polydiv(AR_f8, AR_i8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) +assert_type(np.polydiv(AR_i8, AR_c16), tuple[npt.NDArray[np.complexfloating[Any, Any]], npt.NDArray[np.complexfloating[Any, Any]]]) +assert_type(np.polydiv(AR_O, AR_O), tuple[npt.NDArray[Any], npt.NDArray[Any]]) diff --git a/numpy/typing/tests/data/reveal/lib_utils.pyi b/numpy/typing/tests/data/reveal/lib_utils.pyi index 3214043ee438..7b15cf18fdf5 100644 --- a/numpy/typing/tests/data/reveal/lib_utils.pyi +++ b/numpy/typing/tests/data/reveal/lib_utils.pyi @@ -1,30 +1,41 @@ +import sys from io import StringIO -from typing import Any +from typing import Any, Protocol import numpy as np +import numpy.typing as npt +from numpy.lib.utils import _Deprecate -AR: np.ndarray[Any, np.dtype[np.float64]] -AR_DICT: dict[str, np.ndarray[Any, np.dtype[np.float64]]] +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + +AR: npt.NDArray[np.float64] +AR_DICT: dict[str, npt.NDArray[np.float64]] FILE: StringIO def func(a: int) -> bool: ... -reveal_type(np.deprecate(func)) # E: def (a: builtins.int) -> builtins.bool -reveal_type(np.deprecate()) # E: _Deprecate +class FuncProtocol(Protocol): + def __call__(self, a: int) -> bool: ... 
+ +assert_type(np.deprecate(func), FuncProtocol) +assert_type(np.deprecate(), _Deprecate) -reveal_type(np.deprecate_with_doc("test")) # E: _Deprecate -reveal_type(np.deprecate_with_doc(None)) # E: _Deprecate +assert_type(np.deprecate_with_doc("test"), _Deprecate) +assert_type(np.deprecate_with_doc(None), _Deprecate) -reveal_type(np.byte_bounds(AR)) # E: tuple[builtins.int, builtins.int] -reveal_type(np.byte_bounds(np.float64())) # E: tuple[builtins.int, builtins.int] +assert_type(np.byte_bounds(AR), tuple[int, int]) +assert_type(np.byte_bounds(np.float64()), tuple[int, int]) -reveal_type(np.who(None)) # E: None -reveal_type(np.who(AR_DICT)) # E: None +assert_type(np.who(None), None) +assert_type(np.who(AR_DICT), None) -reveal_type(np.info(1, output=FILE)) # E: None +assert_type(np.info(1, output=FILE), None) -reveal_type(np.source(np.interp, output=FILE)) # E: None +assert_type(np.source(np.interp, output=FILE), None) -reveal_type(np.lookfor("binary representation", output=FILE)) # E: None +assert_type(np.lookfor("binary representation", output=FILE), None) -reveal_type(np.safe_eval("1 + 1")) # E: Any +assert_type(np.safe_eval("1 + 1"), Any) diff --git a/numpy/typing/tests/data/reveal/lib_version.pyi b/numpy/typing/tests/data/reveal/lib_version.pyi index e6f695558a40..142d88bdbb8a 100644 --- a/numpy/typing/tests/data/reveal/lib_version.pyi +++ b/numpy/typing/tests/data/reveal/lib_version.pyi @@ -1,18 +1,25 @@ +import sys + from numpy.lib import NumpyVersion +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + version = NumpyVersion("1.8.0") -reveal_type(version.vstring) # E: str -reveal_type(version.version) # E: str -reveal_type(version.major) # E: int -reveal_type(version.minor) # E: int -reveal_type(version.bugfix) # E: int -reveal_type(version.pre_release) # E: str -reveal_type(version.is_devversion) # E: bool +assert_type(version.vstring, str) +assert_type(version.version, str) +assert_type(version.major, int) +assert_type(version.minor, int) +assert_type(version.bugfix, int) +assert_type(version.pre_release, str) +assert_type(version.is_devversion, bool) -reveal_type(version == version) # E: bool -reveal_type(version != version) # E: bool -reveal_type(version < "1.8.0") # E: bool -reveal_type(version <= version) # E: bool -reveal_type(version > version) # E: bool -reveal_type(version >= "1.8.0") # E: bool +assert_type(version == version, bool) +assert_type(version != version, bool) +assert_type(version < "1.8.0", bool) +assert_type(version <= version, bool) +assert_type(version > version, bool) +assert_type(version >= "1.8.0", bool) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index e264d176eb01..f011aedd93db 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -1,5 +1,14 @@ +import sys +from typing import Any + import numpy as np import numpy.typing as npt +from numpy.linalg.linalg import QRResult, EigResult, EighResult, SVDResult, SlogdetResult + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] @@ -8,90 +17,90 @@ AR_O: npt.NDArray[np.object_] AR_m: npt.NDArray[np.timedelta64] AR_S: npt.NDArray[np.str_] -reveal_type(np.linalg.tensorsolve(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.linalg.tensorsolve(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] 
-reveal_type(np.linalg.tensorsolve(AR_c16, AR_f8)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.linalg.solve(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.linalg.solve(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.linalg.solve(AR_c16, AR_f8)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.linalg.tensorinv(AR_i8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.linalg.tensorinv(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.linalg.tensorinv(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.linalg.inv(AR_i8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.linalg.inv(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.linalg.inv(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.linalg.matrix_power(AR_i8, -1)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.linalg.matrix_power(AR_f8, 0)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.linalg.matrix_power(AR_c16, 1)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.linalg.matrix_power(AR_O, 2)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.linalg.cholesky(AR_i8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.linalg.cholesky(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.linalg.cholesky(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.linalg.qr(AR_i8)) # E: QRResult -reveal_type(np.linalg.qr(AR_f8)) # E: QRResult -reveal_type(np.linalg.qr(AR_c16)) # E: QRResult - -reveal_type(np.linalg.eigvals(AR_i8)) # E: Union[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{complex128}]]] -reveal_type(np.linalg.eigvals(AR_f8)) # E: Union[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]] -reveal_type(np.linalg.eigvals(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.linalg.eigvalsh(AR_i8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.linalg.eigvalsh(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.linalg.eigvalsh(AR_c16)) # E: ndarray[Any, dtype[floating[Any]]] - -reveal_type(np.linalg.eig(AR_i8)) # E: EigResult -reveal_type(np.linalg.eig(AR_f8)) # E: EigResult -reveal_type(np.linalg.eig(AR_c16)) # E: EigResult - -reveal_type(np.linalg.eigh(AR_i8)) # E: EighResult -reveal_type(np.linalg.eigh(AR_f8)) # E: EighResult -reveal_type(np.linalg.eigh(AR_c16)) # E: EighResult - -reveal_type(np.linalg.svd(AR_i8)) # E: SVDResult -reveal_type(np.linalg.svd(AR_f8)) # E: SVDResult -reveal_type(np.linalg.svd(AR_c16)) # E: SVDResult -reveal_type(np.linalg.svd(AR_i8, compute_uv=False)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.linalg.svd(AR_f8, compute_uv=False)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.linalg.svd(AR_c16, compute_uv=False)) # E: ndarray[Any, dtype[floating[Any]]] - -reveal_type(np.linalg.cond(AR_i8)) # E: Any -reveal_type(np.linalg.cond(AR_f8)) # E: Any -reveal_type(np.linalg.cond(AR_c16)) # E: Any - -reveal_type(np.linalg.matrix_rank(AR_i8)) # E: Any -reveal_type(np.linalg.matrix_rank(AR_f8)) # E: Any -reveal_type(np.linalg.matrix_rank(AR_c16)) # E: Any - -reveal_type(np.linalg.pinv(AR_i8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.linalg.pinv(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.linalg.pinv(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] - -reveal_type(np.linalg.slogdet(AR_i8)) # E: SlogdetResult -reveal_type(np.linalg.slogdet(AR_f8)) # E: SlogdetResult 
-reveal_type(np.linalg.slogdet(AR_c16)) # E: SlogdetResult - -reveal_type(np.linalg.det(AR_i8)) # E: Any -reveal_type(np.linalg.det(AR_f8)) # E: Any -reveal_type(np.linalg.det(AR_c16)) # E: Any - -reveal_type(np.linalg.lstsq(AR_i8, AR_i8)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]], {int32}, ndarray[Any, dtype[{float64}]]] -reveal_type(np.linalg.lstsq(AR_i8, AR_f8)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]], {int32}, ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.linalg.lstsq(AR_f8, AR_c16)) # E: tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[floating[Any]]], {int32}, ndarray[Any, dtype[floating[Any]]]] - -reveal_type(np.linalg.norm(AR_i8)) # E: floating[Any] -reveal_type(np.linalg.norm(AR_f8)) # E: floating[Any] -reveal_type(np.linalg.norm(AR_c16)) # E: floating[Any] -reveal_type(np.linalg.norm(AR_S)) # E: floating[Any] -reveal_type(np.linalg.norm(AR_f8, axis=0)) # E: Any - -reveal_type(np.linalg.multi_dot([AR_i8, AR_i8])) # E: Any -reveal_type(np.linalg.multi_dot([AR_i8, AR_f8])) # E: Any -reveal_type(np.linalg.multi_dot([AR_f8, AR_c16])) # E: Any -reveal_type(np.linalg.multi_dot([AR_O, AR_O])) # E: Any -reveal_type(np.linalg.multi_dot([AR_m, AR_m])) # E: Any +assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.linalg.solve(AR_i8, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.linalg.tensorinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[Any]) + +assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.linalg.qr(AR_i8), QRResult) +assert_type(np.linalg.qr(AR_f8), QRResult) +assert_type(np.linalg.qr(AR_c16), QRResult) + +assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) +assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.floating[Any]] | npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.floating[Any]]) + +assert_type(np.linalg.eig(AR_i8), EigResult) +assert_type(np.linalg.eig(AR_f8), EigResult) +assert_type(np.linalg.eig(AR_c16), EigResult) + +assert_type(np.linalg.eigh(AR_i8), EighResult) +assert_type(np.linalg.eigh(AR_f8), 
EighResult) +assert_type(np.linalg.eigh(AR_c16), EighResult) + +assert_type(np.linalg.svd(AR_i8), SVDResult) +assert_type(np.linalg.svd(AR_f8), SVDResult) +assert_type(np.linalg.svd(AR_c16), SVDResult) +assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating[Any]]) +assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating[Any]]) + +assert_type(np.linalg.cond(AR_i8), Any) +assert_type(np.linalg.cond(AR_f8), Any) +assert_type(np.linalg.cond(AR_c16), Any) + +assert_type(np.linalg.matrix_rank(AR_i8), Any) +assert_type(np.linalg.matrix_rank(AR_f8), Any) +assert_type(np.linalg.matrix_rank(AR_c16), Any) + +assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) +assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) +assert_type(np.linalg.slogdet(AR_c16), SlogdetResult) + +assert_type(np.linalg.det(AR_i8), Any) +assert_type(np.linalg.det(AR_f8), Any) +assert_type(np.linalg.det(AR_c16), Any) + +assert_type(np.linalg.lstsq(AR_i8, AR_i8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], np.int32, npt.NDArray[np.float64]]) +assert_type(np.linalg.lstsq(AR_i8, AR_f8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]], np.int32, npt.NDArray[np.floating[Any]]]) +assert_type(np.linalg.lstsq(AR_f8, AR_c16), tuple[npt.NDArray[np.complexfloating[Any, Any]], npt.NDArray[np.floating[Any]], np.int32, npt.NDArray[np.floating[Any]]]) + +assert_type(np.linalg.norm(AR_i8), np.floating[Any]) +assert_type(np.linalg.norm(AR_f8), np.floating[Any]) +assert_type(np.linalg.norm(AR_c16), np.floating[Any]) +assert_type(np.linalg.norm(AR_S), np.floating[Any]) +assert_type(np.linalg.norm(AR_f8, axis=0), Any) + +assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), Any) +assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), Any) +assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) +assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) +assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) diff --git a/numpy/typing/tests/data/reveal/matrix.pyi b/numpy/typing/tests/data/reveal/matrix.pyi index 21c39067e9b8..3fd1ddb94d25 100644 --- a/numpy/typing/tests/data/reveal/matrix.pyi +++ b/numpy/typing/tests/data/reveal/matrix.pyi @@ -1,69 +1,76 @@ +import sys from typing import Any + import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + mat: np.matrix[Any, np.dtype[np.int64]] ar_f8: npt.NDArray[np.float64] -reveal_type(mat * 5) # E: matrix[Any, Any] -reveal_type(5 * mat) # E: matrix[Any, Any] +assert_type(mat * 5, np.matrix[Any, Any]) +assert_type(5 * mat, np.matrix[Any, Any]) mat *= 5 -reveal_type(mat**5) # E: matrix[Any, Any] +assert_type(mat**5, np.matrix[Any, Any]) mat **= 5 -reveal_type(mat.sum()) # E: Any -reveal_type(mat.mean()) # E: Any -reveal_type(mat.std()) # E: Any -reveal_type(mat.var()) # E: Any -reveal_type(mat.prod()) # E: Any -reveal_type(mat.any()) # E: bool_ -reveal_type(mat.all()) # E: bool_ -reveal_type(mat.max()) # E: {int64} -reveal_type(mat.min()) # E: {int64} -reveal_type(mat.argmax()) # E: {intp} -reveal_type(mat.argmin()) # E: {intp} -reveal_type(mat.ptp()) # E: {int64} +assert_type(mat.sum(), Any) +assert_type(mat.mean(), Any) +assert_type(mat.std(), Any) 
+assert_type(mat.var(), Any) +assert_type(mat.prod(), Any) +assert_type(mat.any(), np.bool_) +assert_type(mat.all(), np.bool_) +assert_type(mat.max(), np.int64) +assert_type(mat.min(), np.int64) +assert_type(mat.argmax(), np.intp) +assert_type(mat.argmin(), np.intp) +assert_type(mat.ptp(), np.int64) -reveal_type(mat.sum(axis=0)) # E: matrix[Any, Any] -reveal_type(mat.mean(axis=0)) # E: matrix[Any, Any] -reveal_type(mat.std(axis=0)) # E: matrix[Any, Any] -reveal_type(mat.var(axis=0)) # E: matrix[Any, Any] -reveal_type(mat.prod(axis=0)) # E: matrix[Any, Any] -reveal_type(mat.any(axis=0)) # E: matrix[Any, dtype[bool_]] -reveal_type(mat.all(axis=0)) # E: matrix[Any, dtype[bool_]] -reveal_type(mat.max(axis=0)) # E: matrix[Any, dtype[{int64}]] -reveal_type(mat.min(axis=0)) # E: matrix[Any, dtype[{int64}]] -reveal_type(mat.argmax(axis=0)) # E: matrix[Any, dtype[{intp}]] -reveal_type(mat.argmin(axis=0)) # E: matrix[Any, dtype[{intp}]] -reveal_type(mat.ptp(axis=0)) # E: matrix[Any, dtype[{int64}]] +assert_type(mat.sum(axis=0), np.matrix[Any, Any]) +assert_type(mat.mean(axis=0), np.matrix[Any, Any]) +assert_type(mat.std(axis=0), np.matrix[Any, Any]) +assert_type(mat.var(axis=0), np.matrix[Any, Any]) +assert_type(mat.prod(axis=0), np.matrix[Any, Any]) +assert_type(mat.any(axis=0), np.matrix[Any, np.dtype[np.bool_]]) +assert_type(mat.all(axis=0), np.matrix[Any, np.dtype[np.bool_]]) +assert_type(mat.max(axis=0), np.matrix[Any, np.dtype[np.int64]]) +assert_type(mat.min(axis=0), np.matrix[Any, np.dtype[np.int64]]) +assert_type(mat.argmax(axis=0), np.matrix[Any, np.dtype[np.intp]]) +assert_type(mat.argmin(axis=0), np.matrix[Any, np.dtype[np.intp]]) +assert_type(mat.ptp(axis=0), np.matrix[Any, np.dtype[np.int64]]) -reveal_type(mat.sum(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(mat.mean(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(mat.std(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(mat.var(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(mat.prod(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(mat.any(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(mat.all(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(mat.max(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(mat.min(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(mat.argmax(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(mat.argmin(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(mat.ptp(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]] +assert_type(mat.sum(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.mean(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.std(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.var(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.prod(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.any(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.all(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.max(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.min(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.argmax(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.argmin(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.ptp(out=ar_f8), npt.NDArray[np.float64]) -reveal_type(mat.T) # E: matrix[Any, dtype[{int64}]] -reveal_type(mat.I) # E: matrix[Any, Any] -reveal_type(mat.A) # E: ndarray[Any, dtype[{int64}]] -reveal_type(mat.A1) # E: ndarray[Any, dtype[{int64}]] -reveal_type(mat.H) # E: matrix[Any, dtype[{int64}]] -reveal_type(mat.getT()) # E: 
matrix[Any, dtype[{int64}]] -reveal_type(mat.getI()) # E: matrix[Any, Any] -reveal_type(mat.getA()) # E: ndarray[Any, dtype[{int64}]] -reveal_type(mat.getA1()) # E: ndarray[Any, dtype[{int64}]] -reveal_type(mat.getH()) # E: matrix[Any, dtype[{int64}]] +assert_type(mat.T, np.matrix[Any, np.dtype[np.int64]]) +assert_type(mat.I, np.matrix[Any, Any]) +assert_type(mat.A, npt.NDArray[np.int64]) +assert_type(mat.A1, npt.NDArray[np.int64]) +assert_type(mat.H, np.matrix[Any, np.dtype[np.int64]]) +assert_type(mat.getT(), np.matrix[Any, np.dtype[np.int64]]) +assert_type(mat.getI(), np.matrix[Any, Any]) +assert_type(mat.getA(), npt.NDArray[np.int64]) +assert_type(mat.getA1(), npt.NDArray[np.int64]) +assert_type(mat.getH(), np.matrix[Any, np.dtype[np.int64]]) -reveal_type(np.bmat(ar_f8)) # E: matrix[Any, Any] -reveal_type(np.bmat([[0, 1, 2]])) # E: matrix[Any, Any] -reveal_type(np.bmat("mat")) # E: matrix[Any, Any] +assert_type(np.bmat(ar_f8), np.matrix[Any, Any]) +assert_type(np.bmat([[0, 1, 2]]), np.matrix[Any, Any]) +assert_type(np.bmat("mat"), np.matrix[Any, Any]) -reveal_type(np.asmatrix(ar_f8, dtype=np.int64)) # E: matrix[Any, Any] +assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix[Any, Any]) diff --git a/numpy/typing/tests/data/reveal/memmap.pyi b/numpy/typing/tests/data/reveal/memmap.pyi index af730749920b..53278ff1122b 100644 --- a/numpy/typing/tests/data/reveal/memmap.pyi +++ b/numpy/typing/tests/data/reveal/memmap.pyi @@ -1,18 +1,25 @@ -import numpy as np +import sys from typing import Any +import numpy as np + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + memmap_obj: np.memmap[Any, np.dtype[np.str_]] -reveal_type(np.memmap.__array_priority__) # E: float -reveal_type(memmap_obj.__array_priority__) # E: float -reveal_type(memmap_obj.filename) # E: Union[builtins.str, None] -reveal_type(memmap_obj.offset) # E: int -reveal_type(memmap_obj.mode) # E: str -reveal_type(memmap_obj.flush()) # E: None +assert_type(np.memmap.__array_priority__, float) +assert_type(memmap_obj.__array_priority__, float) +assert_type(memmap_obj.filename, str | None) +assert_type(memmap_obj.offset, int) +assert_type(memmap_obj.mode, str) +assert_type(memmap_obj.flush(), None) -reveal_type(np.memmap("file.txt", offset=5)) # E: memmap[Any, dtype[{uint8}]] -reveal_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3))) # E: memmap[Any, dtype[{float64}]] +assert_type(np.memmap("file.txt", offset=5), np.memmap[Any, np.dtype[np.uint8]]) +assert_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3)), np.memmap[Any, np.dtype[np.float64]]) with open("file.txt", "rb") as f: - reveal_type(np.memmap(f, dtype=float, order="K")) # E: memmap[Any, dtype[Any]] + assert_type(np.memmap(f, dtype=float, order="K"), np.memmap[Any, np.dtype[Any]]) -reveal_type(memmap_obj.__array_finalize__(object())) # E: None +assert_type(memmap_obj.__array_finalize__(object()), None) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index 55cb6546f8e0..48fee893cd89 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -1,5 +1,14 @@ +import sys from typing import Any + import numpy as np +import numpy.typing as npt +from numpy._typing import _32Bit, _64Bit + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type f8 = np.float64() i8 = np.int64() @@ -16,124 +25,124 @@ b = bool() f = float() i = int() -AR_b: 
np.ndarray[Any, np.dtype[np.bool_]] -AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] +AR_b: npt.NDArray[np.bool_] +AR_m: npt.NDArray[np.timedelta64] # Time structures -reveal_type(td % td) # E: timedelta64 -reveal_type(AR_m % td) # E: Any -reveal_type(td % AR_m) # E: Any +assert_type(td % td, np.timedelta64) +assert_type(AR_m % td, npt.NDArray[np.timedelta64]) +assert_type(td % AR_m, npt.NDArray[np.timedelta64]) -reveal_type(divmod(td, td)) # E: tuple[{int64}, timedelta64] -reveal_type(divmod(AR_m, td)) # E: tuple[ndarray[Any, dtype[signedinteger[typing._64Bit]]], ndarray[Any, dtype[timedelta64]]] -reveal_type(divmod(td, AR_m)) # E: tuple[ndarray[Any, dtype[signedinteger[typing._64Bit]]], ndarray[Any, dtype[timedelta64]]] +assert_type(divmod(td, td), tuple[np.int64, np.timedelta64]) +assert_type(divmod(AR_m, td), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) +assert_type(divmod(td, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) # Bool -reveal_type(b_ % b) # E: {int8} -reveal_type(b_ % i) # E: {int_} -reveal_type(b_ % f) # E: {float64} -reveal_type(b_ % b_) # E: {int8} -reveal_type(b_ % i8) # E: {int64} -reveal_type(b_ % u8) # E: {uint64} -reveal_type(b_ % f8) # E: {float64} -reveal_type(b_ % AR_b) # E: ndarray[Any, dtype[{int8}]] - -reveal_type(divmod(b_, b)) # E: tuple[{int8}, {int8}] -reveal_type(divmod(b_, i)) # E: tuple[{int_}, {int_}] -reveal_type(divmod(b_, f)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(b_, b_)) # E: tuple[{int8}, {int8}] -reveal_type(divmod(b_, i8)) # E: tuple[{int64}, {int64}] -reveal_type(divmod(b_, u8)) # E: tuple[{uint64}, {uint64}] -reveal_type(divmod(b_, f8)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(b_, AR_b)) # E: ndarray[Any, dtype[{int8}]], ndarray[Any, dtype[{int8}]]] - -reveal_type(b % b_) # E: {int8} -reveal_type(i % b_) # E: {int_} -reveal_type(f % b_) # E: {float64} -reveal_type(b_ % b_) # E: {int8} -reveal_type(i8 % b_) # E: {int64} -reveal_type(u8 % b_) # E: {uint64} -reveal_type(f8 % b_) # E: {float64} -reveal_type(AR_b % b_) # E: ndarray[Any, dtype[{int8}]] - -reveal_type(divmod(b, b_)) # E: tuple[{int8}, {int8}] -reveal_type(divmod(i, b_)) # E: tuple[{int_}, {int_}] -reveal_type(divmod(f, b_)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(b_, b_)) # E: tuple[{int8}, {int8}] -reveal_type(divmod(i8, b_)) # E: tuple[{int64}, {int64}] -reveal_type(divmod(u8, b_)) # E: tuple[{uint64}, {uint64}] -reveal_type(divmod(f8, b_)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(AR_b, b_)) # E: ndarray[Any, dtype[{int8}]], ndarray[Any, dtype[{int8}]]] +assert_type(b_ % b, np.int8) +assert_type(b_ % i, np.int_) +assert_type(b_ % f, np.float64) +assert_type(b_ % b_, np.int8) +assert_type(b_ % i8, np.int64) +assert_type(b_ % u8, np.uint64) +assert_type(b_ % f8, np.float64) +assert_type(b_ % AR_b, npt.NDArray[np.int8]) + +assert_type(divmod(b_, b), tuple[np.int8, np.int8]) +assert_type(divmod(b_, i), tuple[np.int_, np.int_]) +assert_type(divmod(b_, f), tuple[np.float64, np.float64]) +assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) +assert_type(divmod(b_, i8), tuple[np.int64, np.int64]) +assert_type(divmod(b_, u8), tuple[np.uint64, np.uint64]) +assert_type(divmod(b_, f8), tuple[np.float64, np.float64]) +assert_type(divmod(b_, AR_b), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) + +assert_type(b % b_, np.int8) +assert_type(i % b_, np.int_) +assert_type(f % b_, np.float64) +assert_type(b_ % b_, np.int8) +assert_type(i8 % b_, np.int64) +assert_type(u8 % b_, np.uint64) +assert_type(f8 % b_, 
np.float64) +assert_type(AR_b % b_, npt.NDArray[np.int8]) + +assert_type(divmod(b, b_), tuple[np.int8, np.int8]) +assert_type(divmod(i, b_), tuple[np.int_, np.int_]) +assert_type(divmod(f, b_), tuple[np.float64, np.float64]) +assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) +assert_type(divmod(i8, b_), tuple[np.int64, np.int64]) +assert_type(divmod(u8, b_), tuple[np.uint64, np.uint64]) +assert_type(divmod(f8, b_), tuple[np.float64, np.float64]) +assert_type(divmod(AR_b, b_), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) # int -reveal_type(i8 % b) # E: {int64} -reveal_type(i8 % f) # E: {float64} -reveal_type(i8 % i8) # E: {int64} -reveal_type(i8 % f8) # E: {float64} -reveal_type(i4 % i8) # E: signedinteger[Union[_32Bit, _64Bit]] -reveal_type(i4 % f8) # E: floating[Union[_64Bit, _32Bit]] -reveal_type(i4 % i4) # E: {int32} -reveal_type(i4 % f4) # E: {float32} -reveal_type(i8 % AR_b) # E: ndarray[Any, dtype[signedinteger[Any]]] - -reveal_type(divmod(i8, b)) # E: tuple[{int64}, {int64}] -reveal_type(divmod(i8, f)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(i8, i8)) # E: tuple[{int64}, {int64}] -reveal_type(divmod(i8, f8)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(i8, i4)) # E: tuple[signedinteger[Union[_64Bit, _32Bit]], signedinteger[Union[_64Bit, _32Bit]]] -reveal_type(divmod(i8, f4)) # E: tuple[floating[Union[_32Bit, _64Bit]], floating[Union[_32Bit, _64Bit]]] -reveal_type(divmod(i4, i4)) # E: tuple[{int32}, {int32}] -reveal_type(divmod(i4, f4)) # E: tuple[{float32}, {float32}] -reveal_type(divmod(i8, AR_b)) # E: tuple[ndarray[Any, dtype[signedinteger[Any]]], ndarray[Any, dtype[signedinteger[Any]]]] - -reveal_type(b % i8) # E: {int64} -reveal_type(f % i8) # E: {float64} -reveal_type(i8 % i8) # E: {int64} -reveal_type(f8 % i8) # E: {float64} -reveal_type(i8 % i4) # E: signedinteger[Union[_64Bit, _32Bit]] -reveal_type(f8 % i4) # E: floating[Union[_64Bit, _32Bit]] -reveal_type(i4 % i4) # E: {int32} -reveal_type(f4 % i4) # E: {float32} -reveal_type(AR_b % i8) # E: ndarray[Any, dtype[signedinteger[Any]]] - -reveal_type(divmod(b, i8)) # E: tuple[{int64}, {int64}] -reveal_type(divmod(f, i8)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(i8, i8)) # E: tuple[{int64}, {int64}] -reveal_type(divmod(f8, i8)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(i4, i8)) # E: tuple[signedinteger[Union[_32Bit, _64Bit]], signedinteger[Union[_32Bit, _64Bit]]] -reveal_type(divmod(f4, i8)) # E: tuple[floating[Union[_32Bit, _64Bit]], floating[Union[_32Bit, _64Bit]]] -reveal_type(divmod(i4, i4)) # E: tuple[{int32}, {int32}] -reveal_type(divmod(f4, i4)) # E: tuple[{float32}, {float32}] -reveal_type(divmod(AR_b, i8)) # E: tuple[ndarray[Any, dtype[signedinteger[Any]]], ndarray[Any, dtype[signedinteger[Any]]]] +assert_type(i8 % b, np.int64) +assert_type(i8 % f, np.float64) +assert_type(i8 % i8, np.int64) +assert_type(i8 % f8, np.float64) +assert_type(i4 % i8, np.signedinteger[_32Bit | _64Bit]) +assert_type(i4 % f8, np.floating[_32Bit | _64Bit]) +assert_type(i4 % i4, np.int32) +assert_type(i4 % f4, np.float32) +assert_type(i8 % AR_b, npt.NDArray[np.signedinteger[Any]]) + +assert_type(divmod(i8, b), tuple[np.int64, np.int64]) +assert_type(divmod(i8, f), tuple[np.float64, np.float64]) +assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) +assert_type(divmod(i8, f8), tuple[np.float64, np.float64]) +assert_type(divmod(i8, i4), tuple[np.signedinteger[_32Bit | _64Bit], np.signedinteger[_32Bit | _64Bit]]) +assert_type(divmod(i8, f4), tuple[np.floating[_32Bit | _64Bit], np.floating[_32Bit 
| _64Bit]]) +assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) +assert_type(divmod(i4, f4), tuple[np.float32, np.float32]) +assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.signedinteger[Any]], npt.NDArray[np.signedinteger[Any]]]) + +assert_type(b % i8, np.int64) +assert_type(f % i8, np.float64) +assert_type(i8 % i8, np.int64) +assert_type(f8 % i8, np.float64) +assert_type(i8 % i4, np.signedinteger[_32Bit | _64Bit]) +assert_type(f8 % i4, np.floating[_32Bit | _64Bit]) +assert_type(i4 % i4, np.int32) +assert_type(f4 % i4, np.float32) +assert_type(AR_b % i8, npt.NDArray[np.signedinteger[Any]]) + +assert_type(divmod(b, i8), tuple[np.int64, np.int64]) +assert_type(divmod(f, i8), tuple[np.float64, np.float64]) +assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) +assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) +assert_type(divmod(i4, i8), tuple[np.signedinteger[_32Bit | _64Bit], np.signedinteger[_32Bit | _64Bit]]) +assert_type(divmod(f4, i8), tuple[np.floating[_32Bit | _64Bit], np.floating[_32Bit | _64Bit]]) +assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) +assert_type(divmod(f4, i4), tuple[np.float32, np.float32]) +assert_type(divmod(AR_b, i8), tuple[npt.NDArray[np.signedinteger[Any]], npt.NDArray[np.signedinteger[Any]]]) # float -reveal_type(f8 % b) # E: {float64} -reveal_type(f8 % f) # E: {float64} -reveal_type(i8 % f4) # E: floating[Union[_32Bit, _64Bit]] -reveal_type(f4 % f4) # E: {float32} -reveal_type(f8 % AR_b) # E: ndarray[Any, dtype[floating[Any]]] - -reveal_type(divmod(f8, b)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(f8, f)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(f8, f8)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(f8, f4)) # E: tuple[floating[Union[_64Bit, _32Bit]], floating[Union[_64Bit, _32Bit]]] -reveal_type(divmod(f4, f4)) # E: tuple[{float32}, {float32}] -reveal_type(divmod(f8, AR_b)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] - -reveal_type(b % f8) # E: {float64} -reveal_type(f % f8) # E: {float64} -reveal_type(f8 % f8) # E: {float64} -reveal_type(f8 % f8) # E: {float64} -reveal_type(f4 % f4) # E: {float32} -reveal_type(AR_b % f8) # E: ndarray[Any, dtype[floating[Any]]] - -reveal_type(divmod(b, f8)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(f, f8)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(f8, f8)) # E: tuple[{float64}, {float64}] -reveal_type(divmod(f4, f8)) # E: tuple[floating[Union[_32Bit, _64Bit]], floating[Union[_32Bit, _64Bit]]] -reveal_type(divmod(f4, f4)) # E: tuple[{float32}, {float32}] -reveal_type(divmod(AR_b, f8)) # E: tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] +assert_type(f8 % b, np.float64) +assert_type(f8 % f, np.float64) +assert_type(i8 % f4, np.floating[_32Bit | _64Bit]) +assert_type(f4 % f4, np.float32) +assert_type(f8 % AR_b, npt.NDArray[np.floating[Any]]) + +assert_type(divmod(f8, b), tuple[np.float64, np.float64]) +assert_type(divmod(f8, f), tuple[np.float64, np.float64]) +assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) +assert_type(divmod(f8, f4), tuple[np.floating[_32Bit | _64Bit], np.floating[_32Bit | _64Bit]]) +assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) +assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) + +assert_type(b % f8, np.float64) +assert_type(f % f8, np.float64) +assert_type(f8 % f8, np.float64) +assert_type(f8 % f8, np.float64) +assert_type(f4 % f4, np.float32) +assert_type(AR_b % f8, 
npt.NDArray[np.floating[Any]]) + +assert_type(divmod(b, f8), tuple[np.float64, np.float64]) +assert_type(divmod(f, f8), tuple[np.float64, np.float64]) +assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) +assert_type(divmod(f4, f8), tuple[np.floating[_32Bit | _64Bit], np.floating[_32Bit | _64Bit]]) +assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) +assert_type(divmod(AR_b, f8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) diff --git a/numpy/typing/tests/data/reveal/modules.pyi b/numpy/typing/tests/data/reveal/modules.pyi index 48730110e1e3..1ab01cd079c2 100644 --- a/numpy/typing/tests/data/reveal/modules.pyi +++ b/numpy/typing/tests/data/reveal/modules.pyi @@ -1,48 +1,56 @@ +import sys +import types + import numpy as np from numpy import f2py -reveal_type(np) # E: ModuleType +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + +assert_type(np, types.ModuleType) -reveal_type(np.char) # E: ModuleType -reveal_type(np.ctypeslib) # E: ModuleType -reveal_type(np.emath) # E: ModuleType -reveal_type(np.fft) # E: ModuleType -reveal_type(np.lib) # E: ModuleType -reveal_type(np.linalg) # E: ModuleType -reveal_type(np.ma) # E: ModuleType -reveal_type(np.matrixlib) # E: ModuleType -reveal_type(np.polynomial) # E: ModuleType -reveal_type(np.random) # E: ModuleType -reveal_type(np.rec) # E: ModuleType -reveal_type(np.testing) # E: ModuleType -reveal_type(np.version) # E: ModuleType -reveal_type(np.exceptions) # E: ModuleType -reveal_type(np.dtypes) # E: ModuleType +assert_type(np.char, types.ModuleType) +assert_type(np.ctypeslib, types.ModuleType) +assert_type(np.emath, types.ModuleType) +assert_type(np.fft, types.ModuleType) +assert_type(np.lib, types.ModuleType) +assert_type(np.linalg, types.ModuleType) +assert_type(np.ma, types.ModuleType) +assert_type(np.matrixlib, types.ModuleType) +assert_type(np.polynomial, types.ModuleType) +assert_type(np.random, types.ModuleType) +assert_type(np.rec, types.ModuleType) +assert_type(np.testing, types.ModuleType) +assert_type(np.version, types.ModuleType) +assert_type(np.exceptions, types.ModuleType) +assert_type(np.dtypes, types.ModuleType) -reveal_type(np.lib.format) # E: ModuleType -reveal_type(np.lib.mixins) # E: ModuleType -reveal_type(np.lib.scimath) # E: ModuleType -reveal_type(np.lib.stride_tricks) # E: ModuleType -reveal_type(np.ma.extras) # E: ModuleType -reveal_type(np.polynomial.chebyshev) # E: ModuleType -reveal_type(np.polynomial.hermite) # E: ModuleType -reveal_type(np.polynomial.hermite_e) # E: ModuleType -reveal_type(np.polynomial.laguerre) # E: ModuleType -reveal_type(np.polynomial.legendre) # E: ModuleType -reveal_type(np.polynomial.polynomial) # E: ModuleType +assert_type(np.lib.format, types.ModuleType) +assert_type(np.lib.mixins, types.ModuleType) +assert_type(np.lib.scimath, types.ModuleType) +assert_type(np.lib.stride_tricks, types.ModuleType) +assert_type(np.ma.extras, types.ModuleType) +assert_type(np.polynomial.chebyshev, types.ModuleType) +assert_type(np.polynomial.hermite, types.ModuleType) +assert_type(np.polynomial.hermite_e, types.ModuleType) +assert_type(np.polynomial.laguerre, types.ModuleType) +assert_type(np.polynomial.legendre, types.ModuleType) +assert_type(np.polynomial.polynomial, types.ModuleType) -reveal_type(np.__path__) # E: list[builtins.str] -reveal_type(np.__version__) # E: str -reveal_type(np.test) # E: _pytesttester.PytestTester -reveal_type(np.test.module_name) # E: str +assert_type(np.__path__, list[str]) 
+assert_type(np.__version__, str) +assert_type(np.test, np._pytesttester.PytestTester) +assert_type(np.test.module_name, str) -reveal_type(np.__all__) # E: list[builtins.str] -reveal_type(np.char.__all__) # E: list[builtins.str] -reveal_type(np.ctypeslib.__all__) # E: list[builtins.str] -reveal_type(np.emath.__all__) # E: list[builtins.str] -reveal_type(np.lib.__all__) # E: list[builtins.str] -reveal_type(np.ma.__all__) # E: list[builtins.str] -reveal_type(np.random.__all__) # E: list[builtins.str] -reveal_type(np.rec.__all__) # E: list[builtins.str] -reveal_type(np.testing.__all__) # E: list[builtins.str] -reveal_type(f2py.__all__) # E: list[builtins.str] +assert_type(np.__all__, list[str]) +assert_type(np.char.__all__, list[str]) +assert_type(np.ctypeslib.__all__, list[str]) +assert_type(np.emath.__all__, list[str]) +assert_type(np.lib.__all__, list[str]) +assert_type(np.ma.__all__, list[str]) +assert_type(np.random.__all__, list[str]) +assert_type(np.rec.__all__, list[str]) +assert_type(np.testing.__all__, list[str]) +assert_type(f2py.__all__, list[str]) diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index d8e0a956baeb..4254b796df76 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -1,3 +1,4 @@ +import sys import datetime as dt from typing import Any, TypeVar from pathlib import Path @@ -5,6 +6,11 @@ from pathlib import Path import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + _SCT = TypeVar("_SCT", bound=np.generic, covariant=True) class SubClass(np.ndarray[Any, np.dtype[_SCT]]): ... @@ -34,111 +40,111 @@ timedelta_seq: list[dt.timedelta] def func(a: int) -> bool: ... -reveal_type(next(b_f8)) # E: tuple[Any, ...] -reveal_type(b_f8.reset()) # E: None -reveal_type(b_f8.index) # E: int -reveal_type(b_f8.iters) # E: tuple[flatiter[Any], ...] -reveal_type(b_f8.nd) # E: int -reveal_type(b_f8.ndim) # E: int -reveal_type(b_f8.numiter) # E: int -reveal_type(b_f8.shape) # E: tuple[builtins.int, ...] -reveal_type(b_f8.size) # E: int +assert_type(next(b_f8), tuple[Any, ...]) +assert_type(b_f8.reset(), None) +assert_type(b_f8.index, int) +assert_type(b_f8.iters, tuple[np.flatiter[Any], ...]) +assert_type(b_f8.nd, int) +assert_type(b_f8.ndim, int) +assert_type(b_f8.numiter, int) +assert_type(b_f8.shape, tuple[int, ...]) +assert_type(b_f8.size, int) -reveal_type(next(b_i8_f8_f8)) # E: tuple[Any, ...] -reveal_type(b_i8_f8_f8.reset()) # E: None -reveal_type(b_i8_f8_f8.index) # E: int -reveal_type(b_i8_f8_f8.iters) # E: tuple[flatiter[Any], ...] -reveal_type(b_i8_f8_f8.nd) # E: int -reveal_type(b_i8_f8_f8.ndim) # E: int -reveal_type(b_i8_f8_f8.numiter) # E: int -reveal_type(b_i8_f8_f8.shape) # E: tuple[builtins.int, ...] -reveal_type(b_i8_f8_f8.size) # E: int +assert_type(next(b_i8_f8_f8), tuple[Any, ...]) +assert_type(b_i8_f8_f8.reset(), None) +assert_type(b_i8_f8_f8.index, int) +assert_type(b_i8_f8_f8.iters, tuple[np.flatiter[Any], ...]) +assert_type(b_i8_f8_f8.nd, int) +assert_type(b_i8_f8_f8.ndim, int) +assert_type(b_i8_f8_f8.numiter, int) +assert_type(b_i8_f8_f8.shape, tuple[int, ...]) +assert_type(b_i8_f8_f8.size, int) -reveal_type(np.inner(AR_f8, AR_i8)) # E: Any +assert_type(np.inner(AR_f8, AR_i8), Any) -reveal_type(np.where([True, True, False])) # E: tuple[ndarray[Any, dtype[{intp}]], ...] 
-reveal_type(np.where([True, True, False], 1, 0)) # E: ndarray[Any, dtype[Any]] +assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.where([True, True, False], 1, 0), npt.NDArray[Any]) -reveal_type(np.lexsort([0, 1, 2])) # E: Any +assert_type(np.lexsort([0, 1, 2]), Any) -reveal_type(np.can_cast(np.dtype("i8"), int)) # E: bool -reveal_type(np.can_cast(AR_f8, "f8")) # E: bool -reveal_type(np.can_cast(AR_f8, np.complex128, casting="unsafe")) # E: bool +assert_type(np.can_cast(np.dtype("i8"), int), bool) +assert_type(np.can_cast(AR_f8, "f8"), bool) +assert_type(np.can_cast(AR_f8, np.complex128, casting="unsafe"), bool) -reveal_type(np.min_scalar_type([1])) # E: dtype[Any] -reveal_type(np.min_scalar_type(AR_f8)) # E: dtype[Any] +assert_type(np.min_scalar_type([1]), np.dtype[Any]) +assert_type(np.min_scalar_type(AR_f8), np.dtype[Any]) -reveal_type(np.result_type(int, [1])) # E: dtype[Any] -reveal_type(np.result_type(AR_f8, AR_u1)) # E: dtype[Any] -reveal_type(np.result_type(AR_f8, np.complex128)) # E: dtype[Any] +assert_type(np.result_type(int, [1]), np.dtype[Any]) +assert_type(np.result_type(AR_f8, AR_u1), np.dtype[Any]) +assert_type(np.result_type(AR_f8, np.complex128), np.dtype[Any]) -reveal_type(np.dot(AR_LIKE_f, AR_i8)) # E: Any -reveal_type(np.dot(AR_u1, 1)) # E: Any -reveal_type(np.dot(1.5j, 1)) # E: Any -reveal_type(np.dot(AR_u1, 1, out=AR_f8)) # E: ndarray[Any, dtype[{float64}]] +assert_type(np.dot(AR_LIKE_f, AR_i8), Any) +assert_type(np.dot(AR_u1, 1), Any) +assert_type(np.dot(1.5j, 1), Any) +assert_type(np.dot(AR_u1, 1, out=AR_f8), npt.NDArray[np.float64]) -reveal_type(np.vdot(AR_LIKE_f, AR_i8)) # E: floating[Any] -reveal_type(np.vdot(AR_u1, 1)) # E: signedinteger[Any] -reveal_type(np.vdot(1.5j, 1)) # E: complexfloating[Any, Any] +assert_type(np.vdot(AR_LIKE_f, AR_i8), np.floating[Any]) +assert_type(np.vdot(AR_u1, 1), np.signedinteger[Any]) +assert_type(np.vdot(1.5j, 1), np.complexfloating[Any, Any]) -reveal_type(np.bincount(AR_i8)) # E: ndarray[Any, dtype[{intp}]] +assert_type(np.bincount(AR_i8), npt.NDArray[np.intp]) -reveal_type(np.copyto(AR_f8, [1., 1.5, 1.6])) # E: None +assert_type(np.copyto(AR_f8, [1., 1.5, 1.6]), None) -reveal_type(np.putmask(AR_f8, [True, True, False], 1.5)) # E: None +assert_type(np.putmask(AR_f8, [True, True, False], 1.5), None) -reveal_type(np.packbits(AR_i8)) # ndarray[Any, dtype[{uint8}]] -reveal_type(np.packbits(AR_u1)) # ndarray[Any, dtype[{uint8}]] +assert_type(np.packbits(AR_i8), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_u1), npt.NDArray[np.uint8]) -reveal_type(np.unpackbits(AR_u1)) # ndarray[Any, dtype[{uint8}]] +assert_type(np.unpackbits(AR_u1), npt.NDArray[np.uint8]) -reveal_type(np.shares_memory(1, 2)) # E: bool -reveal_type(np.shares_memory(AR_f8, AR_f8, max_work=1)) # E: bool +assert_type(np.shares_memory(1, 2), bool) +assert_type(np.shares_memory(AR_f8, AR_f8, max_work=1), bool) -reveal_type(np.may_share_memory(1, 2)) # E: bool -reveal_type(np.may_share_memory(AR_f8, AR_f8, max_work=1)) # E: bool +assert_type(np.may_share_memory(1, 2), bool) +assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=1), bool) -reveal_type(np.geterrobj()) # E: list[Any] +assert_type(np.geterrobj(), list[Any]) -reveal_type(np.seterrobj([8192, 521, None])) # E: None +assert_type(np.seterrobj([8192, 521, None]), None) -reveal_type(np.promote_types(np.int32, np.int64)) # E: dtype[Any] -reveal_type(np.promote_types("f4", float)) # E: dtype[Any] +assert_type(np.promote_types(np.int32, np.int64), np.dtype[Any]) 
+assert_type(np.promote_types("f4", float), np.dtype[Any]) -reveal_type(np.frompyfunc(func, 1, 1, identity=None)) # ufunc +assert_type(np.frompyfunc(func, 1, 1, identity=None), np.ufunc) -reveal_type(np.datetime_data("m8[D]")) # E: tuple[builtins.str, builtins.int] -reveal_type(np.datetime_data(np.datetime64)) # E: tuple[builtins.str, builtins.int] -reveal_type(np.datetime_data(np.dtype(np.timedelta64))) # E: tuple[builtins.str, builtins.int] +assert_type(np.datetime_data("m8[D]"), tuple[str, int]) +assert_type(np.datetime_data(np.datetime64), tuple[str, int]) +assert_type(np.datetime_data(np.dtype(np.timedelta64)), tuple[str, int]) -reveal_type(np.busday_count("2011-01", "2011-02")) # E: {int_} -reveal_type(np.busday_count(["2011-01"], "2011-02")) # E: ndarray[Any, dtype[{int_}]] -reveal_type(np.busday_count(["2011-01"], date_scalar)) # E: ndarray[Any, dtype[{int_}]] +assert_type(np.busday_count("2011-01", "2011-02"), np.int_) +assert_type(np.busday_count(["2011-01"], "2011-02"), npt.NDArray[np.int_]) +assert_type(np.busday_count(["2011-01"], date_scalar), npt.NDArray[np.int_]) -reveal_type(np.busday_offset(M, m)) # E: datetime64 -reveal_type(np.busday_offset(date_scalar, m)) # E: datetime64 -reveal_type(np.busday_offset(M, 5)) # E: datetime64 -reveal_type(np.busday_offset(AR_M, m)) # E: ndarray[Any, dtype[datetime64]] -reveal_type(np.busday_offset(M, timedelta_seq)) # E: ndarray[Any, dtype[datetime64]] -reveal_type(np.busday_offset("2011-01", "2011-02", roll="forward")) # E: datetime64 -reveal_type(np.busday_offset(["2011-01"], "2011-02", roll="forward")) # E: ndarray[Any, dtype[datetime64]] +assert_type(np.busday_offset(M, m), np.datetime64) +assert_type(np.busday_offset(date_scalar, m), np.datetime64) +assert_type(np.busday_offset(M, 5), np.datetime64) +assert_type(np.busday_offset(AR_M, m), npt.NDArray[np.datetime64]) +assert_type(np.busday_offset(M, timedelta_seq), npt.NDArray[np.datetime64]) +assert_type(np.busday_offset("2011-01", "2011-02", roll="forward"), np.datetime64) +assert_type(np.busday_offset(["2011-01"], "2011-02", roll="forward"), npt.NDArray[np.datetime64]) -reveal_type(np.is_busday("2012")) # E: bool_ -reveal_type(np.is_busday(date_scalar)) # E: bool_ -reveal_type(np.is_busday(["2012"])) # E: ndarray[Any, dtype[bool_]] +assert_type(np.is_busday("2012"), np.bool_) +assert_type(np.is_busday(date_scalar), np.bool_) +assert_type(np.is_busday(["2012"]), npt.NDArray[np.bool_]) -reveal_type(np.datetime_as_string(M)) # E: str_ -reveal_type(np.datetime_as_string(AR_M)) # E: ndarray[Any, dtype[str_]] +assert_type(np.datetime_as_string(M), np.str_) +assert_type(np.datetime_as_string(AR_M), npt.NDArray[np.str_]) -reveal_type(np.busdaycalendar(holidays=date_seq)) # E: busdaycalendar -reveal_type(np.busdaycalendar(holidays=[M])) # E: busdaycalendar +assert_type(np.busdaycalendar(holidays=date_seq), np.busdaycalendar) +assert_type(np.busdaycalendar(holidays=[M]), np.busdaycalendar) -reveal_type(np.compare_chararrays("a", "b", "!=", rstrip=False)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.compare_chararrays(b"a", b"a", "==", True)) # E: ndarray[Any, dtype[bool_]] +assert_type(np.compare_chararrays("a", "b", "!=", rstrip=False), npt.NDArray[np.bool_]) +assert_type(np.compare_chararrays(b"a", b"a", "==", True), npt.NDArray[np.bool_]) -reveal_type(np.add_docstring(func, "test")) # E: None +assert_type(np.add_docstring(func, "test"), None) -reveal_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["c_index"])) # E: tuple[nditer, ...] 
-reveal_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["readonly", "readonly"]])) # E: tuple[nditer, ...] -reveal_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_dtypes=np.int_)) # E: tuple[nditer, ...] -reveal_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], order="C", casting="no")) # E: tuple[nditer, ...] +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["c_index"]), tuple[np.nditer, ...]) +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["readonly", "readonly"]]), tuple[np.nditer, ...]) +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_dtypes=np.int_), tuple[np.nditer, ...]) +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], order="C", casting="no"), tuple[np.nditer, ...]) diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index f8a30689dc8b..ac2eb1d25323 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -1,8 +1,14 @@ -from __future__ import annotations - +import sys from typing import TypeVar + import numpy as np import numpy.typing as npt +from numpy._typing import _64Bit, _32Bit + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type T1 = TypeVar("T1", bound=npt.NBitBase) T2 = TypeVar("T2", bound=npt.NBitBase) @@ -15,7 +21,7 @@ i4: np.int32 f8: np.float64 f4: np.float32 -reveal_type(add(f8, i8)) # E: {float64} -reveal_type(add(f4, i8)) # E: floating[Union[_32Bit, _64Bit]] -reveal_type(add(f8, i4)) # E: floating[Union[_64Bit, _32Bit]] -reveal_type(add(f4, i4)) # E: {float32} +assert_type(add(f8, i8), np.float64) +assert_type(add(f4, i8), np.floating[_32Bit | _64Bit]) +assert_type(add(f8, i4), np.floating[_32Bit | _64Bit]) +assert_type(add(f4, i4), np.float32) diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index 6885d4fd6574..a2fe73891f84 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -1,16 +1,24 @@ +import sys +from typing import Any + import numpy as np import numpy.typing as npt -nd: npt.NDArray[np.int_] = np.array([[1, 2], [3, 4]]) +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + +nd: npt.NDArray[np.int_] # item -reveal_type(nd.item()) # E: int -reveal_type(nd.item(1)) # E: int -reveal_type(nd.item(0, 1)) # E: int -reveal_type(nd.item((0, 1))) # E: int +assert_type(nd.item(), int) +assert_type(nd.item(1), int) +assert_type(nd.item(0, 1), int) +assert_type(nd.item((0, 1)), int) # tolist -reveal_type(nd.tolist()) # E: Any +assert_type(nd.tolist(), Any) # itemset does not return a value # tostring is pretty simple @@ -20,32 +28,32 @@ reveal_type(nd.tolist()) # E: Any # dumps is pretty simple # astype -reveal_type(nd.astype("float")) # E: ndarray[Any, dtype[Any]] -reveal_type(nd.astype(float)) # E: ndarray[Any, dtype[Any]] -reveal_type(nd.astype(np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(nd.astype(np.float64, "K")) # E: ndarray[Any, dtype[{float64}]] -reveal_type(nd.astype(np.float64, "K", "unsafe")) # E: ndarray[Any, dtype[{float64}]] -reveal_type(nd.astype(np.float64, "K", "unsafe", True)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(nd.astype(np.float64, "K", "unsafe", True, True)) # E: ndarray[Any, dtype[{float64}]] +assert_type(nd.astype("float"), 
npt.NDArray[Any]) +assert_type(nd.astype(float), npt.NDArray[Any]) +assert_type(nd.astype(np.float64), npt.NDArray[np.float64]) +assert_type(nd.astype(np.float64, "K"), npt.NDArray[np.float64]) +assert_type(nd.astype(np.float64, "K", "unsafe"), npt.NDArray[np.float64]) +assert_type(nd.astype(np.float64, "K", "unsafe", True), npt.NDArray[np.float64]) +assert_type(nd.astype(np.float64, "K", "unsafe", True, True), npt.NDArray[np.float64]) # byteswap -reveal_type(nd.byteswap()) # E: ndarray[Any, dtype[{int_}]] -reveal_type(nd.byteswap(True)) # E: ndarray[Any, dtype[{int_}]] +assert_type(nd.byteswap(), npt.NDArray[np.int_]) +assert_type(nd.byteswap(True), npt.NDArray[np.int_]) # copy -reveal_type(nd.copy()) # E: ndarray[Any, dtype[{int_}]] -reveal_type(nd.copy("C")) # E: ndarray[Any, dtype[{int_}]] +assert_type(nd.copy(), npt.NDArray[np.int_]) +assert_type(nd.copy("C"), npt.NDArray[np.int_]) -reveal_type(nd.view()) # E: ndarray[Any, dtype[{int_}]] -reveal_type(nd.view(np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(nd.view(float)) # E: ndarray[Any, dtype[Any]] -reveal_type(nd.view(np.float64, np.matrix)) # E: matrix[Any, Any] +assert_type(nd.view(), npt.NDArray[np.int_]) +assert_type(nd.view(np.float64), npt.NDArray[np.float64]) +assert_type(nd.view(float), npt.NDArray[Any]) +assert_type(nd.view(np.float64, np.matrix), np.matrix[Any, Any]) # getfield -reveal_type(nd.getfield("float")) # E: ndarray[Any, dtype[Any]] -reveal_type(nd.getfield(float)) # E: ndarray[Any, dtype[Any]] -reveal_type(nd.getfield(np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(nd.getfield(np.float64, 8)) # E: ndarray[Any, dtype[{float64}]] +assert_type(nd.getfield("float"), npt.NDArray[Any]) +assert_type(nd.getfield(float), npt.NDArray[Any]) +assert_type(nd.getfield(np.float64), npt.NDArray[np.float64]) +assert_type(nd.getfield(np.float64, 8), npt.NDArray[np.float64]) # setflags does not return a value # fill does not return a value diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 4da87b662179..4c1f0935862d 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -6,215 +6,221 @@ function-based counterpart in `../from_numeric.py`. """ +import sys import operator import ctypes as ct -from typing import Any +from typing import Any, Literal import numpy as np -from numpy._typing import NDArray +import numpy.typing as npt -class SubClass(NDArray[np.object_]): ... +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + +class SubClass(npt.NDArray[np.object_]): ... 
f8: np.float64 B: SubClass -AR_f8: NDArray[np.float64] -AR_i8: NDArray[np.int64] -AR_U: NDArray[np.str_] -AR_V: NDArray[np.void] +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] +AR_U: npt.NDArray[np.str_] +AR_V: npt.NDArray[np.void] ctypes_obj = AR_f8.ctypes -reveal_type(AR_f8.__dlpack__()) # E: Any -reveal_type(AR_f8.__dlpack_device__()) # E: tuple[int, Literal[0]] - -reveal_type(ctypes_obj.data) # E: int -reveal_type(ctypes_obj.shape) # E: ctypes.Array[{c_intp}] -reveal_type(ctypes_obj.strides) # E: ctypes.Array[{c_intp}] -reveal_type(ctypes_obj._as_parameter_) # E: ctypes.c_void_p - -reveal_type(ctypes_obj.data_as(ct.c_void_p)) # E: ctypes.c_void_p -reveal_type(ctypes_obj.shape_as(ct.c_longlong)) # E: ctypes.Array[ctypes.c_longlong] -reveal_type(ctypes_obj.strides_as(ct.c_ubyte)) # E: ctypes.Array[ctypes.c_ubyte] - -reveal_type(f8.all()) # E: bool_ -reveal_type(AR_f8.all()) # E: bool_ -reveal_type(AR_f8.all(axis=0)) # E: Any -reveal_type(AR_f8.all(keepdims=True)) # E: Any -reveal_type(AR_f8.all(out=B)) # E: SubClass - -reveal_type(f8.any()) # E: bool_ -reveal_type(AR_f8.any()) # E: bool_ -reveal_type(AR_f8.any(axis=0)) # E: Any -reveal_type(AR_f8.any(keepdims=True)) # E: Any -reveal_type(AR_f8.any(out=B)) # E: SubClass - -reveal_type(f8.argmax()) # E: {intp} -reveal_type(AR_f8.argmax()) # E: {intp} -reveal_type(AR_f8.argmax(axis=0)) # E: Any -reveal_type(AR_f8.argmax(out=B)) # E: SubClass - -reveal_type(f8.argmin()) # E: {intp} -reveal_type(AR_f8.argmin()) # E: {intp} -reveal_type(AR_f8.argmin(axis=0)) # E: Any -reveal_type(AR_f8.argmin(out=B)) # E: SubClass - -reveal_type(f8.argsort()) # E: ndarray[Any, Any] -reveal_type(AR_f8.argsort()) # E: ndarray[Any, Any] - -reveal_type(f8.astype(np.int64).choose([()])) # E: ndarray[Any, Any] -reveal_type(AR_f8.choose([0])) # E: ndarray[Any, Any] -reveal_type(AR_f8.choose([0], out=B)) # E: SubClass - -reveal_type(f8.clip(1)) # E: Any -reveal_type(AR_f8.clip(1)) # E: Any -reveal_type(AR_f8.clip(None, 1)) # E: Any -reveal_type(AR_f8.clip(1, out=B)) # E: SubClass -reveal_type(AR_f8.clip(None, 1, out=B)) # E: SubClass - -reveal_type(f8.compress([0])) # E: ndarray[Any, Any] -reveal_type(AR_f8.compress([0])) # E: ndarray[Any, Any] -reveal_type(AR_f8.compress([0], out=B)) # E: SubClass - -reveal_type(f8.conj()) # E: {float64} -reveal_type(AR_f8.conj()) # E: ndarray[Any, dtype[{float64}]] -reveal_type(B.conj()) # E: SubClass - -reveal_type(f8.conjugate()) # E: {float64} -reveal_type(AR_f8.conjugate()) # E: ndarray[Any, dtype[{float64}]] -reveal_type(B.conjugate()) # E: SubClass - -reveal_type(f8.cumprod()) # E: ndarray[Any, Any] -reveal_type(AR_f8.cumprod()) # E: ndarray[Any, Any] -reveal_type(AR_f8.cumprod(out=B)) # E: SubClass - -reveal_type(f8.cumsum()) # E: ndarray[Any, Any] -reveal_type(AR_f8.cumsum()) # E: ndarray[Any, Any] -reveal_type(AR_f8.cumsum(out=B)) # E: SubClass - -reveal_type(f8.max()) # E: Any -reveal_type(AR_f8.max()) # E: Any -reveal_type(AR_f8.max(axis=0)) # E: Any -reveal_type(AR_f8.max(keepdims=True)) # E: Any -reveal_type(AR_f8.max(out=B)) # E: SubClass - -reveal_type(f8.mean()) # E: Any -reveal_type(AR_f8.mean()) # E: Any -reveal_type(AR_f8.mean(axis=0)) # E: Any -reveal_type(AR_f8.mean(keepdims=True)) # E: Any -reveal_type(AR_f8.mean(out=B)) # E: SubClass - -reveal_type(f8.min()) # E: Any -reveal_type(AR_f8.min()) # E: Any -reveal_type(AR_f8.min(axis=0)) # E: Any -reveal_type(AR_f8.min(keepdims=True)) # E: Any -reveal_type(AR_f8.min(out=B)) # E: SubClass - -reveal_type(f8.newbyteorder()) # E: {float64} 
-reveal_type(AR_f8.newbyteorder()) # E: ndarray[Any, dtype[{float64}]] -reveal_type(B.newbyteorder('|')) # E: SubClass - -reveal_type(f8.prod()) # E: Any -reveal_type(AR_f8.prod()) # E: Any -reveal_type(AR_f8.prod(axis=0)) # E: Any -reveal_type(AR_f8.prod(keepdims=True)) # E: Any -reveal_type(AR_f8.prod(out=B)) # E: SubClass - -reveal_type(f8.ptp()) # E: Any -reveal_type(AR_f8.ptp()) # E: Any -reveal_type(AR_f8.ptp(axis=0)) # E: Any -reveal_type(AR_f8.ptp(keepdims=True)) # E: Any -reveal_type(AR_f8.ptp(out=B)) # E: SubClass - -reveal_type(f8.round()) # E: {float64} -reveal_type(AR_f8.round()) # E: ndarray[Any, dtype[{float64}]] -reveal_type(AR_f8.round(out=B)) # E: SubClass - -reveal_type(f8.repeat(1)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(AR_f8.repeat(1)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(B.repeat(1)) # E: ndarray[Any, dtype[object_]] - -reveal_type(f8.std()) # E: Any -reveal_type(AR_f8.std()) # E: Any -reveal_type(AR_f8.std(axis=0)) # E: Any -reveal_type(AR_f8.std(keepdims=True)) # E: Any -reveal_type(AR_f8.std(out=B)) # E: SubClass - -reveal_type(f8.sum()) # E: Any -reveal_type(AR_f8.sum()) # E: Any -reveal_type(AR_f8.sum(axis=0)) # E: Any -reveal_type(AR_f8.sum(keepdims=True)) # E: Any -reveal_type(AR_f8.sum(out=B)) # E: SubClass - -reveal_type(f8.take(0)) # E: {float64} -reveal_type(AR_f8.take(0)) # E: {float64} -reveal_type(AR_f8.take([0])) # E: ndarray[Any, dtype[{float64}]] -reveal_type(AR_f8.take(0, out=B)) # E: SubClass -reveal_type(AR_f8.take([0], out=B)) # E: SubClass - -reveal_type(f8.var()) # E: Any -reveal_type(AR_f8.var()) # E: Any -reveal_type(AR_f8.var(axis=0)) # E: Any -reveal_type(AR_f8.var(keepdims=True)) # E: Any -reveal_type(AR_f8.var(out=B)) # E: SubClass - -reveal_type(AR_f8.argpartition([0])) # E: ndarray[Any, dtype[{intp}]] - -reveal_type(AR_f8.diagonal()) # E: ndarray[Any, dtype[{float64}]] - -reveal_type(AR_f8.dot(1)) # E: ndarray[Any, Any] -reveal_type(AR_f8.dot([1])) # E: Any -reveal_type(AR_f8.dot(1, out=B)) # E: SubClass - -reveal_type(AR_f8.nonzero()) # E: tuple[ndarray[Any, dtype[{intp}]], ...] 
- -reveal_type(AR_f8.searchsorted(1)) # E: {intp} -reveal_type(AR_f8.searchsorted([1])) # E: ndarray[Any, dtype[{intp}]] - -reveal_type(AR_f8.trace()) # E: Any -reveal_type(AR_f8.trace(out=B)) # E: SubClass - -reveal_type(AR_f8.item()) # E: float -reveal_type(AR_U.item()) # E: str - -reveal_type(AR_f8.ravel()) # E: ndarray[Any, dtype[{float64}]] -reveal_type(AR_U.ravel()) # E: ndarray[Any, dtype[str_]] - -reveal_type(AR_f8.flatten()) # E: ndarray[Any, dtype[{float64}]] -reveal_type(AR_U.flatten()) # E: ndarray[Any, dtype[str_]] - -reveal_type(AR_f8.reshape(1)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(AR_U.reshape(1)) # E: ndarray[Any, dtype[str_]] - -reveal_type(int(AR_f8)) # E: int -reveal_type(int(AR_U)) # E: int - -reveal_type(float(AR_f8)) # E: float -reveal_type(float(AR_U)) # E: float - -reveal_type(complex(AR_f8)) # E: complex - -reveal_type(operator.index(AR_i8)) # E: int - -reveal_type(AR_f8.__array_prepare__(B)) # E: ndarray[Any, dtype[object_]] -reveal_type(AR_f8.__array_wrap__(B)) # E: ndarray[Any, dtype[object_]] - -reveal_type(AR_V[0]) # E: Any -reveal_type(AR_V[0, 0]) # E: Any -reveal_type(AR_V[AR_i8]) # E: ndarray[Any, dtype[void]] -reveal_type(AR_V[AR_i8, AR_i8]) # E: ndarray[Any, dtype[void]] -reveal_type(AR_V[AR_i8, None]) # E: ndarray[Any, dtype[void]] -reveal_type(AR_V[0, ...]) # E: ndarray[Any, dtype[void]] -reveal_type(AR_V[[0]]) # E: ndarray[Any, dtype[void]] -reveal_type(AR_V[[0], [0]]) # E: ndarray[Any, dtype[void]] -reveal_type(AR_V[:]) # E: ndarray[Any, dtype[void]] -reveal_type(AR_V["a"]) # E: ndarray[Any, dtype[Any]] -reveal_type(AR_V[["a", "b"]]) # E: ndarray[Any, dtype[void]] +assert_type(AR_f8.__dlpack__(), Any) +assert_type(AR_f8.__dlpack_device__(), tuple[int, Literal[0]]) + +assert_type(ctypes_obj.data, int) +assert_type(ctypes_obj.shape, ct.Array[np.ctypeslib.c_intp]) +assert_type(ctypes_obj.strides, ct.Array[np.ctypeslib.c_intp]) +assert_type(ctypes_obj._as_parameter_, ct.c_void_p) + +assert_type(ctypes_obj.data_as(ct.c_void_p), ct.c_void_p) +assert_type(ctypes_obj.shape_as(ct.c_longlong), ct.Array[ct.c_longlong]) +assert_type(ctypes_obj.strides_as(ct.c_ubyte), ct.Array[ct.c_ubyte]) + +assert_type(f8.all(), np.bool_) +assert_type(AR_f8.all(), np.bool_) +assert_type(AR_f8.all(axis=0), Any) +assert_type(AR_f8.all(keepdims=True), Any) +assert_type(AR_f8.all(out=B), SubClass) + +assert_type(f8.any(), np.bool_) +assert_type(AR_f8.any(), np.bool_) +assert_type(AR_f8.any(axis=0), Any) +assert_type(AR_f8.any(keepdims=True), Any) +assert_type(AR_f8.any(out=B), SubClass) + +assert_type(f8.argmax(), np.intp) +assert_type(AR_f8.argmax(), np.intp) +assert_type(AR_f8.argmax(axis=0), Any) +assert_type(AR_f8.argmax(out=B), SubClass) + +assert_type(f8.argmin(), np.intp) +assert_type(AR_f8.argmin(), np.intp) +assert_type(AR_f8.argmin(axis=0), Any) +assert_type(AR_f8.argmin(out=B), SubClass) + +assert_type(f8.argsort(), np.ndarray[Any, Any]) +assert_type(AR_f8.argsort(), np.ndarray[Any, Any]) + +assert_type(f8.astype(np.int64).choose([()]), np.ndarray[Any, Any]) +assert_type(AR_f8.choose([0]), np.ndarray[Any, Any]) +assert_type(AR_f8.choose([0], out=B), SubClass) + +assert_type(f8.clip(1), np.ndarray[Any, Any]) +assert_type(AR_f8.clip(1), np.ndarray[Any, Any]) +assert_type(AR_f8.clip(None, 1), np.ndarray[Any, Any]) +assert_type(AR_f8.clip(1, out=B), SubClass) +assert_type(AR_f8.clip(None, 1, out=B), SubClass) + +assert_type(f8.compress([0]), np.ndarray[Any, Any]) +assert_type(AR_f8.compress([0]), np.ndarray[Any, Any]) +assert_type(AR_f8.compress([0], out=B), 
SubClass) + +assert_type(f8.conj(), np.float64) +assert_type(AR_f8.conj(), npt.NDArray[np.float64]) +assert_type(B.conj(), SubClass) + +assert_type(f8.conjugate(), np.float64) +assert_type(AR_f8.conjugate(), npt.NDArray[np.float64]) +assert_type(B.conjugate(), SubClass) + +assert_type(f8.cumprod(), np.ndarray[Any, Any]) +assert_type(AR_f8.cumprod(), np.ndarray[Any, Any]) +assert_type(AR_f8.cumprod(out=B), SubClass) + +assert_type(f8.cumsum(), np.ndarray[Any, Any]) +assert_type(AR_f8.cumsum(), np.ndarray[Any, Any]) +assert_type(AR_f8.cumsum(out=B), SubClass) + +assert_type(f8.max(), Any) +assert_type(AR_f8.max(), Any) +assert_type(AR_f8.max(axis=0), Any) +assert_type(AR_f8.max(keepdims=True), Any) +assert_type(AR_f8.max(out=B), SubClass) + +assert_type(f8.mean(), Any) +assert_type(AR_f8.mean(), Any) +assert_type(AR_f8.mean(axis=0), Any) +assert_type(AR_f8.mean(keepdims=True), Any) +assert_type(AR_f8.mean(out=B), SubClass) + +assert_type(f8.min(), Any) +assert_type(AR_f8.min(), Any) +assert_type(AR_f8.min(axis=0), Any) +assert_type(AR_f8.min(keepdims=True), Any) +assert_type(AR_f8.min(out=B), SubClass) + +assert_type(f8.newbyteorder(), np.float64) +assert_type(AR_f8.newbyteorder(), npt.NDArray[np.float64]) +assert_type(B.newbyteorder('|'), SubClass) + +assert_type(f8.prod(), Any) +assert_type(AR_f8.prod(), Any) +assert_type(AR_f8.prod(axis=0), Any) +assert_type(AR_f8.prod(keepdims=True), Any) +assert_type(AR_f8.prod(out=B), SubClass) + +assert_type(f8.ptp(), Any) +assert_type(AR_f8.ptp(), Any) +assert_type(AR_f8.ptp(axis=0), Any) +assert_type(AR_f8.ptp(keepdims=True), Any) +assert_type(AR_f8.ptp(out=B), SubClass) + +assert_type(f8.round(), np.float64) +assert_type(AR_f8.round(), npt.NDArray[np.float64]) +assert_type(AR_f8.round(out=B), SubClass) + +assert_type(f8.repeat(1), npt.NDArray[np.float64]) +assert_type(AR_f8.repeat(1), npt.NDArray[np.float64]) +assert_type(B.repeat(1), npt.NDArray[np.object_]) + +assert_type(f8.std(), Any) +assert_type(AR_f8.std(), Any) +assert_type(AR_f8.std(axis=0), Any) +assert_type(AR_f8.std(keepdims=True), Any) +assert_type(AR_f8.std(out=B), SubClass) + +assert_type(f8.sum(), Any) +assert_type(AR_f8.sum(), Any) +assert_type(AR_f8.sum(axis=0), Any) +assert_type(AR_f8.sum(keepdims=True), Any) +assert_type(AR_f8.sum(out=B), SubClass) + +assert_type(f8.take(0), np.float64) +assert_type(AR_f8.take(0), np.float64) +assert_type(AR_f8.take([0]), npt.NDArray[np.float64]) +assert_type(AR_f8.take(0, out=B), SubClass) +assert_type(AR_f8.take([0], out=B), SubClass) + +assert_type(f8.var(), Any) +assert_type(AR_f8.var(), Any) +assert_type(AR_f8.var(axis=0), Any) +assert_type(AR_f8.var(keepdims=True), Any) +assert_type(AR_f8.var(out=B), SubClass) + +assert_type(AR_f8.argpartition([0]), npt.NDArray[np.intp]) + +assert_type(AR_f8.diagonal(), npt.NDArray[np.float64]) + +assert_type(AR_f8.dot(1), np.ndarray[Any, Any]) +assert_type(AR_f8.dot([1]), Any) +assert_type(AR_f8.dot(1, out=B), SubClass) + +assert_type(AR_f8.nonzero(), tuple[npt.NDArray[np.intp], ...]) + +assert_type(AR_f8.searchsorted(1), np.intp) +assert_type(AR_f8.searchsorted([1]), npt.NDArray[np.intp]) + +assert_type(AR_f8.trace(), Any) +assert_type(AR_f8.trace(out=B), SubClass) + +assert_type(AR_f8.item(), float) +assert_type(AR_U.item(), str) + +assert_type(AR_f8.ravel(), npt.NDArray[np.float64]) +assert_type(AR_U.ravel(), npt.NDArray[np.str_]) + +assert_type(AR_f8.flatten(), npt.NDArray[np.float64]) +assert_type(AR_U.flatten(), npt.NDArray[np.str_]) + +assert_type(AR_f8.reshape(1), npt.NDArray[np.float64]) 
+assert_type(AR_U.reshape(1), npt.NDArray[np.str_]) + +assert_type(int(AR_f8), int) +assert_type(int(AR_U), int) + +assert_type(float(AR_f8), float) +assert_type(float(AR_U), float) + +assert_type(complex(AR_f8), complex) + +assert_type(operator.index(AR_i8), int) + +assert_type(AR_f8.__array_prepare__(B), npt.NDArray[np.object_]) +assert_type(AR_f8.__array_wrap__(B), npt.NDArray[np.object_]) + +assert_type(AR_V[0], Any) +assert_type(AR_V[0, 0], Any) +assert_type(AR_V[AR_i8], npt.NDArray[np.void]) +assert_type(AR_V[AR_i8, AR_i8], npt.NDArray[np.void]) +assert_type(AR_V[AR_i8, None], npt.NDArray[np.void]) +assert_type(AR_V[0, ...], npt.NDArray[np.void]) +assert_type(AR_V[[0]], npt.NDArray[np.void]) +assert_type(AR_V[[0], [0]], npt.NDArray[np.void]) +assert_type(AR_V[:], npt.NDArray[np.void]) +assert_type(AR_V["a"], npt.NDArray[Any]) +assert_type(AR_V[["a", "b"]], npt.NDArray[np.void]) -reveal_type(AR_f8.dump("test_file")) # E: None -reveal_type(AR_f8.dump(b"test_file")) # E: None +assert_type(AR_f8.dump("test_file"), None) +assert_type(AR_f8.dump(b"test_file"), None) with open("test_file", "wb") as f: - reveal_type(AR_f8.dump(f)) # E: None + assert_type(AR_f8.dump(f), None) -reveal_type(AR_f8.__array_finalize__(None)) # E: None -reveal_type(AR_f8.__array_finalize__(B)) # E: None -reveal_type(AR_f8.__array_finalize__(AR_f8)) # E: None +assert_type(AR_f8.__array_finalize__(None), None) +assert_type(AR_f8.__array_finalize__(B), None) +assert_type(AR_f8.__array_finalize__(AR_f8), None) diff --git a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi index c000bf45c3f4..9a41a90f1ee9 100644 --- a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -1,35 +1,44 @@ +import sys +from typing import Any + import numpy as np +import numpy.typing as npt + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type -nd = np.array([[1, 2], [3, 4]]) +nd: npt.NDArray[np.int64] # reshape -reveal_type(nd.reshape()) # E: ndarray -reveal_type(nd.reshape(4)) # E: ndarray -reveal_type(nd.reshape(2, 2)) # E: ndarray -reveal_type(nd.reshape((2, 2))) # E: ndarray +assert_type(nd.reshape(), npt.NDArray[np.int64]) +assert_type(nd.reshape(4), npt.NDArray[np.int64]) +assert_type(nd.reshape(2, 2), npt.NDArray[np.int64]) +assert_type(nd.reshape((2, 2)), npt.NDArray[np.int64]) -reveal_type(nd.reshape((2, 2), order="C")) # E: ndarray -reveal_type(nd.reshape(4, order="C")) # E: ndarray +assert_type(nd.reshape((2, 2), order="C"), npt.NDArray[np.int64]) +assert_type(nd.reshape(4, order="C"), npt.NDArray[np.int64]) # resize does not return a value # transpose -reveal_type(nd.transpose()) # E: ndarray -reveal_type(nd.transpose(1, 0)) # E: ndarray -reveal_type(nd.transpose((1, 0))) # E: ndarray +assert_type(nd.transpose(), npt.NDArray[np.int64]) +assert_type(nd.transpose(1, 0), npt.NDArray[np.int64]) +assert_type(nd.transpose((1, 0)), npt.NDArray[np.int64]) # swapaxes -reveal_type(nd.swapaxes(0, 1)) # E: ndarray +assert_type(nd.swapaxes(0, 1), npt.NDArray[np.int64]) # flatten -reveal_type(nd.flatten()) # E: ndarray -reveal_type(nd.flatten("C")) # E: ndarray +assert_type(nd.flatten(), npt.NDArray[np.int64]) +assert_type(nd.flatten("C"), npt.NDArray[np.int64]) # ravel -reveal_type(nd.ravel()) # E: ndarray -reveal_type(nd.ravel("C")) # E: ndarray +assert_type(nd.ravel(), npt.NDArray[np.int64]) +assert_type(nd.ravel("C"), 
npt.NDArray[np.int64]) # squeeze -reveal_type(nd.squeeze()) # E: ndarray -reveal_type(nd.squeeze(0)) # E: ndarray -reveal_type(nd.squeeze((0, 2))) # E: ndarray +assert_type(nd.squeeze(), npt.NDArray[np.int64]) +assert_type(nd.squeeze(0), npt.NDArray[np.int64]) +assert_type(nd.squeeze((0, 2)), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/nditer.pyi b/numpy/typing/tests/data/reveal/nditer.pyi index fd8b7e1094fc..589453e777f2 100644 --- a/numpy/typing/tests/data/reveal/nditer.pyi +++ b/numpy/typing/tests/data/reveal/nditer.pyi @@ -1,46 +1,55 @@ +import sys +from typing import Any + import numpy as np +import numpy.typing as npt + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type nditer_obj: np.nditer -reveal_type(np.nditer([0, 1], flags=["c_index"])) # E: nditer -reveal_type(np.nditer([0, 1], op_flags=[["readonly", "readonly"]])) # E: nditer -reveal_type(np.nditer([0, 1], op_dtypes=np.int_)) # E: nditer -reveal_type(np.nditer([0, 1], order="C", casting="no")) # E: nditer +assert_type(np.nditer([0, 1], flags=["c_index"]), np.nditer) +assert_type(np.nditer([0, 1], op_flags=[["readonly", "readonly"]]), np.nditer) +assert_type(np.nditer([0, 1], op_dtypes=np.int_), np.nditer) +assert_type(np.nditer([0, 1], order="C", casting="no"), np.nditer) -reveal_type(nditer_obj.dtypes) # E: tuple[dtype[Any], ...] -reveal_type(nditer_obj.finished) # E: bool -reveal_type(nditer_obj.has_delayed_bufalloc) # E: bool -reveal_type(nditer_obj.has_index) # E: bool -reveal_type(nditer_obj.has_multi_index) # E: bool -reveal_type(nditer_obj.index) # E: int -reveal_type(nditer_obj.iterationneedsapi) # E: bool -reveal_type(nditer_obj.iterindex) # E: int -reveal_type(nditer_obj.iterrange) # E: tuple[builtins.int, ...] -reveal_type(nditer_obj.itersize) # E: int -reveal_type(nditer_obj.itviews) # E: tuple[ndarray[Any, dtype[Any]], ...] -reveal_type(nditer_obj.multi_index) # E: tuple[builtins.int, ...] -reveal_type(nditer_obj.ndim) # E: int -reveal_type(nditer_obj.nop) # E: int -reveal_type(nditer_obj.operands) # E: tuple[ndarray[Any, dtype[Any]], ...] -reveal_type(nditer_obj.shape) # E: tuple[builtins.int, ...] -reveal_type(nditer_obj.value) # E: tuple[ndarray[Any, dtype[Any]], ...] 
+assert_type(nditer_obj.dtypes, tuple[np.dtype[Any], ...]) +assert_type(nditer_obj.finished, bool) +assert_type(nditer_obj.has_delayed_bufalloc, bool) +assert_type(nditer_obj.has_index, bool) +assert_type(nditer_obj.has_multi_index, bool) +assert_type(nditer_obj.index, int) +assert_type(nditer_obj.iterationneedsapi, bool) +assert_type(nditer_obj.iterindex, int) +assert_type(nditer_obj.iterrange, tuple[int, ...]) +assert_type(nditer_obj.itersize, int) +assert_type(nditer_obj.itviews, tuple[npt.NDArray[Any], ...]) +assert_type(nditer_obj.multi_index, tuple[int, ...]) +assert_type(nditer_obj.ndim, int) +assert_type(nditer_obj.nop, int) +assert_type(nditer_obj.operands, tuple[npt.NDArray[Any], ...]) +assert_type(nditer_obj.shape, tuple[int, ...]) +assert_type(nditer_obj.value, tuple[npt.NDArray[Any], ...]) -reveal_type(nditer_obj.close()) # E: None -reveal_type(nditer_obj.copy()) # E: nditer -reveal_type(nditer_obj.debug_print()) # E: None -reveal_type(nditer_obj.enable_external_loop()) # E: None -reveal_type(nditer_obj.iternext()) # E: bool -reveal_type(nditer_obj.remove_axis(0)) # E: None -reveal_type(nditer_obj.remove_multi_index()) # E: None -reveal_type(nditer_obj.reset()) # E: None +assert_type(nditer_obj.close(), None) +assert_type(nditer_obj.copy(), np.nditer) +assert_type(nditer_obj.debug_print(), None) +assert_type(nditer_obj.enable_external_loop(), None) +assert_type(nditer_obj.iternext(), bool) +assert_type(nditer_obj.remove_axis(0), None) +assert_type(nditer_obj.remove_multi_index(), None) +assert_type(nditer_obj.reset(), None) -reveal_type(len(nditer_obj)) # E: int -reveal_type(iter(nditer_obj)) # E: nditer -reveal_type(next(nditer_obj)) # E: tuple[ndarray[Any, dtype[Any]], ...] -reveal_type(nditer_obj.__copy__()) # E: nditer +assert_type(len(nditer_obj), int) +assert_type(iter(nditer_obj), np.nditer) +assert_type(next(nditer_obj), tuple[npt.NDArray[Any], ...]) +assert_type(nditer_obj.__copy__(), np.nditer) with nditer_obj as f: - reveal_type(f) # E: nditer -reveal_type(nditer_obj[0]) # E: ndarray[Any, dtype[Any]] -reveal_type(nditer_obj[:]) # E: tuple[ndarray[Any, dtype[Any]], ...] + assert_type(f, np.nditer) +assert_type(nditer_obj[0], npt.NDArray[Any]) +assert_type(nditer_obj[:], tuple[npt.NDArray[Any], ...]) nditer_obj[0] = 0 nditer_obj[:] = [0, 1] diff --git a/numpy/typing/tests/data/reveal/nested_sequence.pyi b/numpy/typing/tests/data/reveal/nested_sequence.pyi index 286f75ac5c44..3ca23d6875e8 100644 --- a/numpy/typing/tests/data/reveal/nested_sequence.pyi +++ b/numpy/typing/tests/data/reveal/nested_sequence.pyi @@ -1,8 +1,14 @@ +import sys from collections.abc import Sequence from typing import Any from numpy._typing import _NestedSequence +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + a: Sequence[int] b: Sequence[Sequence[int]] c: Sequence[Sequence[Sequence[int]]] @@ -15,12 +21,12 @@ h: Sequence[Any] def func(a: _NestedSequence[int]) -> None: ... 
-reveal_type(func(a)) # E: None -reveal_type(func(b)) # E: None -reveal_type(func(c)) # E: None -reveal_type(func(d)) # E: None -reveal_type(func(e)) # E: None -reveal_type(func(f)) # E: None -reveal_type(func(g)) # E: None -reveal_type(func(h)) # E: None -reveal_type(func(range(15))) # E: None +assert_type(func(a), None) +assert_type(func(b), None) +assert_type(func(c), None) +assert_type(func(d), None) +assert_type(func(e), None) +assert_type(func(f), None) +assert_type(func(g), None) +assert_type(func(h), None) +assert_type(func(range(15)), None) diff --git a/numpy/typing/tests/data/reveal/npyio.pyi b/numpy/typing/tests/data/reveal/npyio.pyi index 2c62d8d21edf..bbd906068da9 100644 --- a/numpy/typing/tests/data/reveal/npyio.pyi +++ b/numpy/typing/tests/data/reveal/npyio.pyi @@ -1,17 +1,27 @@ import re +import sys +import zipfile import pathlib -from typing import IO +from typing import IO, Any +from collections.abc import Mapping import numpy.typing as npt import numpy as np +from numpy.lib.npyio import BagObj, NpzFile +from numpy.ma.mrecords import MaskedRecords + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type str_path: str pathlib_path: pathlib.Path str_file: IO[str] bytes_file: IO[bytes] -bag_obj: np.lib.npyio.BagObj[int] -npz_file: np.lib.npyio.NpzFile +bag_obj: BagObj[int] +npz_file: NpzFile AR_i8: npt.NDArray[np.int64] AR_LIKE_f8: list[float] @@ -26,67 +36,67 @@ class BytesReader: bytes_writer: BytesWriter bytes_reader: BytesReader -reveal_type(bag_obj.a) # E: int -reveal_type(bag_obj.b) # E: int - -reveal_type(npz_file.zip) # E: zipfile.ZipFile -reveal_type(npz_file.fid) # E: Union[None, typing.IO[builtins.str]] -reveal_type(npz_file.files) # E: list[builtins.str] -reveal_type(npz_file.allow_pickle) # E: bool -reveal_type(npz_file.pickle_kwargs) # E: Union[None, typing.Mapping[builtins.str, Any]] -reveal_type(npz_file.f) # E: lib.npyio.BagObj[lib.npyio.NpzFile] -reveal_type(npz_file["test"]) # E: ndarray[Any, dtype[Any]] -reveal_type(len(npz_file)) # E: int +assert_type(bag_obj.a, int) +assert_type(bag_obj.b, int) + +assert_type(npz_file.zip, zipfile.ZipFile) +assert_type(npz_file.fid, None | IO[str]) +assert_type(npz_file.files, list[str]) +assert_type(npz_file.allow_pickle, bool) +assert_type(npz_file.pickle_kwargs, None | Mapping[str, Any]) +assert_type(npz_file.f, BagObj[NpzFile]) +assert_type(npz_file["test"], npt.NDArray[Any]) +assert_type(len(npz_file), int) with npz_file as f: - reveal_type(f) # E: lib.npyio.NpzFile - -reveal_type(np.load(bytes_file)) # E: Any -reveal_type(np.load(pathlib_path, allow_pickle=True)) # E: Any -reveal_type(np.load(str_path, encoding="bytes")) # E: Any -reveal_type(np.load(bytes_reader)) # E: Any - -reveal_type(np.save(bytes_file, AR_LIKE_f8)) # E: None -reveal_type(np.save(pathlib_path, AR_i8, allow_pickle=True)) # E: None -reveal_type(np.save(str_path, AR_LIKE_f8)) # E: None -reveal_type(np.save(bytes_writer, AR_LIKE_f8)) # E: None - -reveal_type(np.savez(bytes_file, AR_LIKE_f8)) # E: None -reveal_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8)) # E: None -reveal_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8)) # E: None -reveal_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8)) # E: None - -reveal_type(np.savez_compressed(bytes_file, AR_LIKE_f8)) # E: None -reveal_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8)) # E: None -reveal_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8)) # E: None -reveal_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, 
ar1=AR_i8)) # E: None - -reveal_type(np.loadtxt(bytes_file)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.loadtxt(pathlib_path, dtype=np.str_)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.loadtxt(str_path, dtype=str, skiprows=2)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.loadtxt(str_file, comments="test")) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.loadtxt(str_file, comments=None)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.loadtxt(str_path, delimiter="\n")) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.loadtxt(str_path, ndmin=2)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.loadtxt(["1", "2", "3"])) # E: ndarray[Any, dtype[{float64}]] - -reveal_type(np.fromregex(bytes_file, "test", np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.fromregex(str_file, b"test", dtype=float)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8")) # E: ndarray[Any, dtype[str_]] -reveal_type(np.fromregex(pathlib_path, "test", np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.fromregex(bytes_reader, "test", np.float64)) # E: ndarray[Any, dtype[{float64}]] - -reveal_type(np.genfromtxt(bytes_file)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.genfromtxt(pathlib_path, dtype=np.str_)) # E: ndarray[Any, dtype[str_]] -reveal_type(np.genfromtxt(str_path, dtype=str, skip_header=2)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.genfromtxt(str_file, comments="test")) # E: ndarray[Any, dtype[Any]] -reveal_type(np.genfromtxt(str_path, delimiter="\n")) # E: ndarray[Any, dtype[Any]] -reveal_type(np.genfromtxt(str_path, ndmin=2)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.genfromtxt(["1", "2", "3"], ndmin=2)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.recfromtxt(bytes_file)) # E: recarray[Any, dtype[record]] -reveal_type(np.recfromtxt(pathlib_path, usemask=True)) # E: ma.mrecords.MaskedRecords[Any, dtype[void]] -reveal_type(np.recfromtxt(["1", "2", "3"])) # E: recarray[Any, dtype[record]] - -reveal_type(np.recfromcsv(bytes_file)) # E: recarray[Any, dtype[record]] -reveal_type(np.recfromcsv(pathlib_path, usemask=True)) # E: ma.mrecords.MaskedRecords[Any, dtype[void]] -reveal_type(np.recfromcsv(["1", "2", "3"])) # E: recarray[Any, dtype[record]] + assert_type(f, NpzFile) + +assert_type(np.load(bytes_file), Any) +assert_type(np.load(pathlib_path, allow_pickle=True), Any) +assert_type(np.load(str_path, encoding="bytes"), Any) +assert_type(np.load(bytes_reader), Any) + +assert_type(np.save(bytes_file, AR_LIKE_f8), None) +assert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None) +assert_type(np.save(str_path, AR_LIKE_f8), None) +assert_type(np.save(bytes_writer, AR_LIKE_f8), None) + +assert_type(np.savez(bytes_file, AR_LIKE_f8), None) +assert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None) +assert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None) +assert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None) + +assert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None) +assert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None) +assert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None) +assert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None) + +assert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64]) +assert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_]) +assert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any]) +assert_type(np.loadtxt(str_file, comments="test"), 
npt.NDArray[np.float64]) +assert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64]) +assert_type(np.loadtxt(str_path, delimiter="\n"), npt.NDArray[np.float64]) +assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64]) +assert_type(np.loadtxt(["1", "2", "3"]), npt.NDArray[np.float64]) + +assert_type(np.fromregex(bytes_file, "test", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromregex(str_file, b"test", dtype=float), npt.NDArray[Any]) +assert_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8"), npt.NDArray[np.str_]) +assert_type(np.fromregex(pathlib_path, "test", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromregex(bytes_reader, "test", np.float64), npt.NDArray[np.float64]) + +assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any]) +assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_]) +assert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any]) +assert_type(np.genfromtxt(str_file, comments="test"), npt.NDArray[Any]) +assert_type(np.genfromtxt(str_path, delimiter="\n"), npt.NDArray[Any]) +assert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any]) +assert_type(np.genfromtxt(["1", "2", "3"], ndmin=2), npt.NDArray[Any]) + +assert_type(np.recfromtxt(bytes_file), np.recarray[Any, np.dtype[np.record]]) +assert_type(np.recfromtxt(pathlib_path, usemask=True), MaskedRecords[Any, np.dtype[np.void]]) +assert_type(np.recfromtxt(["1", "2", "3"]), np.recarray[Any, np.dtype[np.record]]) + +assert_type(np.recfromcsv(bytes_file), np.recarray[Any, np.dtype[np.record]]) +assert_type(np.recfromcsv(pathlib_path, usemask=True), MaskedRecords[Any, np.dtype[np.void]]) +assert_type(np.recfromcsv(["1", "2", "3"]), np.recarray[Any, np.dtype[np.record]]) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index b8fe15d3a08a..78f3980aedc5 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -5,9 +5,17 @@ Does not include tests which fall under ``array_constructors``. """ +import sys +from typing import Any + import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + class SubClass(npt.NDArray[np.int64]): ... 
@@ -24,110 +32,110 @@ AR_O: npt.NDArray[np.object_] B: list[int] C: SubClass -reveal_type(np.count_nonzero(i8)) # E: int -reveal_type(np.count_nonzero(AR_i8)) # E: int -reveal_type(np.count_nonzero(B)) # E: int -reveal_type(np.count_nonzero(AR_i8, keepdims=True)) # E: Any -reveal_type(np.count_nonzero(AR_i8, axis=0)) # E: Any - -reveal_type(np.isfortran(i8)) # E: bool -reveal_type(np.isfortran(AR_i8)) # E: bool - -reveal_type(np.argwhere(i8)) # E: ndarray[Any, dtype[{intp}]] -reveal_type(np.argwhere(AR_i8)) # E: ndarray[Any, dtype[{intp}]] - -reveal_type(np.flatnonzero(i8)) # E: ndarray[Any, dtype[{intp}]] -reveal_type(np.flatnonzero(AR_i8)) # E: ndarray[Any, dtype[{intp}]] - -reveal_type(np.correlate(B, AR_i8, mode="valid")) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.correlate(AR_i8, AR_i8, mode="same")) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.correlate(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.correlate(AR_b, AR_u8)) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(np.correlate(AR_i8, AR_b)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.correlate(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.correlate(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.correlate(AR_i8, AR_m)) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(np.correlate(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.convolve(B, AR_i8, mode="valid")) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.convolve(AR_i8, AR_i8, mode="same")) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.convolve(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.convolve(AR_b, AR_u8)) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(np.convolve(AR_i8, AR_b)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.convolve(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.convolve(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.convolve(AR_i8, AR_m)) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(np.convolve(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.outer(i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.outer(B, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.outer(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.outer(AR_i8, AR_i8, out=C)) # E: SubClass -reveal_type(np.outer(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.outer(AR_b, AR_u8)) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(np.outer(AR_i8, AR_b)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.convolve(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.outer(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.outer(AR_i8, AR_m)) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(np.outer(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.tensordot(B, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.tensordot(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.tensordot(AR_i8, AR_i8, axes=0)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1))) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.tensordot(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.tensordot(AR_b, AR_u8)) # E: ndarray[Any, 
dtype[unsignedinteger[Any]]] -reveal_type(np.tensordot(AR_i8, AR_b)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.tensordot(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.tensordot(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.tensordot(AR_i8, AR_m)) # E: ndarray[Any, dtype[timedelta64]] -reveal_type(np.tensordot(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.isscalar(i8)) # E: bool -reveal_type(np.isscalar(AR_i8)) # E: bool -reveal_type(np.isscalar(B)) # E: bool - -reveal_type(np.roll(AR_i8, 1)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.roll(AR_i8, (1, 2))) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.roll(B, 1)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.rollaxis(AR_i8, 0, 1)) # E: ndarray[Any, dtype[{int64}]] - -reveal_type(np.moveaxis(AR_i8, 0, 1)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.moveaxis(AR_i8, (0, 1), (1, 2))) # E: ndarray[Any, dtype[{int64}]] - -reveal_type(np.cross(B, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.cross(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.cross(AR_b, AR_u8)) # E: ndarray[Any, dtype[unsignedinteger[Any]]] -reveal_type(np.cross(AR_i8, AR_b)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.cross(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.cross(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.cross(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.indices([0, 1, 2])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(np.indices([0, 1, 2], sparse=True)) # E: tuple[ndarray[Any, dtype[{int_}]], ...] -reveal_type(np.indices([0, 1, 2], dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.indices([0, 1, 2], sparse=True, dtype=np.float64)) # E: tuple[ndarray[Any, dtype[{float64}]], ...] -reveal_type(np.indices([0, 1, 2], dtype=float)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.indices([0, 1, 2], sparse=True, dtype=float)) # E: tuple[ndarray[Any, dtype[Any]], ...] 
- -reveal_type(np.binary_repr(1)) # E: str - -reveal_type(np.base_repr(1)) # E: str - -reveal_type(np.allclose(i8, AR_i8)) # E: bool -reveal_type(np.allclose(B, AR_i8)) # E: bool -reveal_type(np.allclose(AR_i8, AR_i8)) # E: bool - -reveal_type(np.isclose(i8, i8)) # E: bool_ -reveal_type(np.isclose(i8, AR_i8)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isclose(B, AR_i8)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isclose(AR_i8, AR_i8)) # E: ndarray[Any, dtype[bool_]] - -reveal_type(np.array_equal(i8, AR_i8)) # E: bool -reveal_type(np.array_equal(B, AR_i8)) # E: bool -reveal_type(np.array_equal(AR_i8, AR_i8)) # E: bool - -reveal_type(np.array_equiv(i8, AR_i8)) # E: bool -reveal_type(np.array_equiv(B, AR_i8)) # E: bool -reveal_type(np.array_equiv(AR_i8, AR_i8)) # E: bool +assert_type(np.count_nonzero(i8), int) +assert_type(np.count_nonzero(AR_i8), int) +assert_type(np.count_nonzero(B), int) +assert_type(np.count_nonzero(AR_i8, keepdims=True), Any) +assert_type(np.count_nonzero(AR_i8, axis=0), Any) + +assert_type(np.isfortran(i8), bool) +assert_type(np.isfortran(AR_i8), bool) + +assert_type(np.argwhere(i8), npt.NDArray[np.intp]) +assert_type(np.argwhere(AR_i8), npt.NDArray[np.intp]) + +assert_type(np.flatnonzero(i8), npt.NDArray[np.intp]) +assert_type(np.flatnonzero(AR_i8), npt.NDArray[np.intp]) + +assert_type(np.correlate(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.correlate(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.correlate(AR_b, AR_b), npt.NDArray[np.bool_]) +assert_type(np.correlate(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) +assert_type(np.correlate(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.correlate(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.correlate(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.correlate(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.correlate(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.convolve(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.convolve(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.convolve(AR_b, AR_b), npt.NDArray[np.bool_]) +assert_type(np.convolve(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) +assert_type(np.convolve(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.convolve(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.convolve(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.convolve(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.outer(i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.outer(B, AR_i8), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.outer(AR_i8, AR_i8, out=C), SubClass) +assert_type(np.outer(AR_b, AR_b), npt.NDArray[np.bool_]) +assert_type(np.outer(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) +assert_type(np.outer(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.outer(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.outer(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.tensordot(B, AR_i8), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.tensordot(AR_i8, AR_i8), 
npt.NDArray[np.signedinteger[Any]]) +assert_type(np.tensordot(AR_i8, AR_i8, axes=0), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1)), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.tensordot(AR_b, AR_b), npt.NDArray[np.bool_]) +assert_type(np.tensordot(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) +assert_type(np.tensordot(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.tensordot(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.tensordot(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.tensordot(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.isscalar(i8), bool) +assert_type(np.isscalar(AR_i8), bool) +assert_type(np.isscalar(B), bool) + +assert_type(np.roll(AR_i8, 1), npt.NDArray[np.int64]) +assert_type(np.roll(AR_i8, (1, 2)), npt.NDArray[np.int64]) +assert_type(np.roll(B, 1), npt.NDArray[Any]) + +assert_type(np.rollaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) + +assert_type(np.moveaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) +assert_type(np.moveaxis(AR_i8, (0, 1), (1, 2)), npt.NDArray[np.int64]) + +assert_type(np.cross(B, AR_i8), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.cross(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) +assert_type(np.cross(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.cross(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.cross(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.indices([0, 1, 2]), npt.NDArray[np.int_]) +assert_type(np.indices([0, 1, 2], sparse=True), tuple[npt.NDArray[np.int_], ...]) +assert_type(np.indices([0, 1, 2], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.indices([0, 1, 2], sparse=True, dtype=np.float64), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.indices([0, 1, 2], dtype=float), npt.NDArray[Any]) +assert_type(np.indices([0, 1, 2], sparse=True, dtype=float), tuple[npt.NDArray[Any], ...]) + +assert_type(np.binary_repr(1), str) + +assert_type(np.base_repr(1), str) + +assert_type(np.allclose(i8, AR_i8), bool) +assert_type(np.allclose(B, AR_i8), bool) +assert_type(np.allclose(AR_i8, AR_i8), bool) + +assert_type(np.isclose(i8, i8), np.bool_) +assert_type(np.isclose(i8, AR_i8), npt.NDArray[np.bool_]) +assert_type(np.isclose(B, AR_i8), npt.NDArray[np.bool_]) +assert_type(np.isclose(AR_i8, AR_i8), npt.NDArray[np.bool_]) + +assert_type(np.array_equal(i8, AR_i8), bool) +assert_type(np.array_equal(B, AR_i8), bool) +assert_type(np.array_equal(AR_i8, AR_i8), bool) + +assert_type(np.array_equiv(i8, AR_i8), bool) +assert_type(np.array_equiv(B, AR_i8), bool) +assert_type(np.array_equiv(AR_i8, AR_i8), bool) diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index 95ec76e8ec7c..5d5a7a7af4c9 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -1,40 +1,84 @@ +import sys +from typing import Literal, Any + import numpy as np +from numpy.core.numerictypes import _CastFunc + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type -reveal_type(np.maximum_sctype(np.float64)) # E: Type[{float64}] -reveal_type(np.maximum_sctype("f8")) # E: Type[Any] +assert_type(np.cast[int], 
_CastFunc) +assert_type(np.cast["i8"], _CastFunc) +assert_type(np.cast[np.int64], _CastFunc) -reveal_type(np.issctype(np.float64)) # E: bool -reveal_type(np.issctype("foo")) # E: Literal[False] +assert_type(np.maximum_sctype(np.float64), type[np.float64]) +assert_type(np.maximum_sctype("f8"), type[Any]) -reveal_type(np.obj2sctype(np.float64)) # E: Union[None, Type[{float64}]] -reveal_type(np.obj2sctype(np.float64, default=False)) # E: Union[builtins.bool, Type[{float64}]] -reveal_type(np.obj2sctype("S8")) # E: Union[None, Type[Any]] -reveal_type(np.obj2sctype("S8", default=None)) # E: Union[None, Type[Any]] -reveal_type(np.obj2sctype("foo", default=False)) # E: Union[builtins.bool, Type[Any]] -reveal_type(np.obj2sctype(1)) # E: None -reveal_type(np.obj2sctype(1, default=False)) # E: bool +assert_type(np.issctype(np.float64), bool) +assert_type(np.issctype("foo"), Literal[False]) -reveal_type(np.issubclass_(np.float64, float)) # E: bool -reveal_type(np.issubclass_(np.float64, (int, float))) # E: bool -reveal_type(np.issubclass_(1, 1)) # E: Literal[False] +assert_type(np.obj2sctype(np.float64), None | type[np.float64]) +assert_type(np.obj2sctype(np.float64, default=False), bool | type[np.float64]) +assert_type(np.obj2sctype("S8"), None | type[Any]) +assert_type(np.obj2sctype("S8", default=None), None | type[Any]) +assert_type(np.obj2sctype("foo", default=False), bool | type[Any]) +assert_type(np.obj2sctype(1), None) +assert_type(np.obj2sctype(1, default=False), bool) -reveal_type(np.sctype2char("S8")) # E: str -reveal_type(np.sctype2char(list)) # E: str +assert_type(np.issubclass_(np.float64, float), bool) +assert_type(np.issubclass_(np.float64, (int, float)), bool) +assert_type(np.issubclass_(1, 1), Literal[False]) -reveal_type(np.cast[int]) # E: _CastFunc -reveal_type(np.cast["i8"]) # E: _CastFunc -reveal_type(np.cast[np.int64]) # E: _CastFunc +assert_type(np.sctype2char("S8"), str) +assert_type(np.sctype2char(list), str) -reveal_type(np.nbytes[int]) # E: int -reveal_type(np.nbytes["i8"]) # E: int -reveal_type(np.nbytes[np.int64]) # E: int +assert_type(np.nbytes[int], int) +assert_type(np.nbytes["i8"], int) +assert_type(np.nbytes[np.int64], int) -reveal_type(np.ScalarType) # E: tuple -reveal_type(np.ScalarType[0]) # E: Type[builtins.int] -reveal_type(np.ScalarType[3]) # E: Type[builtins.bool] -reveal_type(np.ScalarType[8]) # E: Type[{csingle}] -reveal_type(np.ScalarType[10]) # E: Type[{clongdouble}] +assert_type( + np.ScalarType, + tuple[ + type[int], + type[float], + type[complex], + type[bool], + type[bytes], + type[str], + type[memoryview], + type[np.bool_], + type[np.csingle], + type[np.cdouble], + type[np.clongdouble], + type[np.half], + type[np.single], + type[np.double], + type[np.longdouble], + type[np.byte], + type[np.short], + type[np.intc], + type[np.int_], + type[np.longlong], + type[np.timedelta64], + type[np.datetime64], + type[np.object_], + type[np.bytes_], + type[np.str_], + type[np.ubyte], + type[np.ushort], + type[np.uintc], + type[np.uint], + type[np.ulonglong], + type[np.void], + ], +) +assert_type(np.ScalarType[0], type[int]) +assert_type(np.ScalarType[3], type[bool]) +assert_type(np.ScalarType[8], type[np.csingle]) +assert_type(np.ScalarType[10], type[np.clongdouble]) -reveal_type(np.typecodes["Character"]) # E: Literal['c'] -reveal_type(np.typecodes["Complex"]) # E: Literal['FDG'] -reveal_type(np.typecodes["All"]) # E: Literal['?bhilqpBHILQPefdgFDGSUVOMm'] +assert_type(np.typecodes["Character"], Literal["c"]) +assert_type(np.typecodes["Complex"], Literal["FDG"]) 
+assert_type(np.typecodes["All"], Literal["?bhilqpBHILQPefdgFDGSUVOMm"]) diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index ac287feb2c3c..4aefc01cf6b5 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -1,8 +1,21 @@ -from __future__ import annotations - +import sys +import threading from typing import Any +from collections.abc import Sequence import numpy as np +import numpy.typing as npt +from numpy.random._generator import Generator +from numpy.random._mt19937 import MT19937 +from numpy.random._pcg64 import PCG64 +from numpy.random._sfc64 import SFC64 +from numpy.random._philox import Philox +from numpy.random.bit_generator import SeedSequence, SeedlessSeedSequence + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type def_rng = np.random.default_rng() seed_seq = np.random.SeedSequence() @@ -10,26 +23,26 @@ mt19937 = np.random.MT19937() pcg64 = np.random.PCG64() sfc64 = np.random.SFC64() philox = np.random.Philox() -seedless_seq = np.random.bit_generator.SeedlessSeedSequence() +seedless_seq = SeedlessSeedSequence() -reveal_type(def_rng) # E: random._generator.Generator -reveal_type(mt19937) # E: random._mt19937.MT19937 -reveal_type(pcg64) # E: random._pcg64.PCG64 -reveal_type(sfc64) # E: random._sfc64.SFC64 -reveal_type(philox) # E: random._philox.Philox -reveal_type(seed_seq) # E: random.bit_generator.SeedSequence -reveal_type(seedless_seq) # E: random.bit_generator.SeedlessSeedSequence +assert_type(def_rng, Generator) +assert_type(mt19937, MT19937) +assert_type(pcg64, PCG64) +assert_type(sfc64, SFC64) +assert_type(philox, Philox) +assert_type(seed_seq, SeedSequence) +assert_type(seedless_seq, SeedlessSeedSequence) mt19937_jumped = mt19937.jumped() mt19937_jumped3 = mt19937.jumped(3) mt19937_raw = mt19937.random_raw() mt19937_raw_arr = mt19937.random_raw(5) -reveal_type(mt19937_jumped) # E: random._mt19937.MT19937 -reveal_type(mt19937_jumped3) # E: random._mt19937.MT19937 -reveal_type(mt19937_raw) # E: int -reveal_type(mt19937_raw_arr) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(mt19937.lock) # E: threading.Lock +assert_type(mt19937_jumped, MT19937) +assert_type(mt19937_jumped3, MT19937) +assert_type(mt19937_raw, int) +assert_type(mt19937_raw_arr, npt.NDArray[np.uint64]) +assert_type(mt19937.lock, threading.Lock) pcg64_jumped = pcg64.jumped() pcg64_jumped3 = pcg64.jumped(3) @@ -37,12 +50,12 @@ pcg64_adv = pcg64.advance(3) pcg64_raw = pcg64.random_raw() pcg64_raw_arr = pcg64.random_raw(5) -reveal_type(pcg64_jumped) # E: random._pcg64.PCG64 -reveal_type(pcg64_jumped3) # E: random._pcg64.PCG64 -reveal_type(pcg64_adv) # E: random._pcg64.PCG64 -reveal_type(pcg64_raw) # E: int -reveal_type(pcg64_raw_arr) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(pcg64.lock) # E: threading.Lock +assert_type(pcg64_jumped, PCG64) +assert_type(pcg64_jumped3, PCG64) +assert_type(pcg64_adv, PCG64) +assert_type(pcg64_raw, int) +assert_type(pcg64_raw_arr, npt.NDArray[np.uint64]) +assert_type(pcg64.lock, threading.Lock) philox_jumped = philox.jumped() philox_jumped3 = philox.jumped(3) @@ -50,33 +63,33 @@ philox_adv = philox.advance(3) philox_raw = philox.random_raw() philox_raw_arr = philox.random_raw(5) -reveal_type(philox_jumped) # E: random._philox.Philox -reveal_type(philox_jumped3) # E: random._philox.Philox -reveal_type(philox_adv) # E: random._philox.Philox -reveal_type(philox_raw) # E: 
int -reveal_type(philox_raw_arr) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(philox.lock) # E: threading.Lock +assert_type(philox_jumped, Philox) +assert_type(philox_jumped3, Philox) +assert_type(philox_adv, Philox) +assert_type(philox_raw, int) +assert_type(philox_raw_arr, npt.NDArray[np.uint64]) +assert_type(philox.lock, threading.Lock) sfc64_raw = sfc64.random_raw() sfc64_raw_arr = sfc64.random_raw(5) -reveal_type(sfc64_raw) # E: int -reveal_type(sfc64_raw_arr) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(sfc64.lock) # E: threading.Lock +assert_type(sfc64_raw, int) +assert_type(sfc64_raw_arr, npt.NDArray[np.uint64]) +assert_type(sfc64.lock, threading.Lock) -reveal_type(seed_seq.pool) # ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(seed_seq.entropy) # E:Union[None, int, Sequence[int]] -reveal_type(seed_seq.spawn(1)) # E: list[random.bit_generator.SeedSequence] -reveal_type(seed_seq.generate_state(8, "uint32")) # E: ndarray[Any, dtype[Union[unsignedinteger[typing._32Bit], unsignedinteger[typing._64Bit]]]] -reveal_type(seed_seq.generate_state(8, "uint64")) # E: ndarray[Any, dtype[Union[unsignedinteger[typing._32Bit], unsignedinteger[typing._64Bit]]]] +assert_type(seed_seq.pool, npt.NDArray[np.uint32]) +assert_type(seed_seq.entropy, None | int | Sequence[int]) +assert_type(seed_seq.spawn(1), list[np.random.SeedSequence]) +assert_type(seed_seq.generate_state(8, "uint32"), npt.NDArray[np.uint32 | np.uint64]) +assert_type(seed_seq.generate_state(8, "uint64"), npt.NDArray[np.uint32 | np.uint64]) def_gen: np.random.Generator = np.random.default_rng() -D_arr_0p1: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.1]) -D_arr_0p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.5]) -D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9]) -D_arr_1p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.5]) +D_arr_0p1: npt.NDArray[np.float64] = np.array([0.1]) +D_arr_0p5: npt.NDArray[np.float64] = np.array([0.5]) +D_arr_0p9: npt.NDArray[np.float64] = np.array([0.9]) +D_arr_1p5: npt.NDArray[np.float64] = np.array([1.5]) I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_) I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_) D_arr_like_0p1: list[float] = [0.1] @@ -86,1457 +99,1457 @@ D_arr_like_1p5: list[float] = [1.5] I_arr_like_10: list[int] = [10] I_arr_like_20: list[int] = [20] D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] -D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) -S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32) -D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1) - -reveal_type(def_gen.standard_normal()) # E: float -reveal_type(def_gen.standard_normal(dtype=np.float32)) # E: float -reveal_type(def_gen.standard_normal(dtype="float32")) # E: float -reveal_type(def_gen.standard_normal(dtype="double")) # E: float -reveal_type(def_gen.standard_normal(dtype=np.float64)) # E: float -reveal_type(def_gen.standard_normal(size=None)) # E: float -reveal_type(def_gen.standard_normal(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_normal(size=1, dtype=np.float32)) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.standard_normal(size=1, dtype="f4")) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.standard_normal(size=1, dtype="float32", out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]] 
-reveal_type(def_gen.standard_normal(dtype=np.float32, out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.standard_normal(size=1, dtype=np.float64)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_normal(size=1, dtype="float64")) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_normal(size=1, dtype="f8")) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_normal(out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_normal(size=1, dtype="float64")) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_normal(size=1, dtype="float64", out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] - -reveal_type(def_gen.random()) # E: float -reveal_type(def_gen.random(dtype=np.float32)) # E: float -reveal_type(def_gen.random(dtype="float32")) # E: float -reveal_type(def_gen.random(dtype="double")) # E: float -reveal_type(def_gen.random(dtype=np.float64)) # E: float -reveal_type(def_gen.random(size=None)) # E: float -reveal_type(def_gen.random(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.random(size=1, dtype=np.float32)) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.random(size=1, dtype="f4")) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.random(size=1, dtype="float32", out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.random(dtype=np.float32, out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.random(size=1, dtype=np.float64)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.random(size=1, dtype="float64")) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.random(size=1, dtype="f8")) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.random(out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.random(size=1, dtype="float64")) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.random(size=1, dtype="float64", out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] - -reveal_type(def_gen.standard_cauchy()) # E: float -reveal_type(def_gen.standard_cauchy(size=None)) # E: float -reveal_type(def_gen.standard_cauchy(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.standard_exponential()) # E: float -reveal_type(def_gen.standard_exponential(method="inv")) # E: float -reveal_type(def_gen.standard_exponential(dtype=np.float32)) # E: float -reveal_type(def_gen.standard_exponential(dtype="float32")) # E: float -reveal_type(def_gen.standard_exponential(dtype="double")) # E: float -reveal_type(def_gen.standard_exponential(dtype=np.float64)) # E: float -reveal_type(def_gen.standard_exponential(size=None)) # E: float -reveal_type(def_gen.standard_exponential(size=None, method="inv")) # E: float -reveal_type(def_gen.standard_exponential(size=1, method="inv")) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_exponential(size=1, dtype=np.float32)) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.standard_exponential(size=1, dtype="f4", method="inv")) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.standard_exponential(size=1, dtype="float32", out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]] 
-reveal_type(def_gen.standard_exponential(dtype=np.float32, out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.standard_exponential(size=1, dtype=np.float64, method="inv")) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_exponential(size=1, dtype="float64")) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_exponential(size=1, dtype="f8")) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_exponential(out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_exponential(size=1, dtype="float64")) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_exponential(size=1, dtype="float64", out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] - -reveal_type(def_gen.zipf(1.5)) # E: int -reveal_type(def_gen.zipf(1.5, size=None)) # E: int -reveal_type(def_gen.zipf(1.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.zipf(D_arr_1p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.zipf(D_arr_1p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.zipf(D_arr_like_1p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.zipf(D_arr_like_1p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] - -reveal_type(def_gen.weibull(0.5)) # E: float -reveal_type(def_gen.weibull(0.5, size=None)) # E: float -reveal_type(def_gen.weibull(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.weibull(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.weibull(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.weibull(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.weibull(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.standard_t(0.5)) # E: float -reveal_type(def_gen.standard_t(0.5, size=None)) # E: float -reveal_type(def_gen.standard_t(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.standard_t(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.standard_t(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.standard_t(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.standard_t(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.poisson(0.5)) # E: int -reveal_type(def_gen.poisson(0.5, size=None)) # E: int -reveal_type(def_gen.poisson(0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.poisson(D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.poisson(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.poisson(D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.poisson(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] - -reveal_type(def_gen.power(0.5)) # E: float -reveal_type(def_gen.power(0.5, size=None)) # E: float -reveal_type(def_gen.power(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.power(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.power(D_arr_0p5, size=1)) # E: 
ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.power(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.power(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.pareto(0.5)) # E: float -reveal_type(def_gen.pareto(0.5, size=None)) # E: float -reveal_type(def_gen.pareto(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.pareto(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.pareto(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.pareto(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.pareto(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.chisquare(0.5)) # E: float -reveal_type(def_gen.chisquare(0.5, size=None)) # E: float -reveal_type(def_gen.chisquare(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.chisquare(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.chisquare(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.chisquare(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.chisquare(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.exponential(0.5)) # E: float -reveal_type(def_gen.exponential(0.5, size=None)) # E: float -reveal_type(def_gen.exponential(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.exponential(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.exponential(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.exponential(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.exponential(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.geometric(0.5)) # E: int -reveal_type(def_gen.geometric(0.5, size=None)) # E: int -reveal_type(def_gen.geometric(0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.geometric(D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.geometric(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.geometric(D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.geometric(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] - -reveal_type(def_gen.logseries(0.5)) # E: int -reveal_type(def_gen.logseries(0.5, size=None)) # E: int -reveal_type(def_gen.logseries(0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.logseries(D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.logseries(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.logseries(D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.logseries(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] - -reveal_type(def_gen.rayleigh(0.5)) # E: float -reveal_type(def_gen.rayleigh(0.5, size=None)) # E: float -reveal_type(def_gen.rayleigh(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.rayleigh(D_arr_0p5)) # E: ndarray[Any, 
dtype[floating[typing._64Bit]] -reveal_type(def_gen.rayleigh(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.rayleigh(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.rayleigh(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.standard_gamma(0.5)) # E: float -reveal_type(def_gen.standard_gamma(0.5, size=None)) # E: float -reveal_type(def_gen.standard_gamma(0.5, dtype="float32")) # E: float -reveal_type(def_gen.standard_gamma(0.5, size=None, dtype="float32")) # E: float -reveal_type(def_gen.standard_gamma(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_0p5, dtype="f4")) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_gamma(0.5, out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_like_0p5, out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] - -reveal_type(def_gen.vonmises(0.5, 0.5)) # E: float -reveal_type(def_gen.vonmises(0.5, 0.5, size=None)) # E: float -reveal_type(def_gen.vonmises(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.vonmises(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.vonmises(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.vonmises(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.vonmises(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.vonmises(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.vonmises(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.wald(0.5, 0.5)) # E: float -reveal_type(def_gen.wald(0.5, 0.5, size=None)) # E: float -reveal_type(def_gen.wald(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.wald(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.wald(0.5, D_arr_0p5)) # E: 
ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.wald(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.wald(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.wald(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.wald(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.wald(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.wald(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.uniform(0.5, 0.5)) # E: float -reveal_type(def_gen.uniform(0.5, 0.5, size=None)) # E: float -reveal_type(def_gen.uniform(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.uniform(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.uniform(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.uniform(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.uniform(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.uniform(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.uniform(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.uniform(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.beta(0.5, 0.5)) # E: float -reveal_type(def_gen.beta(0.5, 0.5, size=None)) # E: float -reveal_type(def_gen.beta(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.beta(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.beta(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.beta(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.beta(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.beta(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.beta(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.beta(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.beta(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.f(0.5, 0.5)) # E: float -reveal_type(def_gen.f(0.5, 0.5, size=None)) # E: float -reveal_type(def_gen.f(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.f(D_arr_0p5, 
0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.f(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.f(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.f(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.f(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.f(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.f(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.f(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.gamma(0.5, 0.5)) # E: float -reveal_type(def_gen.gamma(0.5, 0.5, size=None)) # E: float -reveal_type(def_gen.gamma(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gamma(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gamma(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gamma(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gamma(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gamma(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gamma(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gamma(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.gumbel(0.5, 0.5)) # E: float -reveal_type(def_gen.gumbel(0.5, 0.5, size=None)) # E: float -reveal_type(def_gen.gumbel(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gumbel(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gumbel(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gumbel(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gumbel(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gumbel(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.laplace(0.5, 0.5)) # E: float -reveal_type(def_gen.laplace(0.5, 0.5, size=None)) # E: float -reveal_type(def_gen.laplace(0.5, 0.5, 
size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.laplace(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.laplace(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.laplace(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.laplace(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.laplace(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.laplace(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.laplace(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.logistic(0.5, 0.5)) # E: float -reveal_type(def_gen.logistic(0.5, 0.5, size=None)) # E: float -reveal_type(def_gen.logistic(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.logistic(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.logistic(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.logistic(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.logistic(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.logistic(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.logistic(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.logistic(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.lognormal(0.5, 0.5)) # E: float -reveal_type(def_gen.lognormal(0.5, 0.5, size=None)) # E: float -reveal_type(def_gen.lognormal(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.lognormal(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.lognormal(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.lognormal(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.lognormal(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.lognormal(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.lognormal(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.lognormal(D_arr_like_0p5, 
D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.noncentral_chisquare(0.5, 0.5)) # E: float -reveal_type(def_gen.noncentral_chisquare(0.5, 0.5, size=None)) # E: float -reveal_type(def_gen.noncentral_chisquare(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_chisquare(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.normal(0.5, 0.5)) # E: float -reveal_type(def_gen.normal(0.5, 0.5, size=None)) # E: float -reveal_type(def_gen.normal(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.normal(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.normal(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.normal(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.normal(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.normal(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.normal(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.normal(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.triangular(0.1, 0.5, 0.9)) # E: float -reveal_type(def_gen.triangular(0.1, 0.5, 0.9, size=None)) # E: float -reveal_type(def_gen.triangular(0.1, 0.5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.triangular(D_arr_0p1, 0.5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.triangular(0.1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.triangular(0.5, 
D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.noncentral_f(0.1, 0.5, 0.9)) # E: float -reveal_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=None)) # E: float -reveal_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.binomial(10, 0.5)) # E: int -reveal_type(def_gen.binomial(10, 0.5, size=None)) # E: int -reveal_type(def_gen.binomial(10, 0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.binomial(I_arr_10, 0.5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.binomial(10, D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.binomial(I_arr_10, 0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.binomial(10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.binomial(I_arr_like_10, 0.5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.binomial(10, D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.binomial(I_arr_10, D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.binomial(I_arr_10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] - -reveal_type(def_gen.negative_binomial(10, 0.5)) # E: int -reveal_type(def_gen.negative_binomial(10, 0.5, size=None)) # E: int -reveal_type(def_gen.negative_binomial(10, 0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] 
-reveal_type(def_gen.negative_binomial(I_arr_10, 0.5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.negative_binomial(10, D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.negative_binomial(I_arr_10, 0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.negative_binomial(10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.negative_binomial(I_arr_like_10, 0.5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.negative_binomial(10, D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] - -reveal_type(def_gen.hypergeometric(20, 20, 10)) # E: int -reveal_type(def_gen.hypergeometric(20, 20, 10, size=None)) # E: int -reveal_type(def_gen.hypergeometric(20, 20, 10, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.hypergeometric(I_arr_20, 20, 10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.hypergeometric(20, I_arr_20, 10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.hypergeometric(20, I_arr_like_20, 10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] +D_2D: npt.NDArray[np.float64] = np.array(D_2D_like) +S_out: npt.NDArray[np.float32] = np.empty(1, dtype=np.float32) +D_out: npt.NDArray[np.float64] = np.empty(1) + +assert_type(def_gen.standard_normal(), float) +assert_type(def_gen.standard_normal(dtype=np.float32), float) +assert_type(def_gen.standard_normal(dtype="float32"), float) +assert_type(def_gen.standard_normal(dtype="double"), float) +assert_type(def_gen.standard_normal(dtype=np.float64), float) +assert_type(def_gen.standard_normal(size=None), float) +assert_type(def_gen.standard_normal(size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(size=1, dtype="f4"), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) 
+assert_type(def_gen.standard_normal(dtype=np.float32, out=S_out), npt.NDArray[np.float32])
+assert_type(def_gen.standard_normal(size=1, dtype=np.float64), npt.NDArray[np.float64])
+assert_type(def_gen.standard_normal(size=1, dtype="float64"), npt.NDArray[np.float64])
+assert_type(def_gen.standard_normal(size=1, dtype="f8"), npt.NDArray[np.float64])
+assert_type(def_gen.standard_normal(out=D_out), npt.NDArray[np.float64])
+assert_type(def_gen.standard_normal(size=1, dtype="float64"), npt.NDArray[np.float64])
+assert_type(def_gen.standard_normal(size=1, dtype="float64", out=D_out), npt.NDArray[np.float64])
+
+assert_type(def_gen.random(), float)
+assert_type(def_gen.random(dtype=np.float32), float)
+assert_type(def_gen.random(dtype="float32"), float)
+assert_type(def_gen.random(dtype="double"), float)
+assert_type(def_gen.random(dtype=np.float64), float)
+assert_type(def_gen.random(size=None), float)
+assert_type(def_gen.random(size=1), npt.NDArray[np.float64])
+assert_type(def_gen.random(size=1, dtype=np.float32), npt.NDArray[np.float32])
+assert_type(def_gen.random(size=1, dtype="f4"), npt.NDArray[np.float32])
+assert_type(def_gen.random(size=1, dtype="float32", out=S_out), npt.NDArray[np.float32])
+assert_type(def_gen.random(dtype=np.float32, out=S_out), npt.NDArray[np.float32])
+assert_type(def_gen.random(size=1, dtype=np.float64), npt.NDArray[np.float64])
+assert_type(def_gen.random(size=1, dtype="float64"), npt.NDArray[np.float64])
+assert_type(def_gen.random(size=1, dtype="f8"), npt.NDArray[np.float64])
+assert_type(def_gen.random(out=D_out), npt.NDArray[np.float64])
+assert_type(def_gen.random(size=1, dtype="float64"), npt.NDArray[np.float64])
+assert_type(def_gen.random(size=1, dtype="float64", out=D_out), npt.NDArray[np.float64])
+
+assert_type(def_gen.standard_cauchy(), float)
+assert_type(def_gen.standard_cauchy(size=None), float)
+assert_type(def_gen.standard_cauchy(size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.standard_exponential(), float)
+assert_type(def_gen.standard_exponential(method="inv"), float)
+assert_type(def_gen.standard_exponential(dtype=np.float32), float)
+assert_type(def_gen.standard_exponential(dtype="float32"), float)
+assert_type(def_gen.standard_exponential(dtype="double"), float)
+assert_type(def_gen.standard_exponential(dtype=np.float64), float)
+assert_type(def_gen.standard_exponential(size=None), float)
+assert_type(def_gen.standard_exponential(size=None, method="inv"), float)
+assert_type(def_gen.standard_exponential(size=1, method="inv"), npt.NDArray[np.float64])
+assert_type(def_gen.standard_exponential(size=1, dtype=np.float32), npt.NDArray[np.float32])
+assert_type(def_gen.standard_exponential(size=1, dtype="f4", method="inv"), npt.NDArray[np.float32])
+assert_type(def_gen.standard_exponential(size=1, dtype="float32", out=S_out), npt.NDArray[np.float32])
+assert_type(def_gen.standard_exponential(dtype=np.float32, out=S_out), npt.NDArray[np.float32])
+assert_type(def_gen.standard_exponential(size=1, dtype=np.float64, method="inv"), npt.NDArray[np.float64])
+assert_type(def_gen.standard_exponential(size=1, dtype="float64"), npt.NDArray[np.float64])
+assert_type(def_gen.standard_exponential(size=1, dtype="f8"), npt.NDArray[np.float64])
+assert_type(def_gen.standard_exponential(out=D_out), npt.NDArray[np.float64])
+assert_type(def_gen.standard_exponential(size=1, dtype="float64"), npt.NDArray[np.float64])
+assert_type(def_gen.standard_exponential(size=1, dtype="float64", out=D_out), npt.NDArray[np.float64])
+
+assert_type(def_gen.zipf(1.5), int)
+assert_type(def_gen.zipf(1.5, size=None), int)
+assert_type(def_gen.zipf(1.5, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.zipf(D_arr_1p5), npt.NDArray[np.int64])
+assert_type(def_gen.zipf(D_arr_1p5, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.zipf(D_arr_like_1p5), npt.NDArray[np.int64])
+assert_type(def_gen.zipf(D_arr_like_1p5, size=1), npt.NDArray[np.int64])
+
+assert_type(def_gen.weibull(0.5), float)
+assert_type(def_gen.weibull(0.5, size=None), float)
+assert_type(def_gen.weibull(0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.weibull(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.weibull(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.weibull(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.weibull(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.standard_t(0.5), float)
+assert_type(def_gen.standard_t(0.5, size=None), float)
+assert_type(def_gen.standard_t(0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.standard_t(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.standard_t(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.standard_t(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.standard_t(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.poisson(0.5), int)
+assert_type(def_gen.poisson(0.5, size=None), int)
+assert_type(def_gen.poisson(0.5, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.poisson(D_arr_0p5), npt.NDArray[np.int64])
+assert_type(def_gen.poisson(D_arr_0p5, size=1), npt.NDArray[np.int64])
+assert_type(def_gen.poisson(D_arr_like_0p5), npt.NDArray[np.int64])
+assert_type(def_gen.poisson(D_arr_like_0p5, size=1), npt.NDArray[np.int64])
+
+assert_type(def_gen.power(0.5), float)
+assert_type(def_gen.power(0.5, size=None), float)
+assert_type(def_gen.power(0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.power(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.power(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.power(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.power(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.pareto(0.5), float)
+assert_type(def_gen.pareto(0.5, size=None), float)
+assert_type(def_gen.pareto(0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.pareto(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.pareto(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.pareto(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.pareto(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.chisquare(0.5), float)
+assert_type(def_gen.chisquare(0.5, size=None), float)
+assert_type(def_gen.chisquare(0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.chisquare(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.chisquare(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.chisquare(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.chisquare(D_arr_like_0p5, size=1), npt.NDArray[np.float64])
+
+assert_type(def_gen.exponential(0.5), float)
+assert_type(def_gen.exponential(0.5, size=None), float)
+assert_type(def_gen.exponential(0.5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.exponential(D_arr_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.exponential(D_arr_0p5, size=1), npt.NDArray[np.float64])
+assert_type(def_gen.exponential(D_arr_like_0p5), npt.NDArray[np.float64])
+assert_type(def_gen.exponential(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.geometric(0.5), int) +assert_type(def_gen.geometric(0.5, size=None), int) +assert_type(def_gen.geometric(0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.logseries(0.5), int) +assert_type(def_gen.logseries(0.5, size=None), int) +assert_type(def_gen.logseries(0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.rayleigh(0.5), float) +assert_type(def_gen.rayleigh(0.5, size=None), float) +assert_type(def_gen.rayleigh(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_gamma(0.5), float) +assert_type(def_gen.standard_gamma(0.5, size=None), float) +assert_type(def_gen.standard_gamma(0.5, dtype="float32"), float) +assert_type(def_gen.standard_gamma(0.5, size=None, dtype="float32"), float) +assert_type(def_gen.standard_gamma(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_0p5, dtype="f4"), npt.NDArray[np.float32]) +assert_type(def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_gamma(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(0.5, out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64), npt.NDArray[np.float64]) + +assert_type(def_gen.vonmises(0.5, 0.5), float) +assert_type(def_gen.vonmises(0.5, 0.5, size=None), float) +assert_type(def_gen.vonmises(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5), 
npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.wald(0.5, 0.5), float) +assert_type(def_gen.wald(0.5, 0.5, size=None), float) +assert_type(def_gen.wald(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.wald(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.uniform(0.5, 0.5), float) +assert_type(def_gen.uniform(0.5, 0.5, size=None), float) +assert_type(def_gen.uniform(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.beta(0.5, 0.5), float) +assert_type(def_gen.beta(0.5, 0.5, size=None), float) +assert_type(def_gen.beta(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.beta(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.f(0.5, 0.5), float) +assert_type(def_gen.f(0.5, 0.5, size=None), float) +assert_type(def_gen.f(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.f(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.f(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_like_0p5, 0.5), 
npt.NDArray[np.float64]) +assert_type(def_gen.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.gamma(0.5, 0.5), float) +assert_type(def_gen.gamma(0.5, 0.5, size=None), float) +assert_type(def_gen.gamma(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.gumbel(0.5, 0.5), float) +assert_type(def_gen.gumbel(0.5, 0.5, size=None), float) +assert_type(def_gen.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.laplace(0.5, 0.5), float) +assert_type(def_gen.laplace(0.5, 0.5, size=None), float) +assert_type(def_gen.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.logistic(0.5, 0.5), float) +assert_type(def_gen.logistic(0.5, 0.5, size=None), float) +assert_type(def_gen.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_0p5, 0.5), 
npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.lognormal(0.5, 0.5), float) +assert_type(def_gen.lognormal(0.5, 0.5, size=None), float) +assert_type(def_gen.lognormal(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.noncentral_chisquare(0.5, 0.5), float) +assert_type(def_gen.noncentral_chisquare(0.5, 0.5, size=None), float) +assert_type(def_gen.noncentral_chisquare(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.normal(0.5, 0.5), float) +assert_type(def_gen.normal(0.5, 0.5, size=None), float) +assert_type(def_gen.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) 
+assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.triangular(0.1, 0.5, 0.9), float) +assert_type(def_gen.triangular(0.1, 0.5, 0.9, size=None), float) +assert_type(def_gen.triangular(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.noncentral_f(0.1, 0.5, 0.9), float) +assert_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=None), float) +assert_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.binomial(10, 0.5), int) +assert_type(def_gen.binomial(10, 0.5, size=None), int) +assert_type(def_gen.binomial(10, 0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_10, 0.5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(10, D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(10, D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_like_10, 0.5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(10, D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + 
+assert_type(def_gen.negative_binomial(10, 0.5), int) +assert_type(def_gen.negative_binomial(10, 0.5, size=None), int) +assert_type(def_gen.negative_binomial(10, 0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(10, D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(10, D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.hypergeometric(20, 20, 10), int) +assert_type(def_gen.hypergeometric(20, 20, 10, size=None), int) +assert_type(def_gen.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.int64]) I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64) -reveal_type(def_gen.integers(0, 100)) # E: int -reveal_type(def_gen.integers(100)) # E: int -reveal_type(def_gen.integers([100])) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(0, [100])) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] +assert_type(def_gen.integers(0, 100), int) +assert_type(def_gen.integers(100), int) +assert_type(def_gen.integers([100]), npt.NDArray[np.int64]) +assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64]) -I_bool_low: np.ndarray[Any, np.dtype[np.bool_]] = np.array([0], dtype=np.bool_) +I_bool_low: npt.NDArray[np.bool_] = np.array([0], dtype=np.bool_) I_bool_low_like: list[int] = [0] -I_bool_high_open: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_) -I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_) - -reveal_type(def_gen.integers(2, dtype=bool)) # E: builtins.bool -reveal_type(def_gen.integers(0, 2, dtype=bool)) # E: builtins.bool -reveal_type(def_gen.integers(1, dtype=bool, endpoint=True)) # E: builtins.bool -reveal_type(def_gen.integers(0, 1, dtype=bool, endpoint=True)) # E: builtins.bool -reveal_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True)) # E: ndarray[Any, dtype[bool_] -reveal_type(def_gen.integers(I_bool_high_open, 
dtype=bool)) # E: ndarray[Any, dtype[bool_] -reveal_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool)) # E: ndarray[Any, dtype[bool_] -reveal_type(def_gen.integers(0, I_bool_high_open, dtype=bool)) # E: ndarray[Any, dtype[bool_] -reveal_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True)) # E: ndarray[Any, dtype[bool_] -reveal_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True)) # E: ndarray[Any, dtype[bool_] -reveal_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True)) # E: ndarray[Any, dtype[bool_] - -reveal_type(def_gen.integers(2, dtype=np.bool_)) # E: builtins.bool -reveal_type(def_gen.integers(0, 2, dtype=np.bool_)) # E: builtins.bool -reveal_type(def_gen.integers(1, dtype=np.bool_, endpoint=True)) # E: builtins.bool -reveal_type(def_gen.integers(0, 1, dtype=np.bool_, endpoint=True)) # E: builtins.bool -reveal_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool_, endpoint=True)) # E: ndarray[Any, dtype[bool_] -reveal_type(def_gen.integers(I_bool_high_open, dtype=np.bool_)) # E: ndarray[Any, dtype[bool_] -reveal_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool_)) # E: ndarray[Any, dtype[bool_] -reveal_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool_)) # E: ndarray[Any, dtype[bool_] -reveal_type(def_gen.integers(I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: ndarray[Any, dtype[bool_] -reveal_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: ndarray[Any, dtype[bool_] -reveal_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: ndarray[Any, dtype[bool_] +I_bool_high_open: npt.NDArray[np.bool_] = np.array([1], dtype=np.bool_) +I_bool_high_closed: npt.NDArray[np.bool_] = np.array([1], dtype=np.bool_) + +assert_type(def_gen.integers(2, dtype=bool), bool) +assert_type(def_gen.integers(0, 2, dtype=bool), bool) +assert_type(def_gen.integers(1, dtype=bool, endpoint=True), bool) +assert_type(def_gen.integers(0, 1, dtype=bool, endpoint=True), bool) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[np.bool_]) +assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[np.bool_]) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool_]) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool_]) +assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool_]) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool_]) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool_]) + +assert_type(def_gen.integers(2, dtype=np.bool_), bool) +assert_type(def_gen.integers(0, 2, dtype=np.bool_), bool) +assert_type(def_gen.integers(1, dtype=np.bool_, endpoint=True), bool) +assert_type(def_gen.integers(0, 1, dtype=np.bool_, endpoint=True), bool) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool_, endpoint=True), npt.NDArray[np.bool_]) +assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool_), npt.NDArray[np.bool_]) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool_), npt.NDArray[np.bool_]) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool_), npt.NDArray[np.bool_]) +assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool_, endpoint=True), npt.NDArray[np.bool_]) +assert_type(def_gen.integers(I_bool_low, 
I_bool_high_closed, dtype=np.bool_, endpoint=True), npt.NDArray[np.bool_]) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool_, endpoint=True), npt.NDArray[np.bool_]) I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8) I_u1_low_like: list[int] = [0] I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) -reveal_type(def_gen.integers(256, dtype="u1")) # E: int -reveal_type(def_gen.integers(0, 256, dtype="u1")) # E: int -reveal_type(def_gen.integers(255, dtype="u1", endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 255, dtype="u1", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_u1_high_open, dtype="u1")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(0, I_u1_high_open, dtype="u1")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] - -reveal_type(def_gen.integers(256, dtype="uint8")) # E: int -reveal_type(def_gen.integers(0, 256, dtype="uint8")) # E: int -reveal_type(def_gen.integers(255, dtype="uint8", endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_u1_high_open, dtype="uint8")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(0, I_u1_high_open, dtype="uint8")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] - -reveal_type(def_gen.integers(256, dtype=np.uint8)) # E: int -reveal_type(def_gen.integers(0, 256, dtype=np.uint8)) # E: int -reveal_type(def_gen.integers(255, dtype=np.uint8, endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_u1_high_open, dtype=np.uint8)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(0, I_u1_high_open, 
dtype=np.uint8)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] +assert_type(def_gen.integers(256, dtype="u1"), int) +assert_type(def_gen.integers(0, 256, dtype="u1"), int) +assert_type(def_gen.integers(255, dtype="u1", endpoint=True), int) +assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), int) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) + +assert_type(def_gen.integers(256, dtype="uint8"), int) +assert_type(def_gen.integers(0, 256, dtype="uint8"), int) +assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), int) +assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), int) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) + +assert_type(def_gen.integers(256, dtype=np.uint8), int) +assert_type(def_gen.integers(0, 256, dtype=np.uint8), int) +assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), int) +assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), int) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16) I_u2_low_like: list[int] = [0] I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) I_u2_high_closed: 
np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) -reveal_type(def_gen.integers(65536, dtype="u2")) # E: int -reveal_type(def_gen.integers(0, 65536, dtype="u2")) # E: int -reveal_type(def_gen.integers(65535, dtype="u2", endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_u2_high_open, dtype="u2")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(0, I_u2_high_open, dtype="u2")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] - -reveal_type(def_gen.integers(65536, dtype="uint16")) # E: int -reveal_type(def_gen.integers(0, 65536, dtype="uint16")) # E: int -reveal_type(def_gen.integers(65535, dtype="uint16", endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_u2_high_open, dtype="uint16")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(0, I_u2_high_open, dtype="uint16")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] - -reveal_type(def_gen.integers(65536, dtype=np.uint16)) # E: int -reveal_type(def_gen.integers(0, 65536, dtype=np.uint16)) # E: int -reveal_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_u2_high_open, dtype=np.uint16)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: ndarray[Any, 
dtype[unsignedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] +assert_type(def_gen.integers(65536, dtype="u2"), int) +assert_type(def_gen.integers(0, 65536, dtype="u2"), int) +assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), int) +assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), int) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) + +assert_type(def_gen.integers(65536, dtype="uint16"), int) +assert_type(def_gen.integers(0, 65536, dtype="uint16"), int) +assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), int) +assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), int) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) + +assert_type(def_gen.integers(65536, dtype=np.uint16), int) +assert_type(def_gen.integers(0, 65536, dtype=np.uint16), int) +assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), int) +assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), int) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32) I_u4_low_like: list[int] = [0] I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) -reveal_type(def_gen.integers(4294967296, dtype=np.int_)) # E: int -reveal_type(def_gen.integers(0, 4294967296, dtype=np.int_)) # E: int 
-reveal_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(def_gen.integers(I_u4_high_open, dtype=np.int_)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True)) # E: ndarray[Any, dtype[{int_}]] - - -reveal_type(def_gen.integers(4294967296, dtype="u4")) # E: int -reveal_type(def_gen.integers(0, 4294967296, dtype="u4")) # E: int -reveal_type(def_gen.integers(4294967295, dtype="u4", endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_u4_high_open, dtype="u4")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(0, I_u4_high_open, dtype="u4")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] - -reveal_type(def_gen.integers(4294967296, dtype="uint32")) # E: int -reveal_type(def_gen.integers(0, 4294967296, dtype="uint32")) # E: int -reveal_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_u4_high_open, dtype="uint32")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(0, I_u4_high_open, dtype="uint32")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] - -reveal_type(def_gen.integers(4294967296, dtype=np.uint32)) # E: int -reveal_type(def_gen.integers(0, 4294967296, dtype=np.uint32)) # E: int 
-reveal_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_u4_high_open, dtype=np.uint32)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] - -reveal_type(def_gen.integers(4294967296, dtype=np.uint)) # E: int -reveal_type(def_gen.integers(0, 4294967296, dtype=np.uint)) # E: int -reveal_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True)) # E: ndarray[Any, dtype[{uint}]] -reveal_type(def_gen.integers(I_u4_high_open, dtype=np.uint)) # E: ndarray[Any, dtype[{uint}]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint)) # E: ndarray[Any, dtype[{uint}]] -reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint)) # E: ndarray[Any, dtype[{uint}]] -reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: ndarray[Any, dtype[{uint}]] -reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: ndarray[Any, dtype[{uint}]] -reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: ndarray[Any, dtype[{uint}]] +assert_type(def_gen.integers(4294967296, dtype=np.int_), int) +assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), int) +assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), int) +assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), int) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) + + +assert_type(def_gen.integers(4294967296, dtype="u4"), int) +assert_type(def_gen.integers(0, 4294967296, dtype="u4"), int) +assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), int) +assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), int) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True), 
npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) + +assert_type(def_gen.integers(4294967296, dtype="uint32"), int) +assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), int) +assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), int) +assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), int) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) + +assert_type(def_gen.integers(4294967296, dtype=np.uint32), int) +assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), int) +assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), int) +assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), int) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) + +assert_type(def_gen.integers(4294967296, dtype=np.uint), int) +assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), int) +assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), int) +assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), int) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(0, I_u4_high_closed, 
dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64) I_u8_low_like: list[int] = [0] I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) -reveal_type(def_gen.integers(18446744073709551616, dtype="u8")) # E: int -reveal_type(def_gen.integers(0, 18446744073709551616, dtype="u8")) # E: int -reveal_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_u8_high_open, dtype="u8")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(0, I_u8_high_open, dtype="u8")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] - -reveal_type(def_gen.integers(18446744073709551616, dtype="uint64")) # E: int -reveal_type(def_gen.integers(0, 18446744073709551616, dtype="uint64")) # E: int -reveal_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_u8_high_open, dtype="uint64")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(0, I_u8_high_open, dtype="uint64")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] - -reveal_type(def_gen.integers(18446744073709551616, dtype=np.uint64)) # E: int -reveal_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64)) # E: int -reveal_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True)) # E: int -reveal_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_u8_high_open, dtype=np.uint64)) # E: ndarray[Any, 
dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] +assert_type(def_gen.integers(18446744073709551616, dtype="u8"), int) +assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), int) +assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), int) +assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), int) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) + +assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), int) +assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), int) +assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), int) +assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), int) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) + +assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), int) +assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), int) +assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), int) +assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), int) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) 
+assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8) I_i1_low_like: list[int] = [-128] I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) -reveal_type(def_gen.integers(128, dtype="i1")) # E: int -reveal_type(def_gen.integers(-128, 128, dtype="i1")) # E: int -reveal_type(def_gen.integers(127, dtype="i1", endpoint=True)) # E: int -reveal_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_i1_high_open, dtype="i1")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(-128, I_i1_high_open, dtype="i1")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] - -reveal_type(def_gen.integers(128, dtype="int8")) # E: int -reveal_type(def_gen.integers(-128, 128, dtype="int8")) # E: int -reveal_type(def_gen.integers(127, dtype="int8", endpoint=True)) # E: int -reveal_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_i1_high_open, dtype="int8")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(-128, I_i1_high_open, dtype="int8")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] - -reveal_type(def_gen.integers(128, dtype=np.int8)) # E: int -reveal_type(def_gen.integers(-128, 128, dtype=np.int8)) # E: int -reveal_type(def_gen.integers(127, dtype=np.int8, endpoint=True)) # E: int -reveal_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_i1_high_open, dtype=np.int8)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, 
dtype=np.int8)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] - -I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16) +assert_type(def_gen.integers(128, dtype="i1"), int) +assert_type(def_gen.integers(-128, 128, dtype="i1"), int) +assert_type(def_gen.integers(127, dtype="i1", endpoint=True), int) +assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), int) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) + +assert_type(def_gen.integers(128, dtype="int8"), int) +assert_type(def_gen.integers(-128, 128, dtype="int8"), int) +assert_type(def_gen.integers(127, dtype="int8", endpoint=True), int) +assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), int) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) + +assert_type(def_gen.integers(128, dtype=np.int8), int) +assert_type(def_gen.integers(-128, 128, dtype=np.int8), int) +assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), int) +assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), int) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) + +I_i2_low: 
npt.NDArray[np.int16] = np.array([-32768], dtype=np.int16) I_i2_low_like: list[int] = [-32768] -I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) -I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) - -reveal_type(def_gen.integers(32768, dtype="i2")) # E: int -reveal_type(def_gen.integers(-32768, 32768, dtype="i2")) # E: int -reveal_type(def_gen.integers(32767, dtype="i2", endpoint=True)) # E: int -reveal_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_i2_high_open, dtype="i2")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] - -reveal_type(def_gen.integers(32768, dtype="int16")) # E: int -reveal_type(def_gen.integers(-32768, 32768, dtype="int16")) # E: int -reveal_type(def_gen.integers(32767, dtype="int16", endpoint=True)) # E: int -reveal_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_i2_high_open, dtype="int16")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] - -reveal_type(def_gen.integers(32768, dtype=np.int16)) # E: int -reveal_type(def_gen.integers(-32768, 32768, dtype=np.int16)) # E: int -reveal_type(def_gen.integers(32767, dtype=np.int16, endpoint=True)) # E: int -reveal_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_i2_high_open, dtype=np.int16)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_i2_high_closed, 
dtype=np.int16, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] +I_i2_high_open: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) +I_i2_high_closed: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) + +assert_type(def_gen.integers(32768, dtype="i2"), int) +assert_type(def_gen.integers(-32768, 32768, dtype="i2"), int) +assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), int) +assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), int) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) + +assert_type(def_gen.integers(32768, dtype="int16"), int) +assert_type(def_gen.integers(-32768, 32768, dtype="int16"), int) +assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), int) +assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), int) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) + +assert_type(def_gen.integers(32768, dtype=np.int16), int) +assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), int) +assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), int) +assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), int) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32) 
I_i4_low_like: list[int] = [-2147483648] I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) -reveal_type(def_gen.integers(2147483648, dtype="i4")) # E: int -reveal_type(def_gen.integers(-2147483648, 2147483648, dtype="i4")) # E: int -reveal_type(def_gen.integers(2147483647, dtype="i4", endpoint=True)) # E: int -reveal_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_i4_high_open, dtype="i4")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] - -reveal_type(def_gen.integers(2147483648, dtype="int32")) # E: int -reveal_type(def_gen.integers(-2147483648, 2147483648, dtype="int32")) # E: int -reveal_type(def_gen.integers(2147483647, dtype="int32", endpoint=True)) # E: int -reveal_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_i4_high_open, dtype="int32")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] - -reveal_type(def_gen.integers(2147483648, dtype=np.int32)) # E: int -reveal_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32)) # E: int -reveal_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True)) # E: int -reveal_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_i4_high_open, dtype=np.int32)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32)) # E: ndarray[Any, 
dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] +assert_type(def_gen.integers(2147483648, dtype="i4"), int) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), int) +assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), int) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), int) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) + +assert_type(def_gen.integers(2147483648, dtype="int32"), int) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), int) +assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), int) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), int) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) + +assert_type(def_gen.integers(2147483648, dtype=np.int32), int) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), int) +assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), int) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), int) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) I_i8_low: np.ndarray[Any, 
np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64) I_i8_low_like: list[int] = [-9223372036854775808] I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) -reveal_type(def_gen.integers(9223372036854775808, dtype="i8")) # E: int -reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8")) # E: int -reveal_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True)) # E: int -reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_i8_high_open, dtype="i8")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] - -reveal_type(def_gen.integers(9223372036854775808, dtype="int64")) # E: int -reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64")) # E: int -reveal_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True)) # E: int -reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_i8_high_open, dtype="int64")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] - -reveal_type(def_gen.integers(9223372036854775808, dtype=np.int64)) # E: int -reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64)) # E: int -reveal_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True)) # E: int -reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True)) # E: int -reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] 
-reveal_type(def_gen.integers(I_i8_high_open, dtype=np.int64)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-
-
-reveal_type(def_gen.bit_generator) # E: BitGenerator
-
-reveal_type(def_gen.bytes(2)) # E: bytes
-
-reveal_type(def_gen.choice(5)) # E: int
-reveal_type(def_gen.choice(5, 3)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.choice(5, 3, replace=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.choice(5, 3, p=[1 / 5] * 5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-
-reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"])) # E: Any
-reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)) # E: ndarray[Any, Any]
-reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)) # E: ndarray[Any, Any]
-reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)) # E: ndarray[Any, Any]
-reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))) # E: ndarray[Any, Any]
-
-reveal_type(def_gen.dirichlet([0.5, 0.5])) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(def_gen.dirichlet(np.array([0.5, 0.5]))) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(def_gen.dirichlet(np.array([0.5, 0.5]), size=3)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-
-reveal_type(def_gen.multinomial(20, [1 / 6.0] * 6)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.multinomial(20, np.array([0.5, 0.5]))) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.multinomial(20, [1 / 6.0] * 6, size=2)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2))) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2))) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-
-reveal_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7))) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
-reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] - -reveal_type(def_gen.multivariate_normal([0.0], [[1.0]])) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.multivariate_normal([0.0], np.array([[1.0]]))) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.multivariate_normal(np.array([0.0]), [[1.0]])) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(def_gen.multivariate_normal([0.0], np.array([[1.0]]))) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(def_gen.permutation(10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(def_gen.permutation([1, 2, 3, 4])) # E: ndarray[Any, Any] -reveal_type(def_gen.permutation(np.array([1, 2, 3, 4]))) # E: ndarray[Any, Any] -reveal_type(def_gen.permutation(D_2D, axis=1)) # E: ndarray[Any, Any] -reveal_type(def_gen.permuted(D_2D)) # E: ndarray[Any, Any] -reveal_type(def_gen.permuted(D_2D_like)) # E: ndarray[Any, Any] -reveal_type(def_gen.permuted(D_2D, axis=1)) # E: ndarray[Any, Any] -reveal_type(def_gen.permuted(D_2D, out=D_2D)) # E: ndarray[Any, Any] -reveal_type(def_gen.permuted(D_2D_like, out=D_2D)) # E: ndarray[Any, Any] -reveal_type(def_gen.permuted(D_2D_like, out=D_2D)) # E: ndarray[Any, Any] -reveal_type(def_gen.permuted(D_2D, axis=1, out=D_2D)) # E: ndarray[Any, Any] - -reveal_type(def_gen.shuffle(np.arange(10))) # E: None -reveal_type(def_gen.shuffle([1, 2, 3, 4, 5])) # E: None -reveal_type(def_gen.shuffle(D_2D, axis=1)) # E: None - -reveal_type(np.random.Generator(pcg64)) # E: Generator -reveal_type(def_gen.__str__()) # E: str -reveal_type(def_gen.__repr__()) # E: str +assert_type(def_gen.integers(9223372036854775808, dtype="i8"), int) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), int) +assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), int) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), int) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) + +assert_type(def_gen.integers(9223372036854775808, dtype="int64"), int) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), int) +assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), int) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), int) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64"), 
npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) + +assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), int) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), int) +assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), int) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), int) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) + + +assert_type(def_gen.bit_generator, np.random.BitGenerator) + +assert_type(def_gen.bytes(2), bytes) + +assert_type(def_gen.choice(5), int) +assert_type(def_gen.choice(5, 3), npt.NDArray[np.int64]) +assert_type(def_gen.choice(5, 3, replace=True), npt.NDArray[np.int64]) +assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.int64]) +assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False), npt.NDArray[np.int64]) + +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]), Any) +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), np.ndarray[Any, Any]) +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), np.ndarray[Any, Any]) +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), np.ndarray[Any, Any]) +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), np.ndarray[Any, Any]) + +assert_type(def_gen.dirichlet([0.5, 0.5]), npt.NDArray[np.float64]) +assert_type(def_gen.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64]) +assert_type(def_gen.dirichlet(np.array([0.5, 0.5]), size=3), npt.NDArray[np.float64]) + +assert_type(def_gen.multinomial(20, [1 / 6.0] * 6), npt.NDArray[np.int64]) +assert_type(def_gen.multinomial(20, np.array([0.5, 0.5])), npt.NDArray[np.int64]) +assert_type(def_gen.multinomial(20, [1 / 6.0] * 6, size=2), npt.NDArray[np.int64]) +assert_type(def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2)), npt.NDArray[np.int64]) +assert_type(def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2)), npt.NDArray[np.int64]) + +assert_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7)), 
npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count"), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals"), npt.NDArray[np.int64]) + +assert_type(def_gen.multivariate_normal([0.0], [[1.0]]), npt.NDArray[np.float64]) +assert_type(def_gen.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64]) +assert_type(def_gen.multivariate_normal(np.array([0.0]), [[1.0]]), npt.NDArray[np.float64]) +assert_type(def_gen.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64]) + +assert_type(def_gen.permutation(10), npt.NDArray[np.int64]) +assert_type(def_gen.permutation([1, 2, 3, 4]), np.ndarray[Any, Any]) +assert_type(def_gen.permutation(np.array([1, 2, 3, 4])), np.ndarray[Any, Any]) +assert_type(def_gen.permutation(D_2D, axis=1), np.ndarray[Any, Any]) +assert_type(def_gen.permuted(D_2D), np.ndarray[Any, Any]) +assert_type(def_gen.permuted(D_2D_like), np.ndarray[Any, Any]) +assert_type(def_gen.permuted(D_2D, axis=1), np.ndarray[Any, Any]) +assert_type(def_gen.permuted(D_2D, out=D_2D), np.ndarray[Any, Any]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), np.ndarray[Any, Any]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), np.ndarray[Any, Any]) +assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), np.ndarray[Any, Any]) + +assert_type(def_gen.shuffle(np.arange(10)), None) +assert_type(def_gen.shuffle([1, 2, 3, 4, 5]), None) +assert_type(def_gen.shuffle(D_2D, axis=1), None) + +assert_type(np.random.Generator(pcg64), np.random.Generator) +assert_type(def_gen.__str__(), str) +assert_type(def_gen.__repr__(), str) def_gen_state = def_gen.__getstate__() -reveal_type(def_gen_state) # E: builtins.dict[builtins.str, Any] -reveal_type(def_gen.__setstate__(def_gen_state)) # E: None +assert_type(def_gen_state, dict[str, Any]) +assert_type(def_gen.__setstate__(def_gen_state), None) # RandomState random_st: np.random.RandomState = np.random.RandomState() -reveal_type(random_st.standard_normal()) # E: float -reveal_type(random_st.standard_normal(size=None)) # E: float -reveal_type(random_st.standard_normal(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] - -reveal_type(random_st.random()) # E: float -reveal_type(random_st.random(size=None)) # E: float -reveal_type(random_st.random(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] - -reveal_type(random_st.standard_cauchy()) # E: float -reveal_type(random_st.standard_cauchy(size=None)) # E: float -reveal_type(random_st.standard_cauchy(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.standard_exponential()) # E: float -reveal_type(random_st.standard_exponential(size=None)) # E: float -reveal_type(random_st.standard_exponential(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] - -reveal_type(random_st.zipf(1.5)) # E: int -reveal_type(random_st.zipf(1.5, size=None)) # E: int -reveal_type(random_st.zipf(1.5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.zipf(D_arr_1p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.zipf(D_arr_1p5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.zipf(D_arr_like_1p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.zipf(D_arr_like_1p5, size=1)) # E: ndarray[Any, dtype[{int_}]] - -reveal_type(random_st.weibull(0.5)) # E: float -reveal_type(random_st.weibull(0.5, size=None)) # E: float -reveal_type(random_st.weibull(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] 
-reveal_type(random_st.weibull(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.weibull(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.weibull(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.weibull(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-
-reveal_type(random_st.standard_t(0.5)) # E: float
-reveal_type(random_st.standard_t(0.5, size=None)) # E: float
-reveal_type(random_st.standard_t(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.standard_t(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.standard_t(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.standard_t(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.standard_t(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-
-reveal_type(random_st.poisson(0.5)) # E: int
-reveal_type(random_st.poisson(0.5, size=None)) # E: int
-reveal_type(random_st.poisson(0.5, size=1)) # E: ndarray[Any, dtype[{int_}]]
-reveal_type(random_st.poisson(D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]]
-reveal_type(random_st.poisson(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
-reveal_type(random_st.poisson(D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]]
-reveal_type(random_st.poisson(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
-
-reveal_type(random_st.power(0.5)) # E: float
-reveal_type(random_st.power(0.5, size=None)) # E: float
-reveal_type(random_st.power(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.power(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.power(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.power(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.power(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-
-reveal_type(random_st.pareto(0.5)) # E: float
-reveal_type(random_st.pareto(0.5, size=None)) # E: float
-reveal_type(random_st.pareto(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.pareto(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.pareto(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.pareto(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.pareto(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-
-reveal_type(random_st.chisquare(0.5)) # E: float
-reveal_type(random_st.chisquare(0.5, size=None)) # E: float
-reveal_type(random_st.chisquare(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.chisquare(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.chisquare(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.chisquare(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.chisquare(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-
-reveal_type(random_st.exponential(0.5)) # E: float
-reveal_type(random_st.exponential(0.5, size=None)) # E: float
-reveal_type(random_st.exponential(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
-reveal_type(random_st.exponential(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.exponential(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.exponential(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.exponential(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.geometric(0.5)) # E: int -reveal_type(random_st.geometric(0.5, size=None)) # E: int -reveal_type(random_st.geometric(0.5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.geometric(D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.geometric(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.geometric(D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.geometric(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]] - -reveal_type(random_st.logseries(0.5)) # E: int -reveal_type(random_st.logseries(0.5, size=None)) # E: int -reveal_type(random_st.logseries(0.5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.logseries(D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.logseries(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.logseries(D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.logseries(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]] - -reveal_type(random_st.rayleigh(0.5)) # E: float -reveal_type(random_st.rayleigh(0.5, size=None)) # E: float -reveal_type(random_st.rayleigh(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.rayleigh(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.rayleigh(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.rayleigh(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.rayleigh(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.standard_gamma(0.5)) # E: float -reveal_type(random_st.standard_gamma(0.5, size=None)) # E: float -reveal_type(random_st.standard_gamma(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(random_st.standard_gamma(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(random_st.standard_gamma(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(random_st.standard_gamma(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(random_st.standard_gamma(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] -reveal_type(random_st.standard_gamma(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]] - -reveal_type(random_st.vonmises(0.5, 0.5)) # E: float -reveal_type(random_st.vonmises(0.5, 0.5, size=None)) # E: float -reveal_type(random_st.vonmises(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.vonmises(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.vonmises(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.vonmises(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.vonmises(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.vonmises(D_arr_like_0p5, 0.5)) # E: ndarray[Any, 
dtype[floating[typing._64Bit]] -reveal_type(random_st.vonmises(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.vonmises(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.wald(0.5, 0.5)) # E: float -reveal_type(random_st.wald(0.5, 0.5, size=None)) # E: float -reveal_type(random_st.wald(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.wald(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.wald(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.wald(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.wald(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.wald(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.wald(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.wald(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.wald(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.uniform(0.5, 0.5)) # E: float -reveal_type(random_st.uniform(0.5, 0.5, size=None)) # E: float -reveal_type(random_st.uniform(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.uniform(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.uniform(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.uniform(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.uniform(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.uniform(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.uniform(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.uniform(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.uniform(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.beta(0.5, 0.5)) # E: float -reveal_type(random_st.beta(0.5, 0.5, size=None)) # E: float -reveal_type(random_st.beta(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.beta(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.beta(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.beta(D_arr_0p5, 0.5, size=1)) # E: 
ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.beta(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.beta(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.beta(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.beta(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.beta(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.f(0.5, 0.5)) # E: float -reveal_type(random_st.f(0.5, 0.5, size=None)) # E: float -reveal_type(random_st.f(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.f(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.f(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.f(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.f(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.f(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.f(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.f(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.f(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.gamma(0.5, 0.5)) # E: float -reveal_type(random_st.gamma(0.5, 0.5, size=None)) # E: float -reveal_type(random_st.gamma(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gamma(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gamma(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gamma(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gamma(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gamma(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gamma(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gamma(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gamma(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.gumbel(0.5, 0.5)) # E: float -reveal_type(random_st.gumbel(0.5, 0.5, size=None)) # E: float -reveal_type(random_st.gumbel(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gumbel(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] 
-reveal_type(random_st.gumbel(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gumbel(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gumbel(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gumbel(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gumbel(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gumbel(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.laplace(0.5, 0.5)) # E: float -reveal_type(random_st.laplace(0.5, 0.5, size=None)) # E: float -reveal_type(random_st.laplace(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.laplace(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.laplace(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.laplace(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.laplace(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.laplace(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.laplace(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.laplace(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.laplace(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.logistic(0.5, 0.5)) # E: float -reveal_type(random_st.logistic(0.5, 0.5, size=None)) # E: float -reveal_type(random_st.logistic(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.logistic(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.logistic(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.logistic(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.logistic(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.logistic(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.logistic(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.logistic(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.logistic(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.lognormal(0.5, 
0.5)) # E: float -reveal_type(random_st.lognormal(0.5, 0.5, size=None)) # E: float -reveal_type(random_st.lognormal(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.lognormal(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.lognormal(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.lognormal(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.lognormal(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.lognormal(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.lognormal(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.lognormal(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.noncentral_chisquare(0.5, 0.5)) # E: float -reveal_type(random_st.noncentral_chisquare(0.5, 0.5, size=None)) # E: float -reveal_type(random_st.noncentral_chisquare(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_chisquare(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.normal(0.5, 0.5)) # E: float -reveal_type(random_st.normal(0.5, 0.5, size=None)) # E: float -reveal_type(random_st.normal(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.normal(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.normal(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.normal(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.normal(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.normal(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.normal(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] 
-reveal_type(random_st.normal(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.normal(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.triangular(0.1, 0.5, 0.9)) # E: float -reveal_type(random_st.triangular(0.1, 0.5, 0.9, size=None)) # E: float -reveal_type(random_st.triangular(0.1, 0.5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.triangular(D_arr_0p1, 0.5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.triangular(0.1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.triangular(0.1, D_arr_0p5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.noncentral_f(0.1, 0.5, 0.9)) # E: float -reveal_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=None)) # E: float -reveal_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.binomial(10, 0.5)) # E: int -reveal_type(random_st.binomial(10, 0.5, size=None)) # E: int -reveal_type(random_st.binomial(10, 0.5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.binomial(I_arr_10, 0.5)) # E: 
ndarray[Any, dtype[{int_}]] -reveal_type(random_st.binomial(10, D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.binomial(I_arr_10, 0.5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.binomial(10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.binomial(I_arr_like_10, 0.5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.binomial(10, D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.binomial(I_arr_10, D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]] - -reveal_type(random_st.negative_binomial(10, 0.5)) # E: int -reveal_type(random_st.negative_binomial(10, 0.5, size=None)) # E: int -reveal_type(random_st.negative_binomial(10, 0.5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.negative_binomial(I_arr_10, 0.5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.negative_binomial(10, D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.negative_binomial(I_arr_10, 0.5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.negative_binomial(10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.negative_binomial(I_arr_like_10, 0.5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.negative_binomial(10, D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]] - -reveal_type(random_st.hypergeometric(20, 20, 10)) # E: int -reveal_type(random_st.hypergeometric(20, 20, 10, size=None)) # E: int -reveal_type(random_st.hypergeometric(20, 20, 10, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.hypergeometric(I_arr_20, 20, 10)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.hypergeometric(20, I_arr_20, 10)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.hypergeometric(20, I_arr_20, 10, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.hypergeometric(20, I_arr_like_20, 10)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: ndarray[Any, dtype[{int_}]] - -reveal_type(random_st.randint(0, 100)) # E: int -reveal_type(random_st.randint(100)) # E: int -reveal_type(random_st.randint([100])) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.randint(0, [100])) # E: ndarray[Any, dtype[{int_}]] - 
-reveal_type(random_st.randint(2, dtype=bool)) # E: builtins.bool -reveal_type(random_st.randint(0, 2, dtype=bool)) # E: builtins.bool -reveal_type(random_st.randint(I_bool_high_open, dtype=bool)) # E: ndarray[Any, dtype[bool_] -reveal_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool)) # E: ndarray[Any, dtype[bool_] -reveal_type(random_st.randint(0, I_bool_high_open, dtype=bool)) # E: ndarray[Any, dtype[bool_] - -reveal_type(random_st.randint(2, dtype=np.bool_)) # E: builtins.bool -reveal_type(random_st.randint(0, 2, dtype=np.bool_)) # E: builtins.bool -reveal_type(random_st.randint(I_bool_high_open, dtype=np.bool_)) # E: ndarray[Any, dtype[bool_] -reveal_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool_)) # E: ndarray[Any, dtype[bool_] -reveal_type(random_st.randint(0, I_bool_high_open, dtype=np.bool_)) # E: ndarray[Any, dtype[bool_] - -reveal_type(random_st.randint(256, dtype="u1")) # E: int -reveal_type(random_st.randint(0, 256, dtype="u1")) # E: int -reveal_type(random_st.randint(I_u1_high_open, dtype="u1")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(random_st.randint(0, I_u1_high_open, dtype="u1")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] - -reveal_type(random_st.randint(256, dtype="uint8")) # E: int -reveal_type(random_st.randint(0, 256, dtype="uint8")) # E: int -reveal_type(random_st.randint(I_u1_high_open, dtype="uint8")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(random_st.randint(0, I_u1_high_open, dtype="uint8")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] - -reveal_type(random_st.randint(256, dtype=np.uint8)) # E: int -reveal_type(random_st.randint(0, 256, dtype=np.uint8)) # E: int -reveal_type(random_st.randint(I_u1_high_open, dtype=np.uint8)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] -reveal_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]] - -reveal_type(random_st.randint(65536, dtype="u2")) # E: int -reveal_type(random_st.randint(0, 65536, dtype="u2")) # E: int -reveal_type(random_st.randint(I_u2_high_open, dtype="u2")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(random_st.randint(0, I_u2_high_open, dtype="u2")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] - -reveal_type(random_st.randint(65536, dtype="uint16")) # E: int -reveal_type(random_st.randint(0, 65536, dtype="uint16")) # E: int -reveal_type(random_st.randint(I_u2_high_open, dtype="uint16")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(random_st.randint(0, I_u2_high_open, dtype="uint16")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] - -reveal_type(random_st.randint(65536, dtype=np.uint16)) # E: int -reveal_type(random_st.randint(0, 65536, dtype=np.uint16)) # E: int -reveal_type(random_st.randint(I_u2_high_open, 
dtype=np.uint16)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] -reveal_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]] - -reveal_type(random_st.randint(4294967296, dtype="u4")) # E: int -reveal_type(random_st.randint(0, 4294967296, dtype="u4")) # E: int -reveal_type(random_st.randint(I_u4_high_open, dtype="u4")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(random_st.randint(0, I_u4_high_open, dtype="u4")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] - -reveal_type(random_st.randint(4294967296, dtype="uint32")) # E: int -reveal_type(random_st.randint(0, 4294967296, dtype="uint32")) # E: int -reveal_type(random_st.randint(I_u4_high_open, dtype="uint32")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(random_st.randint(0, I_u4_high_open, dtype="uint32")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] - -reveal_type(random_st.randint(4294967296, dtype=np.uint32)) # E: int -reveal_type(random_st.randint(0, 4294967296, dtype=np.uint32)) # E: int -reveal_type(random_st.randint(I_u4_high_open, dtype=np.uint32)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] -reveal_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]] - -reveal_type(random_st.randint(4294967296, dtype=np.uint)) # E: int -reveal_type(random_st.randint(0, 4294967296, dtype=np.uint)) # E: int -reveal_type(random_st.randint(I_u4_high_open, dtype=np.uint)) # E: ndarray[Any, dtype[{uint}]] -reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint)) # E: ndarray[Any, dtype[{uint}]] -reveal_type(random_st.randint(0, I_u4_high_open, dtype=np.uint)) # E: ndarray[Any, dtype[{uint}]] - -reveal_type(random_st.randint(18446744073709551616, dtype="u8")) # E: int -reveal_type(random_st.randint(0, 18446744073709551616, dtype="u8")) # E: int -reveal_type(random_st.randint(I_u8_high_open, dtype="u8")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(random_st.randint(0, I_u8_high_open, dtype="u8")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] - -reveal_type(random_st.randint(18446744073709551616, dtype="uint64")) # E: int -reveal_type(random_st.randint(0, 18446744073709551616, dtype="uint64")) # E: int -reveal_type(random_st.randint(I_u8_high_open, dtype="uint64")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(random_st.randint(0, I_u8_high_open, dtype="uint64")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] - -reveal_type(random_st.randint(18446744073709551616, dtype=np.uint64)) # E: int -reveal_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64)) # E: int 
-reveal_type(random_st.randint(I_u8_high_open, dtype=np.uint64)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] -reveal_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]] - -reveal_type(random_st.randint(128, dtype="i1")) # E: int -reveal_type(random_st.randint(-128, 128, dtype="i1")) # E: int -reveal_type(random_st.randint(I_i1_high_open, dtype="i1")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(random_st.randint(-128, I_i1_high_open, dtype="i1")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] - -reveal_type(random_st.randint(128, dtype="int8")) # E: int -reveal_type(random_st.randint(-128, 128, dtype="int8")) # E: int -reveal_type(random_st.randint(I_i1_high_open, dtype="int8")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(random_st.randint(-128, I_i1_high_open, dtype="int8")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] - -reveal_type(random_st.randint(128, dtype=np.int8)) # E: int -reveal_type(random_st.randint(-128, 128, dtype=np.int8)) # E: int -reveal_type(random_st.randint(I_i1_high_open, dtype=np.int8)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] -reveal_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]] - -reveal_type(random_st.randint(32768, dtype="i2")) # E: int -reveal_type(random_st.randint(-32768, 32768, dtype="i2")) # E: int -reveal_type(random_st.randint(I_i2_high_open, dtype="i2")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(random_st.randint(-32768, I_i2_high_open, dtype="i2")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(random_st.randint(32768, dtype="int16")) # E: int -reveal_type(random_st.randint(-32768, 32768, dtype="int16")) # E: int -reveal_type(random_st.randint(I_i2_high_open, dtype="int16")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(random_st.randint(-32768, I_i2_high_open, dtype="int16")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(random_st.randint(32768, dtype=np.int16)) # E: int -reveal_type(random_st.randint(-32768, 32768, dtype=np.int16)) # E: int -reveal_type(random_st.randint(I_i2_high_open, dtype=np.int16)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] -reveal_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]] - -reveal_type(random_st.randint(2147483648, dtype="i4")) # E: int -reveal_type(random_st.randint(-2147483648, 2147483648, dtype="i4")) # E: int -reveal_type(random_st.randint(I_i4_high_open, 
dtype="i4")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] - -reveal_type(random_st.randint(2147483648, dtype="int32")) # E: int -reveal_type(random_st.randint(-2147483648, 2147483648, dtype="int32")) # E: int -reveal_type(random_st.randint(I_i4_high_open, dtype="int32")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] - -reveal_type(random_st.randint(2147483648, dtype=np.int32)) # E: int -reveal_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32)) # E: int -reveal_type(random_st.randint(I_i4_high_open, dtype=np.int32)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] -reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]] - -reveal_type(random_st.randint(2147483648, dtype=np.int_)) # E: int -reveal_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_)) # E: int -reveal_type(random_st.randint(I_i4_high_open, dtype=np.int_)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_)) # E: ndarray[Any, dtype[{int_}]] - -reveal_type(random_st.randint(9223372036854775808, dtype="i8")) # E: int -reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8")) # E: int -reveal_type(random_st.randint(I_i8_high_open, dtype="i8")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] - -reveal_type(random_st.randint(9223372036854775808, dtype="int64")) # E: int -reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64")) # E: int -reveal_type(random_st.randint(I_i8_high_open, dtype="int64")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] - -reveal_type(random_st.randint(9223372036854775808, dtype=np.int64)) # E: int -reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64)) # E: int -reveal_type(random_st.randint(I_i8_high_open, dtype=np.int64)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] -reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]] - 
-reveal_type(random_st._bit_generator) # E: BitGenerator - -reveal_type(random_st.bytes(2)) # E: bytes - -reveal_type(random_st.choice(5)) # E: int -reveal_type(random_st.choice(5, 3)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.choice(5, 3, replace=True)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: ndarray[Any, dtype[{int_}]] - -reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"])) # E: Any -reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)) # E: ndarray[Any, Any] -reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)) # E: ndarray[Any, Any] -reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)) # E: ndarray[Any, Any] -reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))) # E: ndarray[Any, Any] - -reveal_type(random_st.dirichlet([0.5, 0.5])) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.dirichlet(np.array([0.5, 0.5]))) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.dirichlet(np.array([0.5, 0.5]), size=3)) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.multinomial(20, [1 / 6.0] * 6)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.multinomial(20, np.array([0.5, 0.5]))) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.multinomial(20, [1 / 6.0] * 6, size=2)) # E: ndarray[Any, dtype[{int_}]] - -reveal_type(random_st.multivariate_normal([0.0], [[1.0]])) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.multivariate_normal([0.0], np.array([[1.0]]))) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.multivariate_normal(np.array([0.0]), [[1.0]])) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.multivariate_normal([0.0], np.array([[1.0]]))) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.permutation(10)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.permutation([1, 2, 3, 4])) # E: ndarray[Any, Any] -reveal_type(random_st.permutation(np.array([1, 2, 3, 4]))) # E: ndarray[Any, Any] -reveal_type(random_st.permutation(D_2D)) # E: ndarray[Any, Any] - -reveal_type(random_st.shuffle(np.arange(10))) # E: None -reveal_type(random_st.shuffle([1, 2, 3, 4, 5])) # E: None -reveal_type(random_st.shuffle(D_2D)) # E: None - -reveal_type(np.random.RandomState(pcg64)) # E: RandomState -reveal_type(np.random.RandomState(0)) # E: RandomState -reveal_type(np.random.RandomState([0, 1, 2])) # E: RandomState -reveal_type(random_st.__str__()) # E: str -reveal_type(random_st.__repr__()) # E: str +assert_type(random_st.standard_normal(), float) +assert_type(random_st.standard_normal(size=None), float) +assert_type(random_st.standard_normal(size=1), npt.NDArray[np.float64]) + +assert_type(random_st.random(), float) +assert_type(random_st.random(size=None), float) +assert_type(random_st.random(size=1), npt.NDArray[np.float64]) + +assert_type(random_st.standard_cauchy(), float) +assert_type(random_st.standard_cauchy(size=None), float) +assert_type(random_st.standard_cauchy(size=1), npt.NDArray[np.float64]) + +assert_type(random_st.standard_exponential(), float) +assert_type(random_st.standard_exponential(size=None), float) +assert_type(random_st.standard_exponential(size=1), 
npt.NDArray[np.float64]) + +assert_type(random_st.zipf(1.5), int) +assert_type(random_st.zipf(1.5, size=None), int) +assert_type(random_st.zipf(1.5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.zipf(D_arr_1p5), npt.NDArray[np.int_]) +assert_type(random_st.zipf(D_arr_1p5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.zipf(D_arr_like_1p5), npt.NDArray[np.int_]) +assert_type(random_st.zipf(D_arr_like_1p5, size=1), npt.NDArray[np.int_]) + +assert_type(random_st.weibull(0.5), float) +assert_type(random_st.weibull(0.5, size=None), float) +assert_type(random_st.weibull(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.standard_t(0.5), float) +assert_type(random_st.standard_t(0.5, size=None), float) +assert_type(random_st.standard_t(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.poisson(0.5), int) +assert_type(random_st.poisson(0.5, size=None), int) +assert_type(random_st.poisson(0.5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.poisson(D_arr_0p5), npt.NDArray[np.int_]) +assert_type(random_st.poisson(D_arr_0p5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.poisson(D_arr_like_0p5), npt.NDArray[np.int_]) +assert_type(random_st.poisson(D_arr_like_0p5, size=1), npt.NDArray[np.int_]) + +assert_type(random_st.power(0.5), float) +assert_type(random_st.power(0.5, size=None), float) +assert_type(random_st.power(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.pareto(0.5), float) +assert_type(random_st.pareto(0.5, size=None), float) +assert_type(random_st.pareto(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.chisquare(0.5), float) +assert_type(random_st.chisquare(0.5, size=None), float) +assert_type(random_st.chisquare(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.exponential(0.5), float) +assert_type(random_st.exponential(0.5, size=None), float) +assert_type(random_st.exponential(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_0p5), npt.NDArray[np.float64]) 
+assert_type(random_st.exponential(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.geometric(0.5), int) +assert_type(random_st.geometric(0.5, size=None), int) +assert_type(random_st.geometric(0.5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.geometric(D_arr_0p5), npt.NDArray[np.int_]) +assert_type(random_st.geometric(D_arr_0p5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.geometric(D_arr_like_0p5), npt.NDArray[np.int_]) +assert_type(random_st.geometric(D_arr_like_0p5, size=1), npt.NDArray[np.int_]) + +assert_type(random_st.logseries(0.5), int) +assert_type(random_st.logseries(0.5, size=None), int) +assert_type(random_st.logseries(0.5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.logseries(D_arr_0p5), npt.NDArray[np.int_]) +assert_type(random_st.logseries(D_arr_0p5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.logseries(D_arr_like_0p5), npt.NDArray[np.int_]) +assert_type(random_st.logseries(D_arr_like_0p5, size=1), npt.NDArray[np.int_]) + +assert_type(random_st.rayleigh(0.5), float) +assert_type(random_st.rayleigh(0.5, size=None), float) +assert_type(random_st.rayleigh(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.standard_gamma(0.5), float) +assert_type(random_st.standard_gamma(0.5, size=None), float) +assert_type(random_st.standard_gamma(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.vonmises(0.5, 0.5), float) +assert_type(random_st.vonmises(0.5, 0.5, size=None), float) +assert_type(random_st.vonmises(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.wald(0.5, 0.5), float) +assert_type(random_st.wald(0.5, 0.5, size=None), float) +assert_type(random_st.wald(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.wald(0.5, 
D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.wald(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.uniform(0.5, 0.5), float) +assert_type(random_st.uniform(0.5, 0.5, size=None), float) +assert_type(random_st.uniform(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.uniform(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.beta(0.5, 0.5), float) +assert_type(random_st.beta(0.5, 0.5, size=None), float) +assert_type(random_st.beta(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.beta(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.beta(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.f(0.5, 0.5), float) +assert_type(random_st.f(0.5, 0.5, size=None), float) +assert_type(random_st.f(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.f(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.f(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1), 
npt.NDArray[np.float64]) + +assert_type(random_st.gamma(0.5, 0.5), float) +assert_type(random_st.gamma(0.5, 0.5, size=None), float) +assert_type(random_st.gamma(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gamma(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.gumbel(0.5, 0.5), float) +assert_type(random_st.gumbel(0.5, 0.5, size=None), float) +assert_type(random_st.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.laplace(0.5, 0.5), float) +assert_type(random_st.laplace(0.5, 0.5, size=None), float) +assert_type(random_st.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.logistic(0.5, 0.5), float) +assert_type(random_st.logistic(0.5, 0.5, size=None), float) +assert_type(random_st.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_like_0p5, 0.5), 
npt.NDArray[np.float64]) +assert_type(random_st.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.lognormal(0.5, 0.5), float) +assert_type(random_st.lognormal(0.5, 0.5, size=None), float) +assert_type(random_st.lognormal(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.noncentral_chisquare(0.5, 0.5), float) +assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=None), float) +assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.normal(0.5, 0.5), float) +assert_type(random_st.normal(0.5, 0.5, size=None), float) +assert_type(random_st.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) 
+assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.triangular(0.1, 0.5, 0.9), float) +assert_type(random_st.triangular(0.1, 0.5, 0.9, size=None), float) +assert_type(random_st.triangular(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.noncentral_f(0.1, 0.5, 0.9), float) +assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=None), float) +assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.binomial(10, 0.5), int) +assert_type(random_st.binomial(10, 0.5, size=None), int) +assert_type(random_st.binomial(10, 0.5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.binomial(I_arr_10, 0.5), npt.NDArray[np.int_]) +assert_type(random_st.binomial(10, D_arr_0p5), npt.NDArray[np.int_]) +assert_type(random_st.binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.binomial(10, D_arr_0p5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.binomial(I_arr_like_10, 0.5), npt.NDArray[np.int_]) +assert_type(random_st.binomial(10, D_arr_like_0p5), npt.NDArray[np.int_]) +assert_type(random_st.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.int_]) +assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.int_]) +assert_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.int_]) + +assert_type(random_st.negative_binomial(10, 0.5), int) +assert_type(random_st.negative_binomial(10, 0.5, size=None), int) +assert_type(random_st.negative_binomial(10, 0.5, size=1), 
npt.NDArray[np.int_]) +assert_type(random_st.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.int_]) +assert_type(random_st.negative_binomial(10, D_arr_0p5), npt.NDArray[np.int_]) +assert_type(random_st.negative_binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.negative_binomial(10, D_arr_0p5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.int_]) +assert_type(random_st.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.int_]) +assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.int_]) +assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.int_]) +assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.int_]) +assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.int_]) + +assert_type(random_st.hypergeometric(20, 20, 10), int) +assert_type(random_st.hypergeometric(20, 20, 10, size=None), int) +assert_type(random_st.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.int_]) +assert_type(random_st.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int_]) +assert_type(random_st.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int_]) +assert_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.int_]) +assert_type(random_st.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.int_]) +assert_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int_]) +assert_type(random_st.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int_]) +assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int_]) +assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int_]) +assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.int_]) +assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.int_]) + +assert_type(random_st.randint(0, 100), int) +assert_type(random_st.randint(100), int) +assert_type(random_st.randint([100]), npt.NDArray[np.int_]) +assert_type(random_st.randint(0, [100]), npt.NDArray[np.int_]) + +assert_type(random_st.randint(2, dtype=bool), bool) +assert_type(random_st.randint(0, 2, dtype=bool), bool) +assert_type(random_st.randint(I_bool_high_open, dtype=bool), npt.NDArray[np.bool_]) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool_]) +assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool_]) + +assert_type(random_st.randint(2, dtype=np.bool_), bool) +assert_type(random_st.randint(0, 2, dtype=np.bool_), bool) +assert_type(random_st.randint(I_bool_high_open, dtype=np.bool_), npt.NDArray[np.bool_]) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool_), npt.NDArray[np.bool_]) +assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool_), npt.NDArray[np.bool_]) + +assert_type(random_st.randint(256, dtype="u1"), int) +assert_type(random_st.randint(0, 256, dtype="u1"), int) +assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) + +assert_type(random_st.randint(256, dtype="uint8"), int) +assert_type(random_st.randint(0, 256, dtype="uint8"), int) +assert_type(random_st.randint(I_u1_high_open, 
dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) + +assert_type(random_st.randint(256, dtype=np.uint8), int) +assert_type(random_st.randint(0, 256, dtype=np.uint8), int) +assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) + +assert_type(random_st.randint(65536, dtype="u2"), int) +assert_type(random_st.randint(0, 65536, dtype="u2"), int) +assert_type(random_st.randint(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) + +assert_type(random_st.randint(65536, dtype="uint16"), int) +assert_type(random_st.randint(0, 65536, dtype="uint16"), int) +assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) + +assert_type(random_st.randint(65536, dtype=np.uint16), int) +assert_type(random_st.randint(0, 65536, dtype=np.uint16), int) +assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) + +assert_type(random_st.randint(4294967296, dtype="u4"), int) +assert_type(random_st.randint(0, 4294967296, dtype="u4"), int) +assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) + +assert_type(random_st.randint(4294967296, dtype="uint32"), int) +assert_type(random_st.randint(0, 4294967296, dtype="uint32"), int) +assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) + +assert_type(random_st.randint(4294967296, dtype=np.uint32), int) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), int) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) + +assert_type(random_st.randint(4294967296, dtype=np.uint), int) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint), int) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) + +assert_type(random_st.randint(18446744073709551616, dtype="u8"), int) +assert_type(random_st.randint(0, 
18446744073709551616, dtype="u8"), int) +assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) + +assert_type(random_st.randint(18446744073709551616, dtype="uint64"), int) +assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), int) +assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) + +assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), int) +assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), int) +assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) + +assert_type(random_st.randint(128, dtype="i1"), int) +assert_type(random_st.randint(-128, 128, dtype="i1"), int) +assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) + +assert_type(random_st.randint(128, dtype="int8"), int) +assert_type(random_st.randint(-128, 128, dtype="int8"), int) +assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) + +assert_type(random_st.randint(128, dtype=np.int8), int) +assert_type(random_st.randint(-128, 128, dtype=np.int8), int) +assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) + +assert_type(random_st.randint(32768, dtype="i2"), int) +assert_type(random_st.randint(-32768, 32768, dtype="i2"), int) +assert_type(random_st.randint(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(random_st.randint(32768, dtype="int16"), int) +assert_type(random_st.randint(-32768, 32768, dtype="int16"), int) +assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(random_st.randint(32768, dtype=np.int16), int) +assert_type(random_st.randint(-32768, 32768, dtype=np.int16), int) +assert_type(random_st.randint(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) + 
+assert_type(random_st.randint(2147483648, dtype="i4"), int) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), int) +assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) + +assert_type(random_st.randint(2147483648, dtype="int32"), int) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), int) +assert_type(random_st.randint(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) + +assert_type(random_st.randint(2147483648, dtype=np.int32), int) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), int) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) + +assert_type(random_st.randint(2147483648, dtype=np.int_), int) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_), int) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) + +assert_type(random_st.randint(9223372036854775808, dtype="i8"), int) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8"), int) +assert_type(random_st.randint(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) + +assert_type(random_st.randint(9223372036854775808, dtype="int64"), int) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64"), int) +assert_type(random_st.randint(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) + +assert_type(random_st.randint(9223372036854775808, dtype=np.int64), int) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64), int) +assert_type(random_st.randint(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(random_st._bit_generator, np.random.BitGenerator) + +assert_type(random_st.bytes(2), bytes) + +assert_type(random_st.choice(5), int) +assert_type(random_st.choice(5, 3), npt.NDArray[np.int_]) +assert_type(random_st.choice(5, 3, replace=True), npt.NDArray[np.int_]) +assert_type(random_st.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.int_]) +assert_type(random_st.choice(5, 3, p=[1 / 5] * 5, replace=False), npt.NDArray[np.int_]) + +assert_type(random_st.choice(["pooh", "rabbit", "piglet", 
"Christopher"]), Any) +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), np.ndarray[Any, Any]) +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), np.ndarray[Any, Any]) +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), np.ndarray[Any, Any]) +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), np.ndarray[Any, Any]) + +assert_type(random_st.dirichlet([0.5, 0.5]), npt.NDArray[np.float64]) +assert_type(random_st.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64]) +assert_type(random_st.dirichlet(np.array([0.5, 0.5]), size=3), npt.NDArray[np.float64]) + +assert_type(random_st.multinomial(20, [1 / 6.0] * 6), npt.NDArray[np.int_]) +assert_type(random_st.multinomial(20, np.array([0.5, 0.5])), npt.NDArray[np.int_]) +assert_type(random_st.multinomial(20, [1 / 6.0] * 6, size=2), npt.NDArray[np.int_]) + +assert_type(random_st.multivariate_normal([0.0], [[1.0]]), npt.NDArray[np.float64]) +assert_type(random_st.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64]) +assert_type(random_st.multivariate_normal(np.array([0.0]), [[1.0]]), npt.NDArray[np.float64]) +assert_type(random_st.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64]) + +assert_type(random_st.permutation(10), npt.NDArray[np.int_]) +assert_type(random_st.permutation([1, 2, 3, 4]), np.ndarray[Any, Any]) +assert_type(random_st.permutation(np.array([1, 2, 3, 4])), np.ndarray[Any, Any]) +assert_type(random_st.permutation(D_2D), np.ndarray[Any, Any]) + +assert_type(random_st.shuffle(np.arange(10)), None) +assert_type(random_st.shuffle([1, 2, 3, 4, 5]), None) +assert_type(random_st.shuffle(D_2D), None) + +assert_type(np.random.RandomState(pcg64), np.random.RandomState) +assert_type(np.random.RandomState(0), np.random.RandomState) +assert_type(np.random.RandomState([0, 1, 2]), np.random.RandomState) +assert_type(random_st.__str__(), str) +assert_type(random_st.__repr__(), str) random_st_state = random_st.__getstate__() -reveal_type(random_st_state) # E: builtins.dict[builtins.str, Any] -reveal_type(random_st.__setstate__(random_st_state)) # E: None -reveal_type(random_st.seed()) # E: None -reveal_type(random_st.seed(1)) # E: None -reveal_type(random_st.seed([0, 1])) # E: None +assert_type(random_st_state, dict[str, Any]) +assert_type(random_st.__setstate__(random_st_state), None) +assert_type(random_st.seed(), None) +assert_type(random_st.seed(1), None) +assert_type(random_st.seed([0, 1]), None) random_st_get_state = random_st.get_state() -reveal_type(random_st_state) # E: builtins.dict[builtins.str, Any] +assert_type(random_st_state, dict[str, Any]) random_st_get_state_legacy = random_st.get_state(legacy=True) -reveal_type(random_st_get_state_legacy) # E: Union[builtins.dict[builtins.str, Any], tuple[builtins.str, ndarray[Any, dtype[unsignedinteger[typing._32Bit]]], builtins.int, builtins.int, builtins.float]] -reveal_type(random_st.set_state(random_st_get_state)) # E: None - -reveal_type(random_st.rand()) # E: float -reveal_type(random_st.rand(1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.rand(1, 2)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.randn()) # E: float -reveal_type(random_st.randn(1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.randn(1, 2)) # E: ndarray[Any, dtype[floating[typing._64Bit]] 
-reveal_type(random_st.random_sample()) # E: float -reveal_type(random_st.random_sample(1)) # E: ndarray[Any, dtype[floating[typing._64Bit]] -reveal_type(random_st.random_sample(size=(1, 2))) # E: ndarray[Any, dtype[floating[typing._64Bit]] - -reveal_type(random_st.tomaxint()) # E: int -reveal_type(random_st.tomaxint(1)) # E: ndarray[Any, dtype[{int_}]] -reveal_type(random_st.tomaxint((1,))) # E: ndarray[Any, dtype[{int_}]] - -reveal_type(np.random.set_bit_generator(pcg64)) # E: None -reveal_type(np.random.get_bit_generator()) # E: BitGenerator +assert_type(random_st_get_state_legacy, dict[str, Any] | tuple[str, npt.NDArray[np.uint32], int, int, float]) +assert_type(random_st.set_state(random_st_get_state), None) + +assert_type(random_st.rand(), float) +assert_type(random_st.rand(1), npt.NDArray[np.float64]) +assert_type(random_st.rand(1, 2), npt.NDArray[np.float64]) +assert_type(random_st.randn(), float) +assert_type(random_st.randn(1), npt.NDArray[np.float64]) +assert_type(random_st.randn(1, 2), npt.NDArray[np.float64]) +assert_type(random_st.random_sample(), float) +assert_type(random_st.random_sample(1), npt.NDArray[np.float64]) +assert_type(random_st.random_sample(size=(1, 2)), npt.NDArray[np.float64]) + +assert_type(random_st.tomaxint(), int) +assert_type(random_st.tomaxint(1), npt.NDArray[np.int_]) +assert_type(random_st.tomaxint((1,)), npt.NDArray[np.int_]) + +assert_type(np.random.set_bit_generator(pcg64), None) +assert_type(np.random.get_bit_generator(), np.random.BitGenerator) diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi index 8ea4a6ee8d9c..37408d839f51 100644 --- a/numpy/typing/tests/data/reveal/rec.pyi +++ b/numpy/typing/tests/data/reveal/rec.pyi @@ -1,9 +1,15 @@ import io +import sys from typing import Any import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + AR_i8: npt.NDArray[np.int64] REC_AR_V: np.recarray[Any, np.dtype[np.record]] AR_LIST: list[npt.NDArray[np.int64]] @@ -12,117 +18,150 @@ format_parser: np.format_parser record: np.record file_obj: io.BufferedIOBase -reveal_type(np.format_parser( # E: format_parser +assert_type(np.format_parser( formats=[np.float64, np.int64, np.bool_], names=["f8", "i8", "?"], titles=None, aligned=True, -)) -reveal_type(format_parser.dtype) # E: dtype[void] - -reveal_type(record.field_a) # E: Any -reveal_type(record.field_b) # E: Any -reveal_type(record["field_a"]) # E: Any -reveal_type(record["field_b"]) # E: Any -reveal_type(record.pprint()) # E: str +), np.format_parser) +assert_type(format_parser.dtype, np.dtype[np.void]) + +assert_type(record.field_a, Any) +assert_type(record.field_b, Any) +assert_type(record["field_a"], Any) +assert_type(record["field_b"], Any) +assert_type(record.pprint(), str) record.field_c = 5 -reveal_type(REC_AR_V.field(0)) # E: Any -reveal_type(REC_AR_V.field("field_a")) # E: Any -reveal_type(REC_AR_V.field(0, AR_i8)) # E: None -reveal_type(REC_AR_V.field("field_a", AR_i8)) # E: None -reveal_type(REC_AR_V["field_a"]) # E: Any -reveal_type(REC_AR_V.field_a) # E: Any -reveal_type(REC_AR_V.__array_finalize__(object())) # E: None +assert_type(REC_AR_V.field(0), Any) +assert_type(REC_AR_V.field("field_a"), Any) +assert_type(REC_AR_V.field(0, AR_i8), None) +assert_type(REC_AR_V.field("field_a", AR_i8), None) +assert_type(REC_AR_V["field_a"], npt.NDArray[Any]) +assert_type(REC_AR_V.field_a, Any) +assert_type(REC_AR_V.__array_finalize__(object()), None) 
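For readers unfamiliar with the convention, here is a standalone sketch (editorial, not part of the patch) of the import pattern these stubs use, and a reminder that assert_type does nothing at runtime; it only asks a static type checker such as mypy to verify the inferred type.

    import sys

    if sys.version_info >= (3, 11):
        from typing import assert_type
    else:
        from typing_extensions import assert_type

    import numpy as np
    import numpy.typing as npt

    arr: npt.NDArray[np.float64] = np.zeros(3)
    assert_type(arr, npt.NDArray[np.float64])  # checked statically, a no-op at runtime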
-reveal_type(np.recarray( # recarray[Any, dtype[record]] - shape=(10, 5), - formats=[np.float64, np.int64, np.bool_], - order="K", - byteorder="|", -)) -reveal_type(np.recarray( # recarray[Any, dtype[Any]] - shape=(10, 5), - dtype=[("f8", np.float64), ("i8", np.int64)], - strides=(5, 5), -)) - -reveal_type(np.rec.fromarrays( # recarray[Any, dtype[record]] - AR_LIST, -)) -reveal_type(np.rec.fromarrays( # recarray[Any, dtype[Any]] - AR_LIST, - dtype=np.int64, -)) -reveal_type(np.rec.fromarrays( # recarray[Any, dtype[Any]] - AR_LIST, - formats=[np.int64, np.float64], - names=["i8", "f8"] -)) - -reveal_type(np.rec.fromrecords( # recarray[Any, dtype[record]] - (1, 1.5), -)) -reveal_type(np.rec.fromrecords( # recarray[Any, dtype[record]] - [(1, 1.5)], - dtype=[("i8", np.int64), ("f8", np.float64)], -)) -reveal_type(np.rec.fromrecords( # recarray[Any, dtype[record]] - REC_AR_V, - formats=[np.int64, np.float64], - names=["i8", "f8"] -)) - -reveal_type(np.rec.fromstring( # recarray[Any, dtype[record]] - b"(1, 1.5)", - dtype=[("i8", np.int64), ("f8", np.float64)], -)) -reveal_type(np.rec.fromstring( # recarray[Any, dtype[record]] - REC_AR_V, - formats=[np.int64, np.float64], - names=["i8", "f8"] -)) - -reveal_type(np.rec.fromfile( # recarray[Any, dtype[Any]] +assert_type( + np.recarray( + shape=(10, 5), + formats=[np.float64, np.int64, np.bool_], + order="K", + byteorder="|", + ), + np.recarray[Any, np.dtype[np.record]], +) + +assert_type( + np.recarray( + shape=(10, 5), + dtype=[("f8", np.float64), ("i8", np.int64)], + strides=(5, 5), + ), + np.recarray[Any, np.dtype[Any]], +) + +assert_type(np.rec.fromarrays(AR_LIST), np.recarray[Any, np.dtype[Any]]) +assert_type( + np.rec.fromarrays(AR_LIST, dtype=np.int64), + np.recarray[Any, np.dtype[Any]], +) +assert_type( + np.rec.fromarrays( + AR_LIST, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + np.recarray[Any, np.dtype[np.record]], +) + +assert_type(np.rec.fromrecords((1, 1.5)), np.recarray[Any, np.dtype[np.record]]) +assert_type( + np.rec.fromrecords( + [(1, 1.5)], + dtype=[("i8", np.int64), ("f8", np.float64)], + ), + np.recarray[Any, np.dtype[np.record]], +) +assert_type( + np.rec.fromrecords( + REC_AR_V, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + np.recarray[Any, np.dtype[np.record]], +) + +assert_type( + np.rec.fromstring( + b"(1, 1.5)", + dtype=[("i8", np.int64), ("f8", np.float64)], + ), + np.recarray[Any, np.dtype[np.record]], +) +assert_type( + np.rec.fromstring( + REC_AR_V, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + np.recarray[Any, np.dtype[np.record]], +) + +assert_type(np.rec.fromfile( "test_file.txt", dtype=[("i8", np.int64), ("f8", np.float64)], -)) -reveal_type(np.rec.fromfile( # recarray[Any, dtype[record]] - file_obj, - formats=[np.int64, np.float64], - names=["i8", "f8"] -)) - -reveal_type(np.rec.array( # recarray[Any, dtype[{int64}]] - AR_i8, -)) -reveal_type(np.rec.array( # recarray[Any, dtype[Any]] - [(1, 1.5)], - dtype=[("i8", np.int64), ("f8", np.float64)], -)) -reveal_type(np.rec.array( # recarray[Any, dtype[record]] - [(1, 1.5)], - formats=[np.int64, np.float64], - names=["i8", "f8"] -)) - -reveal_type(np.rec.array( # recarray[Any, dtype[Any]] - None, - dtype=np.float64, - shape=(10, 3), -)) -reveal_type(np.rec.array( # recarray[Any, dtype[Any]] - None, - formats=[np.int64, np.float64], - names=["i8", "f8"], - shape=(10, 3), -)) -reveal_type(np.rec.array( # recarray[Any, dtype[Any]] - file_obj, - dtype=np.float64, -)) -reveal_type(np.rec.array( # recarray[Any, dtype[Any]] - 
file_obj, - formats=[np.int64, np.float64], - names=["i8", "f8"], -)) +), np.recarray[Any, np.dtype[Any]]) + +assert_type( + np.rec.fromfile( + file_obj, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + np.recarray[Any, np.dtype[np.record]], +) + +assert_type(np.rec.array(AR_i8), np.recarray[Any, np.dtype[np.int64]]) + +assert_type( + np.rec.array([(1, 1.5)], dtype=[("i8", np.int64), ("f8", np.float64)]), + np.recarray[Any, np.dtype[Any]], +) + +assert_type( + np.rec.array( + [(1, 1.5)], + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + np.recarray[Any, np.dtype[np.record]], +) + +assert_type( + np.rec.array( + None, + dtype=np.float64, + shape=(10, 3), + ), + np.recarray[Any, np.dtype[Any]], +) + +assert_type( + np.rec.array( + None, + formats=[np.int64, np.float64], + names=["i8", "f8"], + shape=(10, 3), + ), + np.recarray[Any, np.dtype[np.record]], +) + +assert_type( + np.rec.array(file_obj, dtype=np.float64), + np.recarray[Any, np.dtype[Any]], +) + +assert_type( + np.rec.array(file_obj, formats=[np.int64, np.float64], names=["i8", "f8"]), + np.recarray[Any, np.dtype[np.record]], +) diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index 88404d9d0215..6b134f7432f4 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -1,4 +1,13 @@ +import sys +from typing import Any, Literal + import numpy as np +import numpy.typing as npt + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type b: np.bool_ u8: np.uint64 @@ -11,146 +20,143 @@ U: np.str_ S: np.bytes_ V: np.void -reveal_type(c8.real) # E: {float32} -reveal_type(c8.imag) # E: {float32} +assert_type(c8.real, np.float32) +assert_type(c8.imag, np.float32) -reveal_type(c8.real.real) # E: {float32} -reveal_type(c8.real.imag) # E: {float32} +assert_type(c8.real.real, np.float32) +assert_type(c8.real.imag, np.float32) -reveal_type(c8.itemsize) # E: int -reveal_type(c8.shape) # E: tuple[] -reveal_type(c8.strides) # E: tuple[] +assert_type(c8.itemsize, int) +assert_type(c8.shape, tuple[()]) +assert_type(c8.strides, tuple[()]) -reveal_type(c8.ndim) # E: Literal[0] -reveal_type(c8.size) # E: Literal[1] +assert_type(c8.ndim, Literal[0]) +assert_type(c8.size, Literal[1]) -reveal_type(c8.squeeze()) # E: {complex64} -reveal_type(c8.byteswap()) # E: {complex64} -reveal_type(c8.transpose()) # E: {complex64} +assert_type(c8.squeeze(), np.complex64) +assert_type(c8.byteswap(), np.complex64) +assert_type(c8.transpose(), np.complex64) -reveal_type(c8.dtype) # E: dtype[{complex64}] +assert_type(c8.dtype, np.dtype[np.complex64]) -reveal_type(c8.real) # E: {float32} -reveal_type(c16.imag) # E: {float64} +assert_type(c8.real, np.float32) +assert_type(c16.imag, np.float64) -reveal_type(np.unicode_('foo')) # E: str_ +assert_type(np.str_('foo'), np.str_) -reveal_type(V[0]) # E: Any -reveal_type(V["field1"]) # E: Any -reveal_type(V[["field1", "field2"]]) # E: void +assert_type(V[0], Any) +assert_type(V["field1"], Any) +assert_type(V[["field1", "field2"]], np.void) V[0] = 5 # Aliases -reveal_type(np.unicode_()) # E: str_ -reveal_type(np.string_()) # E: bytes_ - -reveal_type(np.byte()) # E: {byte} -reveal_type(np.short()) # E: {short} -reveal_type(np.intc()) # E: {intc} -reveal_type(np.intp()) # E: {intp} -reveal_type(np.int_()) # E: {int_} -reveal_type(np.longlong()) # E: {longlong} - -reveal_type(np.ubyte()) # E: {ubyte} -reveal_type(np.ushort()) # E: {ushort} 
-reveal_type(np.uintc()) # E: {uintc} -reveal_type(np.uintp()) # E: {uintp} -reveal_type(np.uint()) # E: {uint} -reveal_type(np.ulonglong()) # E: {ulonglong} - -reveal_type(np.half()) # E: {half} -reveal_type(np.single()) # E: {single} -reveal_type(np.double()) # E: {double} -reveal_type(np.float_()) # E: {double} -reveal_type(np.longdouble()) # E: {longdouble} -reveal_type(np.longfloat()) # E: {longdouble} - -reveal_type(np.csingle()) # E: {csingle} -reveal_type(np.singlecomplex()) # E: {csingle} -reveal_type(np.cdouble()) # E: {cdouble} -reveal_type(np.complex_()) # E: {cdouble} -reveal_type(np.cfloat()) # E: {cdouble} -reveal_type(np.clongdouble()) # E: {clongdouble} -reveal_type(np.clongfloat()) # E: {clongdouble} -reveal_type(np.longcomplex()) # E: {clongdouble} - -reveal_type(b.item()) # E: bool -reveal_type(i8.item()) # E: int -reveal_type(u8.item()) # E: int -reveal_type(f8.item()) # E: float -reveal_type(c16.item()) # E: complex -reveal_type(U.item()) # E: str -reveal_type(S.item()) # E: bytes - -reveal_type(b.tolist()) # E: bool -reveal_type(i8.tolist()) # E: int -reveal_type(u8.tolist()) # E: int -reveal_type(f8.tolist()) # E: float -reveal_type(c16.tolist()) # E: complex -reveal_type(U.tolist()) # E: str -reveal_type(S.tolist()) # E: bytes - -reveal_type(b.ravel()) # E: ndarray[Any, dtype[bool_]] -reveal_type(i8.ravel()) # E: ndarray[Any, dtype[{int64}]] -reveal_type(u8.ravel()) # E: ndarray[Any, dtype[{uint64}]] -reveal_type(f8.ravel()) # E: ndarray[Any, dtype[{float64}]] -reveal_type(c16.ravel()) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(U.ravel()) # E: ndarray[Any, dtype[str_]] -reveal_type(S.ravel()) # E: ndarray[Any, dtype[bytes_]] - -reveal_type(b.flatten()) # E: ndarray[Any, dtype[bool_]] -reveal_type(i8.flatten()) # E: ndarray[Any, dtype[{int64}]] -reveal_type(u8.flatten()) # E: ndarray[Any, dtype[{uint64}]] -reveal_type(f8.flatten()) # E: ndarray[Any, dtype[{float64}]] -reveal_type(c16.flatten()) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(U.flatten()) # E: ndarray[Any, dtype[str_]] -reveal_type(S.flatten()) # E: ndarray[Any, dtype[bytes_]] - -reveal_type(b.reshape(1)) # E: ndarray[Any, dtype[bool_]] -reveal_type(i8.reshape(1)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(u8.reshape(1)) # E: ndarray[Any, dtype[{uint64}]] -reveal_type(f8.reshape(1)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(c16.reshape(1)) # E: ndarray[Any, dtype[{complex128}]] -reveal_type(U.reshape(1)) # E: ndarray[Any, dtype[str_]] -reveal_type(S.reshape(1)) # E: ndarray[Any, dtype[bytes_]] - -reveal_type(i8.astype(float)) # E: Any -reveal_type(i8.astype(np.float64)) # E: {float64} - -reveal_type(i8.view()) # E: {int64} -reveal_type(i8.view(np.float64)) # E: {float64} -reveal_type(i8.view(float)) # E: Any -reveal_type(i8.view(np.float64, np.ndarray)) # E: {float64} - -reveal_type(i8.getfield(float)) # E: Any -reveal_type(i8.getfield(np.float64)) # E: {float64} -reveal_type(i8.getfield(np.float64, 8)) # E: {float64} - -reveal_type(f8.as_integer_ratio()) # E: tuple[builtins.int, builtins.int] -reveal_type(f8.is_integer()) # E: bool -reveal_type(f8.__trunc__()) # E: int -reveal_type(f8.__getformat__("float")) # E: str -reveal_type(f8.hex()) # E: str -reveal_type(np.float64.fromhex("0x0.0p+0")) # E: {float64} - -reveal_type(f8.__getnewargs__()) # E: tuple[builtins.float] -reveal_type(c16.__getnewargs__()) # E: tuple[builtins.float, builtins.float] - -reveal_type(i8.numerator) # E: {int64} -reveal_type(i8.denominator) # E: Literal[1] -reveal_type(u8.numerator) # E: {uint64} 
-reveal_type(u8.denominator) # E: Literal[1] -reveal_type(m.numerator) # E: timedelta64 -reveal_type(m.denominator) # E: Literal[1] - -reveal_type(round(i8)) # E: int -reveal_type(round(i8, 3)) # E: {int64} -reveal_type(round(u8)) # E: int -reveal_type(round(u8, 3)) # E: {uint64} -reveal_type(round(f8)) # E: int -reveal_type(round(f8, 3)) # E: {float64} - -reveal_type(f8.__ceil__()) # E: int -reveal_type(f8.__floor__()) # E: int - -reveal_type(i8.is_integer()) # E: Literal[True] +assert_type(np.byte(), np.byte) +assert_type(np.short(), np.short) +assert_type(np.intc(), np.intc) +assert_type(np.intp(), np.intp) +assert_type(np.int_(), np.int_) +assert_type(np.longlong(), np.longlong) + +assert_type(np.ubyte(), np.ubyte) +assert_type(np.ushort(), np.ushort) +assert_type(np.uintc(), np.uintc) +assert_type(np.uintp(), np.uintp) +assert_type(np.uint(), np.uint) +assert_type(np.ulonglong(), np.ulonglong) + +assert_type(np.half(), np.half) +assert_type(np.single(), np.single) +assert_type(np.double(), np.double) +assert_type(np.longdouble(), np.longdouble) +assert_type(np.float_(), np.float_) +assert_type(np.longfloat(), np.longfloat) + +assert_type(np.csingle(), np.csingle) +assert_type(np.cdouble(), np.cdouble) +assert_type(np.clongdouble(), np.clongdouble) +assert_type(np.singlecomplex(), np.singlecomplex) +assert_type(np.complex_(), np.complex_) +assert_type(np.cfloat(), np.cfloat) +assert_type(np.clongfloat(), np.clongfloat) +assert_type(np.longcomplex(), np.longcomplex) + +assert_type(b.item(), bool) +assert_type(i8.item(), int) +assert_type(u8.item(), int) +assert_type(f8.item(), float) +assert_type(c16.item(), complex) +assert_type(U.item(), str) +assert_type(S.item(), bytes) + +assert_type(b.tolist(), bool) +assert_type(i8.tolist(), int) +assert_type(u8.tolist(), int) +assert_type(f8.tolist(), float) +assert_type(c16.tolist(), complex) +assert_type(U.tolist(), str) +assert_type(S.tolist(), bytes) + +assert_type(b.ravel(), npt.NDArray[np.bool_]) +assert_type(i8.ravel(), npt.NDArray[np.int64]) +assert_type(u8.ravel(), npt.NDArray[np.uint64]) +assert_type(f8.ravel(), npt.NDArray[np.float64]) +assert_type(c16.ravel(), npt.NDArray[np.complex128]) +assert_type(U.ravel(), npt.NDArray[np.str_]) +assert_type(S.ravel(), npt.NDArray[np.bytes_]) + +assert_type(b.flatten(), npt.NDArray[np.bool_]) +assert_type(i8.flatten(), npt.NDArray[np.int64]) +assert_type(u8.flatten(), npt.NDArray[np.uint64]) +assert_type(f8.flatten(), npt.NDArray[np.float64]) +assert_type(c16.flatten(), npt.NDArray[np.complex128]) +assert_type(U.flatten(), npt.NDArray[np.str_]) +assert_type(S.flatten(), npt.NDArray[np.bytes_]) + +assert_type(b.reshape(1), npt.NDArray[np.bool_]) +assert_type(i8.reshape(1), npt.NDArray[np.int64]) +assert_type(u8.reshape(1), npt.NDArray[np.uint64]) +assert_type(f8.reshape(1), npt.NDArray[np.float64]) +assert_type(c16.reshape(1), npt.NDArray[np.complex128]) +assert_type(U.reshape(1), npt.NDArray[np.str_]) +assert_type(S.reshape(1), npt.NDArray[np.bytes_]) + +assert_type(i8.astype(float), Any) +assert_type(i8.astype(np.float64), np.float64) + +assert_type(i8.view(), np.int64) +assert_type(i8.view(np.float64), np.float64) +assert_type(i8.view(float), Any) +assert_type(i8.view(np.float64, np.ndarray), np.float64) + +assert_type(i8.getfield(float), Any) +assert_type(i8.getfield(np.float64), np.float64) +assert_type(i8.getfield(np.float64, 8), np.float64) + +assert_type(f8.as_integer_ratio(), tuple[int, int]) +assert_type(f8.is_integer(), bool) +assert_type(f8.__trunc__(), int) 
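As a quick editorial illustration (not part of the patch) of the .item() and .tolist() assertions above: both unwrap a NumPy scalar into the matching Python builtin.

    import numpy as np

    print(type(np.int64(3).item()))        # <class 'int'>
    print(type(np.float64(2.5).item()))    # <class 'float'>
    print(type(np.complex128(1j).item()))  # <class 'complex'>
    print(type(np.str_("spam").item()))    # <class 'str'>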
+assert_type(f8.__getformat__("float"), str) +assert_type(f8.hex(), str) +assert_type(np.float64.fromhex("0x0.0p+0"), np.float64) + +assert_type(f8.__getnewargs__(), tuple[float]) +assert_type(c16.__getnewargs__(), tuple[float, float]) + +assert_type(i8.numerator, np.int64) +assert_type(i8.denominator, Literal[1]) +assert_type(u8.numerator, np.uint64) +assert_type(u8.denominator, Literal[1]) +assert_type(m.numerator, np.timedelta64) +assert_type(m.denominator, Literal[1]) + +assert_type(round(i8), int) +assert_type(round(i8, 3), np.int64) +assert_type(round(u8), int) +assert_type(round(u8, 3), np.uint64) +assert_type(round(f8), int) +assert_type(round(f8, 3), np.float64) + +assert_type(f8.__ceil__(), int) +assert_type(f8.__floor__(), int) + +assert_type(i8.is_integer(), Literal[True]) diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi index b907a4328039..db75d1b015ac 100644 --- a/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/numpy/typing/tests/data/reveal/shape_base.pyi @@ -1,57 +1,65 @@ -import numpy as np -from numpy._typing import NDArray +import sys from typing import Any +import numpy as np +import numpy.typing as npt +from numpy.lib.shape_base import _ArrayPrepare, _ArrayWrap + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + i8: np.int64 f8: np.float64 -AR_b: NDArray[np.bool_] -AR_i8: NDArray[np.int64] -AR_f8: NDArray[np.float64] +AR_b: npt.NDArray[np.bool_] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] AR_LIKE_f8: list[float] -reveal_type(np.take_along_axis(AR_f8, AR_i8, axis=1)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.take_along_axis(f8, AR_i8, axis=None)) # E: ndarray[Any, dtype[{float64}]] +assert_type(np.take_along_axis(AR_f8, AR_i8, axis=1), npt.NDArray[np.float64]) +assert_type(np.take_along_axis(f8, AR_i8, axis=None), npt.NDArray[np.float64]) -reveal_type(np.put_along_axis(AR_f8, AR_i8, "1.0", axis=1)) # E: None +assert_type(np.put_along_axis(AR_f8, AR_i8, "1.0", axis=1), None) -reveal_type(np.expand_dims(AR_i8, 2)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.expand_dims(AR_LIKE_f8, 2)) # E: ndarray[Any, dtype[Any]] +assert_type(np.expand_dims(AR_i8, 2), npt.NDArray[np.int64]) +assert_type(np.expand_dims(AR_LIKE_f8, 2), npt.NDArray[Any]) -reveal_type(np.column_stack([AR_i8])) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.column_stack([AR_LIKE_f8])) # E: ndarray[Any, dtype[Any]] +assert_type(np.column_stack([AR_i8]), npt.NDArray[np.int64]) +assert_type(np.column_stack([AR_LIKE_f8]), npt.NDArray[Any]) -reveal_type(np.dstack([AR_i8])) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.dstack([AR_LIKE_f8])) # E: ndarray[Any, dtype[Any]] +assert_type(np.dstack([AR_i8]), npt.NDArray[np.int64]) +assert_type(np.dstack([AR_LIKE_f8]), npt.NDArray[Any]) -reveal_type(np.row_stack([AR_i8])) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.row_stack([AR_LIKE_f8])) # E: ndarray[Any, dtype[Any]] +assert_type(np.row_stack([AR_i8]), npt.NDArray[np.int64]) +assert_type(np.row_stack([AR_LIKE_f8]), npt.NDArray[Any]) -reveal_type(np.array_split(AR_i8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[{int64}]]] -reveal_type(np.array_split(AR_LIKE_f8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[Any]]] +assert_type(np.array_split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.array_split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) -reveal_type(np.split(AR_i8, [3, 5, 6, 10])) # E: list[ndarray[Any, 
dtype[{int64}]]] -reveal_type(np.split(AR_LIKE_f8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[Any]]] +assert_type(np.split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) -reveal_type(np.hsplit(AR_i8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[{int64}]]] -reveal_type(np.hsplit(AR_LIKE_f8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[Any]]] +assert_type(np.hsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.hsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) -reveal_type(np.vsplit(AR_i8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[{int64}]]] -reveal_type(np.vsplit(AR_LIKE_f8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[Any]]] +assert_type(np.vsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.vsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) -reveal_type(np.dsplit(AR_i8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[{int64}]]] -reveal_type(np.dsplit(AR_LIKE_f8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[Any]]] +assert_type(np.dsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.dsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) -reveal_type(np.lib.shape_base.get_array_prepare(AR_i8)) # E: lib.shape_base._ArrayPrepare -reveal_type(np.lib.shape_base.get_array_prepare(AR_i8, 1)) # E: Union[None, lib.shape_base._ArrayPrepare] +assert_type(np.lib.shape_base.get_array_prepare(AR_i8), _ArrayPrepare) +assert_type(np.lib.shape_base.get_array_prepare(AR_i8, 1), None | _ArrayPrepare) -reveal_type(np.get_array_wrap(AR_i8)) # E: lib.shape_base._ArrayWrap -reveal_type(np.get_array_wrap(AR_i8, 1)) # E: Union[None, lib.shape_base._ArrayWrap] +assert_type(np.get_array_wrap(AR_i8), _ArrayWrap) +assert_type(np.get_array_wrap(AR_i8, 1), None | _ArrayWrap) -reveal_type(np.kron(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.kron(AR_b, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.kron(AR_f8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]] +assert_type(np.kron(AR_b, AR_b), npt.NDArray[np.bool_]) +assert_type(np.kron(AR_b, AR_i8), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) -reveal_type(np.tile(AR_i8, 5)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.tile(AR_LIKE_f8, [2, 2])) # E: ndarray[Any, dtype[Any]] +assert_type(np.tile(AR_i8, 5), npt.NDArray[np.int64]) +assert_type(np.tile(AR_LIKE_f8, [2, 2]), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/stride_tricks.pyi b/numpy/typing/tests/data/reveal/stride_tricks.pyi index 17769dc4bb39..68e1eeac98fb 100644 --- a/numpy/typing/tests/data/reveal/stride_tricks.pyi +++ b/numpy/typing/tests/data/reveal/stride_tricks.pyi @@ -1,28 +1,36 @@ +import sys from typing import Any + import numpy as np import numpy.typing as npt +from numpy.lib.stride_tricks import DummyArray + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type AR_f8: npt.NDArray[np.float64] AR_LIKE_f: list[float] interface_dict: dict[str, Any] -reveal_type(np.lib.stride_tricks.DummyArray(interface_dict)) # E: lib.stride_tricks.DummyArray +assert_type(np.lib.stride_tricks.DummyArray(interface_dict), DummyArray) -reveal_type(np.lib.stride_tricks.as_strided(AR_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.lib.stride_tricks.as_strided(AR_LIKE_f)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.lib.stride_tricks.as_strided(AR_f8, strides=(1, 5))) # E: 
ndarray[Any, dtype[{float64}]] -reveal_type(np.lib.stride_tricks.as_strided(AR_f8, shape=[9, 20])) # E: ndarray[Any, dtype[{float64}]] +assert_type(np.lib.stride_tricks.as_strided(AR_f8), npt.NDArray[np.float64]) +assert_type(np.lib.stride_tricks.as_strided(AR_LIKE_f), npt.NDArray[Any]) +assert_type(np.lib.stride_tricks.as_strided(AR_f8, strides=(1, 5)), npt.NDArray[np.float64]) +assert_type(np.lib.stride_tricks.as_strided(AR_f8, shape=[9, 20]), npt.NDArray[np.float64]) -reveal_type(np.lib.stride_tricks.sliding_window_view(AR_f8, 5)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.lib.stride_tricks.sliding_window_view(AR_LIKE_f, (1, 5))) # E: ndarray[Any, dtype[Any]] -reveal_type(np.lib.stride_tricks.sliding_window_view(AR_f8, [9], axis=1)) # E: ndarray[Any, dtype[{float64}]] +assert_type(np.lib.stride_tricks.sliding_window_view(AR_f8, 5), npt.NDArray[np.float64]) +assert_type(np.lib.stride_tricks.sliding_window_view(AR_LIKE_f, (1, 5)), npt.NDArray[Any]) +assert_type(np.lib.stride_tricks.sliding_window_view(AR_f8, [9], axis=1), npt.NDArray[np.float64]) -reveal_type(np.broadcast_to(AR_f8, 5)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.broadcast_to(AR_LIKE_f, (1, 5))) # E: ndarray[Any, dtype[Any]] -reveal_type(np.broadcast_to(AR_f8, [4, 6], subok=True)) # E: ndarray[Any, dtype[{float64}]] +assert_type(np.broadcast_to(AR_f8, 5), npt.NDArray[np.float64]) +assert_type(np.broadcast_to(AR_LIKE_f, (1, 5)), npt.NDArray[Any]) +assert_type(np.broadcast_to(AR_f8, [4, 6], subok=True), npt.NDArray[np.float64]) -reveal_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2))) # E: tuple[builtins.int, ...] -reveal_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7))) # E: tuple[builtins.int, ...] +assert_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2)), tuple[int, ...]) +assert_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)), tuple[int, ...]) -reveal_type(np.broadcast_arrays(AR_f8, AR_f8)) # E: list[ndarray[Any, dtype[Any]]] -reveal_type(np.broadcast_arrays(AR_f8, AR_LIKE_f)) # E: list[ndarray[Any, dtype[Any]]] +assert_type(np.broadcast_arrays(AR_f8, AR_f8), list[npt.NDArray[Any]]) +assert_type(np.broadcast_arrays(AR_f8, AR_LIKE_f), list[npt.NDArray[Any]]) diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index 5c35731d3ba0..ecd74e9aa3d3 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -1,7 +1,9 @@ -from __future__ import annotations - import re import sys +import warnings +import types +import unittest +import contextlib from collections.abc import Callable from typing import Any, TypeVar from pathlib import Path @@ -9,6 +11,11 @@ from pathlib import Path import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + AR_f8: npt.NDArray[np.float64] AR_i8: npt.NDArray[np.int64] @@ -23,154 +30,174 @@ def func2( y: npt.NDArray[np.number[Any]], ) -> npt.NDArray[np.bool_]: ... 
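The broadcast_shapes assertions above pin the return type to tuple[int, ...]; a tiny runnable check (editorial, not part of the patch) using the same inputs:

    import numpy as np

    print(np.broadcast_shapes((1, 2), [3, 1], (3, 2)))           # (3, 2)
    print(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)))  # (5, 6, 7)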
-reveal_type(np.testing.KnownFailureException()) # E: KnownFailureException -reveal_type(np.testing.IgnoreException()) # E: IgnoreException - -reveal_type(np.testing.clear_and_catch_warnings(modules=[np.testing])) # E: _clear_and_catch_warnings_without_records -reveal_type(np.testing.clear_and_catch_warnings(True)) # E: _clear_and_catch_warnings_with_records -reveal_type(np.testing.clear_and_catch_warnings(False)) # E: _clear_and_catch_warnings_without_records -reveal_type(np.testing.clear_and_catch_warnings(bool_obj)) # E: clear_and_catch_warnings -reveal_type(np.testing.clear_and_catch_warnings.class_modules) # E: tuple[types.ModuleType, ...] -reveal_type(np.testing.clear_and_catch_warnings.modules) # E: set[types.ModuleType] +assert_type(np.testing.KnownFailureException(), np.testing.KnownFailureException) +assert_type(np.testing.IgnoreException(), np.testing.IgnoreException) + +assert_type( + np.testing.clear_and_catch_warnings(modules=[np.testing]), + np.testing._private.utils._clear_and_catch_warnings_without_records, +) +assert_type( + np.testing.clear_and_catch_warnings(True), + np.testing._private.utils._clear_and_catch_warnings_with_records, +) +assert_type( + np.testing.clear_and_catch_warnings(False), + np.testing._private.utils._clear_and_catch_warnings_without_records, +) +assert_type( + np.testing.clear_and_catch_warnings(bool_obj), + np.testing.clear_and_catch_warnings, +) +assert_type( + np.testing.clear_and_catch_warnings.class_modules, + tuple[types.ModuleType, ...], +) +assert_type( + np.testing.clear_and_catch_warnings.modules, + set[types.ModuleType], +) with np.testing.clear_and_catch_warnings(True) as c1: - reveal_type(c1) # E: builtins.list[warnings.WarningMessage] + assert_type(c1, list[warnings.WarningMessage]) with np.testing.clear_and_catch_warnings() as c2: - reveal_type(c2) # E: None + assert_type(c2, None) -reveal_type(np.testing.suppress_warnings("once")) # E: suppress_warnings -reveal_type(np.testing.suppress_warnings()(func)) # E: def () -> builtins.int -reveal_type(suppress_obj.filter(RuntimeWarning)) # E: None -reveal_type(suppress_obj.record(RuntimeWarning)) # E: list[warnings.WarningMessage] +assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) +assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) +assert_type(suppress_obj.filter(RuntimeWarning), None) +assert_type(suppress_obj.record(RuntimeWarning), list[warnings.WarningMessage]) with suppress_obj as c3: - reveal_type(c3) # E: suppress_warnings + assert_type(c3, np.testing.suppress_warnings) -reveal_type(np.testing.verbose) # E: int -reveal_type(np.testing.IS_PYPY) # E: bool -reveal_type(np.testing.HAS_REFCOUNT) # E: bool -reveal_type(np.testing.HAS_LAPACK64) # E: bool +assert_type(np.testing.verbose, int) +assert_type(np.testing.IS_PYPY, bool) +assert_type(np.testing.HAS_REFCOUNT, bool) +assert_type(np.testing.HAS_LAPACK64, bool) -reveal_type(np.testing.assert_(1, msg="test")) # E: None -reveal_type(np.testing.assert_(2, msg=lambda: "test")) # E: None +assert_type(np.testing.assert_(1, msg="test"), None) +assert_type(np.testing.assert_(2, msg=lambda: "test"), None) if sys.platform == "win32" or sys.platform == "cygwin": - reveal_type(np.testing.memusage()) # E: builtins.int + assert_type(np.testing.memusage(), int) elif sys.platform == "linux": - reveal_type(np.testing.memusage()) # E: Union[None, builtins.int] -else: - reveal_type(np.testing.memusage()) # E: + assert_type(np.testing.memusage(), None | int) -reveal_type(np.testing.jiffies()) # E: 
builtins.int +assert_type(np.testing.jiffies(), int) -reveal_type(np.testing.build_err_msg([0, 1, 2], "test")) # E: str -reveal_type(np.testing.build_err_msg(range(2), "test", header="header")) # E: str -reveal_type(np.testing.build_err_msg(np.arange(9).reshape(3, 3), "test", verbose=False)) # E: str -reveal_type(np.testing.build_err_msg("abc", "test", names=["x", "y"])) # E: str -reveal_type(np.testing.build_err_msg([1.0, 2.0], "test", precision=5)) # E: str +assert_type(np.testing.build_err_msg([0, 1, 2], "test"), str) +assert_type(np.testing.build_err_msg(range(2), "test", header="header"), str) +assert_type(np.testing.build_err_msg(np.arange(9).reshape(3, 3), "test", verbose=False), str) +assert_type(np.testing.build_err_msg("abc", "test", names=["x", "y"]), str) +assert_type(np.testing.build_err_msg([1.0, 2.0], "test", precision=5), str) -reveal_type(np.testing.assert_equal({1}, {1})) # E: None -reveal_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail")) # E: None -reveal_type(np.testing.assert_equal(1, 1.0, verbose=True)) # E: None +assert_type(np.testing.assert_equal({1}, {1}), None) +assert_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) +assert_type(np.testing.assert_equal(1, 1.0, verbose=True), None) -reveal_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])) # E: None +assert_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]), None) -reveal_type(np.testing.assert_almost_equal(1.0, 1.1)) # E: None -reveal_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail")) # E: None -reveal_type(np.testing.assert_almost_equal(1, 1.0, verbose=True)) # E: None -reveal_type(np.testing.assert_almost_equal(1, 1.0001, decimal=2)) # E: None +assert_type(np.testing.assert_almost_equal(1.0, 1.1), None) +assert_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) +assert_type(np.testing.assert_almost_equal(1, 1.0, verbose=True), None) +assert_type(np.testing.assert_almost_equal(1, 1.0001, decimal=2), None) -reveal_type(np.testing.assert_approx_equal(1.0, 1.1)) # E: None -reveal_type(np.testing.assert_approx_equal("1", "2", err_msg="fail")) # E: None -reveal_type(np.testing.assert_approx_equal(1, 1.0, verbose=True)) # E: None -reveal_type(np.testing.assert_approx_equal(1, 1.0001, significant=2)) # E: None +assert_type(np.testing.assert_approx_equal(1.0, 1.1), None) +assert_type(np.testing.assert_approx_equal("1", "2", err_msg="fail"), None) +assert_type(np.testing.assert_approx_equal(1, 1.0, verbose=True), None) +assert_type(np.testing.assert_approx_equal(1, 1.0001, significant=2), None) -reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, err_msg="test")) # E: None -reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, verbose=True)) # E: None -reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, header="header")) # E: None -reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, precision=np.int64())) # E: None -reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_nan=False)) # E: None -reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_inf=True)) # E: None +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, verbose=True), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, header="header"), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, 
precision=np.int64()), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_nan=False), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_inf=True), None) -reveal_type(np.testing.assert_array_equal(AR_i8, AR_f8)) # E: None -reveal_type(np.testing.assert_array_equal(AR_i8, AR_f8, err_msg="test")) # E: None -reveal_type(np.testing.assert_array_equal(AR_i8, AR_f8, verbose=True)) # E: None +assert_type(np.testing.assert_array_equal(AR_i8, AR_f8), None) +assert_type(np.testing.assert_array_equal(AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_equal(AR_i8, AR_f8, verbose=True), None) -reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8)) # E: None -reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, err_msg="test")) # E: None -reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, verbose=True)) # E: None -reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, decimal=1)) # E: None +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8), None) +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, verbose=True), None) +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, decimal=1), None) -reveal_type(np.testing.assert_array_less(AR_i8, AR_f8)) # E: None -reveal_type(np.testing.assert_array_less(AR_i8, AR_f8, err_msg="test")) # E: None -reveal_type(np.testing.assert_array_less(AR_i8, AR_f8, verbose=True)) # E: None +assert_type(np.testing.assert_array_less(AR_i8, AR_f8), None) +assert_type(np.testing.assert_array_less(AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_less(AR_i8, AR_f8, verbose=True), None) -reveal_type(np.testing.runstring("1 + 1", {})) # E: Any -reveal_type(np.testing.runstring("int64() + 1", {"int64": np.int64})) # E: Any +assert_type(np.testing.runstring("1 + 1", {}), Any) +assert_type(np.testing.runstring("int64() + 1", {"int64": np.int64}), Any) -reveal_type(np.testing.assert_string_equal("1", "1")) # E: None +assert_type(np.testing.assert_string_equal("1", "1"), None) -reveal_type(np.testing.rundocs()) # E: None -reveal_type(np.testing.rundocs("test.py")) # E: None -reveal_type(np.testing.rundocs(Path("test.py"), raise_on_error=True)) # E: None +assert_type(np.testing.rundocs(), None) +assert_type(np.testing.rundocs("test.py"), None) +assert_type(np.testing.rundocs(Path("test.py"), raise_on_error=True), None) def func3(a: int) -> bool: ... 
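The assert_raises/assert_warns assertions that follow give two distinct return types because both helpers support two call styles; a short editorial sketch (not part of the patch):

    import warnings
    import numpy as np

    def raiser() -> bool:
        raise RuntimeError("boom")

    def warner() -> bool:
        warnings.warn("careful", RuntimeWarning)
        return True

    with np.testing.assert_raises(RuntimeError):        # context-manager form
        raiser()

    np.testing.assert_raises(RuntimeError, raiser)      # immediate-call form, returns None
    print(np.testing.assert_warns(RuntimeWarning, warner))  # returns warner()'s value: True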
-reveal_type(func3) # E: def (a: builtins.int) -> builtins.bool - -reveal_type(np.testing.assert_raises(RuntimeWarning)) # E: _AssertRaisesContext[builtins.RuntimeWarning] -reveal_type(np.testing.assert_raises(RuntimeWarning, func3, 5)) # E: None +assert_type( + np.testing.assert_raises(RuntimeWarning), + unittest.case._AssertRaisesContext[RuntimeWarning], +) +assert_type(np.testing.assert_raises(RuntimeWarning, func3, 5), None) -reveal_type(np.testing.assert_raises_regex(RuntimeWarning, r"test")) # E: _AssertRaisesContext[builtins.RuntimeWarning] -reveal_type(np.testing.assert_raises_regex(RuntimeWarning, b"test", func3, 5)) # E: None -reveal_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), func3, 5)) # E: None +assert_type( + np.testing.assert_raises_regex(RuntimeWarning, r"test"), + unittest.case._AssertRaisesContext[RuntimeWarning], +) +assert_type(np.testing.assert_raises_regex(RuntimeWarning, b"test", func3, 5), None) +assert_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), func3, 5), None) class Test: ... def decorate(a: FT) -> FT: return a -reveal_type(np.testing.decorate_methods(Test, decorate)) # E: None -reveal_type(np.testing.decorate_methods(Test, decorate, None)) # E: None -reveal_type(np.testing.decorate_methods(Test, decorate, "test")) # E: None -reveal_type(np.testing.decorate_methods(Test, decorate, b"test")) # E: None -reveal_type(np.testing.decorate_methods(Test, decorate, re.compile("test"))) # E: None +assert_type(np.testing.decorate_methods(Test, decorate), None) +assert_type(np.testing.decorate_methods(Test, decorate, None), None) +assert_type(np.testing.decorate_methods(Test, decorate, "test"), None) +assert_type(np.testing.decorate_methods(Test, decorate, b"test"), None) +assert_type(np.testing.decorate_methods(Test, decorate, re.compile("test")), None) -reveal_type(np.testing.measure("for i in range(1000): np.sqrt(i**2)")) # E: float -reveal_type(np.testing.measure(b"for i in range(1000): np.sqrt(i**2)", times=5)) # E: float +assert_type(np.testing.measure("for i in range(1000): np.sqrt(i**2)"), float) +assert_type(np.testing.measure(b"for i in range(1000): np.sqrt(i**2)", times=5), float) -reveal_type(np.testing.assert_allclose(AR_i8, AR_f8)) # E: None -reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, rtol=0.005)) # E: None -reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, atol=1)) # E: None -reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, equal_nan=True)) # E: None -reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, err_msg="err")) # E: None -reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, verbose=False)) # E: None +assert_type(np.testing.assert_allclose(AR_i8, AR_f8), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, rtol=0.005), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, atol=1), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, equal_nan=True), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, err_msg="err"), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, verbose=False), None) -reveal_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2)) # E: None +assert_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2), None) -reveal_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2)) # E: ndarray[Any, dtype[Any]] -reveal_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32)) # E: ndarray[Any, dtype[Any]] +assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2), 
npt.NDArray[Any]) +assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32), npt.NDArray[Any]) -reveal_type(np.testing.assert_warns(RuntimeWarning)) # E: _GeneratorContextManager[None] -reveal_type(np.testing.assert_warns(RuntimeWarning, func3, 5)) # E: bool +assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) def func4(a: int, b: str) -> bool: ... -reveal_type(np.testing.assert_no_warnings()) # E: _GeneratorContextManager[None] -reveal_type(np.testing.assert_no_warnings(func3, 5)) # E: bool -reveal_type(np.testing.assert_no_warnings(func4, a=1, b="test")) # E: bool -reveal_type(np.testing.assert_no_warnings(func4, 1, "test")) # E: bool +assert_type(np.testing.assert_no_warnings(), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_no_warnings(func3, 5), bool) +assert_type(np.testing.assert_no_warnings(func4, a=1, b="test"), bool) +assert_type(np.testing.assert_no_warnings(func4, 1, "test"), bool) -reveal_type(np.testing.tempdir("test_dir")) # E: _GeneratorContextManager[builtins.str] -reveal_type(np.testing.tempdir(prefix=b"test")) # E: _GeneratorContextManager[builtins.bytes] -reveal_type(np.testing.tempdir("test_dir", dir=Path("here"))) # E: _GeneratorContextManager[builtins.str] +assert_type(np.testing.tempdir("test_dir"), contextlib._GeneratorContextManager[str]) +assert_type(np.testing.tempdir(prefix=b"test"), contextlib._GeneratorContextManager[bytes]) +assert_type(np.testing.tempdir("test_dir", dir=Path("here")), contextlib._GeneratorContextManager[str]) -reveal_type(np.testing.temppath("test_dir", text=True)) # E: _GeneratorContextManager[builtins.str] -reveal_type(np.testing.temppath(prefix=b"test")) # E: _GeneratorContextManager[builtins.bytes] -reveal_type(np.testing.temppath("test_dir", dir=Path("here"))) # E: _GeneratorContextManager[builtins.str] +assert_type(np.testing.temppath("test_dir", text=True), contextlib._GeneratorContextManager[str]) +assert_type(np.testing.temppath(prefix=b"test"), contextlib._GeneratorContextManager[bytes]) +assert_type(np.testing.temppath("test_dir", dir=Path("here")), contextlib._GeneratorContextManager[str]) -reveal_type(np.testing.assert_no_gc_cycles()) # E: _GeneratorContextManager[None] -reveal_type(np.testing.assert_no_gc_cycles(func3, 5)) # E: None +assert_type(np.testing.assert_no_gc_cycles(), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_no_gc_cycles(func3, 5), None) -reveal_type(np.testing.break_cycles()) # E: None +assert_type(np.testing.break_cycles(), None) -reveal_type(np.testing.TestCase()) # E: unittest.case.TestCase +assert_type(np.testing.TestCase(), unittest.case.TestCase) diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 659f00dfa208..506786c78743 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -1,8 +1,14 @@ +import sys from typing import Any, TypeVar import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + _SCT = TypeVar("_SCT", bound=np.generic) @@ -23,50 +29,71 @@ AR_O: npt.NDArray[np.object_] AR_LIKE_b: list[bool] -reveal_type(np.fliplr(AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.fliplr(AR_LIKE_b)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.flipud(AR_b)) # E: ndarray[Any, dtype[bool_]] 
-reveal_type(np.flipud(AR_LIKE_b)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.eye(10)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.eye(10, M=20, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.eye(10, k=2, dtype=int)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.diag(AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.diag(AR_LIKE_b, k=0)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.diagflat(AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.diagflat(AR_LIKE_b, k=0)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.tri(10)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.tri(10, M=20, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]] -reveal_type(np.tri(10, k=2, dtype=int)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.tril(AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.tril(AR_LIKE_b, k=0)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.triu(AR_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.triu(AR_LIKE_b, k=0)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.vander(AR_b)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.vander(AR_u)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.vander(AR_i, N=2)) # E: ndarray[Any, dtype[signedinteger[Any]]] -reveal_type(np.vander(AR_f, increasing=True)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.vander(AR_c)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.vander(AR_O)) # E: ndarray[Any, dtype[object_]] - -reveal_type(np.histogram2d(AR_i, AR_b)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.histogram2d(AR_f, AR_f)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]] -reveal_type(np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b)) # E: tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]] - -reveal_type(np.mask_indices(10, func1)) # E: tuple[ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] -reveal_type(np.mask_indices(8, func2, "0")) # E: tuple[ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]] - -reveal_type(np.tril_indices(10)) # E: tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]] - -reveal_type(np.tril_indices_from(AR_b)) # E: tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]] - -reveal_type(np.triu_indices(10)) # E: tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]] - -reveal_type(np.triu_indices_from(AR_b)) # E: tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]] +assert_type(np.fliplr(AR_b), npt.NDArray[np.bool_]) +assert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any]) + +assert_type(np.flipud(AR_b), npt.NDArray[np.bool_]) +assert_type(np.flipud(AR_LIKE_b), npt.NDArray[Any]) + +assert_type(np.eye(10), npt.NDArray[np.float64]) +assert_type(np.eye(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.eye(10, k=2, dtype=int), npt.NDArray[Any]) + +assert_type(np.diag(AR_b), npt.NDArray[np.bool_]) +assert_type(np.diag(AR_LIKE_b, k=0), npt.NDArray[Any]) + +assert_type(np.diagflat(AR_b), npt.NDArray[np.bool_]) +assert_type(np.diagflat(AR_LIKE_b, k=0), npt.NDArray[Any]) + +assert_type(np.tri(10), npt.NDArray[np.float64]) +assert_type(np.tri(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.tri(10, k=2, dtype=int), npt.NDArray[Any]) + +assert_type(np.tril(AR_b), npt.NDArray[np.bool_]) +assert_type(np.tril(AR_LIKE_b, k=0), 
npt.NDArray[Any]) + +assert_type(np.triu(AR_b), npt.NDArray[np.bool_]) +assert_type(np.triu(AR_LIKE_b, k=0), npt.NDArray[Any]) + +assert_type(np.vander(AR_b), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.vander(AR_u), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.vander(AR_i, N=2), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating[Any]]) +assert_type(np.vander(AR_c), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.vander(AR_O), npt.NDArray[np.object_]) + +assert_type( + np.histogram2d(AR_i, AR_b), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.floating[Any]], + npt.NDArray[np.floating[Any]], + ], +) +assert_type( + np.histogram2d(AR_f, AR_f), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.floating[Any]], + npt.NDArray[np.floating[Any]], + ], +) +assert_type( + np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complexfloating[Any, Any]], + npt.NDArray[np.complexfloating[Any, Any]], + ], +) + +assert_type(np.mask_indices(10, func1), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.mask_indices(8, func2, "0"), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) + +assert_type(np.tril_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) + +assert_type(np.tril_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) + +assert_type(np.triu_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) + +assert_type(np.triu_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) diff --git a/numpy/typing/tests/data/reveal/type_check.pyi b/numpy/typing/tests/data/reveal/type_check.pyi index af3d1dd41c1d..12af9a66d9dd 100644 --- a/numpy/typing/tests/data/reveal/type_check.pyi +++ b/numpy/typing/tests/data/reveal/type_check.pyi @@ -1,6 +1,14 @@ +import sys +from typing import Any, Literal + import numpy as np import numpy.typing as npt -from numpy._typing import _128Bit +from numpy._typing import _16Bit, _32Bit, _64Bit, _128Bit + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type f8: np.float64 f: float @@ -22,52 +30,58 @@ class RealObj: class ImagObj: imag: slice -reveal_type(np.mintypecode(["f8"], typeset="qfQF")) - -reveal_type(np.asfarray(AR_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.asfarray(AR_LIKE_f)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.asfarray(AR_f8, dtype="c16")) # E: ndarray[Any, dtype[complexfloating[Any, Any]]] -reveal_type(np.asfarray(AR_f8, dtype="i8")) # E: ndarray[Any, dtype[floating[Any]]] - -reveal_type(np.real(RealObj())) # E: slice -reveal_type(np.real(AR_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.real(AR_c16)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.real(AR_LIKE_f)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.imag(ImagObj())) # E: slice -reveal_type(np.imag(AR_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.imag(AR_c16)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.imag(AR_LIKE_f)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.iscomplex(f8)) # E: bool_ -reveal_type(np.iscomplex(AR_f8)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.iscomplex(AR_LIKE_f)) # E: ndarray[Any, dtype[bool_]] - -reveal_type(np.isreal(f8)) # E: bool_ -reveal_type(np.isreal(AR_f8)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isreal(AR_LIKE_f)) # E: ndarray[Any, dtype[bool_]] - -reveal_type(np.iscomplexobj(f8)) # E: bool -reveal_type(np.isrealobj(f8)) # E: bool 
- -reveal_type(np.nan_to_num(f8)) # E: {float64} -reveal_type(np.nan_to_num(f, copy=True)) # E: Any -reveal_type(np.nan_to_num(AR_f8, nan=1.5)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.nan_to_num(AR_LIKE_f, posinf=9999)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.real_if_close(AR_f8)) # E: ndarray[Any, dtype[{float64}]] -reveal_type(np.real_if_close(AR_c16)) # E: Union[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{complex128}]]] -reveal_type(np.real_if_close(AR_c8)) # E: Union[ndarray[Any, dtype[{float32}]], ndarray[Any, dtype[{complex64}]]] -reveal_type(np.real_if_close(AR_LIKE_f)) # E: ndarray[Any, dtype[Any]] - -reveal_type(np.typename("h")) # E: Literal['short'] -reveal_type(np.typename("B")) # E: Literal['unsigned char'] -reveal_type(np.typename("V")) # E: Literal['void'] -reveal_type(np.typename("S1")) # E: Literal['character'] - -reveal_type(np.common_type(AR_i4)) # E: Type[{float64}] -reveal_type(np.common_type(AR_f2)) # E: Type[{float16}] -reveal_type(np.common_type(AR_f2, AR_i4)) # E: Type[floating[Union[_16Bit, _64Bit]]] -reveal_type(np.common_type(AR_f16, AR_i4)) # E: Type[floating[Union[_128Bit, _64Bit]]] -reveal_type(np.common_type(AR_c8, AR_f2)) # E: Type[complexfloating[Union[_16Bit, _32Bit], Union[_16Bit, _32Bit]]] -reveal_type(np.common_type(AR_f2, AR_c8, AR_i4)) # E: Type[complexfloating[Union[_64Bit, _16Bit, _32Bit], Union[_64Bit, _16Bit, _32Bit]]] +assert_type(np.mintypecode(["f8"], typeset="qfQF"), str) + +assert_type(np.asfarray(AR_f8), npt.NDArray[np.float64]) +assert_type(np.asfarray(AR_LIKE_f), npt.NDArray[np.float64]) +assert_type(np.asfarray(AR_f8, dtype="c16"), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.asfarray(AR_f8, dtype="i8"), npt.NDArray[np.floating[Any]]) + +assert_type(np.real(RealObj()), slice) +assert_type(np.real(AR_f8), npt.NDArray[np.float64]) +assert_type(np.real(AR_c16), npt.NDArray[np.float64]) +assert_type(np.real(AR_LIKE_f), npt.NDArray[Any]) + +assert_type(np.imag(ImagObj()), slice) +assert_type(np.imag(AR_f8), npt.NDArray[np.float64]) +assert_type(np.imag(AR_c16), npt.NDArray[np.float64]) +assert_type(np.imag(AR_LIKE_f), npt.NDArray[Any]) + +assert_type(np.iscomplex(f8), np.bool_) +assert_type(np.iscomplex(AR_f8), npt.NDArray[np.bool_]) +assert_type(np.iscomplex(AR_LIKE_f), npt.NDArray[np.bool_]) + +assert_type(np.isreal(f8), np.bool_) +assert_type(np.isreal(AR_f8), npt.NDArray[np.bool_]) +assert_type(np.isreal(AR_LIKE_f), npt.NDArray[np.bool_]) + +assert_type(np.iscomplexobj(f8), bool) +assert_type(np.isrealobj(f8), bool) + +assert_type(np.nan_to_num(f8), np.float64) +assert_type(np.nan_to_num(f, copy=True), Any) +assert_type(np.nan_to_num(AR_f8, nan=1.5), npt.NDArray[np.float64]) +assert_type(np.nan_to_num(AR_LIKE_f, posinf=9999), npt.NDArray[Any]) + +assert_type(np.real_if_close(AR_f8), npt.NDArray[np.float64]) +assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) +assert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32] | npt.NDArray[np.complex64]) +assert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any]) + +assert_type(np.typename("h"), Literal["short"]) +assert_type(np.typename("B"), Literal["unsigned char"]) +assert_type(np.typename("V"), Literal["void"]) +assert_type(np.typename("S1"), Literal["character"]) + +assert_type(np.common_type(AR_i4), type[np.float64]) +assert_type(np.common_type(AR_f2), type[np.float16]) +assert_type(np.common_type(AR_f2, AR_i4), type[np.floating[_16Bit | _64Bit]]) +assert_type(np.common_type(AR_f16, AR_i4), 
type[np.floating[_64Bit | _128Bit]]) +assert_type( + np.common_type(AR_c8, AR_f2), + type[np.complexfloating[_16Bit | _32Bit, _16Bit | _32Bit]], +) +assert_type( + np.common_type(AR_f2, AR_c8, AR_i4), + type[np.complexfloating[_16Bit | _32Bit | _64Bit, _16Bit | _32Bit | _64Bit]], +) diff --git a/numpy/typing/tests/data/reveal/ufunc_config.pyi b/numpy/typing/tests/data/reveal/ufunc_config.pyi index 2c6fadf92360..38474f1e73fb 100644 --- a/numpy/typing/tests/data/reveal/ufunc_config.pyi +++ b/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -1,25 +1,41 @@ """Typing tests for `core._ufunc_config`.""" +import sys +from typing import Any, Protocol +from collections.abc import Callable + import numpy as np +from numpy.core._ufunc_config import _ErrDict + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type def func(a: str, b: int) -> None: ... +class FuncProtocol(Protocol): + def __call__(self, a: str, b: int) -> None: ... + class Write: def write(self, value: str) -> None: ... -reveal_type(np.seterr(all=None)) # E: TypedDict('core._ufunc_config._ErrDict' -reveal_type(np.seterr(divide="ignore")) # E: TypedDict('core._ufunc_config._ErrDict' -reveal_type(np.seterr(over="warn")) # E: TypedDict('core._ufunc_config._ErrDict' -reveal_type(np.seterr(under="call")) # E: TypedDict('core._ufunc_config._ErrDict' -reveal_type(np.seterr(invalid="raise")) # E: TypedDict('core._ufunc_config._ErrDict' -reveal_type(np.geterr()) # E: TypedDict('core._ufunc_config._ErrDict' +class SupportsWrite(Protocol): + def write(self, s: str, /) -> object: ... + +assert_type(np.seterr(all=None), _ErrDict) +assert_type(np.seterr(divide="ignore"), _ErrDict) +assert_type(np.seterr(over="warn"), _ErrDict) +assert_type(np.seterr(under="call"), _ErrDict) +assert_type(np.seterr(invalid="raise"), _ErrDict) +assert_type(np.geterr(), _ErrDict) -reveal_type(np.setbufsize(4096)) # E: int -reveal_type(np.getbufsize()) # E: int +assert_type(np.setbufsize(4096), int) +assert_type(np.getbufsize(), int) -reveal_type(np.seterrcall(func)) # E: Union[None, def (builtins.str, builtins.int) -> Any, _SupportsWrite[builtins.str]] -reveal_type(np.seterrcall(Write())) # E: Union[None, def (builtins.str, builtins.int) -> Any, _SupportsWrite[builtins.str]] -reveal_type(np.geterrcall()) # E: Union[None, def (builtins.str, builtins.int) -> Any, _SupportsWrite[builtins.str]] +assert_type(np.seterrcall(func), Callable[[str, int], Any] | None | SupportsWrite) +assert_type(np.seterrcall(Write()), Callable[[str, int], Any] | None | SupportsWrite) +assert_type(np.geterrcall(), Callable[[str, int], Any] | None | SupportsWrite) -reveal_type(np.errstate(call=func, all="call")) # E: errstate[def (a: builtins.str, b: builtins.int)] -reveal_type(np.errstate(call=Write(), divide="log", over="log")) # E: errstate[ufunc_config.Write] +assert_type(np.errstate(call=func, all="call"), np.errstate[FuncProtocol]) +assert_type(np.errstate(call=Write(), divide="log", over="log"), np.errstate[Write]) diff --git a/numpy/typing/tests/data/reveal/ufunclike.pyi b/numpy/typing/tests/data/reveal/ufunclike.pyi index 9f06600b6420..5f7a03eb6225 100644 --- a/numpy/typing/tests/data/reveal/ufunclike.pyi +++ b/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -1,5 +1,13 @@ +import sys from typing import Any + import numpy as np +import numpy.typing as npt + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type AR_LIKE_b: list[bool] AR_LIKE_u: list[np.uint32] @@ 
-7,23 +15,23 @@ AR_LIKE_i: list[int] AR_LIKE_f: list[float] AR_LIKE_O: list[np.object_] -AR_U: np.ndarray[Any, np.dtype[np.str_]] +AR_U: npt.NDArray[np.str_] -reveal_type(np.fix(AR_LIKE_b)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.fix(AR_LIKE_u)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.fix(AR_LIKE_i)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.fix(AR_LIKE_f)) # E: ndarray[Any, dtype[floating[Any]]] -reveal_type(np.fix(AR_LIKE_O)) # E: Any -reveal_type(np.fix(AR_LIKE_f, out=AR_U)) # E: ndarray[Any, dtype[str_]] +assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating[Any]]) +assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating[Any]]) +assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating[Any]]) +assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating[Any]]) +assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) +assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) -reveal_type(np.isposinf(AR_LIKE_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isposinf(AR_LIKE_u)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isposinf(AR_LIKE_i)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isposinf(AR_LIKE_f)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isposinf(AR_LIKE_f, out=AR_U)) # E: ndarray[Any, dtype[str_]] +assert_type(np.isposinf(AR_LIKE_b), npt.NDArray[np.bool_]) +assert_type(np.isposinf(AR_LIKE_u), npt.NDArray[np.bool_]) +assert_type(np.isposinf(AR_LIKE_i), npt.NDArray[np.bool_]) +assert_type(np.isposinf(AR_LIKE_f), npt.NDArray[np.bool_]) +assert_type(np.isposinf(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) -reveal_type(np.isneginf(AR_LIKE_b)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isneginf(AR_LIKE_u)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isneginf(AR_LIKE_i)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isneginf(AR_LIKE_f)) # E: ndarray[Any, dtype[bool_]] -reveal_type(np.isneginf(AR_LIKE_f, out=AR_U)) # E: ndarray[Any, dtype[str_]] +assert_type(np.isneginf(AR_LIKE_b), npt.NDArray[np.bool_]) +assert_type(np.isneginf(AR_LIKE_u), npt.NDArray[np.bool_]) +assert_type(np.isneginf(AR_LIKE_i), npt.NDArray[np.bool_]) +assert_type(np.isneginf(AR_LIKE_f), npt.NDArray[np.bool_]) +assert_type(np.isneginf(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index d4d522988b4e..5f7d99efd12d 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -1,68 +1,76 @@ +import sys +from typing import Literal, Any + import numpy as np import numpy.typing as npt +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + f8: np.float64 AR_f8: npt.NDArray[np.float64] AR_i8: npt.NDArray[np.int64] -reveal_type(np.absolute.__doc__) # E: str -reveal_type(np.absolute.types) # E: builtins.list[builtins.str] +assert_type(np.absolute.__doc__, str) +assert_type(np.absolute.types, list[str]) -reveal_type(np.absolute.__name__) # E: Literal['absolute'] -reveal_type(np.absolute.ntypes) # E: Literal[20] -reveal_type(np.absolute.identity) # E: None -reveal_type(np.absolute.nin) # E: Literal[1] -reveal_type(np.absolute.nin) # E: Literal[1] -reveal_type(np.absolute.nout) # E: Literal[1] -reveal_type(np.absolute.nargs) # E: Literal[2] -reveal_type(np.absolute.signature) # E: None -reveal_type(np.absolute(f8)) # E: Any -reveal_type(np.absolute(AR_f8)) # E: ndarray -reveal_type(np.absolute.at(AR_f8, AR_i8)) # E: None +assert_type(np.absolute.__name__, 
Literal["absolute"]) +assert_type(np.absolute.ntypes, Literal[20]) +assert_type(np.absolute.identity, None) +assert_type(np.absolute.nin, Literal[1]) +assert_type(np.absolute.nin, Literal[1]) +assert_type(np.absolute.nout, Literal[1]) +assert_type(np.absolute.nargs, Literal[2]) +assert_type(np.absolute.signature, None) +assert_type(np.absolute(f8), Any) +assert_type(np.absolute(AR_f8), npt.NDArray[Any]) +assert_type(np.absolute.at(AR_f8, AR_i8), None) -reveal_type(np.add.__name__) # E: Literal['add'] -reveal_type(np.add.ntypes) # E: Literal[22] -reveal_type(np.add.identity) # E: Literal[0] -reveal_type(np.add.nin) # E: Literal[2] -reveal_type(np.add.nout) # E: Literal[1] -reveal_type(np.add.nargs) # E: Literal[3] -reveal_type(np.add.signature) # E: None -reveal_type(np.add(f8, f8)) # E: Any -reveal_type(np.add(AR_f8, f8)) # E: ndarray -reveal_type(np.add.at(AR_f8, AR_i8, f8)) # E: None -reveal_type(np.add.reduce(AR_f8, axis=0)) # E: Any -reveal_type(np.add.accumulate(AR_f8)) # E: ndarray -reveal_type(np.add.reduceat(AR_f8, AR_i8)) # E: ndarray -reveal_type(np.add.outer(f8, f8)) # E: Any -reveal_type(np.add.outer(AR_f8, f8)) # E: ndarray +assert_type(np.add.__name__, Literal["add"]) +assert_type(np.add.ntypes, Literal[22]) +assert_type(np.add.identity, Literal[0]) +assert_type(np.add.nin, Literal[2]) +assert_type(np.add.nout, Literal[1]) +assert_type(np.add.nargs, Literal[3]) +assert_type(np.add.signature, None) +assert_type(np.add(f8, f8), Any) +assert_type(np.add(AR_f8, f8), npt.NDArray[Any]) +assert_type(np.add.at(AR_f8, AR_i8, f8), None) +assert_type(np.add.reduce(AR_f8, axis=0), Any) +assert_type(np.add.accumulate(AR_f8), npt.NDArray[Any]) +assert_type(np.add.reduceat(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.add.outer(f8, f8), Any) +assert_type(np.add.outer(AR_f8, f8), npt.NDArray[Any]) -reveal_type(np.frexp.__name__) # E: Literal['frexp'] -reveal_type(np.frexp.ntypes) # E: Literal[4] -reveal_type(np.frexp.identity) # E: None -reveal_type(np.frexp.nin) # E: Literal[1] -reveal_type(np.frexp.nout) # E: Literal[2] -reveal_type(np.frexp.nargs) # E: Literal[3] -reveal_type(np.frexp.signature) # E: None -reveal_type(np.frexp(f8)) # E: tuple[Any, Any] -reveal_type(np.frexp(AR_f8)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] +assert_type(np.frexp.__name__, Literal["frexp"]) +assert_type(np.frexp.ntypes, Literal[4]) +assert_type(np.frexp.identity, None) +assert_type(np.frexp.nin, Literal[1]) +assert_type(np.frexp.nout, Literal[2]) +assert_type(np.frexp.nargs, Literal[3]) +assert_type(np.frexp.signature, None) +assert_type(np.frexp(f8), tuple[Any, Any]) +assert_type(np.frexp(AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) -reveal_type(np.divmod.__name__) # E: Literal['divmod'] -reveal_type(np.divmod.ntypes) # E: Literal[15] -reveal_type(np.divmod.identity) # E: None -reveal_type(np.divmod.nin) # E: Literal[2] -reveal_type(np.divmod.nout) # E: Literal[2] -reveal_type(np.divmod.nargs) # E: Literal[4] -reveal_type(np.divmod.signature) # E: None -reveal_type(np.divmod(f8, f8)) # E: tuple[Any, Any] -reveal_type(np.divmod(AR_f8, f8)) # E: tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]] +assert_type(np.divmod.__name__, Literal["divmod"]) +assert_type(np.divmod.ntypes, Literal[15]) +assert_type(np.divmod.identity, None) +assert_type(np.divmod.nin, Literal[2]) +assert_type(np.divmod.nout, Literal[2]) +assert_type(np.divmod.nargs, Literal[4]) +assert_type(np.divmod.signature, None) +assert_type(np.divmod(f8, f8), tuple[Any, Any]) +assert_type(np.divmod(AR_f8, f8), 
tuple[npt.NDArray[Any], npt.NDArray[Any]])
 
-reveal_type(np.matmul.__name__)  # E: Literal['matmul']
-reveal_type(np.matmul.ntypes)  # E: Literal[19]
-reveal_type(np.matmul.identity)  # E: None
-reveal_type(np.matmul.nin)  # E: Literal[2]
-reveal_type(np.matmul.nout)  # E: Literal[1]
-reveal_type(np.matmul.nargs)  # E: Literal[3]
-reveal_type(np.matmul.signature)  # E: Literal['(n?,k),(k,m?)->(n?,m?)']
-reveal_type(np.matmul.identity)  # E: None
-reveal_type(np.matmul(AR_f8, AR_f8))  # E: Any
-reveal_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)]))  # E: Any
+assert_type(np.matmul.__name__, Literal["matmul"])
+assert_type(np.matmul.ntypes, Literal[19])
+assert_type(np.matmul.identity, None)
+assert_type(np.matmul.nin, Literal[2])
+assert_type(np.matmul.nout, Literal[1])
+assert_type(np.matmul.nargs, Literal[3])
+assert_type(np.matmul.signature, Literal["(n?,k),(k,m?)->(n?,m?)"])
+assert_type(np.matmul.identity, None)
+assert_type(np.matmul(AR_f8, AR_f8), Any)
+assert_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)]), Any)
diff --git a/numpy/typing/tests/data/reveal/warnings_and_errors.pyi b/numpy/typing/tests/data/reveal/warnings_and_errors.pyi
index 19fa432f91a4..16492c2fb41b 100644
--- a/numpy/typing/tests/data/reveal/warnings_and_errors.pyi
+++ b/numpy/typing/tests/data/reveal/warnings_and_errors.pyi
@@ -1,9 +1,16 @@
+import sys
+
 import numpy as np
 
-reveal_type(np.ModuleDeprecationWarning())  # E: ModuleDeprecationWarning
-reveal_type(np.VisibleDeprecationWarning())  # E: VisibleDeprecationWarning
-reveal_type(np.ComplexWarning())  # E: ComplexWarning
-reveal_type(np.RankWarning())  # E: RankWarning
-reveal_type(np.TooHardError())  # E: TooHardError
-reveal_type(np.AxisError("test"))  # E: AxisError
-reveal_type(np.AxisError(5, 1))  # E: AxisError
+if sys.version_info >= (3, 11):
+    from typing import assert_type
+else:
+    from typing_extensions import assert_type
+
+assert_type(np.ModuleDeprecationWarning(), np.ModuleDeprecationWarning)
+assert_type(np.VisibleDeprecationWarning(), np.VisibleDeprecationWarning)
+assert_type(np.ComplexWarning(), np.ComplexWarning)
+assert_type(np.RankWarning(), np.RankWarning)
+assert_type(np.TooHardError(), np.TooHardError)
+assert_type(np.AxisError("test"), np.AxisError)
+assert_type(np.AxisError(5, 1), np.AxisError)

From 8294350d5768207ed3fdeee6ab86e69295205315 Mon Sep 17 00:00:00 2001
From: Bas van Beek 
Date: Fri, 1 Sep 2023 16:26:02 +0200
Subject: [PATCH 096/120] TYP: Refactor the typing misc-tests cases using `typing.assert_type`

---
 .../tests/data/misc/extended_precision.pyi   | 32 ++++++++++++-------
 1 file changed, 20 insertions(+), 12 deletions(-)

diff --git a/numpy/typing/tests/data/misc/extended_precision.pyi b/numpy/typing/tests/data/misc/extended_precision.pyi
index 1e495e4f3cc4..78d8d93c6560 100644
--- a/numpy/typing/tests/data/misc/extended_precision.pyi
+++ b/numpy/typing/tests/data/misc/extended_precision.pyi
@@ -1,17 +1,25 @@
+import sys
+
 import numpy as np
+from numpy._typing import _80Bit, _96Bit, _128Bit, _256Bit
+
+if sys.version_info >= (3, 11):
+    from typing import assert_type
+else:
+    from typing_extensions import assert_type
 
-reveal_type(np.uint128())
-reveal_type(np.uint256())
+assert_type(np.uint128(), np.unsignedinteger[_128Bit])
+assert_type(np.uint256(), np.unsignedinteger[_256Bit])
 
-reveal_type(np.int128())
-reveal_type(np.int256())
+assert_type(np.int128(), np.signedinteger[_128Bit])
+assert_type(np.int256(), np.signedinteger[_256Bit])
 
-reveal_type(np.float80())
-reveal_type(np.float96())
-reveal_type(np.float128())
-reveal_type(np.float256()) +assert_type(np.float80(), np.floating[_80Bit]) +assert_type(np.float96(), np.floating[_96Bit]) +assert_type(np.float128(), np.floating[_128Bit]) +assert_type(np.float256(), np.floating[_256Bit]) -reveal_type(np.complex160()) -reveal_type(np.complex192()) -reveal_type(np.complex256()) -reveal_type(np.complex512()) +assert_type(np.complex160(), np.complexfloating[_80Bit, _80Bit]) +assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit]) +assert_type(np.complex256(), np.complexfloating[_128Bit, _128Bit]) +assert_type(np.complex512(), np.complexfloating[_256Bit, _256Bit]) From c650ab3dc8639868a33bd342f2db64ff0430308e Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 1 Sep 2023 18:10:54 +0200 Subject: [PATCH 097/120] TYP: Overhaul the typing test suite --- numpy/typing/mypy_plugin.py | 7 +- numpy/typing/tests/data/mypy.ini | 1 + numpy/typing/tests/test_typing.py | 284 ++++++------------------------ 3 files changed, 55 insertions(+), 237 deletions(-) diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 1ffe74fa97b1..8ec9637016e3 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -75,8 +75,7 @@ def _get_precision_dict() -> dict[str, str]: def _get_extended_precision_list() -> list[str]: - extended_types = [np.ulonglong, np.longlong, np.longdouble, np.clongdouble] - extended_names = { + extended_names = [ "uint128", "uint256", "int128", @@ -89,8 +88,8 @@ def _get_extended_precision_list() -> list[str]: "complex192", "complex256", "complex512", - } - return [i.__name__ for i in extended_types if i.__name__ in extended_names] + ] + return [i for i in extended_names if hasattr(np, i)] def _get_c_intp_name() -> str: diff --git a/numpy/typing/tests/data/mypy.ini b/numpy/typing/tests/data/mypy.ini index 13928c2bca3a..1cc16e03965d 100644 --- a/numpy/typing/tests/data/mypy.ini +++ b/numpy/typing/tests/data/mypy.ini @@ -2,3 +2,4 @@ plugins = numpy.typing.mypy_plugin show_absolute_path = True implicit_reexport = False +pretty = True diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index 491431a86351..cd009bb6e7f2 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -1,22 +1,15 @@ from __future__ import annotations import importlib.util -import itertools import os import re import shutil from collections import defaultdict from collections.abc import Iterator -from typing import IO, TYPE_CHECKING +from typing import TYPE_CHECKING import pytest -import numpy as np -import numpy.typing as npt -from numpy.typing.mypy_plugin import ( - _PRECISION_DICT, - _EXTENDED_PRECISION_LIST, - _C_INTP, -) +from numpy.typing.mypy_plugin import _EXTENDED_PRECISION_LIST # Only trigger a full `mypy` run if this environment variable is set @@ -55,7 +48,7 @@ #: A dictionary with file names as keys and lists of the mypy stdout as values. #: To-be populated by `run_mypy`. 
-OUTPUT_MYPY: dict[str, list[str]] = {} +OUTPUT_MYPY: defaultdict[str, list[str]] = defaultdict(list) def _key_func(key: str) -> str: @@ -67,10 +60,11 @@ def _key_func(key: str) -> str: return os.path.join(drive, tail.split(":", 1)[0]) -def _strip_filename(msg: str) -> str: - """Strip the filename from a mypy message.""" +def _strip_filename(msg: str) -> tuple[int, str]: + """Strip the filename and line number from a mypy message.""" _, tail = os.path.splitdrive(msg) - return tail.split(":", 1)[-1] + _, lineno, msg = tail.split(":", 2) + return int(lineno), msg.strip() def strip_func(match: re.Match[str]) -> str: @@ -96,6 +90,7 @@ def run_mypy() -> None: ): shutil.rmtree(CACHE_DIR) + split_pattern = re.compile(r"(\s+)?\^(\~+)?") for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR): # Run mypy stdout, stderr, exit_code = api.run([ @@ -109,11 +104,20 @@ def run_mypy() -> None: pytest.fail(f"Unexpected mypy standard error\n\n{stderr}") elif exit_code not in {0, 1}: pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}") - stdout = stdout.replace('*', '') - # Parse the output - iterator = itertools.groupby(stdout.split("\n"), key=_key_func) - OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k) + str_concat = "" + filename: str | None = None + for i in stdout.split("\n"): + if "note:" in i: + continue + if filename is None: + filename = _key_func(i) + + str_concat += f"{i}\n" + if split_pattern.match(i) is not None: + OUTPUT_MYPY[filename].append(str_concat) + str_concat = "" + filename = None def get_test_cases(directory: str) -> Iterator[ParameterSet]: @@ -133,7 +137,7 @@ def test_success(path) -> None: output_mypy = OUTPUT_MYPY if path in output_mypy: msg = "Unexpected mypy output\n\n" - msg += "\n".join(_strip_filename(v) for v in output_mypy[path]) + msg += "\n".join(_strip_filename(v)[1] for v in output_mypy[path]) raise AssertionError(msg) @@ -150,15 +154,9 @@ def test_fail(path: str) -> None: output_mypy = OUTPUT_MYPY assert path in output_mypy + for error_line in output_mypy[path]: - error_line = _strip_filename(error_line).split("\n", 1)[0] - match = re.match( - r"(?P\d+): (error|note): .+$", - error_line, - ) - if match is None: - raise ValueError(f"Unexpected error line format: {error_line}") - lineno = int(match.group('lineno')) + lineno, error_line = _strip_filename(error_line) errors[lineno] += f'{error_line}\n' for i, line in enumerate(lines): @@ -190,7 +188,7 @@ def test_fail(path: str) -> None: _FAIL_MSG2 = """Error mismatch at line {} Expression: {} -Expected error: {!r} +Expected error: {} Observed error: {!r} """ @@ -210,141 +208,10 @@ def _test_fail( )) -def _construct_ctypes_dict() -> dict[str, str]: - dct = { - "ubyte": "c_ubyte", - "ushort": "c_ushort", - "uintc": "c_uint", - "uint": "c_ulong", - "ulonglong": "c_ulonglong", - "byte": "c_byte", - "short": "c_short", - "intc": "c_int", - "int_": "c_long", - "longlong": "c_longlong", - "single": "c_float", - "double": "c_double", - "longdouble": "c_longdouble", - } - - # Match `ctypes` names to the first ctypes type with a given kind and - # precision, e.g. {"c_double": "c_double", "c_longdouble": "c_double"} - # if both types represent 64-bit floats. 
- # In this context "first" is defined by the order of `dct` - ret = {} - visited: dict[tuple[str, int], str] = {} - for np_name, ct_name in dct.items(): - np_scalar = getattr(np, np_name)() - - # Find the first `ctypes` type for a given `kind`/`itemsize` combo - key = (np_scalar.dtype.kind, np_scalar.dtype.itemsize) - ret[ct_name] = visited.setdefault(key, f"ctypes.{ct_name}") - return ret - - -def _construct_format_dict() -> dict[str, str]: - dct = {k.split(".")[-1]: v.replace("numpy", "numpy._typing") for - k, v in _PRECISION_DICT.items()} - - return { - "uint8": "numpy.unsignedinteger[numpy._typing._8Bit]", - "uint16": "numpy.unsignedinteger[numpy._typing._16Bit]", - "uint32": "numpy.unsignedinteger[numpy._typing._32Bit]", - "uint64": "numpy.unsignedinteger[numpy._typing._64Bit]", - "uint128": "numpy.unsignedinteger[numpy._typing._128Bit]", - "uint256": "numpy.unsignedinteger[numpy._typing._256Bit]", - "int8": "numpy.signedinteger[numpy._typing._8Bit]", - "int16": "numpy.signedinteger[numpy._typing._16Bit]", - "int32": "numpy.signedinteger[numpy._typing._32Bit]", - "int64": "numpy.signedinteger[numpy._typing._64Bit]", - "int128": "numpy.signedinteger[numpy._typing._128Bit]", - "int256": "numpy.signedinteger[numpy._typing._256Bit]", - "float16": "numpy.floating[numpy._typing._16Bit]", - "float32": "numpy.floating[numpy._typing._32Bit]", - "float64": "numpy.floating[numpy._typing._64Bit]", - "float80": "numpy.floating[numpy._typing._80Bit]", - "float96": "numpy.floating[numpy._typing._96Bit]", - "float128": "numpy.floating[numpy._typing._128Bit]", - "float256": "numpy.floating[numpy._typing._256Bit]", - "complex64": ("numpy.complexfloating" - "[numpy._typing._32Bit, numpy._typing._32Bit]"), - "complex128": ("numpy.complexfloating" - "[numpy._typing._64Bit, numpy._typing._64Bit]"), - "complex160": ("numpy.complexfloating" - "[numpy._typing._80Bit, numpy._typing._80Bit]"), - "complex192": ("numpy.complexfloating" - "[numpy._typing._96Bit, numpy._typing._96Bit]"), - "complex256": ("numpy.complexfloating" - "[numpy._typing._128Bit, numpy._typing._128Bit]"), - "complex512": ("numpy.complexfloating" - "[numpy._typing._256Bit, numpy._typing._256Bit]"), - - "ubyte": f"numpy.unsignedinteger[{dct['_NBitByte']}]", - "ushort": f"numpy.unsignedinteger[{dct['_NBitShort']}]", - "uintc": f"numpy.unsignedinteger[{dct['_NBitIntC']}]", - "uintp": f"numpy.unsignedinteger[{dct['_NBitIntP']}]", - "uint": f"numpy.unsignedinteger[{dct['_NBitInt']}]", - "ulonglong": f"numpy.unsignedinteger[{dct['_NBitLongLong']}]", - "byte": f"numpy.signedinteger[{dct['_NBitByte']}]", - "short": f"numpy.signedinteger[{dct['_NBitShort']}]", - "intc": f"numpy.signedinteger[{dct['_NBitIntC']}]", - "intp": f"numpy.signedinteger[{dct['_NBitIntP']}]", - "int_": f"numpy.signedinteger[{dct['_NBitInt']}]", - "longlong": f"numpy.signedinteger[{dct['_NBitLongLong']}]", - - "half": f"numpy.floating[{dct['_NBitHalf']}]", - "single": f"numpy.floating[{dct['_NBitSingle']}]", - "double": f"numpy.floating[{dct['_NBitDouble']}]", - "longdouble": f"numpy.floating[{dct['_NBitLongDouble']}]", - "csingle": ("numpy.complexfloating" - f"[{dct['_NBitSingle']}, {dct['_NBitSingle']}]"), - "cdouble": ("numpy.complexfloating" - f"[{dct['_NBitDouble']}, {dct['_NBitDouble']}]"), - "clongdouble": ( - "numpy.complexfloating" - f"[{dct['_NBitLongDouble']}, {dct['_NBitLongDouble']}]" - ), - - # numpy.typing - "_NBitInt": dct['_NBitInt'], - - # numpy.ctypeslib - "c_intp": f"ctypes.{_C_INTP}" - } - - -#: A dictionary with all supported format keys (as keys) -#: and 
matching values -FORMAT_DICT: dict[str, str] = _construct_format_dict() -FORMAT_DICT.update(_construct_ctypes_dict()) - - -def _parse_reveals(file: IO[str]) -> tuple[npt.NDArray[np.str_], list[str]]: - """Extract and parse all ``" # E: "`` comments from the passed - file-like object. - - All format keys will be substituted for their respective value - from `FORMAT_DICT`, *e.g.* ``"{float64}"`` becomes - ``"numpy.floating[numpy._typing._64Bit]"``. - """ - string = file.read().replace("*", "") - - # Grab all `# E:`-based comments and matching expressions - expression_array, _, comments_array = np.char.partition( - string.split("\n"), sep=" # E: " - ).T - comments = "/n".join(comments_array) - - # Only search for the `{*}` pattern within comments, otherwise - # there is the risk of accidentally grabbing dictionaries and sets - key_set = set(re.findall(r"\{(.*?)\}", comments)) - kwargs = { - k: FORMAT_DICT.get(k, f"") for - k in key_set - } - fmt_str = comments.format(**kwargs) +_REVEAL_MSG = """Reveal mismatch at line {} - return expression_array, fmt_str.split("/n") +{} +""" @pytest.mark.slow @@ -356,53 +223,13 @@ def test_reveal(path: str) -> None: """ __tracebackhide__ = True - with open(path) as fin: - expression_array, reveal_list = _parse_reveals(fin) - output_mypy = OUTPUT_MYPY - assert path in output_mypy - for error_line in output_mypy[path]: - error_line = _strip_filename(error_line) - match = re.match( - r"(?P\d+): note: .+$", - error_line, - ) - if match is None: - raise ValueError(f"Unexpected reveal line format: {error_line}") - lineno = int(match.group('lineno')) - 1 - assert "Revealed type is" in error_line - - marker = reveal_list[lineno] - expression = expression_array[lineno] - _test_reveal(path, expression, marker, error_line, 1 + lineno) - + if path not in output_mypy: + return -_REVEAL_MSG = """Reveal mismatch at line {} - -Expression: {} -Expected reveal: {!r} -Observed reveal: {!r} -""" -_STRIP_PATTERN = re.compile(r"(\w+\.)+(\w+)") - - -def _test_reveal( - path: str, - expression: str, - reveal: str, - expected_reveal: str, - lineno: int, -) -> None: - """Error-reporting helper function for `test_reveal`.""" - stripped_reveal = _STRIP_PATTERN.sub(strip_func, reveal) - stripped_expected_reveal = _STRIP_PATTERN.sub(strip_func, expected_reveal) - if stripped_reveal not in stripped_expected_reveal: - raise AssertionError( - _REVEAL_MSG.format(lineno, - expression, - stripped_expected_reveal, - stripped_reveal) - ) + for error_line in output_mypy[path]: + lineno, error_line = _strip_filename(error_line) + raise AssertionError(_REVEAL_MSG.format(lineno, error_line)) @pytest.mark.slow @@ -424,18 +251,18 @@ def test_code_runs(path: str) -> None: LINENO_MAPPING = { - 3: "uint128", - 4: "uint256", - 6: "int128", - 7: "int256", - 9: "float80", - 10: "float96", - 11: "float128", - 12: "float256", - 14: "complex160", - 15: "complex192", - 16: "complex256", - 17: "complex512", + 11: "uint128", + 12: "uint256", + 14: "int128", + 15: "int256", + 17: "float80", + 18: "float96", + 19: "float128", + 20: "float256", + 22: "complex160", + 23: "complex192", + 24: "complex256", + 25: "complex512", } @@ -450,21 +277,12 @@ def test_extended_precision() -> None: expression_list = f.readlines() for _msg in output_mypy[path]: - *_, _lineno, msg_typ, msg = _msg.split(":") - - msg = _strip_filename(msg) - lineno = int(_lineno) + lineno, msg = _strip_filename(_msg) expression = expression_list[lineno - 1].rstrip("\n") - msg_typ = msg_typ.strip() - assert msg_typ in {"error", "note"} if 
LINENO_MAPPING[lineno] in _EXTENDED_PRECISION_LIST: - if msg_typ == "error": - raise ValueError(f"Unexpected reveal line format: {lineno}") - else: - marker = FORMAT_DICT[LINENO_MAPPING[lineno]] - _test_reveal(path, expression, marker, msg, lineno) - else: - if msg_typ == "error": - marker = "Module has no attribute" - _test_fail(path, expression, marker, msg, lineno) + raise AssertionError(_REVEAL_MSG.format(lineno, msg)) + elif "error" not in msg: + _test_fail( + path, expression, msg, 'Expression is of type "Any"', lineno + ) From c141c2b605c0d64eac01a95dfbad6bf08efd5397 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 4 Sep 2023 16:28:19 +0200 Subject: [PATCH 098/120] TYP: Bump mypy to 1.5.1 --- environment.yml | 2 +- numpy/typing/tests/data/fail/lib_polynomial.pyi | 10 +++++----- numpy/typing/tests/data/reveal/arithmetic.pyi | 2 +- test_requirements.txt | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/environment.yml b/environment.yml index 63f80b745b0d..ebdb3076abc2 100644 --- a/environment.yml +++ b/environment.yml @@ -26,7 +26,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.2.0 # needed for python < 3.10 - - mypy=1.4.1 + - mypy=1.5.1 # For building docs - sphinx>=4.5.0 - sphinx-design diff --git a/numpy/typing/tests/data/fail/lib_polynomial.pyi b/numpy/typing/tests/data/fail/lib_polynomial.pyi index ca02d7bde60d..e51b6b58e307 100644 --- a/numpy/typing/tests/data/fail/lib_polynomial.pyi +++ b/numpy/typing/tests/data/fail/lib_polynomial.pyi @@ -8,6 +8,11 @@ AR_U: npt.NDArray[np.str_] poly_obj: np.poly1d +np.polymul(AR_f8, AR_U) # E: incompatible type +np.polydiv(AR_f8, AR_U) # E: incompatible type + +5**poly_obj # E: No overload variant + np.polyint(AR_U) # E: incompatible type np.polyint(AR_f8, m=1j) # E: No overload variant @@ -22,8 +27,3 @@ np.polyfit(AR_f8, AR_f8, 1, cov="bob") # E: No overload variant np.polyval(AR_f8, AR_U) # E: incompatible type np.polyadd(AR_f8, AR_U) # E: incompatible type np.polysub(AR_f8, AR_U) # E: incompatible type -np.polymul(AR_f8, AR_U) # E: incompatible type -np.polydiv(AR_f8, AR_U) # E: incompatible type - -5**poly_obj # E: No overload variant -hash(poly_obj) diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 5725e5c4da73..6291fda6cefc 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -461,7 +461,7 @@ assert_type(i8 + AR_f, npt.NDArray[np.floating[Any]]) assert_type(u8 + u8, np.uint64) assert_type(u8 + i4, Any) -assert_type(u8 + u4, np.signedinteger[_32Bit | _64Bit]) +assert_type(u8 + u4, np.unsignedinteger[_32Bit | _64Bit]) assert_type(u8 + b_, np.uint64) assert_type(u8 + b, np.uint64) assert_type(u8 + c, np.complex128) diff --git a/test_requirements.txt b/test_requirements.txt index 5d52d9843432..ff1ed284e37d 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -13,7 +13,7 @@ cffi; python_version < '3.10' # For testing types. 
Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.4.1; platform_python_implementation != "PyPy" +mypy==1.5.1; platform_python_implementation != "PyPy" typing_extensions>=4.2.0 # for optional f2py encoding detection charset-normalizer From 6d4d5aa6db18fdd969a447297bd8d2a050e6b8ef Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 4 Sep 2023 18:39:16 +0200 Subject: [PATCH 099/120] TYP: More `np.ctypeslib` test fixes for windows --- numpy/typing/tests/data/reveal/ctypeslib.pyi | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/reveal/ctypeslib.pyi b/numpy/typing/tests/data/reveal/ctypeslib.pyi index f306bd9c1d0f..a9712c074c40 100644 --- a/numpy/typing/tests/data/reveal/ctypeslib.pyi +++ b/numpy/typing/tests/data/reveal/ctypeslib.pyi @@ -57,11 +57,10 @@ assert_type(np.ctypeslib.as_ctypes(AR_bool.take(0)), ct.c_bool) assert_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0)), ct.c_ubyte) assert_type(np.ctypeslib.as_ctypes(AR_ushort.take(0)), ct.c_ushort) assert_type(np.ctypeslib.as_ctypes(AR_uintc.take(0)), ct.c_uint) -assert_type(np.ctypeslib.as_ctypes(AR_uint.take(0)), ct.c_ulong) + assert_type(np.ctypeslib.as_ctypes(AR_byte.take(0)), ct.c_byte) assert_type(np.ctypeslib.as_ctypes(AR_short.take(0)), ct.c_short) assert_type(np.ctypeslib.as_ctypes(AR_intc.take(0)), ct.c_int) -assert_type(np.ctypeslib.as_ctypes(AR_int.take(0)), ct.c_long) assert_type(np.ctypeslib.as_ctypes(AR_single.take(0)), ct.c_float) assert_type(np.ctypeslib.as_ctypes(AR_double.take(0)), ct.c_double) assert_type(np.ctypeslib.as_ctypes(AR_void.take(0)), Any) @@ -85,8 +84,12 @@ if sys.platform == "win32": assert_type(np.ctypeslib.as_ctypes_type(np.uint), type[ct.c_uint]) assert_type(np.ctypeslib.as_ctypes(AR_uint), ct.Array[ct.c_uint]) assert_type(np.ctypeslib.as_ctypes(AR_int), ct.Array[ct.c_int]) + assert_type(np.ctypeslib.as_ctypes(AR_uint.take(0)), ct.c_uint) + assert_type(np.ctypeslib.as_ctypes(AR_int.take(0)), ct.c_int) else: assert_type(np.ctypeslib.as_ctypes_type(np.int_), type[ct.c_long]) assert_type(np.ctypeslib.as_ctypes_type(np.uint), type[ct.c_ulong]) assert_type(np.ctypeslib.as_ctypes(AR_uint), ct.Array[ct.c_ulong]) assert_type(np.ctypeslib.as_ctypes(AR_int), ct.Array[ct.c_long]) + assert_type(np.ctypeslib.as_ctypes(AR_uint.take(0)), ct.c_ulong) + assert_type(np.ctypeslib.as_ctypes(AR_int.take(0)), ct.c_long) From ca0e14063d6095ec200ad08bea67cf781536e6b0 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 4 Sep 2023 12:49:48 -0600 Subject: [PATCH 100/120] MAINT: Bump actions/checkout from 3.6.0 to 4.0.0 See #24636. 
--- .github/workflows/build_test.yml | 38 ++++++++++++------------- .github/workflows/codeql.yml | 2 +- .github/workflows/cygwin.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/emscripten.yml | 2 +- .github/workflows/linux_meson.yml | 2 +- .github/workflows/macos.yml | 4 +-- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 6 ++-- .github/workflows/windows_clangcl.yml | 2 +- .github/workflows/windows_meson.yml | 4 +-- 11 files changed, 33 insertions(+), 33 deletions(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index b0a24d7730a1..e570c5540415 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -31,7 +31,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -51,7 +51,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -70,7 +70,7 @@ jobs: env: EXPECT_CPU_FEATURES: "SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL" steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -85,7 +85,7 @@ jobs: runs-on: ubuntu-20.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -124,7 +124,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Ddisable-optimization=true" steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -140,7 +140,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -156,7 +156,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C,AVX2,FMA3" steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -172,7 +172,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C" steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -188,7 +188,7 @@ jobs: env: USE_DEBUG: 1 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -205,7 +205,7 @@ jobs: env: NPY_USE_BLAS_ILP64: 1 steps: - - uses: 
actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -223,7 +223,7 @@ jobs: RUN_COVERAGE: 1 INSTALL_PICKLE5: 1 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -245,7 +245,7 @@ jobs: NPY_LAPACK_ORDER: MKL,OPENBLAS,ATLAS,LAPACK USE_ASV: 1 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -263,7 +263,7 @@ jobs: NPY_USE_BLAS_ILP64: 1 NPY_RELAXED_STRIDES_DEBUG: 1 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -279,7 +279,7 @@ jobs: env: USE_WHEEL: 1 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -298,7 +298,7 @@ jobs: ATLAS: None DOWNLOAD_OPENBLAS: '' steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -314,7 +314,7 @@ jobs: env: USE_SDIST: 1 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -329,7 +329,7 @@ jobs: runs-on: ubuntu-22.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -385,7 +385,7 @@ jobs: needs: [smoke_test] runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -414,7 +414,7 @@ jobs: needs: [smoke_test] runs-on: ubuntu-latest steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.4.0 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 3b34b3a87c99..08bcf63fdb75 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,7 +41,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 526fb5d94de8..55a0bc50a628 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -20,7 +20,7 @@ jobs: runs-on: windows-latest if: "github.repository == 'numpy/numpy'" steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 1f59fba0b1ff..07127a7f6cb5 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,6 +15,6 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: 'Dependency Review' uses: actions/dependency-review-action@1360a344ccb0ab6e9475edef90ad2f46bf8003b1 # v3.0.6 diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 9a13dba61f61..8e23c1bc4836 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -31,7 +31,7 @@ jobs: NODE_VERSION: 18 steps: - name: Checkout numpy - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: true # versioneer.py requires the latest tag to be reachable. Here we diff --git a/.github/workflows/linux_meson.yml b/.github/workflows/linux_meson.yml index b489c9e3f12f..7be5cb38bfc2 100644 --- a/.github/workflows/linux_meson.yml +++ b/.github/workflows/linux_meson.yml @@ -31,7 +31,7 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index ee445220607c..29f141b10f9d 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -26,7 +26,7 @@ jobs: python-version: ["3.11"] steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -104,7 +104,7 @@ jobs: if: "github.repository == 'numpy/numpy'" runs-on: macos-13 steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index f4cf135a426e..6e9dfa0a445f 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -25,7 +25,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.1.0 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: persist-credentials: false diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 53c280afbff2..53ade8a876db 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -43,7 +43,7 @@ jobs: message: ${{ 
steps.commit_message.outputs.message }} steps: - name: Checkout numpy - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 # Gets the correct commit message for pull request with: ref: ${{ github.event.pull_request.head.sha }} @@ -91,7 +91,7 @@ jobs: IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} steps: - name: Checkout numpy - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: true # versioneer.py requires the latest tag to be reachable. Here we @@ -171,7 +171,7 @@ jobs: # IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} steps: - name: Checkout numpy - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: true # versioneer.py requires the latest tag to be reachable. Here we diff --git a/.github/workflows/windows_clangcl.yml b/.github/workflows/windows_clangcl.yml index 223d4809da89..3f3201df587b 100644 --- a/.github/workflows/windows_clangcl.yml +++ b/.github/workflows/windows_clangcl.yml @@ -23,7 +23,7 @@ jobs: if: "github.repository == 'numpy/numpy'" steps: - name: Checkout - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 diff --git a/.github/workflows/windows_meson.yml b/.github/workflows/windows_meson.yml index f6f5c932ee10..851284a20fb0 100644 --- a/.github/workflows/windows_meson.yml +++ b/.github/workflows/windows_meson.yml @@ -23,7 +23,7 @@ jobs: if: "github.repository == 'numpy/numpy'" steps: - name: Checkout - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 @@ -79,7 +79,7 @@ jobs: if: "github.repository == 'numpy/numpy'" steps: - name: Checkout - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 with: submodules: recursive fetch-depth: 0 From 4fe7ea30423be441f8a3aff92b8531eb84980539 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Tue, 5 Sep 2023 16:42:35 +0000 Subject: [PATCH 101/120] ENH: ``meson`` backend for ``f2py`` (#24532) * FIX: Import f2py2e rather than f2py for run_main * FIX: Import f2py2e instead of f2py * ENH: Add F2PY back-end work from gh-22225 Co-authored-by: NamamiShanker * ENH: Add meson skeleton from gh-2225 Co-authored-by: NamamiShanker * MAINT: Trim backend.py down to f2py2e flags * ENH: Add a factory function for backends * ENH: Add a distutils backend * ENH: Handle --backends in f2py Defaults to distutils for now * DOC: Add some minor comments in f2py2e * MAINT: Refactor and rework meson.build.src * MAINT: Add objects * MAINT: Cleanup distutils backend * MAINT: Refactor to add everything back to backend Necessary for the meson.build for now. 
Refactors / cleanup needs better argument handling in f2py2e * MAINT: Fix overly long line * BUG: Construct wrappers for meson backend * MAINT: Rework, simplify template massively * ENH: Truncate meson.build to skeleton only * MAINT: Minor backend housekeeping, name changes * MAINT: Less absolute paths, update setup.py [f2py] * MAINT: Move f2py module name functionality Previously part of np.distutils * ENH: Handle .pyf files * TST: Fix typo in isoFortranEnvMap.f90 * MAINT: Typo in f2py2e support for pyf files * DOC: Add release note for --backend * MAINT: Conditional switch for Python 3.12 [f2py] * MAINT: No absolute paths in backend [f2py-meson] The files are copied over anyway, this makes it easier to extend the generated skeleton * MAINT: Prettier generated meson.build files [f2py] * ENH: Add meson's dependency(blah) to f2py * DOC: Document the new flag * MAINT: Simplify and rename backend template [f2py] Co-authored-by: rgommers * ENH: Support build_type via --debug [f2py-meson] * MAINT,DOC: Reduce warn,rework doc [f2py-meson] Co-authored-by: rgommers * ENH: Rework deps: to --dep calls [f2py-meson] Also shows how incremental updates to the parser can be done. * MAINT,DOC: Add --backend to argparse, add docs * MAINT: Rename meson template [f2py-meson] * MAINT: Add meson.build for f2py Should address https://github.com/numpy/numpy/pull/22225#issuecomment-1697208937 * BLD: remove duplicate f2py handling in meson.build files --------- Co-authored-by: Namami Shanker Co-authored-by: NamamiShanker Co-authored-by: rgommers Co-authored-by: Ralf Gommers --- .../upcoming_changes/24532.new_feature.rst | 12 ++ numpy/distutils/command/build_src.py | 8 +- numpy/f2py/_backends/__init__.py | 9 + numpy/f2py/_backends/_backend.py | 46 +++++ numpy/f2py/_backends/_distutils.py | 75 +++++++++ numpy/f2py/_backends/_meson.py | 157 ++++++++++++++++++ numpy/f2py/_backends/meson.build.template | 42 +++++ numpy/f2py/auxfuncs.py | 20 ++- numpy/f2py/f2py2e.py | 134 +++++++++------ numpy/f2py/setup.py | 5 +- .../tests/src/f2cmap/isoFortranEnvMap.f90 | 2 +- 11 files changed, 451 insertions(+), 59 deletions(-) create mode 100644 doc/release/upcoming_changes/24532.new_feature.rst create mode 100644 numpy/f2py/_backends/__init__.py create mode 100644 numpy/f2py/_backends/_backend.py create mode 100644 numpy/f2py/_backends/_distutils.py create mode 100644 numpy/f2py/_backends/_meson.py create mode 100644 numpy/f2py/_backends/meson.build.template diff --git a/doc/release/upcoming_changes/24532.new_feature.rst b/doc/release/upcoming_changes/24532.new_feature.rst new file mode 100644 index 000000000000..504b1d431cff --- /dev/null +++ b/doc/release/upcoming_changes/24532.new_feature.rst @@ -0,0 +1,12 @@ +``meson`` backend for ``f2py`` +------------------------------ +``f2py`` in compile mode (i.e. ``f2py -c``) now accepts the ``--backend meson`` option. This is the default option +for Python ``3.12`` on-wards. Older versions will still default to ``--backend +distutils``. + +To support this in realistic use-cases, in compile mode ``f2py`` takes a +``--dep`` flag one or many times which maps to ``dependency()`` calls in the +``meson`` backend, and does nothing in the ``distutils`` backend. + + +There are no changes for users of ``f2py`` only as a code generator, i.e. without ``-c``. 
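As a rough illustration of the new flags described in the release note above, a compile-mode invocation could look like the following sketch. The module name ``fib``, the source file ``fib.f90`` and the ``lapack`` dependency are placeholder names for this example only; they are not taken from the patch itself.

    # hypothetical example: compile with the meson backend and one meson-resolved dependency
    python -m numpy.f2py -c fib.f90 -m fib --backend meson --dep lapack

On Python 3.12 and newer the ``--backend meson`` flag can be omitted, since meson is the default backend there.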
diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py index bf3d03c70e44..7303db124cc8 100644 --- a/numpy/distutils/command/build_src.py +++ b/numpy/distutils/command/build_src.py @@ -539,8 +539,8 @@ def f2py_sources(self, sources, extension): if (self.force or newer_group(depends, target_file, 'newer')) \ and not skip_f2py: log.info("f2py: %s" % (source)) - import numpy.f2py - numpy.f2py.run_main(f2py_options + from numpy.f2py import f2py2e + f2py2e.run_main(f2py_options + ['--build-dir', target_dir, source]) else: log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) @@ -558,8 +558,8 @@ def f2py_sources(self, sources, extension): and not skip_f2py: log.info("f2py:> %s" % (target_file)) self.mkpath(target_dir) - import numpy.f2py - numpy.f2py.run_main(f2py_options + ['--lower', + from numpy.f2py import f2py2e + f2py2e.run_main(f2py_options + ['--lower', '--build-dir', target_dir]+\ ['-m', ext_name]+f_sources) else: diff --git a/numpy/f2py/_backends/__init__.py b/numpy/f2py/_backends/__init__.py new file mode 100644 index 000000000000..e91393c14be3 --- /dev/null +++ b/numpy/f2py/_backends/__init__.py @@ -0,0 +1,9 @@ +def f2py_build_generator(name): + if name == "meson": + from ._meson import MesonBackend + return MesonBackend + elif name == "distutils": + from ._distutils import DistutilsBackend + return DistutilsBackend + else: + raise ValueError(f"Unknown backend: {name}") diff --git a/numpy/f2py/_backends/_backend.py b/numpy/f2py/_backends/_backend.py new file mode 100644 index 000000000000..a7d43d2587b2 --- /dev/null +++ b/numpy/f2py/_backends/_backend.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod + + +class Backend(ABC): + def __init__( + self, + modulename, + sources, + extra_objects, + build_dir, + include_dirs, + library_dirs, + libraries, + define_macros, + undef_macros, + f2py_flags, + sysinfo_flags, + fc_flags, + flib_flags, + setup_flags, + remove_build_dir, + extra_dat, + ): + self.modulename = modulename + self.sources = sources + self.extra_objects = extra_objects + self.build_dir = build_dir + self.include_dirs = include_dirs + self.library_dirs = library_dirs + self.libraries = libraries + self.define_macros = define_macros + self.undef_macros = undef_macros + self.f2py_flags = f2py_flags + self.sysinfo_flags = sysinfo_flags + self.fc_flags = fc_flags + self.flib_flags = flib_flags + self.setup_flags = setup_flags + self.remove_build_dir = remove_build_dir + self.extra_dat = extra_dat + + @abstractmethod + def compile(self) -> None: + """Compile the wrapper.""" + pass diff --git a/numpy/f2py/_backends/_distutils.py b/numpy/f2py/_backends/_distutils.py new file mode 100644 index 000000000000..e548fc543010 --- /dev/null +++ b/numpy/f2py/_backends/_distutils.py @@ -0,0 +1,75 @@ +from ._backend import Backend + +from numpy.distutils.core import setup, Extension +from numpy.distutils.system_info import get_info +from numpy.distutils.misc_util import dict_append +from numpy.exceptions import VisibleDeprecationWarning +import os +import sys +import shutil +import warnings + + +class DistutilsBackend(Backend): + def __init__(sef, *args, **kwargs): + warnings.warn( + "distutils has been deprecated since NumPy 1.26." 
+ "Use the Meson backend instead, or generate wrappers" + "without -c and use a custom build script", + VisibleDeprecationWarning, + stacklevel=2, + ) + super().__init__(*args, **kwargs) + + def compile(self): + num_info = {} + if num_info: + self.include_dirs.extend(num_info.get("include_dirs", [])) + ext_args = { + "name": self.modulename, + "sources": self.sources, + "include_dirs": self.include_dirs, + "library_dirs": self.library_dirs, + "libraries": self.libraries, + "define_macros": self.define_macros, + "undef_macros": self.undef_macros, + "extra_objects": self.extra_objects, + "f2py_options": self.f2py_flags, + } + + if self.sysinfo_flags: + for n in self.sysinfo_flags: + i = get_info(n) + if not i: + print( + f"No {repr(n)} resources found" + "in system (try `f2py --help-link`)" + ) + dict_append(ext_args, **i) + + ext = Extension(**ext_args) + + sys.argv = [sys.argv[0]] + self.setup_flags + sys.argv.extend( + [ + "build", + "--build-temp", + self.build_dir, + "--build-base", + self.build_dir, + "--build-platlib", + ".", + "--disable-optimization", + ] + ) + + if self.fc_flags: + sys.argv.extend(["config_fc"] + self.fc_flags) + if self.flib_flags: + sys.argv.extend(["build_ext"] + self.flib_flags) + + setup(ext_modules=[ext]) + + if self.remove_build_dir and os.path.exists(self.build_dir): + print(f"Removing build directory {self.build_dir}") + shutil.rmtree(self.build_dir) diff --git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py new file mode 100644 index 000000000000..3176a5e08f30 --- /dev/null +++ b/numpy/f2py/_backends/_meson.py @@ -0,0 +1,157 @@ +from __future__ import annotations + +import errno +import shutil +import subprocess +from pathlib import Path + +from ._backend import Backend +from string import Template + +import warnings + + +class MesonTemplate: + """Template meson build file generation class.""" + + def __init__( + self, + modulename: str, + sources: list[Path], + deps: list[str], + object_files: list[Path], + linker_args: list[str], + c_args: list[str], + build_type: str, + ): + self.modulename = modulename + self.build_template_path = ( + Path(__file__).parent.absolute() / "meson.build.template" + ) + self.sources = sources + self.deps = deps + self.substitutions = {} + self.objects = object_files + self.pipeline = [ + self.initialize_template, + self.sources_substitution, + self.deps_substitution, + ] + self.build_type = build_type + + def meson_build_template(self) -> str: + if not self.build_template_path.is_file(): + raise FileNotFoundError( + errno.ENOENT, + "Meson build template" + f" {self.build_template_path.absolute()}" + " does not exist.", + ) + return self.build_template_path.read_text() + + def initialize_template(self) -> None: + self.substitutions["modulename"] = self.modulename + self.substitutions["buildtype"] = self.build_type + + def sources_substitution(self) -> None: + indent = " " * 21 + self.substitutions["source_list"] = f",\n{indent}".join( + [f"'{source}'" for source in self.sources] + ) + + def deps_substitution(self) -> None: + indent = " " * 21 + self.substitutions["dep_list"] = f",\n{indent}".join( + [f"dependency('{dep}')" for dep in self.deps] + ) + + def generate_meson_build(self): + for node in self.pipeline: + node() + template = Template(self.meson_build_template()) + return template.substitute(self.substitutions) + + +class MesonBackend(Backend): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.dependencies = self.extra_dat.get("dependencies", []) + 
self.meson_build_dir = "bbdir" + self.build_type = ( + "debug" if any("debug" in flag for flag in self.fc_flags) else "release" + ) + + def _move_exec_to_root(self, build_dir: Path): + walk_dir = Path(build_dir) / self.meson_build_dir + path_objects = walk_dir.glob(f"{self.modulename}*.so") + for path_object in path_objects: + shutil.move(path_object, Path.cwd()) + + def _get_build_command(self): + return [ + "meson", + "setup", + self.meson_build_dir, + ] + + def write_meson_build(self, build_dir: Path) -> None: + """Writes the meson build file at specified location""" + meson_template = MesonTemplate( + self.modulename, + self.sources, + self.dependencies, + self.extra_objects, + self.flib_flags, + self.fc_flags, + self.build_type, + ) + src = meson_template.generate_meson_build() + Path(build_dir).mkdir(parents=True, exist_ok=True) + meson_build_file = Path(build_dir) / "meson.build" + meson_build_file.write_text(src) + return meson_build_file + + def run_meson(self, build_dir: Path): + completed_process = subprocess.run(self._get_build_command(), cwd=build_dir) + if completed_process.returncode != 0: + raise subprocess.CalledProcessError( + completed_process.returncode, completed_process.args + ) + completed_process = subprocess.run( + ["meson", "compile", "-C", self.meson_build_dir], cwd=build_dir + ) + if completed_process.returncode != 0: + raise subprocess.CalledProcessError( + completed_process.returncode, completed_process.args + ) + + def compile(self) -> None: + self.sources = _prepare_sources(self.modulename, self.sources, self.build_dir) + self.write_meson_build(self.build_dir) + self.run_meson(self.build_dir) + self._move_exec_to_root(self.build_dir) + + +def _prepare_sources(mname, sources, bdir): + extended_sources = sources.copy() + Path(bdir).mkdir(parents=True, exist_ok=True) + # Copy sources + for source in sources: + shutil.copy(source, bdir) + generated_sources = [ + Path(f"{mname}module.c"), + Path(f"{mname}-f2pywrappers2.f90"), + Path(f"{mname}-f2pywrappers.f"), + ] + bdir = Path(bdir) + for generated_source in generated_sources: + if generated_source.exists(): + shutil.copy(generated_source, bdir / generated_source.name) + extended_sources.append(generated_source.name) + generated_source.unlink() + extended_sources = [ + Path(source).name + for source in extended_sources + if not Path(source).suffix == ".pyf" + ] + return extended_sources diff --git a/numpy/f2py/_backends/meson.build.template b/numpy/f2py/_backends/meson.build.template new file mode 100644 index 000000000000..545e3995218a --- /dev/null +++ b/numpy/f2py/_backends/meson.build.template @@ -0,0 +1,42 @@ +project('${modulename}', + ['c', 'fortran'], + version : '0.1', + meson_version: '>= 1.1.0', + default_options : [ + 'warning_level=1', + 'buildtype=${buildtype}' + ]) + +py = import('python').find_installation(pure: false) +py_dep = py.dependency() + +incdir_numpy = run_command(py, + ['-c', 'import os; os.chdir(".."); import numpy; print(numpy.get_include())'], + check : true +).stdout().strip() + +incdir_f2py = run_command(py, + ['-c', 'import os; os.chdir(".."); import numpy.f2py; print(numpy.f2py.get_include())'], + check : true +).stdout().strip() + +inc_np = include_directories(incdir_numpy) +np_dep = declare_dependency(include_directories: inc_np) + +incdir_f2py = incdir_numpy / '..' / '..' 
/ 'f2py' / 'src' +inc_f2py = include_directories(incdir_f2py) +fortranobject_c = incdir_f2py / 'fortranobject.c' + +inc_np = include_directories(incdir_numpy, incdir_f2py) + +py.extension_module('${modulename}', + [ +${source_list}, + fortranobject_c + ], + include_directories: [inc_np], + dependencies : [ + py_dep, +${dep_list} + ], + install : true) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index c0864b5bc613..535e324286bd 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -16,6 +16,7 @@ """ import pprint import sys +import re import types from functools import reduce from copy import deepcopy @@ -43,7 +44,7 @@ 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired', 'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring', 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', - 'issubroutine', + 'issubroutine', 'get_f2py_modulename', 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char', 'isunsigned_chararray', 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', @@ -912,3 +913,20 @@ def deep_merge(dict1, dict2): else: merged_dict[key] = value return merged_dict + +_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', + re.I).match +_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' + r'__user__[\w_]*)', re.I).match + +def get_f2py_modulename(source): + name = None + with open(source) as f: + for line in f: + m = _f2py_module_name_match(line) + if m: + if _f2py_user_module_name_match(line): # skip *__user__* names + continue + name = m.group('name') + break + return name diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 10508488dc04..1cfe8cddd68c 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -19,6 +19,8 @@ import pprint import re from pathlib import Path +from itertools import dropwhile +import argparse from . import crackfortran from . import rules @@ -28,6 +30,7 @@ from . import f90mod_rules from . import __version__ from . import capi_maps +from numpy.f2py._backends import f2py_build_generator f2py_version = __version__.version numpy_version = __version__.version @@ -126,7 +129,7 @@ -v Print f2py version ID and exit. -numpy.distutils options (only effective with -c): +build backend options (only effective with -c): --fcompiler= Specify Fortran compiler type by vendor --compiler= Specify C compiler type (as defined by distutils) @@ -142,6 +145,22 @@ --noarch Compile without arch-dependent optimization --debug Compile with debugging information + --dep + Specify a meson dependency for the module. This may + be passed multiple times for multiple dependencies. + Dependencies are stored in a list for further processing. + + Example: --dep lapack --dep scalapack + This will identify "lapack" and "scalapack" as dependencies + and remove them from argv, leaving a dependencies list + containing ["lapack", "scalapack"]. + + --backend + Specify the build backend for the compilation process. + The supported backends are 'meson' and 'distutils'. + If not specified, defaults to 'distutils'. On + Python 3.12 or higher, the default is 'meson'. 
+ Extra options (only effective with -c): --link- Link extension module with as defined @@ -251,6 +270,8 @@ def scaninputline(inputline): 'f2py option --include_paths is deprecated, use --include-paths instead.\n') f7 = 1 elif l[:15] in '--include-paths': + # Similar to using -I with -c, however this is + # also used during generation of wrappers f7 = 1 elif l == '--skip-empty-wrappers': emptygen = False @@ -501,6 +522,25 @@ def get_prefix(module): p = os.path.dirname(os.path.dirname(module.__file__)) return p +def preparse_sysargv(): + # To keep backwards bug compatibility, newer flags are handled by argparse, + # and `sys.argv` is passed to the rest of `f2py` as is. + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("--dep", action="append", dest="dependencies") + parser.add_argument("--backend", choices=['meson', 'distutils'], default='distutils') + + args, remaining_argv = parser.parse_known_args() + sys.argv = [sys.argv[0]] + remaining_argv + + backend_key = args.backend + if sys.version_info >= (3, 12) and backend_key == 'distutils': + outmess('Cannot use distutils backend with Python 3.12, using meson backend instead.') + backend_key = 'meson' + + return { + "dependencies": args.dependencies or [], + "backend": backend_key + } def run_compile(): """ @@ -508,6 +548,13 @@ def run_compile(): """ import tempfile + # Collect dependency flags, preprocess sys.argv + argy = preparse_sysargv() + dependencies = argy["dependencies"] + backend_key = argy["backend"] + build_backend = f2py_build_generator(backend_key) + + i = sys.argv.index('-c') del sys.argv[i] @@ -546,7 +593,6 @@ def run_compile(): if f2py_flags2 and f2py_flags2[-1] != ':': f2py_flags2.append(':') f2py_flags.extend(f2py_flags2) - sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] _reg3 = re.compile( r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') @@ -598,17 +644,17 @@ def run_compile(): del sys.argv[i + 1], sys.argv[i] sources = sys.argv[1:] + pyf_files = [] if '-m' in sys.argv: i = sys.argv.index('-m') modulename = sys.argv[i + 1] del sys.argv[i + 1], sys.argv[i] sources = sys.argv[1:] else: - from numpy.distutils.command.build_src import get_f2py_modulename - pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources) - sources = pyf_files + sources + pyf_files, _sources = filter_files('', '[.]pyf([.]src|)', sources) + sources = pyf_files + _sources for f in pyf_files: - modulename = get_f2py_modulename(f) + modulename = auxfuncs.get_f2py_modulename(f) if modulename: break @@ -627,52 +673,36 @@ def run_compile(): else: print('Invalid use of -D:', name_value) - from numpy.distutils.system_info import get_info - - num_info = {} - if num_info: - include_dirs.extend(num_info.get('include_dirs', [])) - - from numpy.distutils.core import setup, Extension - ext_args = {'name': modulename, 'sources': sources, - 'include_dirs': include_dirs, - 'library_dirs': library_dirs, - 'libraries': libraries, - 'define_macros': define_macros, - 'undef_macros': undef_macros, - 'extra_objects': extra_objects, - 'f2py_options': f2py_flags, - } - - if sysinfo_flags: - from numpy.distutils.misc_util import dict_append - for n in sysinfo_flags: - i = get_info(n) - if not i: - outmess('No %s resources found in system' - ' (try `f2py --help-link`)\n' % (repr(n))) - dict_append(ext_args, **i) - - ext = Extension(**ext_args) - sys.argv = [sys.argv[0]] + setup_flags - sys.argv.extend(['build', - '--build-temp', build_dir, - '--build-base', build_dir, - '--build-platlib', '.', - # disable CCompilerOpt - 
'--disable-optimization']) - if fc_flags: - sys.argv.extend(['config_fc'] + fc_flags) - if flib_flags: - sys.argv.extend(['build_ext'] + flib_flags) - - setup(ext_modules=[ext]) - - if remove_build_dir and os.path.exists(build_dir): - import shutil - outmess('Removing build directory %s\n' % (build_dir)) - shutil.rmtree(build_dir) - + # Construct wrappers / signatures / things + if backend_key == 'meson': + outmess('Using meson backend\nWill pass --lower to f2py\nSee https://numpy.org/doc/stable/f2py/buildtools/meson.html') + f2py_flags.append('--lower') + if pyf_files: + run_main(f" {' '.join(f2py_flags)} {' '.join(pyf_files)}".split()) + else: + run_main(f" {' '.join(f2py_flags)} -m {modulename} {' '.join(sources)}".split()) + + # Now use the builder + builder = build_backend( + modulename, + sources, + extra_objects, + build_dir, + include_dirs, + library_dirs, + libraries, + define_macros, + undef_macros, + f2py_flags, + sysinfo_flags, + fc_flags, + flib_flags, + setup_flags, + remove_build_dir, + {"dependencies": dependencies}, + ) + + builder.compile() def main(): if '--help-link' in sys.argv[1:]: diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py index 499609f96600..98f1e9aaae84 100644 --- a/numpy/f2py/setup.py +++ b/numpy/f2py/setup.py @@ -26,10 +26,13 @@ def configuration(parent_package='', top_path=None): config = Configuration('f2py', parent_package, top_path) config.add_subpackage('tests') + config.add_subpackage('_backends') config.add_data_dir('tests/src') config.add_data_files( 'src/fortranobject.c', - 'src/fortranobject.h') + 'src/fortranobject.h', + 'backends/meson.build.template', + ) config.add_data_files('*.pyi') return config diff --git a/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 b/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 index 3f0e12c76833..1e1dc1d4054b 100644 --- a/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 +++ b/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 @@ -4,6 +4,6 @@ subroutine func1(n, x, res) integer(int64), intent(in) :: n real(real64), intent(in) :: x(n) real(real64), intent(out) :: res -Cf2py intent(hide) :: n +!f2py intent(hide) :: n res = sum(x) end From 9e8a7a8cc2ca26647c7a14efac78b865f7299690 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sun, 20 Aug 2023 03:26:00 +0400 Subject: [PATCH 102/120] SIMD: Refactor partial load Workaround for Clang Clang exhibits aggressive optimization behavior when the `-ftrapping-math` flag is not fully supported, starting from -O1 optimization level. When partially loading a vector register for operations that require filling up the remaining lanes with specific values (e.g., divide operations needing non-zero integers to prevent FP exception divide-by-zero), Clang's optimizer recognizes that the full register is unnecessary for the store operation. Consequently, it optimizes out the fill step involving non-zero integers for the remaining elements. As a solution, we apply the `volatile` keyword to the returned register, followed by a symmetric operand operation like `or`, to inform the compiler about the necessity of the full vector. This refactor involves transferring this workaround from the source files to the universal intrinsic headers, also to guarantee that it is applied by all kernels. Furthermore, the workaround is disabled when the `-ftrapping-math` flag is fully supported by the Clang compiler. This patch also enables `-ftrapping-math` flag for clang-cl and suppress floating point exceptions warnings. 
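As a minimal standalone illustration of the guard (plain SSE2 C, not taken from the patch; the helper name, the scalar fill loop, and the `#if 1` stand-in for the `NPY_SIMD_GUARD_PARTIAL_LOAD` macro are assumptions made for the sketch):

    #include <stdint.h>
    #include <emmintrin.h>   /* SSE2 intrinsics */

    /* Hypothetical helper: load nlane <= 4 int32 values, fill the remaining
     * lanes with `fill`, then guard the result so the compiler cannot drop
     * the fill of the unused lanes. */
    static __m128i load_till_4xi32(const int32_t *ptr, int nlane, int32_t fill)
    {
        int32_t buf[4];
        for (int i = 0; i < 4; ++i) {
            buf[i] = (i < nlane) ? ptr[i] : fill;  /* tail lanes get a safe fill */
        }
        __m128i ret = _mm_loadu_si128((const __m128i *)buf);
    #if 1   /* stands in for NPY_SIMD_GUARD_PARTIAL_LOAD */
        volatile __m128i workaround = ret;     /* force the full vector to be materialized */
        ret = _mm_or_si128(workaround, ret);   /* symmetric op, value unchanged */
    #endif
        return ret;
    }

Because the `or` uses an identical operand, the lane values are unchanged; the only overhead is the volatile round-trip plus one redundant instruction per partial load, roughly the cost the updated headers accept in exchange for keeping the fill visible to the optimizer.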
--- meson.build | 32 ++++- numpy/core/meson.build | 8 +- numpy/core/src/common/simd/avx2/memory.h | 97 +++++++++++---- numpy/core/src/common/simd/avx512/memory.h | 77 ++++++++++-- numpy/core/src/common/simd/neon/memory.h | 52 +++++++-- numpy/core/src/common/simd/simd.h | 42 +++++-- numpy/core/src/common/simd/sse/memory.h | 110 +++++++----------- numpy/core/src/common/simd/vec/memory.h | 51 ++++++-- .../src/umath/loops_arithm_fp.dispatch.c.src | 101 +--------------- .../src/umath/loops_unary_fp.dispatch.c.src | 64 ---------- 10 files changed, 332 insertions(+), 302 deletions(-) diff --git a/meson.build b/meson.build index 0469f7f4590b..9ba86f393d6d 100644 --- a/meson.build +++ b/meson.build @@ -55,11 +55,33 @@ add_project_arguments( # # Clang defaults to a non-strict floating error point model, but we need strict # behavior. `-ftrapping-math` is equivalent to `-ffp-exception-behavior=strict`. -# Note that this is only supported on macOS arm64 as of XCode 14.3 -if cc.get_id() == 'clang' - add_project_arguments( - cc.get_supported_arguments('-ftrapping-math'), language: ['c', 'cpp'], - ) +# This flag is also required to prevent the activation of SIMD partial load workarounds. +# For further clarification, refer to gh-24461. +cc_id = cc.get_id() +if cc_id.startswith('clang') + # Determine the compiler flags for trapping math exceptions. + trapping_math = { + 'clang-cl': '/clang:-ftrapping-math', + }.get(cc_id, '-ftrapping-math') + # Check if the compiler supports the trapping math flag. + if cc.has_argument(trapping_math) + # TODO: Consider upgrading the vendored Meson to 1.3.0 to support the parameter `werror` + # Detect whether the compiler actually supports strict handling of floating-point exceptions + # by treating warnings as errors. + if cc.compiles('int main() { return 0; }', args: [trapping_math, '-Werror']) + trapping_math = [trapping_math, '-DNPY_HAVE_CLANG_FPSTRICT'] + else + # Suppress warnings about unsupported floating-point optimization. + trapping_math = [trapping_math, '-Wno-unsupported-floating-point-opt'] + # Inform the user about the workaround. + message( + 'NumPy is being built against a version of Clang that does not strictly enforce ' + + 'floating-point exception handling. Workarounds will be used, which may impact performance.\n' + + 'Consider upgrading Clang to the latest version.' + ) + endif + add_project_arguments(trapping_math, language: ['c', 'cpp']) + endif endif subdir('meson_cpu') diff --git a/numpy/core/meson.build b/numpy/core/meson.build index ccc060aacb96..d32bb7406d33 100644 --- a/numpy/core/meson.build +++ b/numpy/core/meson.build @@ -838,9 +838,7 @@ foreach gen_mtargets : [ [ 'loops_exponent_log.dispatch.h', src_file.process('src/umath/loops_exponent_log.dispatch.c.src'), - # Enabling SIMD on clang-cl raises spurious FP exceptions - # TODO (seiko2plus): debug spurious FP exceptions for single-precision log/exp - compiler_id == 'clang-cl' ? [] : [ + [ AVX512_SKX, AVX512F, [AVX2, FMA3] ] ], @@ -884,9 +882,7 @@ foreach gen_mtargets : [ [ 'loops_trigonometric.dispatch.h', src_file.process('src/umath/loops_trigonometric.dispatch.c.src'), - # Enabling SIMD on clang-cl raises spurious FP exceptions - # TODO (seiko2plus): debug spurious FP exceptions for single-precision sin/cos - compiler_id == 'clang-cl' ? 
[] : [ + [ AVX512F, [AVX2, FMA3], VSX4, VSX3, VSX2, NEON_VFPV4, diff --git a/numpy/core/src/common/simd/avx2/memory.h b/numpy/core/src/common/simd/avx2/memory.h index 993d3ba0d06c..f18636538174 100644 --- a/numpy/core/src/common/simd/avx2/memory.h +++ b/numpy/core/src/common/simd/avx2/memory.h @@ -196,7 +196,12 @@ NPY_FINLINE npyv_s32 npyv_load_till_s32(const npy_int32 *ptr, npy_uintp nlane, n __m256i vnlane = _mm256_set1_epi32(nlane > 8 ? 8 : (int)nlane); __m256i mask = _mm256_cmpgt_epi32(vnlane, steps); __m256i payload = _mm256_maskload_epi32((const int*)ptr, mask); - return _mm256_blendv_epi8(vfill, payload, mask); + __m256i ret = _mm256_blendv_epi8(vfill, payload, mask); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m256i workaround = ret; + ret = _mm256_or_si256(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s32 npyv_load_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) @@ -205,7 +210,12 @@ NPY_FINLINE npyv_s32 npyv_load_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) const __m256i steps = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7); __m256i vnlane = _mm256_set1_epi32(nlane > 8 ? 8 : (int)nlane); __m256i mask = _mm256_cmpgt_epi32(vnlane, steps); - return _mm256_maskload_epi32((const int*)ptr, mask); + __m256i ret = _mm256_maskload_epi32((const int*)ptr, mask); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m256i workaround = ret; + ret = _mm256_or_si256(workaround, ret); +#endif + return ret; } //// 64 NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 *ptr, npy_uintp nlane, npy_int64 fill) @@ -216,7 +226,12 @@ NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 *ptr, npy_uintp nlane, n __m256i vnlane = npyv_setall_s64(nlane > 4 ? 4 : (int)nlane); __m256i mask = _mm256_cmpgt_epi64(vnlane, steps); __m256i payload = _mm256_maskload_epi64((const long long*)ptr, mask); - return _mm256_blendv_epi8(vfill, payload, mask); + __m256i ret = _mm256_blendv_epi8(vfill, payload, mask); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m256i workaround = ret; + ret = _mm256_or_si256(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s64 npyv_load_tillz_s64(const npy_int64 *ptr, npy_uintp nlane) @@ -225,7 +240,12 @@ NPY_FINLINE npyv_s64 npyv_load_tillz_s64(const npy_int64 *ptr, npy_uintp nlane) const __m256i steps = npyv_set_s64(0, 1, 2, 3); __m256i vnlane = npyv_setall_s64(nlane > 4 ? 4 : (int)nlane); __m256i mask = _mm256_cmpgt_epi64(vnlane, steps); - return _mm256_maskload_epi64((const long long*)ptr, mask); + __m256i ret = _mm256_maskload_epi64((const long long*)ptr, mask); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m256i workaround = ret; + ret = _mm256_or_si256(workaround, ret); +#endif + return ret; } //// 64-bit nlane @@ -241,7 +261,12 @@ NPY_FINLINE npyv_s32 npyv_load2_till_s32(const npy_int32 *ptr, npy_uintp nlane, __m256i vnlane = npyv_setall_s64(nlane > 4 ? 
4 : (int)nlane); __m256i mask = _mm256_cmpgt_epi64(vnlane, steps); __m256i payload = _mm256_maskload_epi64((const long long*)ptr, mask); - return _mm256_blendv_epi8(vfill, payload, mask); + __m256i ret = _mm256_blendv_epi8(vfill, payload, mask); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m256i workaround = ret; + ret = _mm256_or_si256(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s32 npyv_load2_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) @@ -251,19 +276,29 @@ NPY_FINLINE npyv_s32 npyv_load2_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) NPY_FINLINE npyv_u64 npyv_load2_tillz_s64(const npy_int64 *ptr, npy_uintp nlane) { assert(nlane > 0); - npy_int64 m = -((npy_int64)(nlane > 1)); + npy_int64 m = -((npy_int64)(nlane > 1)); __m256i mask = npyv_set_s64(-1, -1, m, m); - return _mm256_maskload_epi64((const long long*)ptr, mask); + __m256i ret = _mm256_maskload_epi64((const long long*)ptr, mask); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m256i workaround = ret; + ret = _mm256_or_si256(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_u64 npyv_load2_till_s64(const npy_int64 *ptr, npy_uintp nlane, npy_int64 fill_lo, npy_int64 fill_hi) { const __m256i vfill = npyv_set_s64(0, 0, fill_lo, fill_hi); - npy_int64 m = -((npy_int64)(nlane > 1)); - __m256i mask = npyv_set_s64(-1, -1, m, m); + npy_int64 m = -((npy_int64)(nlane > 1)); + __m256i mask = npyv_set_s64(-1, -1, m, m); __m256i payload = _mm256_maskload_epi64((const long long*)ptr, mask); - return _mm256_blendv_epi8(vfill, payload, mask); + __m256i ret =_mm256_blendv_epi8(vfill, payload, mask); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m256i workaround = ret; + ret = _mm256_or_si256(workaround, ret); +#endif + return ret; } /********************************* * Non-contiguous partial load @@ -277,9 +312,14 @@ npyv_loadn_till_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane, npy_ const __m256i vfill = _mm256_set1_epi32(fill); const __m256i steps = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7); const __m256i idx = _mm256_mullo_epi32(_mm256_set1_epi32((int)stride), steps); - __m256i vnlane = _mm256_set1_epi32(nlane > 8 ? 8 : (int)nlane); - __m256i mask = _mm256_cmpgt_epi32(vnlane, steps); - return _mm256_mask_i32gather_epi32(vfill, (const int*)ptr, idx, mask, 4); + __m256i vnlane = _mm256_set1_epi32(nlane > 8 ? 8 : (int)nlane); + __m256i mask = _mm256_cmpgt_epi32(vnlane, steps); + __m256i ret = _mm256_mask_i32gather_epi32(vfill, (const int*)ptr, idx, mask, 4); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m256i workaround = ret; + ret = _mm256_or_si256(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s32 @@ -293,9 +333,14 @@ npyv_loadn_till_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane, npy_ const __m256i vfill = npyv_setall_s64(fill); const __m256i idx = npyv_set_s64(0, 1*stride, 2*stride, 3*stride); const __m256i steps = npyv_set_s64(0, 1, 2, 3); - __m256i vnlane = npyv_setall_s64(nlane > 4 ? 4 : (int)nlane); - __m256i mask = _mm256_cmpgt_epi64(vnlane, steps); - return _mm256_mask_i64gather_epi64(vfill, (const long long*)ptr, idx, mask, 8); + __m256i vnlane = npyv_setall_s64(nlane > 4 ? 
4 : (int)nlane); + __m256i mask = _mm256_cmpgt_epi64(vnlane, steps); + __m256i ret = _mm256_mask_i64gather_epi64(vfill, (const long long*)ptr, idx, mask, 8); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m256i workaround = ret; + ret = _mm256_or_si256(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s64 @@ -313,9 +358,14 @@ NPY_FINLINE npyv_s64 npyv_loadn2_till_s32(const npy_int32 *ptr, npy_intp stride, ); const __m256i idx = npyv_set_s64(0, 1*stride, 2*stride, 3*stride); const __m256i steps = npyv_set_s64(0, 1, 2, 3); - __m256i vnlane = npyv_setall_s64(nlane > 4 ? 4 : (int)nlane); - __m256i mask = _mm256_cmpgt_epi64(vnlane, steps); - return _mm256_mask_i64gather_epi64(vfill, (const long long*)ptr, idx, mask, 4); + __m256i vnlane = npyv_setall_s64(nlane > 4 ? 4 : (int)nlane); + __m256i mask = _mm256_cmpgt_epi64(vnlane, steps); + __m256i ret = _mm256_mask_i64gather_epi64(vfill, (const long long*)ptr, idx, mask, 4); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m256i workaround = ret; + ret = _mm256_or_si256(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s32 npyv_loadn2_tillz_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane) @@ -323,7 +373,7 @@ NPY_FINLINE npyv_s32 npyv_loadn2_tillz_s32(const npy_int32 *ptr, npy_intp stride //// 128-bit load over 64-bit stride NPY_FINLINE npyv_s64 npyv_loadn2_till_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane, - npy_int64 fill_lo, npy_int64 fill_hi) + npy_int64 fill_lo, npy_int64 fill_hi) { assert(nlane > 0); __m256i a = npyv_loadl_s64(ptr); @@ -336,7 +386,12 @@ NPY_FINLINE npyv_s64 npyv_loadn2_till_s64(const npy_int64 *ptr, npy_intp stride, __m128i fill = _mm_set_epi64x(fill_hi, fill_lo); #endif __m128i b = nlane > 1 ? _mm_loadu_si128((const __m128i*)(ptr + stride)) : fill; - return _mm256_inserti128_si256(a, b, 1); + __m256i ret = _mm256_inserti128_si256(a, b, 1); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m256i workaround = ret; + ret = _mm256_or_si256(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s64 npyv_loadn2_tillz_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane) diff --git a/numpy/core/src/common/simd/avx512/memory.h b/numpy/core/src/common/simd/avx512/memory.h index fdf96a92c583..e981ef8f6dd1 100644 --- a/numpy/core/src/common/simd/avx512/memory.h +++ b/numpy/core/src/common/simd/avx512/memory.h @@ -248,14 +248,24 @@ NPY_FINLINE npyv_s32 npyv_load_till_s32(const npy_int32 *ptr, npy_uintp nlane, n assert(nlane > 0); const __m512i vfill = _mm512_set1_epi32(fill); const __mmask16 mask = nlane > 15 ? -1 : (1 << nlane) - 1; - return _mm512_mask_loadu_epi32(vfill, mask, (const __m512i*)ptr); + __m512i ret = _mm512_mask_loadu_epi32(vfill, mask, (const __m512i*)ptr); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m512i workaround = ret; + ret = _mm512_or_si512(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s32 npyv_load_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) { assert(nlane > 0); const __mmask16 mask = nlane > 15 ? 
-1 : (1 << nlane) - 1; - return _mm512_maskz_loadu_epi32(mask, (const __m512i*)ptr); + __m512i ret = _mm512_maskz_loadu_epi32(mask, (const __m512i*)ptr); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m512i workaround = ret; + ret = _mm512_or_si512(workaround, ret); +#endif + return ret; } //// 64 NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 *ptr, npy_uintp nlane, npy_int64 fill) @@ -263,14 +273,24 @@ NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 *ptr, npy_uintp nlane, n assert(nlane > 0); const __m512i vfill = npyv_setall_s64(fill); const __mmask8 mask = nlane > 7 ? -1 : (1 << nlane) - 1; - return _mm512_mask_loadu_epi64(vfill, mask, (const __m512i*)ptr); + __m512i ret = _mm512_mask_loadu_epi64(vfill, mask, (const __m512i*)ptr); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m512i workaround = ret; + ret = _mm512_or_si512(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s64 npyv_load_tillz_s64(const npy_int64 *ptr, npy_uintp nlane) { assert(nlane > 0); const __mmask8 mask = nlane > 7 ? -1 : (1 << nlane) - 1; - return _mm512_maskz_loadu_epi64(mask, (const __m512i*)ptr); + __m512i ret = _mm512_maskz_loadu_epi64(mask, (const __m512i*)ptr); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m512i workaround = ret; + ret = _mm512_or_si512(workaround, ret); +#endif + return ret; } //// 64-bit nlane @@ -280,7 +300,12 @@ NPY_FINLINE npyv_s32 npyv_load2_till_s32(const npy_int32 *ptr, npy_uintp nlane, assert(nlane > 0); const __m512i vfill = _mm512_set4_epi32(fill_hi, fill_lo, fill_hi, fill_lo); const __mmask8 mask = nlane > 7 ? -1 : (1 << nlane) - 1; - return _mm512_mask_loadu_epi64(vfill, mask, (const __m512i*)ptr); + __m512i ret = _mm512_mask_loadu_epi64(vfill, mask, (const __m512i*)ptr); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m512i workaround = ret; + ret = _mm512_or_si512(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s32 npyv_load2_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) @@ -293,14 +318,24 @@ NPY_FINLINE npyv_u64 npyv_load2_till_s64(const npy_int64 *ptr, npy_uintp nlane, assert(nlane > 0); const __m512i vfill = _mm512_set4_epi64(fill_hi, fill_lo, fill_hi, fill_lo); const __mmask8 mask = nlane > 3 ? -1 : (1 << (nlane*2)) - 1; - return _mm512_mask_loadu_epi64(vfill, mask, (const __m512i*)ptr); + __m512i ret = _mm512_mask_loadu_epi64(vfill, mask, (const __m512i*)ptr); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m512i workaround = ret; + ret = _mm512_or_si512(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s64 npyv_load2_tillz_s64(const npy_int64 *ptr, npy_uintp nlane) { assert(nlane > 0); const __mmask8 mask = nlane > 3 ? -1 : (1 << (nlane*2)) - 1; - return _mm512_maskz_loadu_epi64(mask, (const __m512i*)ptr); + __m512i ret = _mm512_maskz_loadu_epi64(mask, (const __m512i*)ptr); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m512i workaround = ret; + ret = _mm512_or_si512(workaround, ret); +#endif + return ret; } /********************************* * Non-contiguous partial load @@ -317,7 +352,12 @@ npyv_loadn_till_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane, npy_ const __m512i idx = _mm512_mullo_epi32(steps, _mm512_set1_epi32((int)stride)); const __m512i vfill = _mm512_set1_epi32(fill); const __mmask16 mask = nlane > 15 ? 
-1 : (1 << nlane) - 1; - return _mm512_mask_i32gather_epi32(vfill, mask, idx, (const __m512i*)ptr, 4); + __m512i ret = _mm512_mask_i32gather_epi32(vfill, mask, idx, (const __m512i*)ptr, 4); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m512i workaround = ret; + ret = _mm512_or_si512(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s32 @@ -334,7 +374,12 @@ npyv_loadn_till_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane, npy_ ); const __m512i vfill = npyv_setall_s64(fill); const __mmask8 mask = nlane > 15 ? -1 : (1 << nlane) - 1; - return _mm512_mask_i64gather_epi64(vfill, mask, idx, (const __m512i*)ptr, 8); + __m512i ret = _mm512_mask_i64gather_epi64(vfill, mask, idx, (const __m512i*)ptr, 8); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m512i workaround = ret; + ret = _mm512_or_si512(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s64 @@ -352,7 +397,12 @@ NPY_FINLINE npyv_s64 npyv_loadn2_till_s32(const npy_int32 *ptr, npy_intp stride, ); const __m512i vfill = _mm512_set4_epi32(fill_hi, fill_lo, fill_hi, fill_lo); const __mmask8 mask = nlane > 7 ? -1 : (1 << nlane) - 1; - return _mm512_mask_i64gather_epi64(vfill, mask, idx, (const __m512i*)ptr, 4); + __m512i ret = _mm512_mask_i64gather_epi64(vfill, mask, idx, (const __m512i*)ptr, 4); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m512i workaround = ret; + ret = _mm512_or_si512(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s32 npyv_loadn2_tillz_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane) @@ -369,7 +419,12 @@ NPY_FINLINE npyv_s64 npyv_loadn2_till_s64(const npy_int64 *ptr, npy_intp stride, ); const __mmask8 mask = nlane > 3 ? -1 : (1 << (nlane*2)) - 1; const __m512i vfill = _mm512_set4_epi64(fill_hi, fill_lo, fill_hi, fill_lo); - return _mm512_mask_i64gather_epi64(vfill, mask, idx, (const __m512i*)ptr, 8); + __m512i ret = _mm512_mask_i64gather_epi64(vfill, mask, idx, (const __m512i*)ptr, 8); +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m512i workaround = ret; + ret = _mm512_or_si512(workaround, ret); +#endif + return ret; } // fill zero to rest lanes NPY_FINLINE npyv_s64 npyv_loadn2_tillz_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane) diff --git a/numpy/core/src/common/simd/neon/memory.h b/numpy/core/src/common/simd/neon/memory.h index 6163440c39cd..2dc21e5a4305 100644 --- a/numpy/core/src/common/simd/neon/memory.h +++ b/numpy/core/src/common/simd/neon/memory.h @@ -187,19 +187,28 @@ NPY_FINLINE void npyv_storen2_f64(double *ptr, npy_intp stride, npyv_f64 a) NPY_FINLINE npyv_s32 npyv_load_till_s32(const npy_int32 *ptr, npy_uintp nlane, npy_int32 fill) { assert(nlane > 0); + npyv_s32 a; switch(nlane) { case 1: - return vld1q_lane_s32((const int32_t*)ptr, vdupq_n_s32(fill), 0); + a = vld1q_lane_s32((const int32_t*)ptr, vdupq_n_s32(fill), 0); + break; case 2: - return vcombine_s32(vld1_s32((const int32_t*)ptr), vdup_n_s32(fill)); + a = vcombine_s32(vld1_s32((const int32_t*)ptr), vdup_n_s32(fill)); + break; case 3: - return vcombine_s32( + a = vcombine_s32( vld1_s32((const int32_t*)ptr), vld1_lane_s32((const int32_t*)ptr + 2, vdup_n_s32(fill), 0) ); + break; default: return npyv_load_s32(ptr); } +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile npyv_s32 workaround = a; + a = vorrq_s32(workaround, a); +#endif + return a; } // fill zero to rest lanes NPY_FINLINE npyv_s32 npyv_load_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) @@ -209,7 +218,12 @@ NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 
*ptr, npy_uintp nlane, n { assert(nlane > 0); if (nlane == 1) { - return vcombine_s64(vld1_s64((const int64_t*)ptr), vdup_n_s64(fill)); + npyv_s64 a = vcombine_s64(vld1_s64((const int64_t*)ptr), vdup_n_s64(fill)); + #if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile npyv_s64 workaround = a; + a = vorrq_s64(workaround, a); + #endif + return a; } return npyv_load_s64(ptr); } @@ -224,7 +238,12 @@ NPY_FINLINE npyv_s32 npyv_load2_till_s32(const npy_int32 *ptr, npy_uintp nlane, assert(nlane > 0); if (nlane == 1) { const int32_t NPY_DECL_ALIGNED(16) fill[2] = {fill_lo, fill_hi}; - return vcombine_s32(vld1_s32((const int32_t*)ptr), vld1_s32(fill)); + npyv_s32 a = vcombine_s32(vld1_s32((const int32_t*)ptr), vld1_s32(fill)); + #if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile npyv_s32 workaround = a; + a = vorrq_s32(workaround, a); + #endif + return a; } return npyv_load_s32(ptr); } @@ -256,10 +275,15 @@ npyv_loadn_till_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane, npy_ vfill = vld1q_lane_s32((const int32_t*)ptr + stride, vfill, 1); case 1: vfill = vld1q_lane_s32((const int32_t*)ptr, vfill, 0); - return vfill; + break; default: return npyv_loadn_s32(ptr, stride); } +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile npyv_s32 workaround = vfill; + vfill = vorrq_s32(workaround, vfill); +#endif + return vfill; } NPY_FINLINE npyv_s32 npyv_loadn_tillz_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane) @@ -270,7 +294,7 @@ npyv_loadn_till_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane, npy_ { assert(nlane > 0); if (nlane == 1) { - return vcombine_s64(vld1_s64((const int64_t*)ptr), vdup_n_s64(fill)); + return npyv_load_till_s64(ptr, 1, fill); } return npyv_loadn_s64(ptr, stride); } @@ -285,7 +309,12 @@ NPY_FINLINE npyv_s32 npyv_loadn2_till_s32(const npy_int32 *ptr, npy_intp stride, assert(nlane > 0); if (nlane == 1) { const int32_t NPY_DECL_ALIGNED(16) fill[2] = {fill_lo, fill_hi}; - return vcombine_s32(vld1_s32((const int32_t*)ptr), vld1_s32(fill)); + npyv_s32 a = vcombine_s32(vld1_s32((const int32_t*)ptr), vld1_s32(fill)); + #if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile npyv_s32 workaround = a; + a = vorrq_s32(workaround, a); + #endif + return a; } return npyv_loadn2_s32(ptr, stride); } @@ -293,7 +322,12 @@ NPY_FINLINE npyv_s32 npyv_loadn2_tillz_s32(const npy_int32 *ptr, npy_intp stride { assert(nlane > 0); if (nlane == 1) { - return vcombine_s32(vld1_s32((const int32_t*)ptr), vdup_n_s32(0)); + npyv_s32 a = vcombine_s32(vld1_s32((const int32_t*)ptr), vdup_n_s32(0)); + #if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile npyv_s32 workaround = a; + a = vorrq_s32(workaround, a); + #endif + return a; } return npyv_loadn2_s32(ptr, stride); } diff --git a/numpy/core/src/common/simd/simd.h b/numpy/core/src/common/simd/simd.h index 8c9b14251aa0..a5b1fa0c5521 100644 --- a/numpy/core/src/common/simd/simd.h +++ b/numpy/core/src/common/simd/simd.h @@ -18,18 +18,23 @@ #ifdef __cplusplus extern "C" { #endif - -// lane type by intrin suffix -typedef npy_uint8 npyv_lanetype_u8; -typedef npy_int8 npyv_lanetype_s8; -typedef npy_uint16 npyv_lanetype_u16; -typedef npy_int16 npyv_lanetype_s16; -typedef npy_uint32 npyv_lanetype_u32; -typedef npy_int32 npyv_lanetype_s32; -typedef npy_uint64 npyv_lanetype_u64; -typedef npy_int64 npyv_lanetype_s64; -typedef float npyv_lanetype_f32; -typedef double npyv_lanetype_f64; +/* + * clang commit a agrresive optimization behavoueir when flag `-ftrapping-math` + * isn't fully supported that's present at -O1 or greater. 
When partially loading a + * vector register for a operations that requires to fill up the remaining lanes + * with certain value for example divide operation needs to fill the remaining value + * with non-zero integer to avoid fp exception divide-by-zero. + * clang optimizer notices that the entire register is not needed for the store + * and optimizes out the fill of non-zero integer to the remaining + * elements. As workaround we mark the returned register with `volatile` + * followed by symmetric operand operation e.g. `or` + * to convince the compiler that the entire vector is needed. + */ +#if defined(__clang__) && !defined(NPY_HAVE_CLANG_FPSTRICT) + #define NPY_SIMD_GUARD_PARTIAL_LOAD 1 +#else + #define NPY_SIMD_GUARD_PARTIAL_LOAD 0 +#endif #if defined(_MSC_VER) && defined(_M_IX86) /* @@ -50,6 +55,19 @@ typedef double npyv_lanetype_f64; #undef _mm256_set_epi64x #undef _mm_set_epi64x #endif + +// lane type by intrin suffix +typedef npy_uint8 npyv_lanetype_u8; +typedef npy_int8 npyv_lanetype_s8; +typedef npy_uint16 npyv_lanetype_u16; +typedef npy_int16 npyv_lanetype_s16; +typedef npy_uint32 npyv_lanetype_u32; +typedef npy_int32 npyv_lanetype_s32; +typedef npy_uint64 npyv_lanetype_u64; +typedef npy_int64 npyv_lanetype_s64; +typedef float npyv_lanetype_f32; +typedef double npyv_lanetype_f64; + #if defined(NPY_HAVE_AVX512F) && !defined(NPY_SIMD_FORCE_256) && !defined(NPY_SIMD_FORCE_128) #include "avx512/avx512.h" #elif defined(NPY_HAVE_AVX2) && !defined(NPY_SIMD_FORCE_128) diff --git a/numpy/core/src/common/simd/sse/memory.h b/numpy/core/src/common/simd/sse/memory.h index 4c8e86a6f9da..90c01ffefedb 100644 --- a/numpy/core/src/common/simd/sse/memory.h +++ b/numpy/core/src/common/simd/sse/memory.h @@ -178,62 +178,53 @@ NPY_FINLINE void npyv_storen2_f64(double *ptr, npy_intp stride, npyv_f64 a) /********************************* * Partial Load *********************************/ -#if defined(__clang__) && __clang_major__ > 7 - /** - * Clang >=8 perform aggressive optimization that tends to - * zero the bits of upper half part of vectors even - * when we try to fill it up with certain scalars, - * which my lead to zero division errors. 
- */ - #define NPYV__CLANG_ZEROUPPER -#endif //// 32 NPY_FINLINE npyv_s32 npyv_load_till_s32(const npy_int32 *ptr, npy_uintp nlane, npy_int32 fill) { assert(nlane > 0); -#ifdef NPYV__CLANG_ZEROUPPER - if (nlane > 3) { - return npyv_load_s32(ptr); - } - npy_int32 NPY_DECL_ALIGNED(16) data[4] = {fill, fill, fill, fill}; - for (npy_uint64 i = 0; i < nlane; ++i) { - data[i] = ptr[i]; - } - return npyv_loada_s32(data); -#else #ifndef NPY_HAVE_SSE41 const short *wptr = (const short*)ptr; #endif const __m128i vfill = npyv_setall_s32(fill); __m128i a; switch(nlane) { - case 2: - return _mm_castpd_si128( - _mm_loadl_pd(_mm_castsi128_pd(vfill), (double*)ptr) - ); + case 2: + a = _mm_castpd_si128( + _mm_loadl_pd(_mm_castsi128_pd(vfill), (double*)ptr) + ); + break; #ifdef NPY_HAVE_SSE41 case 1: - return _mm_insert_epi32(vfill, ptr[0], 0); + a = _mm_insert_epi32(vfill, ptr[0], 0); + break; case 3: a = _mm_loadl_epi64((const __m128i*)ptr); a = _mm_insert_epi32(a, ptr[2], 2); a = _mm_insert_epi32(a, fill, 3); - return a; + break; #else case 1: a = _mm_insert_epi16(vfill, wptr[0], 0); - return _mm_insert_epi16(a, wptr[1], 1); + a = _mm_insert_epi16(a, wptr[1], 1); + break; case 3: a = _mm_loadl_epi64((const __m128i*)ptr); a = _mm_unpacklo_epi64(a, vfill); a = _mm_insert_epi16(a, wptr[4], 4); a = _mm_insert_epi16(a, wptr[5], 5); - return a; + break; #endif // NPY_HAVE_SSE41 default: return npyv_load_s32(ptr); - } -#endif + } + #if NPY_SIMD_GUARD_PARTIAL_LOAD + // We use a variable marked 'volatile' to convince the compiler that + // the entire vector is needed. + volatile __m128i workaround = a; + // avoid optimizing it out + a = _mm_or_si128(workaround, a); + #endif + return a; } // fill zero to rest lanes NPY_FINLINE npyv_s32 npyv_load_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) @@ -260,22 +251,17 @@ NPY_FINLINE npyv_s32 npyv_load_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 *ptr, npy_uintp nlane, npy_int64 fill) { assert(nlane > 0); -#ifdef NPYV__CLANG_ZEROUPPER - if (nlane <= 2) { - npy_int64 NPY_DECL_ALIGNED(16) data[2] = {fill, fill}; - for (npy_uint64 i = 0; i < nlane; ++i) { - data[i] = ptr[i]; - } - return npyv_loada_s64(data); - } -#else if (nlane == 1) { const __m128i vfill = npyv_setall_s64(fill); - return _mm_castpd_si128( + npyv_s64 a = _mm_castpd_si128( _mm_loadl_pd(_mm_castsi128_pd(vfill), (double*)ptr) ); + #if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m128i workaround = a; + a = _mm_or_si128(workaround, a); + #endif + return a; } -#endif return npyv_load_s64(ptr); } // fill zero to rest lanes @@ -295,9 +281,14 @@ NPY_FINLINE npyv_s32 npyv_load2_till_s32(const npy_int32 *ptr, npy_uintp nlane, assert(nlane > 0); if (nlane == 1) { const __m128i vfill = npyv_set_s32(fill_lo, fill_hi, fill_lo, fill_hi); - return _mm_castpd_si128( + __m128i a = _mm_castpd_si128( _mm_loadl_pd(_mm_castsi128_pd(vfill), (double*)ptr) ); + #if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m128i workaround = a; + a = _mm_or_si128(workaround, a); + #endif + return a; } return npyv_load_s32(ptr); } @@ -321,16 +312,6 @@ NPY_FINLINE npyv_s32 npyv_loadn_till_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane, npy_int32 fill) { assert(nlane > 0); -#ifdef NPYV__CLANG_ZEROUPPER - if (nlane > 3) { - return npyv_loadn_s32(ptr, stride); - } - npy_int32 NPY_DECL_ALIGNED(16) data[4] = {fill, fill, fill, fill}; - for (npy_uint64 i = 0; i < nlane; ++i) { - data[i] = ptr[stride*i]; - } - return npyv_loada_s32(data); -#else __m128i vfill = npyv_setall_s32(fill); 
#ifndef NPY_HAVE_SSE41 const short *wptr = (const short*)ptr; @@ -360,8 +341,11 @@ npyv_loadn_till_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane, npy_ default: return npyv_loadn_s32(ptr, stride); } // switch - return vfill; +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m128i workaround = vfill; + vfill = _mm_or_si128(workaround, vfill); #endif + return vfill; } // fill zero to rest lanes NPY_FINLINE npyv_s32 @@ -402,22 +386,9 @@ NPY_FINLINE npyv_s64 npyv_loadn_till_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane, npy_int64 fill) { assert(nlane > 0); -#ifdef NPYV__CLANG_ZEROUPPER - if (nlane <= 2) { - npy_int64 NPY_DECL_ALIGNED(16) data[2] = {fill, fill}; - for (npy_uint64 i = 0; i < nlane; ++i) { - data[i] = ptr[i*stride]; - } - return npyv_loada_s64(data); - } -#else if (nlane == 1) { - const __m128i vfill = npyv_setall_s64(fill); - return _mm_castpd_si128( - _mm_loadl_pd(_mm_castsi128_pd(vfill), (double*)ptr) - ); + return npyv_load_till_s64(ptr, 1, fill); } -#endif return npyv_loadn_s64(ptr, stride); } // fill zero to rest lanes @@ -437,9 +408,14 @@ NPY_FINLINE npyv_s32 npyv_loadn2_till_s32(const npy_int32 *ptr, npy_intp stride, assert(nlane > 0); if (nlane == 1) { const __m128i vfill = npyv_set_s32(0, 0, fill_lo, fill_hi); - return _mm_castpd_si128( + __m128i a = _mm_castpd_si128( _mm_loadl_pd(_mm_castsi128_pd(vfill), (double*)ptr) ); + #if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile __m128i workaround = a; + a = _mm_or_si128(workaround, a); + #endif + return a; } return npyv_loadn2_s32(ptr, stride); } diff --git a/numpy/core/src/common/simd/vec/memory.h b/numpy/core/src/common/simd/vec/memory.h index 4545e53e9094..dbcdc16da395 100644 --- a/numpy/core/src/common/simd/vec/memory.h +++ b/numpy/core/src/common/simd/vec/memory.h @@ -210,24 +210,33 @@ NPY_FINLINE npyv_s32 npyv_load_till_s32(const npy_int32 *ptr, npy_uintp nlane, n const npyv_u32 vlane = npyv_setall_u32(blane); const npyv_b32 mask = vec_cmpgt(vlane, steps); npyv_s32 a = vec_load_len(ptr, blane*4-1); - return vec_sel(vfill, a, mask); + a = vec_sel(vfill, a, mask); #else + npyv_s32 a; switch(nlane) { case 1: - return vec_insert(ptr[0], vfill, 0); + a = vec_insert(ptr[0], vfill, 0); + break; case 2: - return (npyv_s32)vec_insert( + a = (npyv_s32)vec_insert( *npyv__ptr2u64(ptr), (npyv_u64)vfill, 0 ); + break; case 3: vfill = vec_insert(ptr[2], vfill, 2); - return (npyv_s32)vec_insert( + a = (npyv_s32)vec_insert( *npyv__ptr2u64(ptr), (npyv_u64)vfill, 0 ); + break; default: return npyv_load_s32(ptr); } #endif +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile npyv_s32 workaround = a; + a = vec_or(workaround, a); +#endif + return a; } // fill zero to rest lanes NPY_FINLINE npyv_s32 npyv_load_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) @@ -244,7 +253,12 @@ NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 *ptr, npy_uintp nlane, n { assert(nlane > 0); if (nlane == 1) { - return npyv_set_s64(ptr[0], fill); + npyv_s64 r = npyv_set_s64(ptr[0], fill); + #if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile npyv_s64 workaround = r; + r = vec_or(workaround, r); + #endif + return r; } return npyv_load_s64(ptr); } @@ -264,7 +278,12 @@ NPY_FINLINE npyv_s32 npyv_load2_till_s32(const npy_int32 *ptr, npy_uintp nlane, { assert(nlane > 0); if (nlane == 1) { - return npyv_set_s32(ptr[0], ptr[1], fill_lo, fill_hi); + npyv_s32 r = npyv_set_s32(ptr[0], ptr[1], fill_lo, fill_hi); + #if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile npyv_s32 workaround = r; + r = vec_or(workaround, r); + #endif + return r; } return npyv_load_s32(ptr); } @@ -299,6 
+318,10 @@ npyv_loadn_till_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane, npy_ default: return npyv_loadn_s32(ptr, stride); } // switch +#if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile npyv_s32 workaround = vfill; + vfill = vec_or(workaround, vfill); +#endif return vfill; } // fill zero to rest lanes @@ -311,7 +334,7 @@ npyv_loadn_till_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane, npy_ { assert(nlane > 0); if (nlane == 1) { - return npyv_set_s64(*ptr, fill); + return npyv_load_till_s64(ptr, nlane, fill); } return npyv_loadn_s64(ptr, stride); } @@ -325,7 +348,12 @@ NPY_FINLINE npyv_s32 npyv_loadn2_till_s32(const npy_int32 *ptr, npy_intp stride, { assert(nlane > 0); if (nlane == 1) { - return npyv_set_s32(ptr[0], ptr[1], fill_lo, fill_hi); + npyv_s32 r = npyv_set_s32(ptr[0], ptr[1], fill_lo, fill_hi); + #if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile npyv_s32 workaround = r; + r = vec_or(workaround, r); + #endif + return r; } return npyv_loadn2_s32(ptr, stride); } @@ -333,7 +361,12 @@ NPY_FINLINE npyv_s32 npyv_loadn2_tillz_s32(const npy_int32 *ptr, npy_intp stride { assert(nlane > 0); if (nlane == 1) { - return (npyv_s32)npyv_set_s64(*(npy_int64*)ptr, 0); + npyv_s32 r = (npyv_s32)npyv_set_s64(*(npy_int64*)ptr, 0); + #if NPY_SIMD_GUARD_PARTIAL_LOAD + volatile npyv_s32 workaround = r; + r = vec_or(workaround, r); + #endif + return r; } return npyv_loadn2_s32(ptr, stride); } diff --git a/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src b/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src index 7ba3981e8119..c8bcedb6bbdc 100644 --- a/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src +++ b/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src @@ -31,59 +31,6 @@ /******************************************************************************** ** Defining ufunc inner functions ********************************************************************************/ - -/* - * clang has a bug that's present at -O1 or greater. When partially loading a - * vector register for a divide operation, the remaining elements are set - * to 1 to avoid divide-by-zero. The partial load is paired with a partial - * store after the divide operation. clang notices that the entire register - * is not needed for the store and optimizes out the fill of 1 to the remaining - * elements. This causes either a divide-by-zero or 0/0 with invalid exception - * that we were trying to avoid by filling. - * - * Using a dummy variable marked 'volatile' convinces clang not to ignore - * the explicit fill of remaining elements. If `-ftrapping-math` is - * supported, then it'll also avoid the bug. `-ftrapping-math` is supported - * on Apple clang v12+ for x86_64. It is not currently supported for arm64. - * `-ftrapping-math` is set by default of Numpy builds in - * numpy/distutils/ccompiler.py. 
- * - * Note: Apple clang and clang upstream have different versions that overlap - */ -#if defined(__clang__) - #if defined(__apple_build_version__) - // Apple Clang - #if __apple_build_version__ < 12000000 - // Apple Clang before v12 - #define WORKAROUND_CLANG_PARTIAL_LOAD_BUG 1 - #elif defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) - // Apple Clang after v12, targeting i386 or x86_64 - #define WORKAROUND_CLANG_PARTIAL_LOAD_BUG 0 - #else - // Apple Clang after v12, not targeting i386 or x86_64 - #define WORKAROUND_CLANG_PARTIAL_LOAD_BUG 1 - #endif - #else - // Clang, not Apple Clang - #if __clang_major__ < 10 - // Clang before v10 - #define WORKAROUND_CLANG_PARTIAL_LOAD_BUG 1 - #elif defined(_MSC_VER) - // clang-cl has the same bug - #define WORKAROUND_CLANG_PARTIAL_LOAD_BUG 1 - #elif defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) - // Clang v10+, targeting i386 or x86_64 - #define WORKAROUND_CLANG_PARTIAL_LOAD_BUG 0 - #else - // Clang v10+, not targeting i386 or x86_64 - #define WORKAROUND_CLANG_PARTIAL_LOAD_BUG 1 - #endif - #endif -#else -// Not a Clang compiler -#define WORKAROUND_CLANG_PARTIAL_LOAD_BUG 0 -#endif - /**begin repeat * Float types * #type = npy_float, npy_double# @@ -148,12 +95,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_store_@sfx@((@type@*)dst, r0); npyv_store_@sfx@((@type@*)(dst + vstep), r1); } - #if @is_div@ && WORKAROUND_CLANG_PARTIAL_LOAD_BUG - const int vstop = hstep - 1; - #else - const int vstop = 0; - #endif // #if @is_div@ && WORKAROUND_CLANG_PARTIAL_LOAD_BUG - for (; len > vstop; len -= hstep, src0 += vstep, src1 += vstep, dst += vstep) { + for (; len > 0; len -= hstep, src0 += vstep, src1 += vstep, dst += vstep) { #if @is_div@ npyv_@sfx@ a = npyv_load_till_@sfx@((const @type@*)src0, len, 1.0@c@); npyv_@sfx@ b = npyv_load_till_@sfx@((const @type@*)src1, len, 1.0@c@); @@ -164,15 +106,6 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_@sfx@ r = npyv_@intrin@_@sfx@(a, b); npyv_store_till_@sfx@((@type@*)dst, len, r); } - #if @is_div@ && WORKAROUND_CLANG_PARTIAL_LOAD_BUG - // last partial iteration for divide and working around clang partial load bug - if(len > 0){ - npyv_@sfx@ a = npyv_load_till_@sfx@((const @type@*)src0, len, 1.0@c@); - volatile npyv_@sfx@ b = npyv_load_till_@sfx@((const @type@*)src1, len, 1.0@c@); - npyv_@sfx@ r = npyv_@intrin@_@sfx@(a, b); - npyv_store_till_@sfx@((@type@*)dst, len, r); - } - #endif // #if @is_div@ && WORKAROUND_CLANG_PARTIAL_LOAD_BUG } else if (ssrc0 == 0 && ssrc1 == sizeof(@type@) && sdst == ssrc1) { npyv_@sfx@ a = npyv_setall_@sfx@(*((@type@*)src0)); @@ -184,12 +117,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_store_@sfx@((@type@*)dst, r0); npyv_store_@sfx@((@type@*)(dst + vstep), r1); } - #if (@is_div@ || @is_mul@) && WORKAROUND_CLANG_PARTIAL_LOAD_BUG - const int vstop = hstep - 1; - #else - const int vstop = 0; - #endif // #if (@is_div@ || @is_mul@) && WORKAROUND_CLANG_PARTIAL_LOAD_BUG - for (; len > vstop; len -= hstep, src1 += vstep, dst += vstep) { + for (; len > 0; len -= hstep, src1 += vstep, dst += vstep) { #if @is_div@ || @is_mul@ npyv_@sfx@ b = npyv_load_till_@sfx@((const @type@*)src1, len, 1.0@c@); #else @@ -198,14 +126,6 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_@sfx@ r = npyv_@intrin@_@sfx@(a, b); npyv_store_till_@sfx@((@type@*)dst, len, r); } - #if (@is_div@ || @is_mul@) && WORKAROUND_CLANG_PARTIAL_LOAD_BUG - // last partial iteration for multiply / divide and working around clang partial load bug - if(len > 0){ - volatile 
npyv_@sfx@ b = npyv_load_till_@sfx@((const @type@*)src1, len, 1.0@c@); - npyv_@sfx@ r = npyv_@intrin@_@sfx@(a, b); - npyv_store_till_@sfx@((@type@*)dst, len, r); - } - #endif // #if (@is_div@ || @is_mul@) && WORKAROUND_CLANG_PARTIAL_LOAD_BUG } else if (ssrc1 == 0 && ssrc0 == sizeof(@type@) && sdst == ssrc0) { npyv_@sfx@ b = npyv_setall_@sfx@(*((@type@*)src1)); @@ -217,12 +137,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_store_@sfx@((@type@*)dst, r0); npyv_store_@sfx@((@type@*)(dst + vstep), r1); } - #if (@is_div@ || @is_mul@) && WORKAROUND_CLANG_PARTIAL_LOAD_BUG - const int vstop = hstep - 1; - #else - const int vstop = 0; - #endif // #if (@is_div@ || @is_mul@) && WORKAROUND_CLANG_PARTIAL_LOAD_BUG - for (; len > vstop; len -= hstep, src0 += vstep, dst += vstep) { + for (; len > 0; len -= hstep, src0 += vstep, dst += vstep) { #if @is_div@ || @is_mul@ npyv_@sfx@ a = npyv_load_till_@sfx@((const @type@*)src0, len, 1.0@c@); #else @@ -231,14 +146,6 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_@sfx@ r = npyv_@intrin@_@sfx@(a, b); npyv_store_till_@sfx@((@type@*)dst, len, r); } - #if (@is_div@ || @is_mul@) && WORKAROUND_CLANG_PARTIAL_LOAD_BUG - // last partial iteration for multiply / divide and working around clang partial load bug - if(len > 0){ - volatile npyv_@sfx@ a = npyv_load_till_@sfx@((const @type@*)src0, len, 1.0@c@); - npyv_@sfx@ r = npyv_@intrin@_@sfx@(a, b); - npyv_store_till_@sfx@((@type@*)dst, len, r); - } - #endif // #if (@is_div@ || @is_mul@) && WORKAROUND_CLANG_PARTIAL_LOAD_BUG } else { goto loop_scalar; } @@ -279,8 +186,6 @@ NPY_NO_EXPORT int NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@_indexed) /**end repeat1**/ /**end repeat**/ -#undef WORKAROUND_CLANG_PARTIAL_LOAD_BUG - //############################################################################### //## Complex Single/Double precision //############################################################################### diff --git a/numpy/core/src/umath/loops_unary_fp.dispatch.c.src b/numpy/core/src/umath/loops_unary_fp.dispatch.c.src index c4e7b8929f1d..f6404f6f7d68 100644 --- a/numpy/core/src/umath/loops_unary_fp.dispatch.c.src +++ b/numpy/core/src/umath/loops_unary_fp.dispatch.c.src @@ -93,58 +93,6 @@ NPY_FINLINE double c_square_f64(double a) #define CONTIG 0 #define NCONTIG 1 -/* - * clang has a bug that's present at -O1 or greater. When partially loading a - * vector register for a reciprocal operation, the remaining elements are set - * to 1 to avoid divide-by-zero. The partial load is paired with a partial - * store after the reciprocal operation. clang notices that the entire register - * is not needed for the store and optimizes out the fill of 1 to the remaining - * elements. This causes either a divide-by-zero or 0/0 with invalid exception - * that we were trying to avoid by filling. - * - * Using a dummy variable marked 'volatile' convinces clang not to ignore - * the explicit fill of remaining elements. If `-ftrapping-math` is - * supported, then it'll also avoid the bug. `-ftrapping-math` is supported - * on Apple clang v12+ for x86_64. It is not currently supported for arm64. - * `-ftrapping-math` is set by default of Numpy builds in - * numpy/distutils/ccompiler.py. 
- * - * Note: Apple clang and clang upstream have different versions that overlap - */ -#if defined(__clang__) - #if defined(__apple_build_version__) - // Apple Clang - #if __apple_build_version__ < 12000000 - // Apple Clang before v12 - #define WORKAROUND_CLANG_RECIPROCAL_BUG 1 - #elif defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) - // Apple Clang after v12, targeting i386 or x86_64 - #define WORKAROUND_CLANG_RECIPROCAL_BUG 0 - #else - // Apple Clang after v12, not targeting i386 or x86_64 - #define WORKAROUND_CLANG_RECIPROCAL_BUG 1 - #endif - #else - // Clang, not Apple Clang - #if __clang_major__ < 10 - // Clang before v10 - #define WORKAROUND_CLANG_RECIPROCAL_BUG 1 - #elif defined(_MSC_VER) - // clang-cl has the same bug - #define WORKAROUND_CLANG_RECIPROCAL_BUG 1 - #elif defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) - // Clang v10+, targeting i386 or x86_64 - #define WORKAROUND_CLANG_RECIPROCAL_BUG 0 - #else - // Clang v10+, not targeting i386 or x86_64 - #define WORKAROUND_CLANG_RECIPROCAL_BUG 1 - #endif - #endif -#else -// Not a Clang compiler -#define WORKAROUND_CLANG_RECIPROCAL_BUG 0 -#endif - /**begin repeat * #TYPE = FLOAT, DOUBLE# * #sfx = f32, f64# @@ -155,7 +103,6 @@ NPY_FINLINE double c_square_f64(double a) * #kind = rint, floor, ceil, trunc, sqrt, absolute, square, reciprocal# * #intr = rint, floor, ceil, trunc, sqrt, abs, square, recip# * #repl_0w1 = 0*7, 1# - * #RECIP_WORKAROUND = 0*7, WORKAROUND_CLANG_RECIPROCAL_BUG# */ /**begin repeat2 * #STYPE = CONTIG, NCONTIG, CONTIG, NCONTIG# @@ -228,15 +175,6 @@ static void simd_@TYPE@_@kind@_@STYPE@_@DTYPE@ npyv_@sfx@ v_src0 = npyv_loadn_tillz_@sfx@(src, ssrc, len); #endif #endif - #if @RECIP_WORKAROUND@ - /* - * Workaround clang bug. We use a dummy variable marked 'volatile' - * to convince clang that the entire vector is needed. We only - * want to do this for the last iteration / partial load-store of - * the loop since 'volatile' forces a refresh of the contents. - */ - volatile npyv_@sfx@ unused_but_workaround_bug = v_src0; - #endif // @RECIP_WORKAROUND@ npyv_@sfx@ v_unary0 = npyv_@intr@_@sfx@(v_src0); #if @DTYPE@ == CONTIG npyv_store_till_@sfx@(dst, len, v_unary0); @@ -252,8 +190,6 @@ static void simd_@TYPE@_@kind@_@STYPE@_@DTYPE@ #endif // @VCHK@ /**end repeat**/ -#undef WORKAROUND_CLANG_RECIPROCAL_BUG - /******************************************************************************** ** Defining ufunc inner functions ********************************************************************************/ From c3f4b052988467ce180949536eb6e504397569b9 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 6 Sep 2023 08:36:33 -0600 Subject: [PATCH 103/120] REL: Prepare for the NumPy 1.26.0rc1 release. 
- Update 1.26.0-changelog.rst - Update 1.26.0-notes.rst - Update pyproject.toml - Update pyproject.toml.setuppy - Update .mailmap [wheel build] --- .mailmap | 4 + doc/changelog/1.26.0-changelog.rst | 45 ++++++- .../upcoming_changes/23789.new_feature.rst | 7 -- .../upcoming_changes/24053.new_feature.rst | 5 - .../upcoming_changes/24532.new_feature.rst | 12 -- .../upcoming_changes/24555.improvement.rst | 5 - .../upcoming_changes/24555.new_feature.rst | 10 -- doc/source/release/1.26.0-notes.rst | 113 +++++++++++++++++- pyproject.toml | 2 +- pyproject.toml.setuppy | 2 +- 10 files changed, 160 insertions(+), 45 deletions(-) delete mode 100644 doc/release/upcoming_changes/23789.new_feature.rst delete mode 100644 doc/release/upcoming_changes/24053.new_feature.rst delete mode 100644 doc/release/upcoming_changes/24532.new_feature.rst delete mode 100644 doc/release/upcoming_changes/24555.improvement.rst delete mode 100644 doc/release/upcoming_changes/24555.new_feature.rst diff --git a/.mailmap b/.mailmap index 47ba80804786..9f83be9356fc 100644 --- a/.mailmap +++ b/.mailmap @@ -401,6 +401,7 @@ Matthew Harrigan Matthias Bussonnier Matthieu Darbois Matti Picus +Matti Picus mattip Maximilian Konrad Melissa Weber Mendonça Melissa Weber Mendonça @@ -427,6 +428,8 @@ Mitchell Faas <35742861+Mitchell-Faas@users.noreply.gi Muhammad Kasim Mukulika Pahari Mukulika Pahari <60316606+Mukulikaa@users.noreply.github.com> +Namami Shanker +Namami Shanker NamamiShanker Nathaniel J. Smith Naveen Arunachalam naveenarun Neil Girdhar @@ -477,6 +480,7 @@ Raghuveer Devulapalli <44766858+r-devulap@users.noreply Rajas Rade lkdmttg7 Rakesh Vasudevan Ralf Gommers +Ralf Gommers rgommers Rehas Sachdeva Ritta Narita Riya Sharma diff --git a/doc/changelog/1.26.0-changelog.rst b/doc/changelog/1.26.0-changelog.rst index 9f0cd79d23b7..67576479254e 100644 --- a/doc/changelog/1.26.0-changelog.rst +++ b/doc/changelog/1.26.0-changelog.rst @@ -2,14 +2,22 @@ Contributors ============ -A total of 11 people contributed to this release. People with a "+" by their +A total of 18 people contributed to this release. People with a "+" by their names contributed a patch for the first time. +* @DWesl +* Albert Steppi + * Bas van Beek * Charles Harris +* Developer-Ecosystem-Engineering +* Jake Vanderplas +* Marten van Kerkwijk * Matti Picus * Melissa Weber Mendonça +* Namami Shanker +* Nathan Goldbaum * Ralf Gommers +* Rohit Goswami * Sayed Adel * Sebastian Berg * Stefan van der Walt @@ -19,7 +27,7 @@ names contributed a patch for the first time. Pull requests merged ==================== -A total of 18 pull requests were merged for this release. +A total of 51 pull requests were merged for this release. * `#24305 `__: MAINT: Prepare 1.26.x branch for development * `#24308 `__: MAINT: Massive update of files from main for numpy 1.26 @@ -39,3 +47,36 @@ A total of 18 pull requests were merged for this release. * `#24404 `__: BLD: vendor meson-python to make the Windows builds with SIMD... * `#24405 `__: BLD, SIMD: The meson CPU dispatcher implementation * `#24406 `__: MAINT: Remove versioneer +* `#24409 `__: REL: Prepare for the NumPy 1.26.0b1 release. +* `#24453 `__: MAINT: Pin upper version of sphinx. +* `#24455 `__: ENH: Add prefix to _ALIGN Macro +* `#24456 `__: BUG: cleanup warnings [skip azp][skip circle][skip travis][skip... +* `#24460 `__: MAINT: Upgrade to spin 0.5 +* `#24495 `__: BUG: ``asv dev`` has been removed, use ``asv run``. +* `#24496 `__: BUG: Fix meson build failure due to unchanged inplace auto-generated... 
+* `#24521 `__: BUG: fix issue with git-version script, needs a shebang to run +* `#24522 `__: BUG: Use a default assignment for git_hash [skip ci] +* `#24524 `__: BUG: fix NPY_cast_info error handling in choose +* `#24526 `__: BUG: Fix common block handling in f2py +* `#24541 `__: CI,TYP: Bump mypy to 1.4.1 +* `#24542 `__: BUG: Fix assumed length f2py regression +* `#24544 `__: MAINT: Harmonize fortranobject +* `#24545 `__: TYP: add kind argument to numpy.isin type specification +* `#24561 `__: BUG: fix comparisons between masked and unmasked structured arrays +* `#24590 `__: CI: Exclude import libraries from list of DLLs on Cygwin. +* `#24591 `__: BLD: fix ``_umath_linalg`` dependencies +* `#24594 `__: MAINT: Stop testing on ppc64le. +* `#24602 `__: BLD: meson-cpu: fix SIMD support on platforms with no features +* `#24606 `__: BUG: Change Cython ``binding`` directive to "False". +* `#24613 `__: ENH: Adopt new macOS Accelerate BLAS/LAPACK Interfaces, including... +* `#24614 `__: DOC: Update building docs to use Meson +* `#24615 `__: TYP: Add the missing ``casting`` keyword to ``np.clip`` +* `#24616 `__: TST: convert cython test from setup.py to meson +* `#24617 `__: MAINT: Fixup ``fromnumeric.pyi`` +* `#24622 `__: BUG, ENH: Fix ``iso_c_binding`` type maps and fix ``bind(c)``... +* `#24629 `__: TYP: Allow ``binary_repr`` to accept any object implementing... +* `#24630 `__: TYP: Explicitly declare ``dtype`` and ``generic`` hashable +* `#24637 `__: ENH: Refactor the typing "reveal" tests using `typing.assert_type` +* `#24638 `__: MAINT: Bump actions/checkout from 3.6.0 to 4.0.0 +* `#24647 `__: ENH: ``meson`` backend for ``f2py`` +* `#24648 `__: MAINT: Refactor partial load Workaround for Clang diff --git a/doc/release/upcoming_changes/23789.new_feature.rst b/doc/release/upcoming_changes/23789.new_feature.rst deleted file mode 100644 index 58158486c9f1..000000000000 --- a/doc/release/upcoming_changes/23789.new_feature.rst +++ /dev/null @@ -1,7 +0,0 @@ -Array API v2022.12 support in ``numpy.array_api`` -------------------------------------------------- - -- ``numpy.array_api`` now full supports the `v2022.12 version - `__ of the array API standard. Note - that this does not yet include the optional ``fft`` extension in the - standard. diff --git a/doc/release/upcoming_changes/24053.new_feature.rst b/doc/release/upcoming_changes/24053.new_feature.rst deleted file mode 100644 index f32eeef293c1..000000000000 --- a/doc/release/upcoming_changes/24053.new_feature.rst +++ /dev/null @@ -1,5 +0,0 @@ -Support for the updated Accelerate BLAS/LAPACK library, including ILP64 (64-bit -integer) support, in macOS 13.3 has been added. This brings arm64 support, and -significant performance improvements of up to 10x for commonly used linear -algebra operations. When Accelerate is selected at build time, the 13.3+ -version will automatically be used if available. diff --git a/doc/release/upcoming_changes/24532.new_feature.rst b/doc/release/upcoming_changes/24532.new_feature.rst deleted file mode 100644 index 504b1d431cff..000000000000 --- a/doc/release/upcoming_changes/24532.new_feature.rst +++ /dev/null @@ -1,12 +0,0 @@ -``meson`` backend for ``f2py`` ------------------------------- -``f2py`` in compile mode (i.e. ``f2py -c``) now accepts the ``--backend meson`` option. This is the default option -for Python ``3.12`` on-wards. Older versions will still default to ``--backend -distutils``. 
- -To support this in realistic use-cases, in compile mode ``f2py`` takes a -``--dep`` flag one or many times which maps to ``dependency()`` calls in the -``meson`` backend, and does nothing in the ``distutils`` backend. - - -There are no changes for users of ``f2py`` only as a code generator, i.e. without ``-c``. diff --git a/doc/release/upcoming_changes/24555.improvement.rst b/doc/release/upcoming_changes/24555.improvement.rst deleted file mode 100644 index 65fae76088ee..000000000000 --- a/doc/release/upcoming_changes/24555.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -``iso_c_binding`` support for ``f2py`` --------------------------------------- -Previously, users would have to define their own custom ``f2cmap`` file to use -type mappings defined by the Fortran2003 ``iso_c_binding`` intrinsic module. -These type maps are now natively supported by ``f2py`` diff --git a/doc/release/upcoming_changes/24555.new_feature.rst b/doc/release/upcoming_changes/24555.new_feature.rst deleted file mode 100644 index 770754f4f29f..000000000000 --- a/doc/release/upcoming_changes/24555.new_feature.rst +++ /dev/null @@ -1,10 +0,0 @@ -``bind(c)`` support for ``f2py`` --------------------------------- -Both functions and subroutines can be annotated with ``bind(c)``. ``f2py`` will -handle both the correct type mapping, and preserve the unique label for other -``C`` interfaces. - -**Note:** ``bind(c, name = 'routine_name_other_than_fortran_routine')`` is not - honored by the ``f2py`` bindings by design, since ``bind(c)`` with the ``name`` - is meant to guarantee only the same name in ``C`` and ``Fortran``, not in - ``Python`` and ``Fortran``. diff --git a/doc/source/release/1.26.0-notes.rst b/doc/source/release/1.26.0-notes.rst index 0e7f10e03787..9ac0a95c0b81 100644 --- a/doc/source/release/1.26.0-notes.rst +++ b/doc/source/release/1.26.0-notes.rst @@ -21,9 +21,75 @@ The highlights of this release are: - Cython 3.0.0 compatibility. - Use of the Meson build system - Updated SIMD support +- f2py fixes, meson and bind(x) support The Python versions supported in this release are 3.9-3.12. + +New Features +============ + +Array API v2022.12 support in ``numpy.array_api`` +------------------------------------------------- + +- ``numpy.array_api`` now full supports the `v2022.12 version + `__ of the array API standard. Note + that this does not yet include the optional ``fft`` extension in the + standard. + +(`gh-23789 `__) + +Support for the updated Accelerate BLAS/LAPACK library +------------------------------------------------------ +Support for the updated Accelerate BLAS/LAPACK library, including ILP64 (64-bit +integer) support, in macOS 13.3 has been added. This brings arm64 support, and +significant performance improvements of up to 10x for commonly used linear +algebra operations. When Accelerate is selected at build time, the 13.3+ +version will automatically be used if available. + +(`gh-24053 `__) + +``meson`` backend for ``f2py`` +------------------------------ +``f2py`` in compile mode (i.e. ``f2py -c``) now accepts the ``--backend meson`` +option. This is the default option for Python ``3.12`` on-wards. Older versions +will still default to ``--backend distutils``. + +To support this in realistic use-cases, in compile mode ``f2py`` takes a +``--dep`` flag one or many times which maps to ``dependency()`` calls in the +``meson`` backend, and does nothing in the ``distutils`` backend. + +There are no changes for users of ``f2py`` only as a code generator, i.e. +without ``-c``. 
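As an aside for illustration (the module name, source file and dependency
below are hypothetical, not taken from this patch), a compile-mode invocation
using the new backend might look like::

    python -m numpy.f2py -c fib.f90 -m fib --backend meson --dep lapack

Each ``--dep`` value would map to a ``dependency()`` call in the generated
``meson.build``; with ``--backend distutils`` the flag does nothing.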
+ +(`gh-24532 `__) + +``bind(c)`` support for ``f2py`` +-------------------------------- +Both functions and subroutines can be annotated with ``bind(c)``. ``f2py`` will +handle both the correct type mapping, and preserve the unique label for other +``C`` interfaces. + +**Note:** ``bind(c, name = 'routine_name_other_than_fortran_routine')`` is not +honored by the ``f2py`` bindings by design, since ``bind(c)`` with the ``name`` +is meant to guarantee only the same name in ``C`` and ``Fortran``, not in +``Python`` and ``Fortran``. + +(`gh-24555 `__) + + +Improvements +============ + +``iso_c_binding`` support for ``f2py`` +-------------------------------------- +Previously, users would have to define their own custom ``f2cmap`` file to use +type mappings defined by the Fortran2003 ``iso_c_binding`` intrinsic module. +These type maps are now natively supported by ``f2py`` + +(`gh-24555 `__) + + Build system changes ==================== @@ -75,27 +141,37 @@ issue tracker. We aim to phase out ``setup.py`` builds as soon as possible, and therefore would like to see all potential blockers surfaced early on in the 1.26.0 release cycle. + Contributors ============ -A total of 11 people contributed to this release. People with a "+" by their +A total of 18 people contributed to this release. People with a "+" by their names contributed a patch for the first time. +* @DWesl +* Albert Steppi + * Bas van Beek * Charles Harris +* Developer-Ecosystem-Engineering +* Jake Vanderplas +* Marten van Kerkwijk * Matti Picus * Melissa Weber Mendonça +* Namami Shanker +* Nathan Goldbaum * Ralf Gommers +* Rohit Goswami * Sayed Adel * Sebastian Berg * Stefan van der Walt * Tyler Reddy * Warren Weckesser + Pull requests merged ==================== -A total of 18 pull requests were merged for this release. +A total of 51 pull requests were merged for this release. * `#24305 `__: MAINT: Prepare 1.26.x branch for development * `#24308 `__: MAINT: Massive update of files from main for numpy 1.26 @@ -115,3 +191,36 @@ A total of 18 pull requests were merged for this release. * `#24404 `__: BLD: vendor meson-python to make the Windows builds with SIMD... * `#24405 `__: BLD, SIMD: The meson CPU dispatcher implementation * `#24406 `__: MAINT: Remove versioneer +* `#24409 `__: REL: Prepare for the NumPy 1.26.0b1 release. +* `#24453 `__: MAINT: Pin upper version of sphinx. +* `#24455 `__: ENH: Add prefix to _ALIGN Macro +* `#24456 `__: BUG: cleanup warnings [skip azp][skip circle][skip travis][skip... +* `#24460 `__: MAINT: Upgrade to spin 0.5 +* `#24495 `__: BUG: ``asv dev`` has been removed, use ``asv run``. +* `#24496 `__: BUG: Fix meson build failure due to unchanged inplace auto-generated... +* `#24521 `__: BUG: fix issue with git-version script, needs a shebang to run +* `#24522 `__: BUG: Use a default assignment for git_hash [skip ci] +* `#24524 `__: BUG: fix NPY_cast_info error handling in choose +* `#24526 `__: BUG: Fix common block handling in f2py +* `#24541 `__: CI,TYP: Bump mypy to 1.4.1 +* `#24542 `__: BUG: Fix assumed length f2py regression +* `#24544 `__: MAINT: Harmonize fortranobject +* `#24545 `__: TYP: add kind argument to numpy.isin type specification +* `#24561 `__: BUG: fix comparisons between masked and unmasked structured arrays +* `#24590 `__: CI: Exclude import libraries from list of DLLs on Cygwin. +* `#24591 `__: BLD: fix ``_umath_linalg`` dependencies +* `#24594 `__: MAINT: Stop testing on ppc64le. 
+* `#24602 `__: BLD: meson-cpu: fix SIMD support on platforms with no features +* `#24606 `__: BUG: Change Cython ``binding`` directive to "False". +* `#24613 `__: ENH: Adopt new macOS Accelerate BLAS/LAPACK Interfaces, including... +* `#24614 `__: DOC: Update building docs to use Meson +* `#24615 `__: TYP: Add the missing ``casting`` keyword to ``np.clip`` +* `#24616 `__: TST: convert cython test from setup.py to meson +* `#24617 `__: MAINT: Fixup ``fromnumeric.pyi`` +* `#24622 `__: BUG, ENH: Fix ``iso_c_binding`` type maps and fix ``bind(c)``... +* `#24629 `__: TYP: Allow ``binary_repr`` to accept any object implementing... +* `#24630 `__: TYP: Explicitly declare ``dtype`` and ``generic`` hashable +* `#24637 `__: ENH: Refactor the typing "reveal" tests using `typing.assert_type` +* `#24638 `__: MAINT: Bump actions/checkout from 3.6.0 to 4.0.0 +* `#24647 `__: ENH: ``meson`` backend for ``f2py`` +* `#24648 `__: MAINT: Refactor partial load Workaround for Clang diff --git a/pyproject.toml b/pyproject.toml index 3f3bc121ad10..c9ca8fc7d418 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,7 @@ requires = [ [project] name = "numpy" -version = "1.26.0b1" +version = "1.26.0rc1" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} diff --git a/pyproject.toml.setuppy b/pyproject.toml.setuppy index ceef9ac2a692..4d14e50ae044 100644 --- a/pyproject.toml.setuppy +++ b/pyproject.toml.setuppy @@ -3,7 +3,7 @@ # to avoid building with Meson (e.g., in the Emscripten/Pyodide CI job) [project] name = "numpy" -version = "1.26.0b1" +version = "1.26.0rc1" [build-system] requires = [ From 9e43126b8a9c5392ecd3287a4e1ad2a22f2cc086 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filipe=20La=C3=ADns?= Date: Mon, 14 Aug 2023 15:28:17 +0100 Subject: [PATCH 104/120] BLD: allow specifying the long double format to avoid the runtime check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Filipe Laíns --- numpy/core/meson.build | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/numpy/core/meson.build b/numpy/core/meson.build index d32bb7406d33..473362f1b7e9 100644 --- a/numpy/core/meson.build +++ b/numpy/core/meson.build @@ -377,7 +377,9 @@ endforeach # https://github.com/numpy/numpy/blob/eead09a3d02c09374942cdc787c0b5e4fe9e7472/numpy/core/setup_common.py#L264-L434 # This port is in service of solving gh-23972 # as well as https://github.com/mesonbuild/meson/issues/11068 -longdouble_format = meson.get_compiler('c').run( +longdouble_format = meson.get_external_property('longdouble_format', 'UNKNOWN') +if longdouble_format == 'UNKNOWN' + longdouble_format = meson.get_compiler('c').run( ''' #include #include @@ -456,7 +458,8 @@ int main(void) { } } } -''').stdout() + ''').stdout() +endif if longdouble_format == 'UNKNOWN' or longdouble_format == 'UNDEFINED' error('Unknown long double format of size: ' + cc.sizeof('long double').to_string()) endif From b1d215bcc1bfed053805316f99bf893e1033e6f0 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 7 Sep 2023 15:50:01 +0200 Subject: [PATCH 105/120] BLD: fix bug in random.mtrand extension, don't link libnpyrandom Closes gh-24490 --- numpy/random/meson.build | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/numpy/random/meson.build b/numpy/random/meson.build index 4980a80ba2c8..1802cf4ef561 100644 --- a/numpy/random/meson.build +++ b/numpy/random/meson.build @@ -11,7 +11,6 @@ npyrandom_sources = [ 
npyrandom_lib = static_library('npyrandom', npyrandom_sources, c_args: staticlib_cflags, - # include_directories: '../core/include', dependencies: [py_dep, np_core_dep], install: true, install_dir: np_dir / 'random/lib', @@ -52,23 +51,26 @@ if host_machine.system() == 'cygwin' c_args_random += ['-Wl,--export-all-symbols'] endif -# name, sources, extra link libs, extra c_args +# name, sources, extra c_args, extra static libs to link random_pyx_sources = [ - ['_bounded_integers', _bounded_integers_pyx, [], npymath_lib], - ['_common', '_common.pyx', [], []], - ['_mt19937', ['_mt19937.pyx', 'src/mt19937/mt19937.c', 'src/mt19937/mt19937-jump.c'], [], []], - ['_philox', ['_philox.pyx', 'src/philox/philox.c'], [], []], - ['_pcg64', ['_pcg64.pyx', 'src/pcg64/pcg64.c'], ['-U__GNUC_GNU_INLINE__'], []], - ['_sfc64', ['_sfc64.pyx', 'src/sfc64/sfc64.c'], [], []], - ['bit_generator', 'bit_generator.pyx', [], []], + ['_bounded_integers', _bounded_integers_pyx, [], [npyrandom_lib, npymath_lib]], + ['_common', '_common.pyx', [], [npyrandom_lib]], + ['_mt19937', ['_mt19937.pyx', 'src/mt19937/mt19937.c', 'src/mt19937/mt19937-jump.c'], + [], [npyrandom_lib] + ], + ['_philox', ['_philox.pyx', 'src/philox/philox.c'], [], [npyrandom_lib]], + ['_pcg64', ['_pcg64.pyx', 'src/pcg64/pcg64.c'], ['-U__GNUC_GNU_INLINE__'], [npyrandom_lib]], + ['_sfc64', ['_sfc64.pyx', 'src/sfc64/sfc64.c'], [], [npyrandom_lib]], + ['bit_generator', 'bit_generator.pyx', [], [npyrandom_lib]], # The `fs.copyfile` usage here is needed because these two .pyx files import # from _bounded_integers,and its pxd file is only present in the build directory - ['_generator', fs.copyfile('_generator.pyx'), [], npymath_lib], + ['_generator', fs.copyfile('_generator.pyx'), [], [npyrandom_lib, npymath_lib]], ['mtrand', [ fs.copyfile('mtrand.pyx'), 'src/distributions/distributions.c', 'src/legacy/legacy-distributions.c' - ], ['-DNP_RANDOM_LEGACY=1'], npymath_lib, + ], + ['-DNP_RANDOM_LEGACY=1'], [npymath_lib], ], ] foreach gen: random_pyx_sources @@ -77,7 +79,7 @@ foreach gen: random_pyx_sources c_args: [c_args_random, gen[2]], include_directories: 'src', dependencies: np_core_dep, - link_with: [npyrandom_lib, gen[3]], + link_with: gen[3], install: true, subdir: 'numpy/random', ) From 2fd83842626b35e3de01bbc52d41400a8c6a8e32 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 8 Sep 2023 16:46:53 +0200 Subject: [PATCH 106/120] TST: random: skip test for extending with Cython on 32-bit Windows --- numpy/random/tests/test_extending.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index 8c684ca43f40..2783d1cdd9ac 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -49,6 +49,10 @@ cython = None +@pytest.mark.skipif( + sys.platform == "win32" and sys.maxsize < 2**32, + reason="Failing in 32-bit Windows wheel build job, skip for now" +) @pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") @pytest.mark.skipif(cython is None, reason="requires cython") @pytest.mark.slow From d4555f14162fc6cdf4b74d11d879a219bed281df Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 8 Sep 2023 11:41:31 +0200 Subject: [PATCH 107/120] BLD: build wheels for 32-bit Python on Windows, using MSVC [wheel build] This doesn't include OpenBLAS, I haven't tried that - too much effort right now and it doesn't seem critical. Easier to revisit once OpenBLAS is more easily installable as a wheel. 
xref gh-23717 [skip circle] [skip cirrus] [skip azp] --- .github/workflows/wheels.yml | 7 +++++++ pyproject.toml | 14 ++++++-------- tools/wheels/cibw_test_command.sh | 4 ++++ tools/wheels/repair_windows.sh | 2 +- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 53ade8a876db..d83bddafdcfc 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -78,6 +78,7 @@ jobs: - [ubuntu-20.04, musllinux_x86_64] - [macos-12, macosx_x86_64] - [windows-2019, win_amd64] + - [windows-2019, win32] python: ["cp39", "cp310", "cp311", "cp312", "pp39"] exclude: # Don't build PyPy 32-bit windows @@ -100,6 +101,12 @@ jobs: # https://github.com/actions/checkout/issues/338 fetch-depth: 0 + - name: Setup MSVC (32-bit) + if: ${{ matrix.buildplat[1] == 'win32' }} + uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 + with: + architecture: 'x86' + - name: pkg-config-for-win run: | choco install -y --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite diff --git a/pyproject.toml b/pyproject.toml index c9ca8fc7d418..c41fcc1ba92e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -149,7 +149,7 @@ tracker = "https://github.com/numpy/numpy/issues" # Note: the below skip command doesn't do much currently, the platforms to # build wheels for in CI are controlled in `.github/workflows/wheels.yml` and # `tools/ci/cirrus_wheels.yml`. -skip = "cp36-* cp37-* cp-38* pp37-* pp38-* *-manylinux_i686 *_ppc64le *_s390x *-musllinux_aarch64 *-win32" +skip = "cp36-* cp37-* cp-38* pp37-* pp38-* *-manylinux_i686 *_ppc64le *_s390x *-musllinux_aarch64" build-verbosity = "3" before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" # meson has a hard dependency on ninja, and we need meson to build @@ -178,17 +178,15 @@ test-skip = "*_universal2:arm64" environment = {CFLAGS="-fno-strict-aliasing -DBLAS_SYMBOL_SUFFIX=64_ -DHAVE_BLAS_ILP64", CXXFLAGS="-DBLAS_SYMBOL_SUFFIX=64_ -DHAVE_BLAS_ILP64", NPY_USE_BLAS_ILP64="1", CC="clang", CXX = "clang++", RUNNER_OS="macOS"} [tool.cibuildwheel.windows] -archs = ['AMD64'] environment = {NPY_USE_BLAS_ILP64="1", CFLAGS="-DBLAS_SYMBOL_SUFFIX=64_ -DHAVE_BLAS_ILP64", CXXFLAGS="-DBLAS_SYMBOL_SUFFIX=64_ -DHAVE_BLAS_ILP64", LDFLAGS="", PKG_CONFIG_PATH="C:/opt/64/lib/pkgconfig"} config-settings = "setup-args=--vsenv" repair-wheel-command = "bash ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" -#[[tool.cibuildwheel.overrides]] -# Note: 32-bit Python wheel builds are skipped right now; probably needs -# --native-file to build due to `arch != pyarch` check in Meson's `python` dependency -# Note: uses 32-bit rather than 64-bit OpenBLAS -#select = "*-win32" -#environment = CFLAGS="-m32", LDFLAGS="-m32", PKG_CONFIG_PATH="/opt/32/lib/pkgconfig"} +[[tool.cibuildwheel.overrides]] +select = "*-win32" +environment = {PKG_CONFIG_PATH="/opt/32/lib/pkgconfig"} +config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true" +repair-wheel-command = "" [tool.spin] package = 'numpy' diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index c35fb56832e5..78966885f180 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -18,6 +18,10 @@ if [[ $RUNNER_OS == "macOS" && $RUNNER_ARCH == "X64" ]]; then # Needed so gfortran (not clang) can find system libraries like libm (-lm) # in f2py tests export LIBRARY_PATH="$LIBRARY_PATH:/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib" +elif [[ $RUNNER_OS == 
"Windows" && $IS_32_BIT == true ]] ; then + echo "Skip OpenBLAS version check for 32-bit Windows, no OpenBLAS used" + # Avoid this in GHA: "ERROR: Found GNU link.exe instead of MSVC link.exe" + rm /c/Program\ Files/Git/usr/bin/link.EXE else # For some reason the macos-x86_64 runner does not work with threadpoolctl # Skip this check there diff --git a/tools/wheels/repair_windows.sh b/tools/wheels/repair_windows.sh index a7aa209d21d9..0c44d60d6976 100644 --- a/tools/wheels/repair_windows.sh +++ b/tools/wheels/repair_windows.sh @@ -17,7 +17,7 @@ pushd numpy* # building with mingw. # We therefore find each PYD in the directory structure and strip them. -for f in $(find ./scipy* -name '*.pyd'); do strip $f; done +for f in $(find ./numpy* -name '*.pyd'); do strip $f; done # now repack the wheel and overwrite the original From c9e72cd2645fefe36c8b41a1ea4c5d8861ee3f67 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 13 Sep 2023 21:22:39 +0200 Subject: [PATCH 108/120] BLD: fix issue with compiler selection during cross compilation This resulted in two test failures showing significant accuracy issues for the 1.26.0rc1 with the Linux aarch64 builds in conda-forge. Closes gh-24660 --- vendored-meson/meson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendored-meson/meson b/vendored-meson/meson index 1f8351f16f9c..82d41db3d4b5 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit 1f8351f16f9ce55965449b8e299c6d0fbca7f5df +Subproject commit 82d41db3d4b51124c5f138078c079b8eb62ecdeb From b713467a6eb9d0979a2cc37895cf39e0cc2ac92a Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 15 Apr 2023 18:04:25 +0000 Subject: [PATCH 109/120] TST: Add test for gh-23276 --- .../tests/src/crackfortran/data_stmts.f90 | 18 ++++++++++ numpy/f2py/tests/test_data.py | 33 +++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 numpy/f2py/tests/src/crackfortran/data_stmts.f90 create mode 100644 numpy/f2py/tests/test_data.py diff --git a/numpy/f2py/tests/src/crackfortran/data_stmts.f90 b/numpy/f2py/tests/src/crackfortran/data_stmts.f90 new file mode 100644 index 000000000000..0eb97a563899 --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/data_stmts.f90 @@ -0,0 +1,18 @@ +! gh-23276 +module cmplxdat + implicit none + integer :: i, j + real :: x, y + real, dimension(2) :: z + complex(kind=8), target :: medium_ref_index + complex(kind=8), target :: ref_index_one, ref_index_two + complex(kind=8), dimension(2) :: my_array + real(kind=8), dimension(3) :: my_real_array = (/1.0d0, 2.0d0, 3.0d0/) + + data i, j / 2, 3 / + data x, y / 1.5, 2.0 / + data z / 3.5, 7.0 / + data medium_ref_index / (1.d0, 0.d0) / + data ref_index_one, ref_index_two / (13.0d0, 21.0d0), (-30.0d0, 43.0d0) / + data my_array / (1.0d0, 2.0d0), (-3.0d0, 4.0d0) / +end module cmplxdat diff --git a/numpy/f2py/tests/test_data.py b/numpy/f2py/tests/test_data.py new file mode 100644 index 000000000000..2ce4bae4fb95 --- /dev/null +++ b/numpy/f2py/tests/test_data.py @@ -0,0 +1,33 @@ +import os +import pytest +import numpy as np + +from . 
import util +from numpy.f2py.crackfortran import crackfortran + + +class TestData(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_stmts.f90")] + + # For gh-23276 + def test_data_stmts(self): + assert self.module.cmplxdat.i == 2 + assert self.module.cmplxdat.j == 3 + assert self.module.cmplxdat.x == 1.5 + assert self.module.cmplxdat.y == 2.0 + assert self.module.cmplxdat.medium_ref_index == np.array(1.+0.j) + assert np.all(self.module.cmplxdat.z == np.array([3.5, 7.0])) + assert np.all(self.module.cmplxdat.my_array == np.array([ 1.+2.j, -3.+4.j])) + assert np.all(self.module.cmplxdat.my_real_array == np.array([ 1., 2., 3.])) + assert np.all(self.module.cmplxdat.ref_index_one == np.array([13.0 + 21.0j])) + assert np.all(self.module.cmplxdat.ref_index_two == np.array([-30.0 + 43.0j])) + + def test_crackedlines(self): + mod = crackfortran(self.sources) + assert mod[0]['vars']['x']['='] == '1.5' + assert mod[0]['vars']['y']['='] == '2.0' + assert mod[0]['vars']['my_real_array']['='] == '(/1.0d0, 2.0d0, 3.0d0/)' + assert mod[0]['vars']['ref_index_one']['='] == '(13.0d0, 21.0d0)' + assert mod[0]['vars']['ref_index_two']['='] == '(-30.0d0, 43.0d0)' + # assert mod[0]['vars']['my_array']['='] == '(1.0d0, 2.0d0), (-3.0d0, 4.0d0)' + # assert mod[0]['vars']['z']['='] == '(/ 3.5, 7.0 /)' From 47c8d03620f59d800fed9c6f0ff0b096c709f4fc Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 15 Apr 2023 18:07:52 +0000 Subject: [PATCH 110/120] BUG: Simplify and fix gh-23276 --- numpy/f2py/crackfortran.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 2f1686f64c3e..286586351d0a 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -1437,10 +1437,10 @@ def analyzeline(m, case, line): outmess( 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0]) continue - i = 0 - j = 0 llen = len(l[1]) - for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]): + for idx, v in enumerate(rmbadname( + [x.strip() for x in markoutercomma(l[0]).split('@,@')]) + ): if v[0] == '(': outmess( 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v) @@ -1449,18 +1449,20 @@ def analyzeline(m, case, line): # wrapping. 
continue fc = 0 - while (i < llen) and (fc or not l[1][i] == ','): - if l[1][i] == "'": - fc = not fc - i = i + 1 - i = i + 1 + vtype = vars[v].get('typespec') + + if (vtype == 'complex'): + cmplxpat = r"\(.*?\)" + matches = re.findall(cmplxpat, l[1]) + else: + matches = l[1].split(',') + if v not in vars: vars[v] = {} - if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]: + if '=' in vars[v] and not vars[v]['='] == matches[idx]: outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % ( - v, vars[v]['='], l[1][j:i - 1])) - vars[v]['='] = l[1][j:i - 1] - j = i + v, vars[v]['='], matches[idx])) + vars[v]['='] = matches[idx] last_name = v groupcache[groupcounter]['vars'] = vars if last_name is not None: From b948db83bb37622bc296cb3ec05596a7f27d73ae Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 15 Apr 2023 18:59:50 +0000 Subject: [PATCH 111/120] TST: Add tests for edge case with data statements --- numpy/f2py/tests/test_data.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/tests/test_data.py b/numpy/f2py/tests/test_data.py index 2ce4bae4fb95..3b8ca544ec74 100644 --- a/numpy/f2py/tests/test_data.py +++ b/numpy/f2py/tests/test_data.py @@ -29,5 +29,5 @@ def test_crackedlines(self): assert mod[0]['vars']['my_real_array']['='] == '(/1.0d0, 2.0d0, 3.0d0/)' assert mod[0]['vars']['ref_index_one']['='] == '(13.0d0, 21.0d0)' assert mod[0]['vars']['ref_index_two']['='] == '(-30.0d0, 43.0d0)' - # assert mod[0]['vars']['my_array']['='] == '(1.0d0, 2.0d0), (-3.0d0, 4.0d0)' - # assert mod[0]['vars']['z']['='] == '(/ 3.5, 7.0 /)' + assert mod[0]['vars']['my_array']['='] == '(/(1.0d0, 2.0d0), (-3.0d0, 4.0d0)/)' + assert mod[0]['vars']['z']['='] == '(/3.5, 7.0/)' From 6e6352690162b23ba285b8fdc9cd08877f5a1765 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 15 Apr 2023 19:00:09 +0000 Subject: [PATCH 112/120] MAINT: Add a helper --- numpy/f2py/auxfuncs.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 535e324286bd..0c08e0a5e2cf 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -28,7 +28,7 @@ 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', 'getargs2', 'getcallprotoargument', 'getcallstatement', 'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode', - 'getusercode1', 'hasbody', 'hascallstatement', 'hascommon', + 'getusercode1', 'getdimension', 'hasbody', 'hascallstatement', 'hascommon', 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', 'isallocatable', 'isarray', 'isarrayofstrings', 'ischaracter', 'ischaracterarray', 'ischaracter_or_characterarray', @@ -420,6 +420,13 @@ def isexternal(var): return 'attrspec' in var and 'external' in var['attrspec'] +def getdimension(var): + dimpattern = r"\((.*?)\)" + if 'attrspec' in var.keys(): + if any('dimension' in s for s in var['attrspec']): + return [re.findall(dimpattern, v) for v in var['attrspec']][0] + + def isrequired(var): return not isoptional(var) and isintent_nothide(var) From cb3ffca988525e5701af2814502fc6563e7d3968 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sat, 15 Apr 2023 19:00:16 +0000 Subject: [PATCH 113/120] BUG: Handle data statements in pyf files correctly --- numpy/f2py/crackfortran.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 286586351d0a..cc041ec56e6e 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -1450,6 +1450,7 @@ def 
analyzeline(m, case, line): continue fc = 0 vtype = vars[v].get('typespec') + vdim = getdimension(vars[v]) if (vtype == 'complex'): cmplxpat = r"\(.*?\)" @@ -1462,7 +1463,12 @@ def analyzeline(m, case, line): if '=' in vars[v] and not vars[v]['='] == matches[idx]: outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % ( v, vars[v]['='], matches[idx])) - vars[v]['='] = matches[idx] + + if vdim is not None: + # Need to assign multiple values to one variable + vars[v]['='] = "(/{}/)".format(", ".join(matches)) + else: + vars[v]['='] = matches[idx] last_name = v groupcache[groupcounter]['vars'] = vars if last_name is not None: From 5ef53c6ad250eaf46739ca4d960e384e28857715 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 14 Sep 2023 15:35:23 +0200 Subject: [PATCH 114/120] TYP: Add annotations for the py3.12 buffer protocol --- numpy/__init__.pyi | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 32d084b6e137..a185bfe754e3 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1,4 +1,5 @@ import builtins +import sys import os import mmap import ctypes as ct @@ -1440,17 +1441,18 @@ _ShapeType = TypeVar("_ShapeType", bound=Any) _ShapeType2 = TypeVar("_ShapeType2", bound=Any) _NumberType = TypeVar("_NumberType", bound=number[Any]) -# There is currently no exhaustive way to type the buffer protocol, -# as it is implemented exclusively in the C API (python/typing#593) -_SupportsBuffer = Union[ - bytes, - bytearray, - memoryview, - _array.array[Any], - mmap.mmap, - NDArray[Any], - generic, -] +if sys.version_info >= (3, 12): + from collections.abc import Buffer as _SupportsBuffer +else: + _SupportsBuffer = ( + bytes + | bytearray + | memoryview + | _array.array[Any] + | mmap.mmap + | NDArray[Any] + | generic + ) _T = TypeVar("_T") _T_co = TypeVar("_T_co", covariant=True) @@ -1513,6 +1515,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): order: _OrderKACF = ..., ) -> _ArraySelf: ... + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + def __class_getitem__(self, item: Any) -> GenericAlias: ... @overload @@ -2570,6 +2575,9 @@ class generic(_ArrayOrScalarCommon): @property def flat(self: _ScalarType) -> flatiter[ndarray[Any, _dtype[_ScalarType]]]: ... + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + @overload def astype( self, @@ -2772,6 +2780,9 @@ class object_(generic): def __float__(self) -> float: ... def __complex__(self) -> complex: ... + if sys.version_info >= (3, 12): + def __release_buffer__(self, buffer: memoryview, /) -> None: ... + # The `datetime64` constructors requires an object with the three attributes below, # and thus supports datetime duck typing class _DatetimeScalar(Protocol): From 56e580d2e2c9b238de3ed2ce2299356013bace27 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 14 Sep 2023 15:49:26 +0200 Subject: [PATCH 115/120] TYP: Use `collections.abc.Buffer` in the Array API --- numpy/array_api/_typing.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/array_api/_typing.py b/numpy/array_api/_typing.py index 3f9b7186aebb..e63a375b5f66 100644 --- a/numpy/array_api/_typing.py +++ b/numpy/array_api/_typing.py @@ -17,6 +17,8 @@ "PyCapsule", ] +import sys + from typing import ( Any, Literal, @@ -63,8 +65,11 @@ def __len__(self, /) -> int: ... 
float64, ]] +if sys.version_info >= (3, 12): + from collections.abc import Buffer as SupportsBufferProtocol +else: + SupportsBufferProtocol = Any -SupportsBufferProtocol = Any PyCapsule = Any class SupportsDLPack(Protocol): From 92aab8c8ba396e046e7a10bc27a0979dc5bc030f Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 14 Sep 2023 15:50:08 +0200 Subject: [PATCH 116/120] TYP: Use `collections.abc.Buffer` in the `npt.ArrayLike` definition --- numpy/_typing/_array_like.py | 25 +++++++++++-------- .../tests/data/reveal/array_constructors.pyi | 8 ++++++ 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index cba6fffaf955..883e817d9a6c 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -1,7 +1,9 @@ from __future__ import annotations +import sys from collections.abc import Collection, Callable, Sequence from typing import Any, Protocol, Union, TypeVar, runtime_checkable + from numpy import ( ndarray, dtype, @@ -76,17 +78,18 @@ def __array_function__( _NestedSequence[_T], ] -# TODO: support buffer protocols once -# -# https://bugs.python.org/issue27501 -# -# is resolved. See also the mypy issue: -# -# https://github.com/python/typing/issues/593 -ArrayLike = _DualArrayLike[ - dtype[Any], - Union[bool, int, float, complex, str, bytes], -] +if sys.version_info >= (3, 12): + from collections.abc import Buffer + + ArrayLike = Buffer | _DualArrayLike[ + dtype[Any], + Union[bool, int, float, complex, str, bytes], + ] +else: + ArrayLike = _DualArrayLike[ + dtype[Any], + Union[bool, int, float, complex, str, bytes], + ] # `ArrayLike_co`: array-like objects that can be coerced into `X` # given the casting rules `same_kind` diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 3eb560aafd9e..0bfbc63093a3 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -211,3 +211,11 @@ assert_type(np.stack([A, A], out=B), SubClass[np.float64]) assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) assert_type(np.block(C), npt.NDArray[Any]) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer + + def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... + + buffer: Buffer + assert_type(create_array(buffer), npt.NDArray[Any]) From f2121dc67c9ad034fa3d39058264456131e21d6a Mon Sep 17 00:00:00 2001 From: Liang Yan Date: Fri, 21 Jul 2023 10:13:18 +0800 Subject: [PATCH 117/120] DOC: Fix doc build warning for random. (cherry picked from commit 464c03fc2b7d5aee33c0721f7e4ccf83e8a4fbc2) --- doc/source/reference/random/examples/cython/index.rst | 6 +++++- .../examples/cython/{setup.py.rst => meson.build.rst} | 6 +++--- 2 files changed, 8 insertions(+), 4 deletions(-) rename doc/source/reference/random/examples/cython/{setup.py.rst => meson.build.rst} (68%) diff --git a/doc/source/reference/random/examples/cython/index.rst b/doc/source/reference/random/examples/cython/index.rst index 368f5fcd5676..ffe0425ebad7 100644 --- a/doc/source/reference/random/examples/cython/index.rst +++ b/doc/source/reference/random/examples/cython/index.rst @@ -4,8 +4,12 @@ Extending `numpy.random` via Cython ----------------------------------- +.. _note: + +Starting with NumPy 1.26.0, Meson is the default build system for NumPy. +See :ref:`distutils-status-migration`. .. 
toctree:: - setup.py.rst + meson.build.rst extending.pyx extending_distributions.pyx diff --git a/doc/source/reference/random/examples/cython/setup.py.rst b/doc/source/reference/random/examples/cython/meson.build.rst similarity index 68% rename from doc/source/reference/random/examples/cython/setup.py.rst rename to doc/source/reference/random/examples/cython/meson.build.rst index bc7a74c59382..ed4fd9b6a2d4 100644 --- a/doc/source/reference/random/examples/cython/setup.py.rst +++ b/doc/source/reference/random/examples/cython/meson.build.rst @@ -1,5 +1,5 @@ -setup.py --------- +meson.build +----------- -.. literalinclude:: ../../../../../../numpy/random/_examples/cython/setup.py +.. literalinclude:: ../../../../../../numpy/random/_examples/cython/meson.build :language: python From 44fe37c1e23a6f4a397822cbfa532a110eef2d9a Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 15 Sep 2023 17:38:49 +0200 Subject: [PATCH 118/120] DOC: fix two cross-references to SciPy docs in "building from source" --- doc/source/user/building.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst index c3fa350a74e4..3c728f99cc5c 100644 --- a/doc/source/user/building.rst +++ b/doc/source/user/building.rst @@ -51,7 +51,7 @@ Building NumPy requires the following software installed: can be used, including optimized LAPACK libraries such as OpenBLAS or MKL. The choice and location of these libraries as well as include paths and other such build options can be specified in a ``.pc`` file, as documented in - :ref:`scipy:using-pkg-config-to-detect-libraries-in-a-nonstandard-location`. + :ref:`scipy:building-blas-and-lapack`. 4) Cython @@ -138,7 +138,7 @@ file. Cross compilation ----------------- -For cross compilation instructions, see :doc:`scipy:cross_compilation` and the -`Meson documentation `_. +For cross compilation instructions, see :doc:`scipy:building/cross_compilation` +and the `Meson documentation `_. .. _meson: https://mesonbuild.com/Cross-compilation.html#cross-compilation From 4893b24867f0bba433f9f55feb74f9b657e4a015 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 15 Sep 2023 17:54:17 +0200 Subject: [PATCH 119/120] DEV: improve `spin docs` command Explain how to build a zipfile of the html docs for release purposes. Remove --install-deps, as that is bad practice and `pip` invocations for dependencies should not be present in this CLI. Closes gh-24713 --- .spin/cmds.py | 16 +++++----------- doc/HOWTO_RELEASE.rst | 9 ++++----- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 656625afe2a9..ea994c38311e 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -75,13 +75,8 @@ def build(ctx, meson_args, jobs=None, clean=False, verbose=False): default="auto", help="Number of parallel build jobs" ) -@click.option( - "--install-deps/--no-install-deps", - default=False, - help="Install dependencies before building" -) @click.pass_context -def docs(ctx, sphinx_target, clean, first_build, jobs, install_deps): +def docs(ctx, sphinx_target, clean, first_build, jobs): """📖 Build Sphinx documentation By default, SPHINXOPTS="-W", raising errors on warnings. 
@@ -97,13 +92,12 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, install_deps): spin docs TARGET - """ - if sphinx_target not in ('targets', 'help'): - if install_deps: - util.run(['pip', 'install', '-q', '-r', 'doc_requirements.txt']) + E.g., to build a zipfile of the html docs for distribution: + + spin docs dist + """ meson.docs.ignore_unknown_options = True - del ctx.params['install_deps'] ctx.forward(meson.docs) diff --git a/doc/HOWTO_RELEASE.rst b/doc/HOWTO_RELEASE.rst index 032d51780e9d..d4dbb504ea5d 100644 --- a/doc/HOWTO_RELEASE.rst +++ b/doc/HOWTO_RELEASE.rst @@ -100,12 +100,11 @@ github actions. Building docs ------------- -We are no longer building ``PDF`` files. All that will be needed is +We are no longer building pdf files, only html docs. The ``numpy-html.zip`` +needed to upload to the doc server can be built with ``spin docs dist``. -- virtualenv (pip). - -The other requirements will be filled automatically during the documentation -build process. +To install the necessary doc build dependencies into your development +environment, run ``pip install -r doc_requirements.txt``. Uploading to PyPI From 2f7c195ea12de3827e957cee85565b645bbd136b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 6 Sep 2023 17:00:56 -0600 Subject: [PATCH 120/120] REL: Prepare for the NumPy 1.26.0 release - Update 1.26.0-changelog.rst - Update 1.26.0-notes.rst - Update pyproject.toml - Update pyproject.toml.setuppy - Small fix to f2py annotations [wheel build] --- doc/changelog/1.26.0-changelog.rst | 14 ++++++++++++-- doc/source/release/1.26.0-notes.rst | 15 +++++++++++++-- numpy/f2py/__init__.pyi | 1 - pyproject.toml | 2 +- pyproject.toml.setuppy | 2 +- 5 files changed, 27 insertions(+), 7 deletions(-) diff --git a/doc/changelog/1.26.0-changelog.rst b/doc/changelog/1.26.0-changelog.rst index 67576479254e..84151fa0959b 100644 --- a/doc/changelog/1.26.0-changelog.rst +++ b/doc/changelog/1.26.0-changelog.rst @@ -2,7 +2,7 @@ Contributors ============ -A total of 18 people contributed to this release. People with a "+" by their +A total of 20 people contributed to this release. People with a "+" by their names contributed a patch for the first time. * @DWesl @@ -10,7 +10,9 @@ names contributed a patch for the first time. * Bas van Beek * Charles Harris * Developer-Ecosystem-Engineering +* Filipe Laíns + * Jake Vanderplas +* Liang Yan + * Marten van Kerkwijk * Matti Picus * Melissa Weber Mendonça @@ -27,7 +29,7 @@ names contributed a patch for the first time. Pull requests merged ==================== -A total of 51 pull requests were merged for this release. +A total of 59 pull requests were merged for this release. * `#24305 `__: MAINT: Prepare 1.26.x branch for development * `#24308 `__: MAINT: Massive update of files from main for numpy 1.26 @@ -80,3 +82,11 @@ A total of 51 pull requests were merged for this release. * `#24638 `__: MAINT: Bump actions/checkout from 3.6.0 to 4.0.0 * `#24647 `__: ENH: ``meson`` backend for ``f2py`` * `#24648 `__: MAINT: Refactor partial load Workaround for Clang +* `#24653 `__: REL: Prepare for the NumPy 1.26.0rc1 release. +* `#24659 `__: BLD: allow specifying the long double format to avoid the runtime... 
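A minimal sketch of that workflow, assuming it is run from the root of a
NumPy checkout with a working build environment::

    pip install -r doc_requirements.txt
    spin docs dist    # builds the html docs and packages them as numpy-html.zip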
+* `#24665 `__: BLD: fix bug in random.mtrand extension, don't link libnpyrandom +* `#24675 `__: BLD: build wheels for 32-bit Python on Windows, using MSVC +* `#24700 `__: BLD: fix issue with compiler selection during cross compilation +* `#24701 `__: BUG: Fix data stmt handling for complex values in f2py +* `#24707 `__: TYP: Add annotations for the py3.12 buffer protocol +* `#24718 `__: DOC: fix a few doc build issues on 1.26.x and update `spin docs`... diff --git a/doc/source/release/1.26.0-notes.rst b/doc/source/release/1.26.0-notes.rst index 9ac0a95c0b81..0fbbec9a0a98 100644 --- a/doc/source/release/1.26.0-notes.rst +++ b/doc/source/release/1.26.0-notes.rst @@ -22,6 +22,7 @@ The highlights of this release are: - Use of the Meson build system - Updated SIMD support - f2py fixes, meson and bind(x) support +- Support for the updated Accelerate BLAS/LAPACK library The Python versions supported in this release are 3.9-3.12. @@ -145,7 +146,7 @@ therefore would like to see all potential blockers surfaced early on in the Contributors ============ -A total of 18 people contributed to this release. People with a "+" by their +A total of 20 people contributed to this release. People with a "+" by their names contributed a patch for the first time. * @DWesl @@ -153,7 +154,9 @@ names contributed a patch for the first time. * Bas van Beek * Charles Harris * Developer-Ecosystem-Engineering +* Filipe Laíns + * Jake Vanderplas +* Liang Yan + * Marten van Kerkwijk * Matti Picus * Melissa Weber Mendonça @@ -171,7 +174,7 @@ names contributed a patch for the first time. Pull requests merged ==================== -A total of 51 pull requests were merged for this release. +A total of 59 pull requests were merged for this release. * `#24305 `__: MAINT: Prepare 1.26.x branch for development * `#24308 `__: MAINT: Massive update of files from main for numpy 1.26 @@ -224,3 +227,11 @@ A total of 51 pull requests were merged for this release. * `#24638 `__: MAINT: Bump actions/checkout from 3.6.0 to 4.0.0 * `#24647 `__: ENH: ``meson`` backend for ``f2py`` * `#24648 `__: MAINT: Refactor partial load Workaround for Clang +* `#24653 `__: REL: Prepare for the NumPy 1.26.0rc1 release. +* `#24659 `__: BLD: allow specifying the long double format to avoid the runtime... +* `#24665 `__: BLD: fix bug in random.mtrand extension, don't link libnpyrandom +* `#24675 `__: BLD: build wheels for 32-bit Python on Windows, using MSVC +* `#24700 `__: BLD: fix issue with compiler selection during cross compilation +* `#24701 `__: BUG: Fix data stmt handling for complex values in f2py +* `#24707 `__: TYP: Add annotations for the py3.12 buffer protocol +* `#24718 `__: DOC: fix a few doc build issues on 1.26.x and update `spin docs`... diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi index 6e3a82cf8f44..81b6a24f39ec 100644 --- a/numpy/f2py/__init__.pyi +++ b/numpy/f2py/__init__.pyi @@ -14,7 +14,6 @@ class _F2PyDict(_F2PyDictBase, total=False): ltx: list[str] __all__: list[str] -__path__: list[str] test: PytestTester def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ... 
diff --git a/pyproject.toml b/pyproject.toml index c41fcc1ba92e..c8620c90f4e7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,7 @@ requires = [ [project] name = "numpy" -version = "1.26.0rc1" +version = "1.26.0" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} diff --git a/pyproject.toml.setuppy b/pyproject.toml.setuppy index 4d14e50ae044..0925099ed523 100644 --- a/pyproject.toml.setuppy +++ b/pyproject.toml.setuppy @@ -3,7 +3,7 @@ # to avoid building with Meson (e.g., in the Emscripten/Pyodide CI job) [project] name = "numpy" -version = "1.26.0rc1" +version = "1.26.0" [build-system] requires = [