diff --git a/.circleci/config.yml b/.circleci/config.yml
index 8f9fa8c9fed0..530631281c80 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -9,7 +9,7 @@ _defaults: &defaults
   docker:
     # CircleCI maintains a library of pre-built images
     # documented at https://circleci.com/developer/images/image/cimg/python
-    - image: cimg/python:3.11.4
+    - image: cimg/python:3.11.8
 
   working_directory: ~/repo
@@ -74,7 +74,7 @@ jobs:
             . venv/bin/activate
             cd doc
             # Don't use -q, show warning summary"
-            SPHINXOPTS="-j2 -n" make -e html || echo "ignoring errors for now, see gh-13114"
+            SPHINXOPTS="-W -n" make -e html
             if [[ $(find build/html -type f | wc -l) -lt 1000 ]]; then
               echo "doc build failed: build/html is empty"
               exit -1
@@ -85,7 +85,7 @@
           command: |
             . venv/bin/activate
             cd doc/neps
-            SPHINXOPTS="-j2 -q" make -e html
+            SPHINXOPTS="-n" make -e html || echo "ignoring errors for now"
 
       - store_artifacts:
          path: doc/build/html/
diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml
index 728a91f691b3..db980e27f2b6 100644
--- a/.github/workflows/emscripten.yml
+++ b/.github/workflows/emscripten.yml
@@ -23,7 +23,7 @@ jobs:
     # To enable this workflow on a fork, comment out:
     if: github.repository == 'numpy/numpy'
     env:
-      PYODIDE_VERSION: 0.25.0
+      PYODIDE_VERSION: 0.25.1
       # PYTHON_VERSION and EMSCRIPTEN_VERSION are determined by PYODIDE_VERSION.
       # The appropriate versions can be found in the Pyodide repodata.json
       # "info" field, or in Makefile.envs:
@@ -42,7 +42,7 @@ jobs:
       - name: Set up Python ${{ env.PYTHON_VERSION }}
         id: setup-python
-        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
+        uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
@@ -69,11 +69,6 @@ jobs:
           with open(env_file, "a") as myfile:
               myfile.write(f"PYODIDE_BUILD_PATH={pyodide_build_path}\n")
 
-      - name: Apply patch(es) for pyodide-build installation
-        run: |
-          ls -a ${{ env.PYODIDE_BUILD_PATH }}
-          patch -d "${{ env.PYODIDE_BUILD_PATH }}" -p1 < tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch
-
       - name: Build NumPy for Pyodide
         run: |
           pyodide build -Cbuild-dir=build -Csetup-args="--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross" -Csetup-args="-Dblas=none" -Csetup-args="-Dlapack=none"
diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index d0df9834ad70..997b606fc6c8 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -37,7 +37,7 @@ jobs:
       with:
         submodules: recursive
         fetch-depth: 0
-    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
+    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0
       with:
         python-version: '3.9'
     - name: Install linter requirements
@@ -58,7 +58,7 @@ jobs:
       with:
         submodules: recursive
         fetch-tags: true
-    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
+    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0
       with:
         python-version: '3.9'
     - uses: ./.github/meson_actions
@@ -72,7 +72,7 @@ jobs:
       with:
         submodules: recursive
         fetch-tags: true
-    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
+    - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0
       with:
         python-version: 'pypy3.9-v7.3.12'
     - name: Setup using scipy-openblas
@@ -119,7 +119,7 @@ jobs:
       with:
         submodules: recursive
         fetch-tags: true
-    - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # 
v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.9' - name: Install build and test dependencies from PyPI @@ -156,14 +156,14 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.9' - name: Install build and benchmarking dependencies run: | sudo apt-get update sudo apt-get install libopenblas-dev ninja-build - pip install spin cython asv virtualenv packaging + pip install asv virtualenv packaging -r requirements/build_requirements.txt - name: Install NumPy run: | spin build -- -Dcpu-dispatch=none @@ -187,7 +187,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' - name: Install gfortran and setup OpenBLAS (sdist build) @@ -225,11 +225,11 @@ jobs: uses: actions/checkout@v4 with: repository: data-apis/array-api-tests - ref: '9afe8c709d81f005c98d383c82ad5e1c2cd8166c' # Latest commit as of 2023-11-24 + ref: '3cf8ef654c456d9fd1633d64e67b4470465940e9' # Latest commit as of 2024-04-09 submodules: 'true' path: 'array-api-tests' - name: Set up Python - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -246,8 +246,6 @@ jobs: PYTHONWARNINGS: 'ignore::UserWarning::,ignore::DeprecationWarning::,ignore::RuntimeWarning::' run: | cd ${GITHUB_WORKSPACE}/array-api-tests - # remove once https://github.com/data-apis/array-api-tests/pull/217 is merged - touch pytest.ini pytest array_api_tests -v -c pytest.ini --ci --max-examples=2 --derandomize --disable-deadline --skips-file ${GITHUB_WORKSPACE}/tools/ci/array-api-skips.txt custom_checks: @@ -259,7 +257,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 598a1c784b62..3b23072dccfa 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -69,7 +69,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -196,7 +196,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -224,7 +224,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -284,7 +284,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: 
actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -347,7 +347,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -383,7 +383,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' diff --git a/.github/workflows/linux_compiler_sanitizers.yml b/.github/workflows/linux_compiler_sanitizers.yml index 78e90122c348..5ac6c6fde48b 100644 --- a/.github/workflows/linux_compiler_sanitizers.yml +++ b/.github/workflows/linux_compiler_sanitizers.yml @@ -4,7 +4,8 @@ on: pull_request: branches: - main - - maintenance/** + # Disable this test on the 2.0.x branch, it hangs. + # - maintenance/** defaults: run: @@ -30,7 +31,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: ${{ env.PYTHON_VERSION }} - name: Install dependencies diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 967e16b327a9..d4d6fe4a4989 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -108,7 +108,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@v4.0.1 + uses: actions/cache@v4.0.2 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 13ef2bffe005..aa4fe75f14cf 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -62,9 +62,9 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: - python-version: '3.9' + python-version: '3.10' - uses: ./.github/meson_actions name: Build/Test @@ -79,9 +79,9 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: - python-version: '3.9' + python-version: '3.10' - name: Install GCC/8/9 run: | @@ -117,7 +117,7 @@ jobs: - [ "without optimizations", "-Dallow-noblas=true -Ddisable-optimization=true", - "3.12-dev" + "3.12" ] - [ "native", @@ -132,7 +132,7 @@ jobs: - [ "without avx512/avx2/fma3", "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C", - "3.9" + "3.10" ] env: @@ -144,7 +144,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -152,13 +152,13 @@ jobs: intel_sde_avx512: needs: [baseline_only] - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive fetch-tags: true - - 
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -170,27 +170,23 @@ jobs: - name: Install dependencies run: | - sudo apt update - sudo apt install -y g++-13 - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 1 - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-13 1 python -m pip install -r requirements/build_requirements.txt python -m pip install pytest pytest-xdist hypothesis typing_extensions - name: Build - run: spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512f -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' + run: CC=gcc-13 CXX=g++-13 spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' - name: Meson Log if: always() run: cat build/meson-logs/meson-log.txt - - name: SIMD tests (KNM) + - name: SIMD tests (SKX) run: | export NUMPY_SITE=$(realpath build-install/usr/lib/python*/site-packages/) export PYTHONPATH="$PYTHONPATH:$NUMPY_SITE" cd build-install && - sde -knm -- python -c "import numpy; numpy.show_config()" && - sde -knm -- python -m pytest $NUMPY_SITE/numpy/_core/tests/test_simd* + sde -skx -- python -c "import numpy; numpy.show_config()" && + sde -skx -- python -m pytest $NUMPY_SITE/numpy/_core/tests/test_simd* - name: linalg/ufunc/umath tests (TGL) run: | @@ -206,13 +202,13 @@ jobs: intel_sde_spr: needs: [baseline_only] - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' @@ -224,15 +220,11 @@ jobs: - name: Install dependencies run: | - sudo apt update - sudo apt install -y g++-13 - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 1 - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-13 1 python -m pip install -r requirements/build_requirements.txt python -m pip install pytest pytest-xdist hypothesis typing_extensions - name: Build - run: spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_spr + run: CC=gcc-13 CXX=g++-13 spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_spr - name: Meson Log if: always() diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index d0d7605221f4..7c759631c863 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -44,7 +44,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -68,7 +68,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. 
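Aside on the linux_simd.yml hunks above, which move the Intel SDE jobs from KNM to SKX emulation and select GCC 13 via CC/CXX at build time instead of update-alternatives. A minimal sketch of the emulated test run follows; `sde` being on PATH and a `spin build` output tree under build-install/ are assumptions of the sketch, not checked-in tooling:

    # Sketch of the `sde -skx -- python ...` pattern from linux_simd.yml above.
    # Assumes Intel SDE is installed as `sde` and NumPy was built with
    # `spin build` so that build-install/ exists.
    import glob
    import os
    import subprocess

    site = os.path.abspath(
        glob.glob("build-install/usr/lib/python*/site-packages")[0])
    env = dict(os.environ, PYTHONPATH=site)

    # -skx makes SDE emulate a Skylake-X CPU, so the AVX512_SKX dispatch
    # paths run even on hosts without AVX-512 hardware.
    subprocess.run(["sde", "-skx", "--", "python", "-c",
                    "import numpy; numpy.show_config()"], check=True, env=env)
    subprocess.run(["sde", "-skx", "--", "python", "-m", "pytest", "-k", "simd",
                    os.path.join(site, "numpy", "_core", "tests")],
                   check=True, env=env)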
- name: Cache conda environment - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 @@ -119,7 +119,7 @@ jobs: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.10' diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 6d0a25eb71c5..781bba2f1f0d 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -54,7 +54,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: ${{ matrix.os_python[1] }} - name: Install dependencies diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 734dd635b549..72a9a8c8bad2 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -26,6 +26,9 @@ on: branches: - main - maintenance/** + push: + tags: + - v* workflow_dispatch: concurrency: @@ -85,9 +88,11 @@ jobs: python: ["cp39", "cp310", "cp311", "cp312", "pp39"] exclude: # Don't build PyPy 32-bit windows - - buildplat: [windows-2019, win32] + - buildplat: [windows-2019, win32, ""] python: "pp39" - - buildplat: [ ubuntu-20.04, musllinux_x86_64 ] + - buildplat: [ ubuntu-20.04, musllinux_x86_64, "" ] + python: "pp39" + - buildplat: [ macos-14, macosx_arm64, accelerate ] python: "pp39" env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} @@ -98,11 +103,6 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: true - # versioneer.py requires the latest tag to be reachable. Here we - # fetch the complete history to get access to the tags. - # A shallow clone can work when the following issue is resolved: - # https://github.com/actions/checkout/issues/338 - fetch-tags: true - name: Setup MSVC (32-bit) if: ${{ matrix.buildplat[1] == 'win32' }} @@ -123,13 +123,18 @@ jobs: if: runner.os == 'windows' # Used to push the built wheels - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: "3.x" - name: Setup macOS if: matrix.buildplat[0] == 'macos-13' || matrix.buildplat[0] == 'macos-14' run: | + # Needed due to https://github.com/actions/runner-images/issues/3371 + # Supported versions: https://github.com/actions/runner-images/blob/main/images/macos/macos-14-arm64-Readme.md + echo "FC=gfortran-13" >> "$GITHUB_ENV" + echo "F77=gfortran-13" >> "$GITHUB_ENV" + echo "F90=gfortran-13" >> "$GITHUB_ENV" if [[ ${{ matrix.buildplat[2] }} == 'accelerate' ]]; then # macosx_arm64 and macosx_x86_64 with accelerate # only target Sonoma onwards @@ -149,7 +154,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@ce3fb7832089eb3e723a0a99cab7f3eaccf074fd # v2.16.5 + uses: pypa/cibuildwheel@8d945475ac4b1aac4ae08b2fd27db9917158b6ce # v2.17.0 env: CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} @@ -212,13 +217,8 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: true - # versioneer.py requires the latest tag to be reachable. 
Here we - # fetch the complete history to get access to the tags. - # A shallow clone can work when the following issue is resolved: - # https://github.com/actions/checkout/issues/338 - fetch-tags: true # Used to push the built wheels - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: # Build sdist on lowest supported Python python-version: "3.9" diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 38a6cf24b7e0..bf10ff006649 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -31,13 +31,13 @@ jobs: fetch-tags: true - name: Setup Python - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.11' - name: Install build dependencies from PyPI run: | - python -m pip install spin Cython + python -m pip install -r requirements/build_requirements.txt - name: Install pkg-config run: | @@ -48,7 +48,10 @@ jobs: - name: Install Clang-cl if: matrix.compiler == 'Clang-cl' run: | - choco install llvm -y --version=16.0.6 + # llvm is preinstalled, but leave + # this here in case we need to pin the + # version at some point. + #choco install llvm -y - name: Install NumPy (MSVC) if: matrix.compiler == 'MSVC' @@ -91,7 +94,7 @@ jobs: fetch-tags: true - name: Setup Python (32-bit) - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: '3.10' architecture: 'x86' diff --git a/.mailmap b/.mailmap index 2d910fe98fea..143ad1c4a9b2 100644 --- a/.mailmap +++ b/.mailmap @@ -7,8 +7,8 @@ # # This file is up-to-date if the command git log --format="%aN <%aE>" | sort -u # gives no duplicates. 
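The .mailmap header above states its own consistency rule: `git log --format="%aN <%aE>" | sort -u` should list each author exactly once. A minimal standalone sketch of that check; the grouping logic is illustrative, not a script shipped in the repo:

    # Sketch of the .mailmap consistency check described in the header comment.
    # Run from a clone of the repository; nothing here is numpy-specific.
    import subprocess
    from collections import defaultdict

    log = subprocess.run(
        ["git", "log", "--format=%aN <%aE>"],
        capture_output=True, text=True, check=True,
    ).stdout

    by_name = defaultdict(set)
    for line in sorted(set(log.splitlines())):
        name, _, email = line.rpartition(" ")
        by_name[name].add(email)

    # A name mapped to more than one email means .mailmap needs a new entry.
    for name, emails in sorted(by_name.items()):
        if len(emails) > 1:
            print(name, *sorted(emails))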
- @8bitmp3 <19637339+8bitmp3@users.noreply.github.com> +@Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> @DWesl <22566757+DWesl@users.noreply.github.com> @Endolith @GalaxySnail @@ -18,33 +18,49 @@ @Lisa <34400837+lyzlisa@users.noreply.github.com> @Patrick <39380924+xamm@users.noreply.github.com> @Scian <65375075+hoony6134@users.noreply.github.com> +@Searchingdays @amagicmuffin <2014wcheng@gmail.com> @code-review-doctor +@cook-1229 <70235336+cook-1229@users.noreply.github.com> @dg3192 <113710955+dg3192@users.noreply.github.com> +@ellaella12 +@ellaella12 <120079323+ellaella12@users.noreply.github.com> @h-vetinari @h6197627 <44726212+h6197627@users.noreply.github.com> @jbCodeHub @juztamau5 @legoffant <58195095+legoffant@users.noreply.github.com> +@liang3zy22 <35164941+liang3zy22@users.noreply.github.com> @luzpaz @luzpaz +@matoro +@mcp292 +@mgunyho <20118130+mgunyho@users.noreply.github.com> +@msavinash <73682349+msavinash@users.noreply.github.com> +@mykykh <49101849+mykykh@users.noreply.github.com> @partev @pkubaj @pmvz +@pojaghi <36278217+pojaghi@users.noreply.github.com> @pratiklp00 @sfolje0 @spacescientist +@stefan6419846 +@stefan6419846 <96178532+stefan6419846@users.noreply.github.com> @tajbinjohn @tautaus +@undermyumbrella1 @xoviat <49173759+xoviat@users.noreply.github.com> @xoviat <49173759+xoviat@users.noreply.github.com> @yan-wyb @yetanothercheer Aaron Baecker +Adrin Jalali Arun Kota Arun Kota Arun Kota Aarthi Agurusa Adarsh Singh ADARSH SINGH +Aditi Saluja <136131452+salujaditi14@users.noreply.github.com> Andrei Batomunkuev Ajay DS Ajay DS @@ -89,6 +105,8 @@ Andrea Bianchi Andrea Bianchi andrea-bia Ankit Dwivedi Ankit Dwivedi +Ankur Singh +Ankur Singh <98346896+ankur0904@users.noreply.github.com> Amir Sarabadani Anas Khan Anatoly Techtonik @@ -126,6 +144,7 @@ Bhargav V <12525622+brpy@users.noreply.github.com> Bas van Beek <43369155+BvB93@users.noreply.github.com> Behzad Nouri Ben Nathanson +Ben Woodruff Benjamin Root Benjamin Root weathergod Bernardt Duvenhage @@ -173,6 +192,7 @@ Chun-Wei Chen Chunlin Fang Chunlin Fang <834352945@qq.com> Chunlin Fang +Cobalt Yang Colin Snyder <8csnyder@gmail.com> <47012605+colinsnyder@users.noreply.github.com> Constanza Fierro Dahyun Kim @@ -205,24 +225,30 @@ Derek Homeier Derek Homeier Derrick Williams Devin Shanahan +Daval Parmar <53395856+DhavalParmar61@users.noreply.github.com> Digya Acharya Dima Pasechnik Dima Pasechnik Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Ding Liu Ding Liu +D.J. Ramones +D.J. Ramones <50655786+djramones@users.noreply.github.com> Dmitriy Shalyga Dmitry Belov Dustan Levenstein <43019642+dustanlevenstein@users.noreply.github.com> Dylan Cutler Ed Schofield Egor Zindy +Élie Goudout +Élie Goudout <114467748+eliegoudout@users.noreply.github.com> Elliott M. Forney Erik M. Bray Erik M. Bray Erik M. Bray Eric Fode Eric Fode Eric Quintero +Eric Xie <161030123+EngineerEricXie@users.noreply.github.com> Ernest N. Mamikonyan Eskild Eriksen Eskild Eriksen <42120229+iameskild@users.noreply.github.com> @@ -277,13 +303,18 @@ Gerhard Hobler Giannis Zapantis Guillaume Peillex Jack J. Woehr +Jacob M. 
Casey Jaime Fernandez Jaime Fernandez Jaime Fernandez Jake Close +Jake VanderPlas +Jake VanderPlas +Jake VanderPlas Jakob Jakobson Jakob Jakobson <43045863+jakobjakobson13@users.noreply.github.com> James Bourbeau +James Oliver <46758370+jamesoliverh@users.noreply.github.com> James Webber Jamie Macey Jan Schlüter @@ -351,6 +382,8 @@ Khaled Ben Abdallah Okuda Kiko Correoso kikocorreoso Kiko Correoso kikocorreoso Konrad Kapp +Kristoffer Pedersen +Kristoffer Pedersen Kriti Singh Kmol Yuan Kumud Lakara <55556183+kumudlakara@users.noreply.github.com> @@ -366,14 +399,18 @@ Licht Takeuchi Lorenzo Mammana Lillian Zha Lillian Zha +Linus Sommer +Linus Sommer <95619282+linus-md@users.noreply.github.com> Lu Yun Chi <32014765+LuYunChi@users.noreply.github.com> Luis Pedro Coelho +Lucas Colley Luke Zoltan Kelley Madhulika Jain Chambers <53166646+madhulikajc@users.noreply.github.com> Magdalena Proszewska Magdalena Proszewska <38814059+mproszewska@users.noreply.github.com> Malik Idrees Hasan Khan <77000356+MalikIdreesHasanKhan@users.noreply.github.com>C Manoj Kumar +Marcel Loose Marcin Podhajski <36967358+m-podhajski@users.noreply.github.com> Margret Pax Margret Pax <13646646+paxcodes@users.noreply.github.com> @@ -386,6 +423,8 @@ Mark Wiebe Mark Wiebe Mars Lee Mars Lee <46167686+MarsBarLee@users.noreply.github.com> +Marten van Kerkwijk +Marten van Kerkwijk Martin Goodson Martin Reinecke Martin Teichmann @@ -395,18 +434,24 @@ Matheus Vieira Portela Matheus Santana Patriarca Mathieu Lamarre Matías Ríos +Matt Hancock Matt Ord Matt Ord <55235095+Matt-Ord@users.noreply.github.com> -Matt Hancock +Matt Thompson +Matthias Bussonnier Martino Sorbaro Márton Gunyhó Mattheus Ueckermann Matthew Barber Matthew Harrigan Matthias Bussonnier +Matthias Schaufelberger +Matthias Schaufelberger <45293673+maisevector@users.noreply.github.com> Matthieu Darbois Matti Picus Matti Picus mattip +Maya Anderson +Maya Anderson <63074550+andersonm-ibm@users.noreply.github.com> Maximilian Konrad Melissa Weber Mendonça Melissa Weber Mendonça @@ -430,7 +475,9 @@ Miles Cranmer Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> +Mohaned Qunaibit Muhammad Kasim +Muhammed Muhsin Mukulika Pahari Mukulika Pahari <60316606+Mukulikaa@users.noreply.github.com> Munira Alduraibi @@ -445,24 +492,30 @@ Nicolas Scheffer Nicolas Scheffer nickdg Nicholas McKibben Nick Minkyu Lee fivemok <9394929+fivemok@users.noreply.github.com> +Nyakku Shigure Norwid Behrnd Norwid Behrnd -Oliver Eberle Oleksiy Kononenko Oleksiy Kononenko <35204136+oleksiyskononenko@users.noreply.github.com> +Oliver Eberle +Olivier Barthelemy +Olivier Mattelaer Omar Ali Omid Rajaei Omid Rajaei <89868505+rajaeinet@users.noreply.github.com> Ondřej Čertík Óscar Villellas Guillén +Pablo Losada +Pablo Losada <48804010+TheHawz@users.noreply.github.com> Panos Mavrogiorgos Pantelis Antonoudiou Pantelis Antonoudiou Pat Miller patmiller Paul Ivanov Paul Ivanov -Paul YS Lee Paul Paul Jacobson +Paul Reece +Paul YS Lee Paul Pey Lian Lim Pey Lian Lim <2090236+pllim@users.noreply.github.com> Pearu Peterson @@ -488,6 +541,7 @@ Rakesh Vasudevan Ralf Gommers Ralf Gommers rgommers Rehas Sachdeva +Richard Howe <45905457+rmhowe425@users.noreply.github.com> Ritta Narita Riya Sharma Robert Kern @@ -499,6 +553,7 @@ Rohit Goswami Roland Kaufmann Roman Yurchak Ronan Lamy Ronan Lamy +Rostan Tabet Roy Jacobson Russell Hewett Ryan Blakemore @@ -514,6 +569,7 @@ Sam Radhakrishnan = <=> # committed without an email address Samesh Lakhotia Samesh Lakhotia 
<43701530+sameshl@users.noreply.github.com> Sami Salonen +Samuel Albanie Sanchez Gonzalez Alvaro Sanya Sinha <83265366+ssanya942@users.noreply.github.com> Saransh Chopra @@ -521,6 +577,8 @@ Saullo Giovani Saurabh Mehta Sayantika Banik Schrijvers Luc +Sean Cheah +Sean Cheah <67928790+thalassemia@users.noreply.github.com> Sebastian Berg Sebastian Schleehauf Serge Guelton @@ -594,9 +652,12 @@ William Spotz Wim Glenn Wojtek Ruszczewski Wojciech Rzadkowski <33913808+wrzadkow@users.noreply.github.com> +Xiangyi Wang Yamada Fuyuka Yang Hau Yang Hau +Yash Pethe +Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra Yashasvi Misra <54177363+yashasvimisra2798@users.noreply.github.com> Yogesh Raisinghani <46864533+raisinghanii@users.noreply.github.com> @@ -607,6 +668,7 @@ Yuji Kanagawa Yuki K Yury Kirienko Zac Hatfield-Dodds +Zach Brugh <111941670+zachbrugh@users.noreply.github.com> Zé Vinícius Zhang Na Zixu Zhao @@ -614,4 +676,5 @@ Ziyan Zhou Zieji Pohz Zieji Pohz <8103276+zjpoh@users.noreply.github.com> Zolboo Erdenebaatar +Zolisa Bleki Zolisa Bleki <44142765+zoj613@users.noreply.github.com> diff --git a/.spin/cmds.py b/.spin/cmds.py index 11e2b1b0e2d3..d98908666a33 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -1,11 +1,7 @@ import os import shutil -import sys -import argparse -import tempfile import pathlib import shutil -import json import pathlib import importlib import subprocess @@ -99,7 +95,7 @@ def changelog(ctx, token, revision_range): ) @click.argument("meson_args", nargs=-1) @click.pass_context -def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose=False, quiet=False): +def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose=False, quiet=False, *args, **kwargs): """🔧 Build package with Meson/ninja and install MESON_ARGS are passed through e.g.: @@ -136,11 +132,13 @@ def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose= @click.option( '--jobs', '-j', metavar='N_JOBS', - default="auto", - help="Number of parallel build jobs" + # Avoids pydata_sphinx_theme extension warning from default="auto". + default="1", + help=("Number of parallel build jobs." + "Can be set to `auto` to use all cores.") ) @click.pass_context -def docs(ctx, sphinx_target, clean, first_build, jobs): +def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): """📖 Build Sphinx documentation By default, SPHINXOPTS="-W", raising errors on warnings. @@ -163,6 +161,21 @@ def docs(ctx, sphinx_target, clean, first_build, jobs): """ meson.docs.ignore_unknown_options = True + # See https://github.com/scientific-python/spin/pull/199 + # Can be changed when spin updates to 0.11, and moved to pyproject.toml + if clean: + clean_dirs = [ + './doc/build/', + './doc/source/reference/generated', + './doc/source/reference/random/bit_generators/generated', + './doc/source/reference/random/generated', + ] + + for target_dir in clean_dirs: + if os.path.isdir(target_dir): + print(f"Removing {target_dir!r}") + shutil.rmtree(target_dir) + # Run towncrier without staging anything for commit. This is the way to get # release notes snippets included in a local doc build. 
     cmd = ['towncrier', 'build', '--version', '2.x.y', '--keep', '--draft']
@@ -209,7 +222,7 @@ def docs(ctx, sphinx_target, clean, first_build, jobs):
     '--verbose', '-v', is_flag=True, default=False
 )
 @click.pass_context
-def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose):
+def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs):
     """🔧 Run tests
 
     PYTEST_ARGS are passed through directly to pytest, e.g.:
@@ -260,6 +273,83 @@ def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose):
     ctx.forward(meson.test)
 
 
+@click.command()
+@click.argument("pytest_args", nargs=-1)
+@click.option(
+    "-j",
+    "n_jobs",
+    metavar='N_JOBS',
+    default="1",
+    help=("Number of parallel jobs for testing. "
+          "Can be set to `auto` to use all cores.")
+)
+@click.option(
+    '--verbose', '-v', is_flag=True, default=False
+)
+@click.pass_context
+def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs):
+    """🔧 Run doctests of objects in the public API.
+
+    PYTEST_ARGS are passed through directly to pytest, e.g.:
+
+      spin check-docs -- --pdb
+
+    To run tests on a directory:
+
+    \b
+    spin check-docs numpy/linalg
+
+    To report the durations of the N slowest doctests:
+
+      spin check-docs -- --durations=N
+
+    To run doctests that match a given pattern:
+
+    \b
+    spin check-docs -- -k "slogdet"
+    spin check-docs numpy/linalg -- -k "det and not slogdet"
+
+    \b
+    Note:
+    -----
+
+    \b
+    - This command only runs doctests and skips everything under tests/
+    - This command only doctests public objects: those which are accessible
+      from the top-level `__init__.py` file.
+
+    """  # noqa: E501
+    try:
+        # prevent obscure error later
+        import scipy_doctest
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError("scipy-doctest not installed") from e
+    if (not pytest_args):
+        pytest_args = ('numpy',)
+
+    if (n_jobs != "1") and ('-n' not in pytest_args):
+        pytest_args = ('-n', str(n_jobs)) + pytest_args
+
+    if verbose:
+        pytest_args = ('-v',) + pytest_args
+
+    # turn doctesting on:
+    doctest_args = (
+        '--doctest-modules',
+        '--doctest-collect=api'
+    )
+
+    pytest_args = pytest_args + doctest_args
+
+    ctx.params['pytest_args'] = pytest_args
+
+    for extra_param in ('n_jobs', 'verbose'):
+        del ctx.params[extra_param]
+
+    ctx.forward(meson.test)
+
+
 # From scipy: benchmarks/benchmarks/common.py
 def _set_mem_rlimit(max_mem=None):
     """
@@ -313,7 +403,7 @@ def _run_asv(cmd):
         '/usr/local/lib/ccache', '/usr/local/lib/f90cache'
     ])
     env = os.environ
-    env['PATH'] = f'EXTRA_PATH:{PATH}'
+    env['PATH'] = f'{EXTRA_PATH}{os.pathsep}{PATH}'
 
     # Control BLAS/LAPACK threads
     env['OPENBLAS_NUM_THREADS'] = '1'
@@ -490,7 +580,7 @@ def bench(ctx, tests, compare, verbose, quick, commits):
 })
 @click.argument("python_args", metavar='', nargs=-1)
 @click.pass_context
-def python(ctx, python_args):
+def python(ctx, python_args, *args, **kwargs):
     """🐍 Launch Python shell with PYTHONPATH set
 
     OPTIONS are passed through directly to Python, e.g.:
@@ -621,16 +711,10 @@ def notes(ctx, version_override):
     )
     # towncrier build --version 2.1 --yes
     cmd = ["towncrier", "build", "--version", version, "--yes"]
-    try:
-        p = util.run(
-            cmd=cmd,
-            sys_exit=False,
-            output=True,
-            encoding="utf-8"
-        )
-    except subprocess.SubprocessError as e:
+    p = util.run(cmd=cmd, sys_exit=False, output=True, encoding="utf-8")
+    if p.returncode != 0:
         raise click.ClickException(
-            f"`towncrier` failed returned {e.returncode} with error `{e.stderr}`"
+            f"`towncrier` failed returned {p.returncode} with error `{p.stderr}`"
         )
     output_path = 
project_config['tool.towncrier.filename'].format(version=version) diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml index cc458723f28f..0baf374e1e3f 100644 --- a/azure-steps-windows.yml +++ b/azure-steps-windows.yml @@ -14,10 +14,7 @@ steps: displayName: 'Install dependencies; some are optional to avoid test skips' - powershell: | - choco install -y --stoponfirstfailure unzip choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite - choco install --stoponfirstfailure ninja - echo "##vso[task.setvariable variable=RTOOLS43_HOME]c:\rtools43" displayName: 'Install utilities' - powershell: | @@ -42,7 +39,7 @@ steps: - powershell: | cd tools # avoid root dir to not pick up source tree # Get a gfortran onto the path for f2py tests - $env:PATH = "$env:RTOOLS43_HOME\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" + $env:PATH = "c:\\rtools43\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" If ( $env:TEST_MODE -eq "full" ) { pytest --pyargs numpy -rsx --junitxml=junit/test-results.xml } else { diff --git a/doc/Makefile b/doc/Makefile index 2f04c7084ce9..eccd40b1adef 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -11,7 +11,7 @@ PYVER:=$(shell python3 -c 'from sys import version_info as v; print("{0}.{1}".fo PYTHON = python$(PYVER) # You can set these variables from the command line. -SPHINXOPTS ?= +SPHINXOPTS ?= -W SPHINXBUILD ?= LANG=C sphinx-build PAPER ?= DOXYGEN ?= doxygen @@ -25,7 +25,7 @@ FILES= # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -WT --keep-going -d build/doctrees $(PAPEROPT_$(PAPER)) \ +ALLSPHINXOPTS = -T --keep-going -d build/doctrees $(PAPEROPT_$(PAPER)) \ $(SPHINXOPTS) source .PHONY: help clean html web htmlhelp latex changes linkcheck \ diff --git a/doc/changelog/2.0.0-changelog.rst b/doc/changelog/2.0.0-changelog.rst new file mode 100644 index 000000000000..78e250f508d9 --- /dev/null +++ b/doc/changelog/2.0.0-changelog.rst @@ -0,0 +1,1304 @@ + +Contributors +============ + +A total of 212 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* @Algorithmist-Girl + +* @DWesl +* @Illviljan +* @Searchingdays +* @ellaella12 + +* @liang3zy22 + +* @matoro + +* @mcp292 + +* @mgunyho + +* @msavinash + +* @mykykh + +* @pojaghi + +* @pratiklp00 + +* @stefan6419846 + +* @undermyumbrella1 + +* Aaron Meurer +* Aditi Saluja + +* Adrin Jalali + +* Agriya Khetarpal + +* Albert Steppi + +* Alex Cabrera + +* Alexander Grund +* Andrea Bianchi + +* Andreas Florath + +* Andrew Ardill + +* Andrew Ho + +* Andrew Nelson +* Andrey Rybakov + +* Ankur Singh + +* Anton Prosekin + +* Antony Lee +* Arun Kannawadi + +* Bas van Beek +* Ben Woodruff + +* Bharat Raghunathan +* Bhavya Alekhya + +* Brandon Smith + +* Brian Walshe + +* Brigitta Sipőcz +* Brock Mendel +* Carl Meyer + +* Charles Bousseau + +* Charles Harris +* Chris Sidebottom +* Christian Lorentzen +* Christian Veenhuis +* Christoph Reiter +* Christopher Sidebottom +* Clément Robert +* Cédric Hannotier +* Cobalt Yang + +* Gonçalo Bárias + +* D.J. 
Ramones +
+* DanShatford +
+* Daniel Li +
+* Daniel Vanzo
+* Daval Parmar
+* Developer-Ecosystem-Engineering
+* Dhruv Rawat +
+* Dimitri Papadopoulos Orfanos
+* Edward E
+* Edward Yang +
+* Eisuke Kawashima +
+* Eliah Kagan +
+* Élie Goudout +
+* Elliott Sales de Andrade
+* Emil Olszewski +
+* Emily Hunt +
+* Éric Piel +
+* Eric Wieser
+* Eric Xie +
+* Even Rouault +
+* Evgeni Burovski
+* Filipe Laíns +
+* Francisco Sousa +
+* Ganesh Kathiresan
+* Gonçalo Bárias +
+* Gonzalo Tornaría +
+* Hans Meine
+* Heberto Mayorquin +
+* Heinz-Alexander Fuetterer +
+* Hood Chatham
+* Hugo van Kemenade
+* Ivan A. Melnikov +
+* Jacob M. Casey +
+* Jake Lishman +
+* Jake VanderPlas
+* James Oliver +
+* Jan Wassenberg +
+* Janukan Sivajeyan +
+* Johann Rohwer +
+* Johannes Kaisinger +
+* John Muradeli +
+* Joris Van den Bossche
+* Justus Magin
+* Jyn Spring 琴春
+* Kai Striega
+* Kevin Sheppard
+* Kevin Wu +
+* Khawaja Junaid +
+* Kit Lee +
+* Kristian Minchev +
+* Kristoffer Pedersen +
+* Kuan-Wei Chiu +
+* Lane Votapka +
+* Larry Bradley
+* Leo Singer
+* Liang Yan +
+* Linus Sommer +
+* Logan Thomas
+* Lucas Colley +
+* Luiz Eduardo Amaral +
+* Lukas Geiger
+* Lysandros Nikolaou +
+* Maanas Arora +
+* Maharshi Basu +
+* Mahder Gebremedhin +
+* Marcel Bargull +
+* Marcel Loose +
+* Mark Mentovai +
+* Mark Ryan +
+* Marten van Kerkwijk
+* Mateusz Sokół
+* Matt Haberland
+* Matt Thompson +
+* Matthew Barber
+* Matthew Thompson +
+* Matthias Bussonnier
+* Matthias Koeppe
+* Matthias Schaufelberger +
+* Matti Picus
+* Maxwell Aladago
+* Maya Anderson +
+* Melissa Weber Mendonça
+* Meng Xiangzhuo +
+* Michael Kiffer
+* Miki Watanabe (渡邉 美希)
+* Milan Curcic +
+* Miles Cranmer
+* Miro Hrončok +
+* Mohamed E. BRIKI +
+* Mohaned Qunaibit +
+* Mohit Kumar +
+* Muhammed Muhsin +
+* Mukulika Pahari
+* Munira Alduraibi +
+* Namami Shanker
+* Nathan Goldbaum
+* Nyakku Shigure +
+* Ola x Nilsson +
+* Olivier Mattelaer +
+* Olivier Grisel
+* Omid Rajaei
+* Pablo Losada +
+* Pamphile Roy
+* Paul Reece +
+* Pedro Kaj Kjellerup Nacht +
+* Peiyuan Liu +
+* Peter Hawkins
+* Pierre
+* Pieter Eendebak
+* Quentin Barthélemy +
+* Raghuveer Devulapalli
+* Ralf Gommers
+* Randy Eckenrode +
+* Raquel Braunschweig +
+* Richard Howe +
+* Robert Kern
+* Rohit Goswami
+* Romain Geissler +
+* Ronald van Elburg +
+* Ross Barnowski
+* Sam James +
+* Sam Van Kooten +
+* Samuel Albanie +
+* Sarah Wang +
+* Sarah Zwiep +
+* Sarah-Yifei-Wang +
+* Sarthak Dawar +
+* Sayantika Banik
+* Sayed Adel
+* Sean Cheah +
+* Sebastian Berg
+* Serge Guelton
+* Shalini Roy +
+* Shen Zhou
+* Shubhal Gupta +
+* Stefan van der Walt
+* Stefano Rivera +
+* Takumasa N. +
+* Taras Tsugrii
+* Thomas A Caswell
+* Thomas Grainger +
+* Thomas Li
+* Tim Hoffmann
+* Tim Paine +
+* Timo Röhling +
+* Trey Woodlief +
+* Tyler Reddy
+* Victor Tang +
+* Vladimir Fokow +
+* Warren Weckesser
+* Warrick Ball +
+* Will Ayd
+* William Andrea +
+* William Ayd +
+* Xiangyi Wang +
+* Yash Pethe +
+* Yuki K
+* Zach Brugh +
+* Zach Rottman +
+* Zolisa Bleki +
+
+Pull requests merged
+====================
+
+A total of 1078 pull requests were merged for this release.
+
+* `#15457 `__: BUG: Adds support for array parameter declaration in fortran...
+* `#21199 `__: ENH: expose datetime.c functions to cython
+* `#21429 `__: ENH: Added ``bitwise_count`` UFuncs
+* `#21760 `__: MAINT: Make output of Polynomial representations consistent
+* `#21975 `__: ENH: Add binding for random pyx files
+* `#22449 `__: ENH: Update scalar representations as per NEP 51
+* `#22657 `__: BUG: Fix common block handling in f2py
+* `#23096 `__: BLD, SIMD: The meson CPU dispatcher implementation
+* `#23282 `__: BUG: Fix data stmt handling for complex values in f2py
+* `#23347 `__: DOC: changed formula in random.Generator.pareto doc #22701
+* `#23351 `__: ENH: Use AVX512-FP16 SVML content for float16 umath functions
+* `#23508 `__: DOC: Update scalar types in ``Py{TYPE}ArrType_Type``
+* `#23537 `__: NEP: add NEP on a Python API cleanup for NumPy 2.0
+* `#23611 `__: DOC: Make input/output type consistent and add more examples...
+* `#23729 `__: ENH: allow int sequences as shape arguments in numpy.memmap
+* `#23762 `__: API: Add .mT attribute for arrays
+* `#23764 `__: CI,TYP: Bump mypy to 1.4.1
+* `#23780 `__: BUG: Create complex scalars from real and imaginary parts
+* `#23785 `__: DOC: tweak NEP 50 examples
+* `#23787 `__: DOC: Add brief note about custom converters to genfromtext.
+* `#23789 `__: ENH: add copy parameter for api.reshape function
+* `#23795 `__: Use tuple instead of string for (LOWER|UPPER)_TABLEs.
+* `#23804 `__: REL: Prepare main for NumPy 2.0.0 development
+* `#23809 `__: MAINT: removing the deprecated submodule
+* `#23810 `__: MAINT: Bump github/codeql-action from 2.3.3 to 2.3.4
+* `#23813 `__: DOC: Clean up errstate handling in our tests
+* `#23814 `__: DOC: switching to use the plot directive
+* `#23817 `__: MAINT: Bump github/codeql-action from 2.3.4 to 2.3.5
+* `#23819 `__: BUG: Doctest doesn't have a SHOW_WARNINGS directive.
+* `#23822 `__: DOC: Added ``pathlib.Path`` where applicable
+* `#23825 `__: BLD: use cython3 for one CI run
+* `#23826 `__: MAINT: io.open → open
+* `#23828 `__: MAINT: fix typos found by codespell
+* `#23830 `__: API: deprecate compat and selected lib utils
+* `#23831 `__: DOC: use float64 instead of float128 in docstring
+* `#23832 `__: REL: Prepare for the NumPy 1.25.0rc1 release
+* `#23834 `__: MAINT: IOError → OSError
+* `#23835 `__: MAINT: Update versioneer: 0.26 → 0.28
+* `#23836 `__: DOC: update distutils migration guide
+* `#23838 `__: BLD: switch to meson-python as the default build backend
+* `#23840 `__: REL: Prepare for the NumPy 1.25.0rc1 release
+* `#23841 `__: MAINT: Bump pypa/cibuildwheel from 2.12.3 to 2.13.0
+* `#23843 `__: MAINT: Update download-wheels
+* `#23845 `__: MAINT: Do not call PyArray_Item_XDECREF in PyArray_Pack
+* `#23846 `__: TST: Add tests for np.argsort
+* `#23847 `__: MAINT: const correctness for the generalized ufunc C API
+* `#23850 `__: MAINT: Bump actions/dependency-review-action from 3.0.4 to 3.0.6
+* `#23851 `__: CI: Update cirrus nightly wheel upload token
+* `#23852 `__: CI: Change "weekly" to "nightly" in cirrus
+* `#23854 `__: DOC:removed examples which refers to a non existent function
+* `#23855 `__: BUG: make use of locals() in a comprehension fully compatible...
+* `#23856 `__: CI: bump nightly upload frequency to twice a week
+* `#23857 `__: BUG: fix cron syntax
+* `#23859 `__: DOC: Note that f2py isn't consiered safe
+* `#23861 `__: MAINT: Remove all "NumPy 2" as that should be main now
+* `#23865 `__: MAINT: Bump github/codeql-action from 2.3.5 to 2.3.6
+* `#23868 `__: DOC: Fix ``NPY_OUT_ARRAY`` to ``NPY_ARRAY_OUT_ARRAY`` in how-to-extend...
+* `#23871 `__: NEP: Fix NEP 53 file format and minor formatting issue
+* `#23878 `__: TST: Add tests for np.argsort
+* `#23881 `__: ENH: Add array API standard v2022.12 support to numpy.array_api
+* `#23887 `__: TYP,DOC: Annotate and document the ``metadata`` parameter of...
+* `#23897 `__: DOC: Fix transpose() description with a correct reference to...
+* `#23898 `__: API: Change string to bool conversions to be consistent with...
+* `#23902 `__: MAINT: Use ``--allow-downgrade`` option for rtools.
+* `#23906 `__: MAINT: Use vectorcall for call forwarding in methods
+* `#23907 `__: MAINT: Bump github/codeql-action from 2.3.6 to 2.13.4
+* `#23908 `__: MAINT: Bump actions/checkout from 3.5.2 to 3.5.3
+* `#23911 `__: BUG: Allow np.info on non-hashable objects with a dtype
+* `#23912 `__: API: Switch to NEP 50 behavior by default
+* `#23913 `__: ENH: let zeros, empty, and empty_like accept dtype classes
+* `#23914 `__: DOC: Fix reference ``ComplexWarning`` in release note
+* `#23915 `__: DOC: Update development_environment doc.
+* `#23916 `__: ABI: Bump C-ABI to 2 but accept older NumPy if compiled against...
+* `#23917 `__: ENH: Speed up boolean indexing of flatiters
+* `#23918 `__: DOC: Fix references to ``AxisError`` in docstrings
+* `#23919 `__: API: Remove interrupt handling and ``noprefix.h``
+* `#23920 `__: DOC: fix DOI on badge
+* `#23921 `__: DEP: Expire the PyDataMem_SetEventHook deprecation and remove...
+* `#23922 `__: API: Remove ``seterrobj``/``geterrobj``/``extobj=`` and related C-API...
+* `#23923 `__: BUG:Fix for call to 'vec_st' is ambiguous
+* `#23924 `__: MAINT: Bump pypa/cibuildwheel from 2.13.0 to 2.13.1
+* `#23925 `__: MAINT: Disable SIMD version of float64 sin and cos
+* `#23927 `__: DOC: Fix references to ``r_`` in ``mr_class`` docstring
+* `#23935 `__: MAINT: Update to latest x86-simd-sort
+* `#23936 `__: ENH,API: Make the errstate/extobj a contextvar
+* `#23941 `__: BUG: Fix NpyIter cleanup in einsum error path
+* `#23942 `__: BUG: Fixup for win64 fwrite issue
+* `#23943 `__: DOC: Update required C++ version in building.rst (and copy-edit).
+* `#23944 `__: DOC: const correctness in PyUFunc_FromFuncAndData... functions
+* `#23950 `__: MAINT: Upgrade install-rtools version
+* `#23952 `__: Replace a divider with a colon for _monotonicity
+* `#23953 `__: BUG: Fix AVX2 intrinsic npyv_store2_till_s64 on MSVC > 19.29
+* `#23960 `__: DOC: adding release note for 23809
+* `#23961 `__: BLD: update pypy in CI to latest version
+* `#23962 `__: TEST: change subprocess call to capture stderr too
+* `#23964 `__: MAINT: Remove references to removed functions
+* `#23965 `__: MAINT: Simplify codespaces conda environment activation
+* `#23967 `__: DOC: Fix references to ``trimseq`` in docstrings
+* `#23969 `__: MAINT: Update main after 1.25.0 release.
+* `#23971 `__: BUG: Fix private procedures in ``f2py`` modules
+* `#23977 `__: MAINT: pipes.quote → shlex.quote
+* `#23979 `__: MAINT: Fix typos found by codespell
+* `#23980 `__: MAINT: use ``yield from`` where applicable
+* `#23982 `__: BLD: Port long double identification to C for meson
+* `#23983 `__: BLD: change file extension for installed static libraries back...
+* `#23984 `__: BLD: improve handling of CBLAS, add ``-Duse-ilp64`` build option
+* `#23985 `__: Revert "TST: disable longdouble string/print tests on Linux aarch64"
+* `#23990 `__: DOC: Fix np.vectorize Doc
+* `#23991 `__: CI: BLD: build wheels and fix test suite for Python 3.12
+* `#23995 `__: MAINT: Do not use ``--side-by-side`` choco option
+* `#23997 `__: MAINT: make naming of C aliases for dtype classes consistent
+* `#23998 `__: DEP: Expire ``set_numeric_ops`` and the corresponding C functions...
+* `#24004 `__: BUG: Fix reduction ``return NULL`` to be ``goto fail``
+* `#24006 `__: ENH: Use high accuracy SVML for double precision umath functions
+* `#24009 `__: DOC: Update __array__ description
+* `#24011 `__: API: Remove ``old_defines.h`` (part of NumPy 1.7 deprecated C-API)
+* `#24012 `__: MAINT: Remove hardcoded f2py numeric/numarray compatibility switch
+* `#24014 `__: BUG: Make errstate decorator compatible with threading
+* `#24017 `__: MAINT: Further cleanups for errstate
+* `#24018 `__: ENH: Use Highway's VQSort on AArch64
+* `#24020 `__: Fix typo in random sampling documentation
+* `#24021 `__: BUG: Fix error message for nanargmin/max of empty sequence
+* `#24025 `__: TST: improve test for Cholesky decomposition
+* `#24026 `__: DOC: Add note for installing ``asv`` library to run benchmark tests
+* `#24027 `__: DOC: Fix reference to ``__array_struct__`` in ``arrays.interface.rst``
+* `#24029 `__: DOC: Add link to NEPs in top navbar
+* `#24030 `__: BUG: Avoid undefined behavior in array.astype()
+* `#24031 `__: BUG: Ensure ``__array_ufunc__`` works without any kwargs passed
+* `#24046 `__: DOC: Fix reference to python module ``string`` in ``routines.char.rst``
+* `#24047 `__: DOC: Fix reference to ``array()`` in release note
+* `#24049 `__: MAINT: Update main after 1.24.4 release.
+* `#24051 `__: MAINT: Pin urllib3 to avoid anaconda-client bug.
+* `#24052 `__: MAINT: Bump ossf/scorecard-action from 2.1.3 to 2.2.0
+* `#24053 `__: ENH: Adopt new macOS Accelerate BLAS/LAPACK Interfaces, including...
+* `#24054 `__: BUG: Multiply or divides using SIMD without a full vector can...
+* `#24058 `__: DOC: Remove references to ``PyArray_SetNumericOps`` and ``PyArray_GetNumericOps`` in release note
+* `#24059 `__: MAINT: Remove ability to enter errstate twice (sequentially)
+* `#24060 `__: BLD: use ``-ftrapping-math`` with Clang on macOS in Meson build
+* `#24061 `__: DOC: PR adds casting option's description to Glossary and ``numpy.concatenate``.
+* `#24068 `__: DOC: Add NpzFile class documentation.
+* `#24071 `__: MAINT: Overwrite previous wheels when uploading to anaconda.
+* `#24073 `__: API: expose PyUFunc_GiveFloatingpointErrors in the dtype API
+* `#24075 `__: DOC: Add missing indentation in ``ma.mT`` docstring
+* `#24076 `__: DOC: Fix incorrect reST markups in ``numpy.void`` docstring
+* `#24077 `__: DOC: Fix documentation for ``ndarray.mT``
+* `#24082 `__: MAINT: testing for IS_MUSL closes #24074
+* `#24083 `__: ENH: Add ``spin`` command ``gdb``; customize ``docs`` and ``test``
+* `#24085 `__: ENH: Replace npy complex structs with native complex types
+* `#24087 `__: NEP: Mark NEP 51 as accepted
+* `#24090 `__: MAINT: print error from verify_c_api_version.py failing
+* `#24092 `__: TST: Pin pydantic<2 in Pyodide workflow
+* `#24094 `__: ENH: Added compiler ``args`` and ``link_args``
+* `#24097 `__: DOC: Add reference to dtype parameter in NDArray
+* `#24098 `__: ENH: raise early exception if 0d array is used in np.cross
+* `#24100 `__: DOC: Clarify correlate function definition
+* `#24101 `__: BUG: Fix empty structured array dtype alignment
+* `#24102 `__: DOC: fix rst formatting in datetime C API docs
+* `#24103 `__: BUG: Only replace dtype temporarily if dimensions changed
+* `#24105 `__: DOC: Correctly use savez_compressed in examples for that function.
+* `#24107 `__: ENH: Add ``spin benchmark`` command
+* `#24112 `__: DOC: Fix warnings and errors caused by reference/c-api/datetimes
+* `#24113 `__: DOC: Fix the reference in the docstring of numpy.meshgrid
+* `#24123 `__: BUG: ``spin gdb``: launch Python directly so that breakpoint...
+* `#24124 `__: MAINT: Bump actions/setup-node from 3.6.0 to 3.7.0
+* `#24125 `__: MAINT: import numpy as ``np`` in ``spin ipython``
+* `#24126 `__: ENH: add mean keyword to std and var
+* `#24130 `__: DOC: Fix warning for PyArray_MapIterNew.
+* `#24133 `__: DOC: Update python as glue doc.
+* `#24135 `__: DOC: Fix string types in ``arrays.dtypes.rst``
+* `#24138 `__: DOC: add NEP 54 on SIMD - moving to C++ and adopting Highway...
+* `#24142 `__: ENH: Allow NEP 42 dtypes to use np.save and np.load
+* `#24143 `__: Corrected a grammatical error in doc/source/user/absolute_beginners.rst
+* `#24144 `__: API: Remove several niche objects for numpy 2.0 python API cleanup
+* `#24149 `__: MAINT: Update main after 1.25.1 release.
+* `#24150 `__: BUG: properly handle negative indexes in ufunc_at fast path
+* `#24152 `__: DOC: Fix reference warning for recarray.
+* `#24153 `__: BLD, TST: refactor test to use meson not setup.py, improve spin...
+* `#24154 `__: API: deprecate undocumented functions
+* `#24158 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from...
+* `#24159 `__: MAINT: Bump pypa/cibuildwheel from 2.13.1 to 2.14.0
+* `#24160 `__: MAINT: Update cibuildwheel to 2.14.0
+* `#24161 `__: BUG: histogram small range robust
+* `#24162 `__: ENH: Improve clang-cl compliance
+* `#24163 `__: MAINT: update pytest, hypothesis, pytest-cov, and pytz in test_requirements.txt
+* `#24172 `__: DOC: Add note that NEP 29 is superseded by SPEC 0
+* `#24173 `__: MAINT: Bump actions/setup-python from 4.6.1 to 4.7.0
+* `#24176 `__: MAINT: do not use copyswap in flatiter internals
+* `#24178 `__: BUG: PyObject_IsTrue and PyObject_Not error handling in setflags
+* `#24187 `__: BUG: Fix the signature for np.array_api.take
+* `#24188 `__: BUG: fix choose refcount leak
+* `#24191 `__: BUG: array2string does not add signs for positive integers. Fixes...
+* `#24193 `__: DEP: Remove datetime64 deprecation warning when constructing...
+* `#24196 `__: MAINT: Remove versioneer
+* `#24199 `__: BLD: update OpenBLAS to an intermediate commit
+* `#24201 `__: ENH: Vectorize np.partition and np.argpartition using AVX-512
+* `#24202 `__: MAINT: Bump pypa/cibuildwheel from 2.14.0 to 2.14.1
+* `#24204 `__: BUG: random: Fix check for both uniform variates being 0 in random_beta()
+* `#24205 `__: MAINT: Fix new or residual typos found by codespell
+* `#24206 `__: TST: convert remaining setup.py tests to meson instead
+* `#24208 `__: CI: Add a sanitizer CI job
+* `#24211 `__: BUG: Fix reference count leak in str(scalar).
+* `#24212 `__: BUG: fix invalid function pointer conversion error
+* `#24214 `__: ENH: Create helper for conversion to arrays
+* `#24219 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from...
+* `#24220 `__: BUG: random: Fix generation of nan by dirichlet.
+* `#24222 `__: BUG: Fix cblas detection for the wheel builds
+* `#24223 `__: BUG: Fix undefined behavior in complex pow().
+* `#24224 `__: API: Make 64bit default integer on 64bit windows
+* `#24225 `__: DOC: Fix doc build warning for random.
+* `#24227 `__: DOC: Update year in doc/source/conf.py to 2023
+* `#24228 `__: DOC: fix some double includes in f2py.getting-started.rst
+* `#24231 `__: API: expose NPY_DTYPE macro in the dtype API
+* `#24235 `__: BLD: only install the ``f2py`` command, not ``f2py3`` or ``f2py3.X``
+* `#24236 `__: BLD: update requirements to use cython>3.0
+* `#24237 `__: BUG: Added missing PyObject_IsTrue error check (return -1) #24177
+* `#24238 `__: BLD/CI: re-enable ILP64 usage and PyPy job in Azure
+* `#24240 `__: BUG: Fix C types in scalartypes
+* `#24248 `__: BUG: Factor out slow ``getenv`` call used for memory policy warning
+* `#24249 `__: TST: enable test that checks for ``numpy.array_api`` entry point
+* `#24250 `__: CI: Test NumPy against OpenBLAS weekly builds
+* `#24254 `__: ENH: add weighted quantile for inverted_cdf
+* `#24256 `__: DEV: Use ``exec_lines`` and not profile dir for ``spin ipython``
+* `#24257 `__: BUG: Add size check for threaded array assignment
+* `#24258 `__: DEP: Remove PyArray complex macros and move PyArray_MIN/MAX
+* `#24262 `__: DOC: Fix links to random.Generator methods in quickstart
+* `#24263 `__: BUG: Fix use of renamed variable.
+* `#24267 `__: BUG: random: Fix generation of nan by beta.
+* `#24268 `__: CI: Enable running intel_spr_sde_test with Intel SDE
+* `#24270 `__: BUG: Move legacy check for void printing
+* `#24271 `__: API: Remove legacy-inner-loop-selector
+* `#24272 `__: BUG: do not modify the input to ufunc_at
+* `#24273 `__: TYP: Trim down the ``_NestedSequence.__getitem__`` signature
+* `#24276 `__: DOC: Remove ``np.source`` and ``np.lookfor``
+* `#24277 `__: DOC: inconsistency between doc and code
+* `#24278 `__: DOC: fix a couple typos and rst formatting errors in NEP 0053
+* `#24279 `__: CI/BLD: fail by default if no BLAS/LAPACK, add 32-bit Python...
+* `#24281 `__: BUG: Further fixes to indexing loop and added tests
+* `#24285 `__: CI: correct URL in cirrus.star
+* `#24286 `__: CI: only build cirrus wheels when requested
+* `#24287 `__: DOC: Fix some incorrectly formatted documents
+* `#24289 `__: DOC: update code comment about ``NPY_USE_BLAS_ILP64`` environment...
+* `#24291 `__: CI: improve test suite runtime via pytest parallelism and disabling...
+* `#24298 `__: DOC: update stride reference doc.
+* `#24299 `__: BUG: Fix assumed length f2py regression
+* `#24303 `__: CI: apt update before apt install on cirrus
+* `#24304 `__: MAINT: Update main after 1.25.2 release.
+* `#24307 `__: CI: Cannot run ``intel_spr_sde_test`` on Intel SDE
+* `#24311 `__: BLD: update openblas to newer version
+* `#24312 `__: DEP: Finalize ``fastCopyAndTranpose`` and other old C-funcs/members...
+* `#24315 `__: DOC: Fix some links in documents
+* `#24316 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 1...
+* `#24320 `__: DOC: Remove promoting twitter in heading
+* `#24321 `__: DEP: Remove deprecated numpy.who
+* `#24331 `__: DOC: Fix reference warning for buffer.
+* `#24332 `__: DOC: Refactor description of ``PyArray_FromAny/PyArray_CheckFromAny``
+* `#24346 `__: DOC: use nightly dependencies [skip actions] [azp skip] [skip...
+* `#24347 `__: DOC: Update to release upcoming change document
+* `#24349 `__: BUG: polynomial: Handle non-array inputs in polynomial class...
+* `#24354 `__: TST: fix distutils tests for deprecations in recent setuptools...
+* `#24357 `__: API: Cleaning numpy/__init__.py and main namespace - Part 2 [NEP...
+* `#24358 `__: BUG: flexible inheritance segfault
+* `#24360 `__: BENCH: fix small array det benchmark
+* `#24362 `__: DOC: Add release notes for complex types changes in 2.x
+* `#24364 `__: BUG: Remove #undef complex from npy_common.h
+* `#24369 `__: ENH: assert_array_less should report max violations instead of...
+* `#24370 `__: BLD: Clean up build for complex
+* `#24371 `__: MAINT: Fix codespaces setup.sh script
+* `#24372 `__: MAINT: Bump pypa/cibuildwheel from 2.14.1 to 2.15.0
+* `#24373 `__: MAINT: Bump actions/dependency-review-action from 3.0.6 to 3.0.7
+* `#24374 `__: MAINT: Update cibuildwheel for cirrus builds
+* `#24376 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 3...
+* `#24379 `__: ENH: Vendor meson for multi-target build support
+* `#24380 `__: DOC: Remove extra indents in documents
+* `#24383 `__: DOC: Fix reference warning for ABCPolyBase.
+* `#24393 `__: DOC: Add missing sphinx reference roles
+* `#24396 `__: BLD: vendor meson-python to make the Windows builds with SIMD...
+* `#24400 `__: TST: revert xfail in ``test_umath.py``
+* `#24402 `__: DOC: Fix reference warning for routines.polynomials.rst.
+* `#24407 `__: DOC: add warning to ``allclose``, revise "Notes" in ``isclose``
+* `#24412 `__: [BUG] Return value of use_hugepage in hugepage_setup
+* `#24413 `__: BUG: cleanup warnings [skip azp][skip circle][skip travis][skip...
+* `#24414 `__: BLD: allow specifying the long double format to avoid the runtime...
+* `#24415 `__: MAINT: Bump actions/setup-node from 3.7.0 to 3.8.0
+* `#24419 `__: CI/BUG: add Python 3.12 CI job and fix ``numpy.distutils`` AttributeError
+* `#24420 `__: ENH: Introduce tracer for enabled CPU targets on each optimized...
+* `#24421 `__: DOC: Remove mixed capitalization
+* `#24422 `__: MAINT: Remove unused variable ``i``
+* `#24423 `__: MAINT: Bump actions/dependency-review-action from 3.0.7 to 3.0.8
+* `#24425 `__: CI: only run cirrus on commit to PR [skip actions]
+* `#24427 `__: MAINT: revert adding ``distutils`` and ``array_api`` to ``np.__all__``
+* `#24434 `__: DOC: Fix reference warning for types-and-structures.rst.
+* `#24435 `__: CI: cirrus run linux_aarch64 first
+* `#24437 `__: MAINT: Bump actions/setup-node from 3.8.0 to 3.8.1
+* `#24439 `__: MAINT: Pin upper version of sphinx.
+* `#24442 `__: DOC: Fix reference warning in Arrayterator and recfunctions.
+* `#24445 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 4... +* `#24452 `__: ENH: Add prefix to _ALIGN Macro +* `#24457 `__: MAINT: Upgrade to spin 0.5 +* `#24461 `__: MAINT: Refactor partial load workaround for Clang +* `#24463 `__: MAINT: Fix broken link in runtests.py +* `#24468 `__: BUG: Fix meson build failure due to unchanged inplace auto-generated... +* `#24469 `__: DEP: Replace deprecation warning for non-integral arguments in... +* `#24471 `__: DOC: Fix some incorrect markups +* `#24473 `__: MAINT: Improve docstring and performance of trimseq +* `#24476 `__: MAINT: Move ``RankWarning`` to exceptions module +* `#24477 `__: MAINT: Remove deprecated functions [NEP 52] +* `#24479 `__: CI: Implements Cross-Compile Builds for armhf, ppc64le, and s390x +* `#24481 `__: DOC: Rm np.who from autosummary. +* `#24483 `__: NEP: add NEP 55 for a variable width string dtype +* `#24484 `__: BUG: fix NPY_cast_info error handling in choose +* `#24485 `__: DOC: Fix some broken links +* `#24486 `__: BUG: ``asv dev`` has been removed, use ``asv run`` instead. +* `#24487 `__: DOC: Fix reference warning in some rst and code files. +* `#24488 `__: MAINT: Stop testing on ppc64le. +* `#24493 `__: CI: GitHub Actions CI job restructuring +* `#24494 `__: API: Remove deprecated ``msort`` function +* `#24498 `__: MAINT: Re-write 16-bit qsort dispatch +* `#24504 `__: DOC: Remove extra indents in docstrings +* `#24505 `__: DOC: Fix mentions in ``isin`` docs +* `#24510 `__: DOC: Add missing changelogs for NEP 52 PRs +* `#24511 `__: BUG: Use a default assignment for git_hash [skip ci] +* `#24513 `__: API: Update ``lib.histograms`` namespace +* `#24515 `__: BUG: fix issue with git-version script, needs a shebang to run +* `#24516 `__: DOC: unpin sphinx +* `#24517 `__: MAINT: Harmonize fortranobject, drop C99 style for loop +* `#24518 `__: MAINT: Add expiration notes for NumPy 2.0 removals +* `#24519 `__: MAINT: remove ``setup.py`` and other files for distutils builds +* `#24520 `__: CI: remove obsolete jobs, and move macOS and conda Azure jobs... +* `#24523 `__: CI: switch the Cygwin job to Meson +* `#24527 `__: TYP: add kind argument to numpy.isin type specification +* `#24528 `__: MAINT: Bump actions/checkout from 3.5.3 to 3.6.0 +* `#24532 `__: ENH: ``meson`` backend for ``f2py`` +* `#24535 `__: CI: remove spurious wheel build action runs +* `#24536 `__: API: Update ``lib.nanfunctions`` namespace +* `#24537 `__: API: Update ``lib.type_check`` namespace +* `#24538 `__: API: Update ``lib.function_base`` namespace +* `#24539 `__: CI: fix CircleCI job for move to Meson +* `#24540 `__: API: Add ``lib.array_utils`` namespace +* `#24543 `__: DOC: re-pin sphinx<7.2 +* `#24547 `__: DOC: Cleanup removed objects +* `#24549 `__: DOC: fix typos in percentile documentation +* `#24551 `__: Update .mailmap 2 +* `#24555 `__: BUG, ENH: Fix ``iso_c_binding`` type maps and fix ``bind(c)``... +* `#24556 `__: BUG: fix comparisons between masked and unmasked structured arrays +* `#24559 `__: BUG: ensure nomask in comparison result is not broadcast +* `#24560 `__: CI/BENCH: move more jobs to Meson and fix all broken benchmarks +* `#24562 `__: DOC: Fix typos +* `#24564 `__: API: Readd ``add_docstring`` and ``add_newdoc`` to ``np.lib`` +* `#24566 `__: API: Update ``lib.shape_base`` namespace +* `#24567 `__: API: Update ``arraypad``,``arraysetops``, ``ufunclike`` and ``utils``... +* `#24570 `__: CI: Exclude import libraries from list of DLLs on Cygwin. 
+* `#24571 `__: MAINT: Add tests for Polynomial with fractions.Fraction coefficients +* `#24573 `__: DOC: Update building docs to use Meson +* `#24577 `__: API: Update ``lib.twodim_base`` namespace +* `#24578 `__: API: Update ``lib.polynomial`` and ``lib.npyio`` namespaces +* `#24579 `__: DOC: fix ``import mat`` warning. +* `#24580 `__: API: Update ``lib.stride_tricks`` namespace +* `#24581 `__: API: Update ``lib.index_tricks`` namespace +* `#24582 `__: DOC: fix typos in ndarray.setflags doc +* `#24584 `__: BLD: fix ``_umath_linalg`` dependencies +* `#24587 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 5... +* `#24589 `__: NEP: fix typos and formatting in NEP 55 +* `#24596 `__: BUG: Fix hash of user-defined dtype +* `#24598 `__: DOC: fix two misspellings in documentation +* `#24599 `__: DOC: unpin sphinx to pick up 7.2.5 +* `#24600 `__: DOC: wrong name in docs +* `#24601 `__: BLD: meson-cpu: fix SIMD support on platforms with no features +* `#24605 `__: DOC: fix isreal docstring (complex -> imaginary) +* `#24607 `__: DOC: Fix import find_common_type warning[skip actions][skip cirrus][s… +* `#24610 `__: MAINT: Avoid creating an intermediate array in np.quantile +* `#24611 `__: TYP: Add the missing ``casting`` keyword to ``np.clip`` +* `#24612 `__: DOC: Replace "cube cube-root" with "cube root" in cbrt docstring +* `#24618 `__: DOC: Fix markups for code blocks +* `#24620 `__: DOC: Update NEP 52 file +* `#24623 `__: TYP: Explicitly declare ``dtype`` and ``generic`` as hashable +* `#24625 `__: CI: Switch SIMD tests to meson +* `#24626 `__: DOC: add release notes link to PyPI. +* `#24628 `__: TYP: Allow ``binary_repr`` to accept any object implementing... +* `#24631 `__: DOC: Clarify usage of --include-paths as an f2py CLI argument +* `#24634 `__: API: Rename ``numpy/core`` to ``numpy/_core`` [NEP 52] +* `#24635 `__: ENH: Refactor the typing "reveal" tests using ``typing.assert_type`` +* `#24636 `__: MAINT: Bump actions/checkout from 3.6.0 to 4.0.0 +* `#24643 `__: TYP, MAINT: General type annotation maintenance +* `#24644 `__: MAINT: remove the ``oldnumeric.h`` header +* `#24657 `__: Add read-only token to linux_qemu.yml +* `#24658 `__: BUG, ENH: Access ``PyArrayMultiIterObject`` fields using macros. +* `#24663 `__: ENH: optimisation of array_equal +* `#24664 `__: BLD: fix bug in random.mtrand extension, don't link libnpyrandom +* `#24666 `__: MAINT: Bump actions/upload-artifact from 3.1.2 to 3.1.3 +* `#24667 `__: DOC: TEST.rst: add example with ``pytest.mark.parametrize`` +* `#24671 `__: BLD: build wheels for 32-bit Python on Windows, using MSVC +* `#24672 `__: MAINT: Bump actions/dependency-review-action from 3.0.8 to 3.1.0 +* `#24674 `__: DOC: Remove extra indents in documents +* `#24677 `__: DOC: improve the docstring's examples for np.searchsorted +* `#24679 `__: MAINT: Refactor of ``numpy/core/_type_aliases.py`` +* `#24680 `__: ENH: add parameter ``strict`` to ``assert_allclose`` +* `#24681 `__: BUG: Fix weak promotion with some mixed float/int dtypes +* `#24682 `__: API: Remove ``ptp``, ``itemset`` and ``newbyteorder`` from ``np.ndarray``... 
+* `#24690 `__: DOC: Fix reference warning in some rst files +* `#24691 `__: ENH: Add the Array Iterator API to Cython +* `#24693 `__: DOC: NumPy 2.0 migration guide +* `#24695 `__: CI: enable use of Cirrus CI compute credits by collaborators +* `#24696 `__: DOC: Updated the f2py docs to remove a note on ``-fimplicit-none`` +* `#24697 `__: API: Readd ``sctypeDict`` to the main namespace +* `#24698 `__: BLD: fix issue with compiler selection during cross compilation +* `#24702 `__: DOC: Fix typos +* `#24705 `__: TYP: Add annotations for the py3.12 buffer protocol +* `#24710 `__: BUG: Fix np.quantile([0, 1], 0, method='weibull') +* `#24711 `__: BUG: Fix np.quantile([Fraction(2,1)], 0.5) +* `#24714 `__: DOC: Update asarray docstring to use shares_memory +* `#24715 `__: DOC: Fix trailing backticks characters. +* `#24716 `__: CI: do apt update before apt install +* `#24717 `__: MAINT: remove relaxed strides debug build setting +* `#24721 `__: DOC: Doc fixes and updates. +* `#24725 `__: MAINT: Update main after 1.26.0 release. +* `#24733 `__: BLD, BUG: Fix build failure for host flags e.g. ``-march=native``... +* `#24735 `__: MAINT: Update RELEASE_WALKTHROUGH +* `#24740 `__: MAINT: Bump pypa/cibuildwheel from 2.15.0 to 2.16.0 +* `#24741 `__: MAINT: Remove cibuildwheel pin in cirrus_wheels +* `#24745 `__: ENH: Change default values in polynomial package +* `#24752 `__: DOC: Fix reference warning in some rst files +* `#24753 `__: BLD: add libquadmath to licences and other tweaks +* `#24758 `__: ENH: fix printing structured dtypes with a non-legacy dtype member +* `#24762 `__: BUG: Fix order of Windows OS detection macros. +* `#24766 `__: DOC: add a note on the ``.c.src`` format to the distutils migration... +* `#24770 `__: ENH: add parameter ``strict`` to ``assert_equal`` +* `#24772 `__: MAINT: align test_dispatcher s390x targets with _umath_tests_mtargets +* `#24775 `__: ENH: add parameter ``strict`` to ``assert_array_less`` +* `#24777 `__: BUG: ``numpy.array_api``: fix ``linalg.cholesky`` upper decomp... +* `#24778 `__: BUG: Fix DATA statements for f2py +* `#24780 `__: DOC: Replace http:// by https:// +* `#24781 `__: MAINT, DOC: fix typos found by codespell +* `#24787 `__: DOC: Closes issue #24730, 'sigma' to 'signum' in piecewise example +* `#24791 `__: BUG: Fix f2py to enable use of string optional inout argument +* `#24792 `__: TYP,DOC: Document the ``np.number`` parameter type as invariant +* `#24793 `__: MAINT: fix licence path win +* `#24795 `__: MAINT : fix spelling mistake for "imaginary" param in _read closes... 
+* `#24798 `__: MAINT: Bump actions/checkout from 4.0.0 to 4.1.0 +* `#24799 `__: MAINT: Bump maxim-lobanov/setup-xcode from 1.5.1 to 1.6.0 +* `#24802 `__: BLD: updated vendored-meson/meson for mips64 fix +* `#24805 `__: DOC: Fix reference warning in some rst files +* `#24806 `__: BUG: Fix build on ppc64 when the baseline set to Power9 or higher +* `#24807 `__: API: Remove zero names from dtype aliases +* `#24811 `__: DOC: explain why we avoid string.ascii_letters +* `#24812 `__: MAINT: Bump pypa/cibuildwheel from 2.16.0 to 2.16.1 +* `#24816 `__: MAINT: Upgrade to spin 0.7 +* `#24817 `__: DOC: Fix markups for emphasis +* `#24818 `__: API: deprecate size-2 inputs for ``np.cross`` [Array API] +* `#24820 `__: MAINT: remove ``wheel`` as a build dependency +* `#24825 `__: DOC: Fix docstring of matrix class +* `#24828 `__: BUG, SIMD: use scalar cmul on bad Apple clang x86_64 +* `#24834 `__: DOC: Update debugging section +* `#24835 `__: ENH: Add ufunc for np.char.isalpha +* `#24839 `__: BLD: use scipy-openblas wheel +* `#24845 `__: MAINT: Bump actions/setup-python from 4.7.0 to 4.7.1 +* `#24847 `__: DOC: Fix reference warning in some rst files +* `#24848 `__: DOC: TESTS.rst: suggest np.testing assertion function strict=True +* `#24854 `__: MAINT: Remove 'a' dtype alias +* `#24858 `__: ENH: Extend np.add ufunc to work with unicode and byte dtypes +* `#24860 `__: MAINT: Bump pypa/cibuildwheel from 2.16.1 to 2.16.2 +* `#24864 `__: MAINT: Xfail test failing on PyPy. +* `#24866 `__: API: Add ``NumpyUnpickler`` +* `#24867 `__: DOC: Update types table +* `#24868 `__: ENH: Add find/rfind ufuncs for unicode and byte dtypes +* `#24869 `__: BUG: Fix ma.convolve if propagate_mask=False +* `#24875 `__: DOC: testing.assert_array_equal: distinguish from assert_equal +* `#24876 `__: BLD: fix math func feature checks, fix FreeBSD build, add CI... +* `#24877 `__: ENH: testing: argument ``err_msg`` of assertion functions can be... +* `#24878 `__: ENH: isclose/allclose: support array_like ``atol``/``rtol`` +* `#24880 `__: BUG: Fix memory leak in timsort's buffer resizing +* `#24883 `__: BLD: fix "Failed to guess install tag" in meson-log.txt, add... +* `#24884 `__: DOC: replace 'a' dtype with 'S' in format_parser docs +* `#24886 `__: DOC: Fix eigenvector typo in linalg.py docs +* `#24887 `__: API: Add ``diagonal`` and ``trace`` to ``numpy.linalg`` [Array API] +* `#24888 `__: API: Make ``intp`` ``ssize_t`` and introduce characters nN +* `#24891 `__: MAINT: Bump ossf/scorecard-action from 2.2.0 to 2.3.0 +* `#24893 `__: ENH: meson: implement BLAS/LAPACK auto-detection and many CI... +* `#24896 `__: API: Add missing deprecation and release note files +* `#24901 `__: MAINT: Bump actions/setup-python from 4.7.0 to 4.7.1 +* `#24904 `__: BUG: loongarch doesn't use REAL(10) +* `#24910 `__: BENCH: Fix benchmark bug leading to failures +* `#24913 `__: DOC: fix typos +* `#24915 `__: API: Allow comparisons with and between any python integers +* `#24920 `__: MAINT: Reenable PyPy wheel builds. +* `#24922 `__: API: Add ``np.long`` and ``np.ulong`` +* `#24923 `__: ENH: Add Cython enumeration for NPY_FR_GENERIC +* `#24925 `__: DOC: Fix parameter markups in ``c-api/ufunc.rst`` +* `#24927 `__: DOC: how-to-io.rst: document solution for NumPy JSON serialization +* `#24930 `__: MAINT: Update main after 1.26.1 release. 
+* `#24931 `__: ENH: testing: consistent names for actual and desired results +* `#24935 `__: DOC: Update lexsort docstring for axis kwargs +* `#24938 `__: DOC: Add warning about ill-conditioning to linalg.inv docstring +* `#24939 `__: DOC: Add legacy directive to mark outdated objects +* `#24940 `__: API: Add ``svdvals`` to ``numpy.linalg`` [Array API] +* `#24941 `__: MAINT: Bump actions/checkout from 4.1.0 to 4.1.1 +* `#24943 `__: MAINT: don't warn for symbols needed by import_array() +* `#24945 `__: MAINT: Make ``numpy.fft.helper`` private +* `#24946 `__: MAINT: Make ``numpy.linalg.linalg`` private +* `#24947 `__: ENH: Add startswith & endswith ufuncs for unicode and bytes dtypes +* `#24949 `__: API: Enforce ABI version and print info when compiled against... +* `#24950 `__: TEST: Add test for checking functions' one location rule +* `#24951 `__: ENH: Add isdigit/isspace/isdecimal/isnumeric ufuncs for string... +* `#24953 `__: DOC: Indicate shape param of ndarray.reshape is position-only +* `#24958 `__: MAINT: Remove unhelpful error replacements from ``import_array()`` +* `#24959 `__: MAINT: Python API cleanup nitpicks +* `#24967 `__: BLD: use classic linker on macOS, the new one in XCode 15 has... +* `#24968 `__: BLD: mingw-w64 build fixes +* `#24969 `__: MAINT: fix a few issues with CPython main/3.13.0a1 +* `#24970 `__: BLD: Use the correct Python interpreter when running tempita.py +* `#24975 `__: DOC: correct Logo SVG files rendered in dark by Figma +* `#24978 `__: MAINT: testing: rename parameters x/y to actual/desired +* `#24979 `__: BLD: clean up incorrect-but-hardcoded define for ``strtold_l``... +* `#24980 `__: BLD: remove ``NPY_USE_BLAS_ILP64`` environment variable [wheel... +* `#24981 `__: DOC: revisions to "absolute beginners" tutorial +* `#24983 `__: ENH: Added a ``lint`` spin command +* `#24984 `__: DOC: fix reference in user/basics.rec.html#record-arrays +* `#24985 `__: MAINT: Disable warnings for items imported by pybind11 +* `#24986 `__: ENH: Added ``changelog`` spin command +* `#24988 `__: ENH: DType API slot for descriptor finalization before array... +* `#24990 `__: MAINT: Bump ossf/scorecard-action from 2.3.0 to 2.3.1 +* `#24991 `__: DOC: add note to default_rng about requiring non-negative seed +* `#24993 `__: BLD: musllinux_aarch64 [wheel build] +* `#24995 `__: DOC: update vectorize docstring for proper rendering of decorator... +* `#24996 `__: DOC: Clarify a point in basic indexing user guide +* `#24997 `__: DOC: Use ``spin`` to generate changelog +* `#25001 `__: DOC: Visually divide main license and bundled licenses in wheels +* `#25005 `__: MAINT: remove LGTM.com configuration file +* `#25006 `__: DOC: update ndarray.item docstring +* `#25008 `__: BLD: unvendor meson-python +* `#25010 `__: MAINT: test-refactor of ``numpy/_core/numeric.py`` +* `#25016 `__: DOC: standardize capitalization of headings +* `#25017 `__: ENH: Added ``notes`` command for spin +* `#25019 `__: Update .mailmap +* `#25022 `__: TYP: add None to ``__getitem__`` in ``numpy.array_api`` +* `#25029 `__: DOC: "What is NumPy?" 
section of the documentation +* `#25030 `__: DOC: Include ``np.long`` in ``arrays.scalars.rst`` +* `#25032 `__: MAINT: Add missing ``noexcept`` to shuffle helpers +* `#25037 `__: MAINT: Unpin urllib3 for anaconda-client install +* `#25039 `__: MAINT: Adjust typing for readded ``np.long`` +* `#25040 `__: BLD: make macOS version check for Accelerate NEWLAPACK more robust +* `#25042 `__: BUG: ensure passing ``np.dtype`` to itself doesn't crash +* `#25045 `__: ENH: Vectorize np.sort and np.partition with AVX2 +* `#25050 `__: TST: Ensure test is not run on 32bit platforms +* `#25051 `__: MAINT: Make bitfield integers unsigned +* `#25054 `__: API: Introduce ``np.isdtype`` function [Array API] +* `#25055 `__: BLD: improve detection of Netlib libblas/libcblas/liblapack +* `#25056 `__: DOC: Small fixes for NEP 52 +* `#25057 `__: MAINT: Add ``npy_2_compat.h`` which is designed to work also if... +* `#25059 `__: MAINT: ``np.long`` typing nitpick +* `#25060 `__: DOC: standardize capitalization of NEP headings +* `#25062 `__: ENH: Change add/isalpha ufuncs to use buffer class & general... +* `#25063 `__: BLD: change default of the ``allow-noblas`` option to true +* `#25064 `__: DOC: Fix description of auto bin_width +* `#25067 `__: DOC: add missing word to internals.rst +* `#25068 `__: TST: skip flaky test in test_histogram +* `#25072 `__: MAINT: default to C11 rather than C99, fix most build warnings... +* `#25073 `__: BLD,BUG: quadmath required where available [f2py] +* `#25078 `__: BUG: alpha doesn't use REAL(10) +* `#25079 `__: API: Introduce ``np.astype`` [Array API] +* `#25080 `__: API: Add and redefine ``numpy.bool`` [Array API] +* `#25081 `__: DOC: Provide migration notes for scalar inspection functions +* `#25082 `__: MAINT: Bump actions/dependency-review-action from 3.1.0 to 3.1.1 +* `#25085 `__: BLD: limit scipy-openblas32 wheel to 0.3.23.293.2 +* `#25086 `__: API: Add Array API aliases (math, bitwise, linalg, misc) [Array... +* `#25088 `__: API: Add Array API setops [Array API] +* `#25089 `__: BUG, BLD: Fixed VSX4 feature check +* `#25090 `__: BUG: Make n a long int for np.random.multinomial +* `#25091 `__: MAINT: Bump actions/dependency-review-action from 3.1.1 to 3.1.2 +* `#25092 `__: BLD: Fix features.h detection and blocklist complex trig funcs... +* `#25094 `__: BUG: Avoid intp conversion regression in Cython 3 +* `#25099 `__: DOC: Fix license identifier for OpenBLAS +* `#25101 `__: API: Add ``outer`` to ``numpy.linalg`` [Array API] +* `#25102 `__: MAINT: Print towncrier output file location +* `#25104 `__: ENH: Add str_len & count ufuncs for unicode and bytes dtypes +* `#25105 `__: API: Remove ``__array_prepare__`` +* `#25111 `__: TST: Use ``meson`` for testing ``f2py`` +* `#25123 `__: MAINT,BUG: Never import distutils above 3.12 [f2py] +* `#25124 `__: DOC: ``f2py`` CLI documentation enhancements +* `#25127 `__: DOC: angle: update documentation of convention when magnitude... +* `#25129 `__: BUG: Fix FP overflow error in division when the divisor is scalar +* `#25131 `__: MAINT: Update main after 1.26.2 release. +* `#25133 `__: DOC: std/var: improve documentation of ``ddof`` +* `#25136 `__: BUG: Fix -fsanitize=alignment issue in numpy/_core/src/multiarray/arraytypes.c.src +* `#25138 `__: API: Remove The MapIter API from public +* `#25139 `__: MAINT: Bump actions/dependency-review-action from 3.1.2 to 3.1.3 +* `#25140 `__: DOC: clarify boolean index error message +* `#25141 `__: TST: Explicitly pass NumPy path to cython during tests (also... 
+* `#25144 `__: DOC: Fix typo in NumPy 2.0 migration guide +* `#25145 `__: API: Add ``cross`` to ``numpy.linalg`` [Array API] +* `#25146 `__: BUG: fix issues with ``newaxis`` and ``linalg.solve`` in ``numpy.array_api`` +* `#25149 `__: API: bump MAXDIMS/MAXARGS to 64 introduce NPY_AXIS_RAVEL +* `#25151 `__: BLD, CI: revert pinning scipy-openblas +* `#25152 `__: ENH: Add strip/lstrip/rstrip ufuncs for unicode and bytes +* `#25154 `__: MAINT: Cleanup mapiter struct a bit +* `#25155 `__: API: Add ``matrix_norm``, ``vector_norm``, ``vecdot`` and ``matrix_transpose`` [Array API] +* `#25156 `__: API: Remove PyArray_REFCNT and NPY_REFCOUNT +* `#25157 `__: DOC: ``np.sort`` doc fix contiguous axis +* `#25158 `__: API: Make ``encoding=None`` the default in loadtxt +* `#25160 `__: BUG: Fix moving compiled executable to root with f2py -c on Windows +* `#25161 `__: API: Remove ``PyArray_GetCastFunc`` and any guarantee that ``->castfuncs``... +* `#25162 `__: NEP: Update NEP 55 +* `#25165 `__: DOC: mention submodule init in source install instructions +* `#25167 `__: MAINT: Add ``array-api-tests`` CI stage, add ``ndarray.__array_namespace__`` +* `#25168 `__: API: Introduce ``copy`` argument for ``np.asarray`` [Array API] +* `#25169 `__: API: Introduce ``correction`` argument for ``np.var`` and ``np.std``... +* `#25171 `__: ENH: Add replace ufunc for bytes and unicode dtypes +* `#25176 `__: DOC: replace integer overflow example +* `#25181 `__: BUG: Disallow shadowed modulenames +* `#25184 `__: MAINT,DOC: Fix inline licenses ``f2py`` +* `#25185 `__: MAINT: Fix sneaky typo [f2py] +* `#25186 `__: BUG: Handle ``common`` blocks with ``kind`` specifications from modules +* `#25193 `__: MAINT: Kill all instances of f2py.compile +* `#25194 `__: DOC: try to be nicer about f2py.compile +* `#25195 `__: BUG: Fix single to half-precision conversion on PPC64/VSX3 +* `#25196 `__: DOC: ``f2py`` rewrite with ``meson`` details +* `#25198 `__: MAINT: Replace deprecated ctypes.ARRAY(item_type, size) with... +* `#25209 `__: ENH: Expose abstract DType classes in the experimental DType... +* `#25212 `__: BUG: Don't try to grab callback modules +* `#25221 `__: TST: f2py: fix issue in test skip condition +* `#25222 `__: DOC: Fix wrong return type for PyArray_CastScalarToCType +* `#25223 `__: MAINT: Bump mymindstorm/setup-emsdk from 12 to 13 +* `#25226 `__: BUG: Handle ``iso_c_type`` mappings more consistently +* `#25228 `__: DOC: Improve description of ``axis`` parameter for ``np.median`` +* `#25230 `__: BUG: Raise error in ``np.einsum_path`` when output subscript is... +* `#25232 `__: DEV: Enable the ``spin lldb`` +* `#25233 `__: API: Add ``device`` and ``to_device`` to ``numpy.ndarray`` [Array... +* `#25238 `__: MAINT: do not use ``long`` type +* `#25243 `__: BUG: Fix non-contiguous 32-bit memory load when ARM/Neon is enabled +* `#25246 `__: CI: Add CI test for riscv64 +* `#25247 `__: ENH: Enable SVE detection for Highway VQSort +* `#25248 `__: DOC: Add release note for Highway VQSort on AArch64 +* `#25250 `__: DOC: fix typo (alignment) +* `#25253 `__: CI: streamline macos_arm64 test +* `#25254 `__: BUG: mips doesn't use REAL(10) +* `#25255 `__: ENH: add new wheel builds using Accelerate on macOS >=14 +* `#25257 `__: TST: PyPy needs another gc.collect on latest versions +* `#25259 `__: BUG: Fix output dtype when calling np.char methods with empty... 
+* `#25261 `__: MAINT: Bump conda-incubator/setup-miniconda from 2.2.0 to 3.0.0 +* `#25264 `__: MAINT: Bump actions/dependency-review-action from 3.1.3 to 3.1.4 +* `#25267 `__: BUG: Fix module name bug in signature files [urgent] [f2py] +* `#25271 `__: API: Shrink MultiIterObject and make ``NPY_MAXARGS`` a runtime... +* `#25272 `__: DOC: Mention installing threadpoolctl in issue template [skip... +* `#25276 `__: MAINT: Bump actions/checkout from 3 to 4 +* `#25280 `__: TST: Fix fp_noncontiguous and fpclass on riscv64 +* `#25282 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.0 to 3.0.1 +* `#25284 `__: CI: Install Lapack runtime on Cygwin. +* `#25287 `__: BUG: Handle .pyf.src and fix SciPy [urgent] +* `#25291 `__: MAINT: Allow initializing new-style dtypes inside numpy +* `#25292 `__: API: C-API removals +* `#25295 `__: MAINT: expose and use dtype classes in internal API +* `#25297 `__: BUG: enable linking of external libraries in the f2py Meson backend +* `#25299 `__: MAINT: Performance improvement of polyutils.as_series +* `#25300 `__: DOC: Document how to check for a specific dtype +* `#25302 `__: DOC: Clarify virtualenv setup and dependency installation +* `#25308 `__: MAINT: Update environment.yml to match *_requirements.txt +* `#25309 `__: DOC: Fix path to svg logo files +* `#25310 `__: DOC: Improve documentation for fill_diagonal +* `#25313 `__: BUG: Don't use the _Complex extension in C++ mode +* `#25314 `__: MAINT: Bump actions/setup-python from 4.7.1 to 4.8.0 +* `#25315 `__: MAINT: expose PyUFunc_AddPromoter in the internal ufunc API +* `#25316 `__: CI: remove no-blas=true from spin command on macos_arm64 ci [skip... +* `#25317 `__: ENH: Add fft optional extension submodule to numpy.array_api +* `#25321 `__: MAINT: Run f2py's meson backend with the same python that runs... +* `#25322 `__: DOC: Add examples for ``np.char`` functions +* `#25324 `__: DOC: Add examples for ``np.polynomial.polynomial`` functions +* `#25326 `__: DOC: Add examples to functions in ``np.polynomial.hermite`` +* `#25328 `__: DOC: Add ``np.polynomial.laguerre`` examples +* `#25329 `__: BUG: fix refcounting for dtypemeta aliases +* `#25331 `__: MAINT: Bump actions/setup-python from 4.8.0 to 5.0.0 +* `#25335 `__: BUG: Fix np.char for scalars and add tests +* `#25336 `__: API: make arange ``start`` argument positional-only +* `#25338 `__: BLD: update vendored Meson for AIX shared library fix +* `#25339 `__: DOC: fix some rendering and formatting issues in ``unique_*`` docstrings +* `#25340 `__: DOC: devguide cleanup: remove Gitwash and too verbose Git details +* `#25342 `__: DOC: Add more ``np.char`` documentation +* `#25346 `__: ENH: Enable 16-bit VQSort routines on AArch64 +* `#25347 `__: API: Introduce stringdtype [NEP 55] +* `#25350 `__: DOC: add "building from source" docs +* `#25354 `__: DOC: Add example for ``np.random.default_rng().binomial()`` +* `#25355 `__: DOC: Fix typo in ``np.random.default_rng().logistic()`` +* `#25356 `__: DOC: Add example for ``np.random.default_rng().exponential()`` +* `#25357 `__: DOC: Add example for ``np.random.default_rng().geometric()`` +* `#25361 `__: BUG: Fix regression with ``f2py`` wrappers when modules and subroutines... 
+* `#25364 `__: ENH,BUG: Handle includes for meson backend +* `#25367 `__: DOC: Fix refguide check script +* `#25368 `__: MAINT: add npy_gil_error to acquire the GIL and set an error +* `#25369 `__: DOC: Correct documentation for polyfit() +* `#25370 `__: ENH: Make numpy.array_api more portable +* `#25372 `__: BUG: Fix failing test_features on SapphireRapids +* `#25376 `__: BUG: Fix build issues on SPR and avx512_qsort float16 +* `#25383 `__: MAINT: Init ``base`` in cpu_avx512_kn +* `#25384 `__: MAINT: Add missing modules to refguide test +* `#25388 `__: API: Adjust ``linalg.pinv`` and ``linalg.cholesky`` to Array... +* `#25389 `__: BUG: ufunc api: update multiarray_umath import path +* `#25394 `__: MAINT: Bump actions/upload-artifact from 3.1.3 to 4.0.0 +* `#25397 `__: BUG, SIMD: Fix quicksort build error when Highway/SVE is enabled +* `#25398 `__: DOC: Plot exact distributions in logistic, logseries and weibull... +* `#25404 `__: DOC: Improve ``np.histogram`` docs +* `#25409 `__: API,MAINT: Reorganize array-wrap calling and introduce ``return_scalar`` +* `#25412 `__: DOC: Clean up of ``_generator.pyx`` +* `#25413 `__: DOC: Add example to ``rng.beta(...)`` +* `#25414 `__: DOC: Add missing examples to ``np.ma`` +* `#25416 `__: ENH: define a gufunc for vecdot (with BLAS support) +* `#25417 `__: MAINT: Bump actions/setup-node from 3.8.1 to 4.0.1 +* `#25418 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#25425 `__: BUG: Fix two errors related to not checking for failed allocations +* `#25426 `__: BUG: avoid seg fault from OOB access in RandomState.set_state() +* `#25430 `__: TST: Fix test_numeric on riscv64 +* `#25431 `__: DOC: Improve ``np.mean`` documentation of the out argument +* `#25432 `__: DOC: Add ``numpy.lib`` docs page +* `#25434 `__: API,BUG,DEP: treat trailing comma as a tuple and thus a structured... +* `#25437 `__: API: Add ``rtol`` to ``matrix_rank`` and ``stable`` [Array API] +* `#25438 `__: DEV: add ``ninja`` to ``test_requirements.txt`` and clean up... +* `#25439 `__: BLD: remove ``-fno-strict-aliasing``, ``--strip-debug`` from cibuildwheel... +* `#25440 `__: CI: show meson-log.txt in Cirrus wheel builds +* `#25441 `__: API,ENH: Change definition of complex sign +* `#25443 `__: TST: fix issue with dtype conversion in ``test_avx_based_ufunc`` +* `#25444 `__: TST: remove ``TestNewBufferProtocol.test_error_too_many_dims`` +* `#25446 `__: Downgrade Highway to latest released version (1.0.7) +* `#25448 `__: TYP: Adjust type annotations for Numpy 2.0 changes +* `#25449 `__: TYP,CI: bump mypy from 1.5.1 to 1.7.1 +* `#25450 `__: MAINT: make the import-time check for old Accelerate more specific +* `#25451 `__: DOC: Fix names of subroutines. +* `#25453 `__: TYP,MAINT: Change more overloads to play nice with pyright +* `#25454 `__: DOC: fix typo ``v_stack`` in 2.0 migration guide +* `#25455 `__: BUG: fix macOS version checks for Accelerate support +* `#25456 `__: BLD: optimize BLAS and LAPACK search order +* `#25459 `__: BLD: fix uninitialized variable warnings from simd/neon/memory.h +* `#25462 `__: TST: skip two tests in aarch64 linux wheel builds +* `#25463 `__: ENH: Add np.strings namespace +* `#25473 `__: MAINT: use cholesky_up gufunc for upper Cholesky decomposition +* `#25484 `__: BUG: handle scalar input in np.char.replace +* `#25492 `__: DOC: update signature of PyArray_Conjugate +* `#25495 `__: API: adjust nD fft ``s`` param to array API +* `#25501 `__: DOC: Update a few interpreted text to verbatim/code. 
+* `#25503 `__: BLD: unpin cibuildwheel [wheel build] +* `#25504 `__: DOC: add pickleshare to doc dependencies +* `#25505 `__: BLD: replace uses of openblas_support with openblas wheels [wheel... +* `#25507 `__: DOC: mention string, bytes, and void dtypes in dtype intro +* `#25510 `__: BUG:Fix incorrect 'inner' method type annotation in __array_ufunc_ +* `#25511 `__: DOC: np.any: add multidimensional example +* `#25512 `__: DOC: add a section for dealing with NumPy 2.0 for downstream... +* `#25515 `__: BUG: three string ufunc bugs, one leading to segfault +* `#25516 `__: MAINT,BUG: Fix ``--dep`` when ``-L -l`` are present +* `#25520 `__: DOC: unambiguous np.histogram dtype description +* `#25521 `__: DOC: Improve error messages for random.choice +* `#25522 `__: BUG: fix incorrect strcmp implementation for unequal length strings +* `#25524 `__: MAINT: Update main after 1.26.3 release. +* `#25525 `__: MAINT: optimization and broadcasting for .replace() method for... +* `#25527 `__: DOC: Improve ``polynomial`` docs +* `#25528 `__: DOC: Add notes to ``rng.bytes()`` +* `#25529 `__: DOC: Add ``rng.f()`` plot +* `#25530 `__: DOC: Add ``rng.chisquare()`` plot +* `#25531 `__: API: allow building in cython with Py_LIMITED_API +* `#25533 `__: DOC: Improve ``poisson`` plot +* `#25534 `__: DOC: Indicate order is kwarg-only for ndarray.reshape. +* `#25535 `__: MAINT: fix ufunc debug tracing +* `#25536 `__: MAINT, ENH: Implement calling pocketfft via gufunc and allow... +* `#25538 `__: MAINT: Bump actions/dependency-review-action from 3.1.4 to 3.1.5 +* `#25540 `__: DOC: Fix typo in random.geometric docstring +* `#25542 `__: NEP: add NEP 56 on array API standard support in main namespace +* `#25545 `__: MAINT: Update copyright to 2024 (LICENSE & DOC) +* `#25549 `__: DOC: Using ``f2py`` with ``fypp`` +* `#25553 `__: BUG: Fix return shape of inverse_indices in unique_inverse +* `#25554 `__: BUG: support axes argument in np.linalg.tensordot +* `#25555 `__: MAINT, BLD: Fix unused inline functions warnings on clang +* `#25558 `__: ENH: Add replace ufunc to np.strings +* `#25560 `__: BUG: np.linalg.vector_norm: return correct shape for keepdims +* `#25563 `__: SIMD: Extend the enabled targets for Google Highway quicksort +* `#25569 `__: DOC: Fix a typo +* `#25570 `__: ENH: change list-of-array to tuple-of-array returns (Numba compat) +* `#25571 `__: MAINT: Return size_t from num_codepoints in string ufuncs Buffer... +* `#25573 `__: MAINT: add a C alias for the default integer DType +* `#25574 `__: DOC: ensure that docstrings for np.ndarray.copy, np.copy and... +* `#25575 `__: ENH: Wrap string ufuncs in np.strings to allow default arguments +* `#25579 `__: MAINT: Bump actions/upload-artifact from 4.0.0 to 4.1.0 +* `#25582 `__: CI: Bump azure pipeline timeout to 120 minutes +* `#25592 `__: BUG: Fix undefined behavior when converting NaN float16 to datetime... +* `#25593 `__: DOC: fix typos in 2.0 migration guide +* `#25594 `__: MAINT: replace uses of cython numpy.math.pxd with native routines +* `#25595 `__: BUG: Allow ``None`` as ``api_version`` in ``__array_namespace__``... 
+* `#25598 `__: BLD: include fix for MinGW platform detection +* `#25603 `__: DOC: Update tensordot documentation +* `#25608 `__: MAINT: skip installing rtools on azure +* `#25609 `__: DOC: fft: correct docs about recent deprecations +* `#25610 `__: ENH: Vectorize argsort and argselect with AVX2 +* `#25613 `__: BLD: fix building for windows ARM64 +* `#25614 `__: MAINT: Bump actions/dependency-review-action from 3.1.5 to 4.0.0 +* `#25615 `__: MAINT: add ``newaxis`` to ``__all__`` in ``numpy.array_api`` +* `#25625 `__: NEP: update NEP 55 text to match current stringdtype implementation +* `#25627 `__: TST: Fix f2py doc test collection in editable installs +* `#25628 `__: TST: Fix test_warning_calls on Python 3.12 +* `#25629 `__: TST: Bump pytz to 2023.3.post1 +* `#25631 `__: BUG: Use large file fallocate on 32 bit linux platforms +* `#25636 `__: MAINT: Move np.char methods to np.strings +* `#25638 `__: MAINT: Bump actions/upload-artifact from 4.1.0 to 4.2.0 +* `#25641 `__: DOC: Remove a duplicated argument ``shape`` in ``empty_like`` +* `#25646 `__: DOC: Fix links to f2py codes +* `#25648 `__: DOC: fix syntax highlighting issues in added f2py docs +* `#25650 `__: DOC: improve structure of reference guide +* `#25651 `__: ENH: Allow strings in logical ufuncs +* `#25652 `__: BUG: Fix AVX512 build flags on Intel Classic Compiler +* `#25656 `__: DOC: add autosummary API reference for DType clases. +* `#25657 `__: MAINT: fix warning about visibility tag on clang +* `#25660 `__: MAINT: Bump mymindstorm/setup-emsdk from 13 to 14 +* `#25662 `__: BUG: Allow NumPy int scalars to be divided by out-of-bound Python... +* `#25664 `__: DOC: minor improvement to the partition() docstrings +* `#25668 `__: BUG: correct irfft with n=1 on larger input +* `#25669 `__: BLD: fix potential issue with escape sequences in ``__config__.py`` +* `#25671 `__: MAINT: Bump actions/upload-artifact from 4.2.0 to 4.3.0 +* `#25672 `__: BUG: check for overflow when converting a string to an int scalar +* `#25673 `__: BUG: Ensure meson updates generated umath doc correctly. +* `#25674 `__: DOC: add a section on NumPy's module structure to the refguide +* `#25676 `__: NEP: add note on Python integer "exceptions" to NEP 50 +* `#25678 `__: DOC: fix docstring of quantile and percentile +* `#25680 `__: DOC: replace autosummary for numpy.dtypes with enumerated list +* `#25683 `__: DOC: Try add a section on NEP 50 to migration guide +* `#25687 `__: Update to OpenBLAS 0.3.26 +* `#25689 `__: MAINT: Simplify scalar int division a bit (no need for helper... +* `#25692 `__: DOC: Clarify deprecated width Parameter in numpy.binary_repr... +* `#25695 `__: DOC: empty: standardize notes about uninitialized values +* `#25697 `__: CI: add pinning for scipy-openblas wheels +* `#25699 `__: DOC: Fix some references in document +* `#25707 `__: DOC: fix a small np.einsum example +* `#25709 `__: MAINT: Include header defining backtrace +* `#25710 `__: TST: marks on a fixture have no effect +* `#25711 `__: ENH: support float and longdouble in FFT using C++ pocketfft... +* `#25712 `__: API: Make any and all return booleans by default +* `#25715 `__: [MAINT] Add regression test for np.geomspace +* `#25716 `__: CI: pin cygwin python to 3.9.16-1 [skip cirrus][skip azp][skip... 
+* `#25717 `__: DOC: Fix some minor formatting errors in NEPs +* `#25721 `__: DEP: Finalize future warning move in lstsq default +* `#25723 `__: NEP: Mark NEP 55 accepted +* `#25727 `__: DOC: Remove function name without signature in ``ma`` +* `#25730 `__: ENH: add a pkg-config file and a ``numpy-config`` script +* `#25732 `__: CI: use version 0.3.26.0.2 of scipy-openblas wheels +* `#25734 `__: DOC: Fix markups of code literals in ``polynomial`` +* `#25735 `__: MAINT: Bump pypa/cibuildwheel from 2.16.4 to 2.16.5 +* `#25736 `__: MAINT: Bump actions/cache from 3 to 4 +* `#25738 `__: MAINT: add ``trapezoid`` as the new name for ``trapz`` +* `#25739 `__: TST: run macos_arm64 test on Github Actions +* `#25740 `__: DOC: Fix doctest failure in ``polynomial`` +* `#25745 `__: DEV: add .editorconfig for C/C++ +* `#25751 `__: DOC: Update ruff rule instruction +* `#25753 `__: DOC: Fix ``ufunc.reduceat`` doc for ``dtype`` +* `#25754 `__: API: Expose the dtype C API +* `#25758 `__: DOC: Fix summary table in linalg routines document +* `#25761 `__: DEP: Finalize future warning for shape=1 descriptor dropping... +* `#25763 `__: CI/BLD: fix bash script tests for cibw +* `#25768 `__: DOC: in ufuncs ``dtype`` is not ignored when ``out`` is passed +* `#25772 `__: MAINT: Update main after 1.26.4 release. +* `#25774 `__: DOC: Update docs build dependencies install cmd +* `#25775 `__: ENH: Add index/rindex ufuncs for unicode and bytes dtypes +* `#25776 `__: DOC: Add missing ``np.size`` entry to routines +* `#25779 `__: MAINT: Bump actions/upload-artifact from 4.3.0 to 4.3.1 +* `#25780 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#25783 `__: DOC: Remove references to ``distutils`` in simd document +* `#25785 `__: MAINT: Bump actions/setup-node from 4.0.1 to 4.0.2 +* `#25788 `__: ENH: Improve performance of np.tensordot +* `#25789 `__: MAINT,API: Always export static inline version of array accessor. +* `#25790 `__: MAINT: Private device struct shouldn't be in public header +* `#25791 `__: ENH: Add rest of unary ufuncs for unicode/bytes dtypes +* `#25792 `__: API: Create ``PyArray_DescrProto`` for legacy descriptor registration +* `#25793 `__: MAINT: update docstrings of string ufuncs to mention StringDType +* `#25794 `__: DEP: expire some deprecations +* `#25795 `__: DOC: fix docstring example in f2py.get_include +* `#25796 `__: MAINT: combine string ufuncs by passing on auxilliary data +* `#25797 `__: MAINT: Move ``NPY_VSTRING`` and make ``NPY_NTYPES NPY_TYPES_LEGACY`` +* `#25800 `__: REV: revert tuple/list return type changes for ``*split`` functions +* `#25801 `__: DOC: Update ``np.char.array`` docstring +* `#25802 `__: MAINT,API: Make metadata, c_metadata, fields, and names only... +* `#25803 `__: BLD: restore 'setup-args=-Duse-ilp64=true' in cibuildwheel [wheel... +* `#25804 `__: MAINT: Use preprocessor directive rather than code when adding... +* `#25806 `__: DOC: Update the CPU build options document +* `#25807 `__: DOC: Fix code-block formatting for new PyArray_RegisterDataType... +* `#25812 `__: API: Make ``descr->f`` only accessible through ``PyDataType_GetArrFuncs`` +* `#25813 `__: DOC: Update genfromtxt documentation +* `#25814 `__: MAINT: Use ``_ITEMSIZE`` rather than ``_DESCR(arr)->elsize`` +* `#25816 `__: API: Introduce ``PyDataType_FLAGS`` accessor for public access +* `#25817 `__: ENH: Add more const qualifiers to C API arguments +* `#25821 `__: BUG: ensure that FFT routines can deal with integer and bool... 
+* `#25822 `__: BLD: use homebrew gfortran +* `#25825 `__: MAINT: Bump actions/dependency-review-action from 4.0.0 to 4.1.0 +* `#25827 `__: DOC: run towncrier to consolidate the 2.0.0 release notes to... +* `#25828 `__: DOC: two minor fixes for DType API doc formatting +* `#25830 `__: DOC: Fix typo in nep 0052 +* `#25832 `__: DOC: add back 2.0.0 release note snippets that went missing +* `#25833 `__: DOC: Fix some reference warnings +* `#25834 `__: BUG: ensure static_string.buf is never NULL for a non-null string +* `#25837 `__: DEP: removed deprecated product/cumproduct/alltrue/sometrue +* `#25838 `__: MAINT: Update pinned setuptools for Python < 3.12 +* `#25839 `__: TST: fix Cython compile test which invokes ``meson`` +* `#25842 `__: DOC: Fix some incorrect rst markups +* `#25843 `__: BUG: ensure empty cholesky upper does not hang. +* `#25845 `__: DOC: Fix some typos +* `#25847 `__: MAINT: Adjust rest of string ufuncs to static_data approach +* `#25851 `__: DOC: Fix some reference warnings +* `#25852 `__: ENH: Support exotic installation of nvfortran +* `#25854 `__: BUG: Correctly refcount array descr in empty_like +* `#25855 `__: MAINT: Bump actions/dependency-review-action from 4.1.0 to 4.1.2 +* `#25856 `__: MAINT: Remove unnnecessary size argument in StringDType initializer +* `#25861 `__: CI: make chocolatey fail when a dependency doesn't install +* `#25862 `__: Revert "API: Make ``descr->f`` only accessible through ``PyDataType_GetArrFuncs``" +* `#25864 `__: ENH: Implement multiply ufunc for unicode & bytes +* `#25865 `__: ENH: print traceback after printing ABI mismatch error +* `#25866 `__: API: Fix compat header and add new import helpers +* `#25868 `__: MAINT: Bump actions/dependency-review-action from 4.1.2 to 4.1.3 +* `#25870 `__: BUG: use print to actually output something +* `#25873 `__: Update Highway to 1.1.0 +* `#25874 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.1 to 3.0.2 +* `#25876 `__: API: Remove no-op C API functions +* `#25877 `__: BUG: Include broadcasting for ``rtol`` argument in ``matrix_rank`` +* `#25879 `__: DOC: Add a document entry of ``PyArray_DescrProto`` +* `#25880 `__: DOC: README.md: point to user-friendly OpenSSF ScoreCard display +* `#25881 `__: BUG: Fix gh-25867 for used functions and subroutines +* `#25883 `__: BUG: fix typo in 'message' static variable of TestDeprecatedDTypeParenthesizedRepeatCount +* `#25884 `__: BUG: Fix typo in LEGACY_CONS_NON_NEGATVE_INBOUNDS_LONG +* `#25885 `__: DOC: fix typos +* `#25886 `__: MAINT: fix code comment typos in numpy/ directory +* `#25887 `__: BUG: Fix ``PyArray_FILLWBYTE`` Cython declaration +* `#25889 `__: CI: run apt update before apt-install in linux-blas workflow +* `#25890 `__: MAINT: refactor StringDType static_string implementation a bit. 
+* `#25891 `__: ENH: Add expandtabs ufunc for string & unicode dtypes +* `#25894 `__: CI, BLD, TST: Re-enable Emscripten/Pyodide CI job for NumPy +* `#25896 `__: ENH: implement stringdtype <-> timedelta roundtrip casts +* `#25897 `__: API: Make descr->f only accessible through ``PyDataType_GetArrFuncs`` +* `#25900 `__: CI, MAINT: use ``fetch-tags: true`` to speed up NumPy checkouts +* `#25901 `__: BLD: Add meson check to test presence of pocketfft git submodule +* `#25902 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.2 to 3.0.3 +* `#25905 `__: CI: allow job matrixes to run all jobs even when one fails +* `#25911 `__: MAINT: remove ``numpy.array_api`` module +* `#25912 `__: MAINT: Bump actions/cache from 4.0.0 to 4.0.1 +* `#25914 `__: API: Remove broadcasting ambiguity from np.linalg.solve +* `#25915 `__: DOC: Fix some document build errors about rst markups +* `#25919 `__: BUG: Ensure non-array logspace base does not influence dtype... +* `#25920 `__: NEP: update status fields of many NEPs +* `#25921 `__: DOC: update and copy-edit 2.0.0 release notes +* `#25922 `__: BUG: fix handling of copy keyword argument when calling __array__ +* `#25924 `__: BUG: remove vestiges of array_api [wheel build] +* `#25928 `__: DOC: Add note about np.char & np.strings in 2.0 migration guide +* `#25929 `__: DOC: Add mention of complex number changes to migration guide +* `#25931 `__: BUG: fix reference leak in PyArray_FromArrayAttr_int +* `#25932 `__: TST: skip rather than xfail a few tests to address CI log pollution +* `#25933 `__: MAINT: ensure towncrier can be run >1x, and is included in ``spin``... +* `#25937 `__: DOC: 2.0 release highlights and compat notes changes +* `#25939 `__: DOC: Add entries of ``npy_datetime`` and ``npy_timedelta`` +* `#25943 `__: API: Restructure the dtype struct to be new dtype friendly +* `#25944 `__: BUG: avoid incorrect stringdtype allocator sharing from array... +* `#25945 `__: BLD: try to build most macOS wheels on GHA +* `#25946 `__: DOC: Add and fixup/move docs for descriptor changes +* `#25947 `__: DOC: Fix incorrect rst markups of c function directives +* `#25948 `__: MAINT: Introduce NPY_FEATURE_VERSION_STRING and report it in... +* `#25950 `__: BUG: Fix reference leak in niche user old user dtypes +* `#25952 `__: BLD: use hash for mamba action +* `#25954 `__: API: Expose ``PyArray_Pack`` +* `#25955 `__: API: revert position-only 'start' in 'np.arange' +* `#25956 `__: Draft: [BUG] Fix Polynomial representation tests +* `#25958 `__: BUG: avoid incorrect type punning in NpyString_acquire_allocators +* `#25961 `__: TST, MAINT: Loosen tolerance in fft test. +* `#25962 `__: DOC: fix typos and rearrange CI +* `#25965 `__: CI: fix wheel tags for Cirrus macOS arm64 +* `#25973 `__: DOC: Backport gh-25971 and gh-25972 +* `#25977 `__: REL: Prepare for the NumPy 2.0.0b1 release [wheel build] +* `#25983 `__: CI: fix last docbuild warnings +* `#25986 `__: BLD: push a tag builds a wheel +* `#25987 `__: REL: Prepare for the NumPy 2.0.0b1 release (2) [wheel build] +* `#25994 `__: DOC: remove reverted release blurb [skip actions][skip azp][skip... +* `#25996 `__: CI: don't use ``fetch-tags`` in wheel build jobs +* `#25997 `__: REL: Prepare for the NumPy 2.0.0b1 release (3) +* `#26008 `__: DOC: mention the ``exceptions`` namespace in the 2.0.0 release... 
+* `#26009 `__: MAINT: Remove sdist task from pavement.py +* `#26022 `__: BUG: Fixes np.put receiving empty array causes endless loop +* `#26023 `__: MAINT: Bump pypa/cibuildwheel from 2.16.5 to 2.17.0 +* `#26034 `__: MAINT: remove now-unused ``NPY_USE_C99_FORMAT`` +* `#26035 `__: MAINT: remove the now-unused ``NPY_NO_SIGNAL`` +* `#26036 `__: MAINT: handle ``NPY_ALLOW_THREADS`` and related build option... +* `#26040 `__: BUG: Filter out broken Highway platform +* `#26041 `__: BLD: omit pp39-macosx_arm64 from matrix [wheel build] +* `#26042 `__: BUG: fix kwarg handling in assert_warn [skip cirrus][skip azp] +* `#26047 `__: ENH: install StringDType promoter for add +* `#26048 `__: MAINT: avoid use of flexible array member in public header +* `#26049 `__: BUG: raise error trying to coerce object arrays containing timedelta64('NaT')... +* `#26050 `__: BUG: fix reference count leak in __array__ internals +* `#26051 `__: BUG: add missing error handling in string to int cast internals +* `#26052 `__: MAINT: Remove partition and split-like functions from numpy.strings +* `#26053 `__: CI: clean up some unused ``choco install`` invocations +* `#26068 `__: DOC: Backport np.strings docstrings +* `#26073 `__: DOC clarifications on debugging numpy +* `#26074 `__: BUG: fix logic error in stringdtype maximum/minimum ufunc +* `#26075 `__: BUG: Allow the new string dtype summation to work +* `#26076 `__: MAINT: Make PyArrayMultiIterObject struct "smaller" +* `#26085 `__: MAINT: Bump actions/cache from 4.0.1 to 4.0.2 +* `#26109 `__: BUG: adapt cython files to new complex declarations (#26080) +* `#26110 `__: TYP: Adjust ``np.random.integers`` and ``np.random.randint`` +* `#26111 `__: API: Require reduce promoters to start with None to match +* `#26118 `__: MAINT: install all-string promoter for multiply +* `#26122 `__: BUG: fix reference counting error in stringdtype setup +* `#26124 `__: MAINT,API: Const qualify some new API (mostly new DType API) +* `#26127 `__: BUG: update pocketfft to unconditionaly disable use of aligned_alloc +* `#26131 `__: MAINT: add missing noexcept clauses +* `#26154 `__: MAINT: Bump actions/setup-python from 5.0.0 to 5.1.0 +* `#26167 `__: MAINT: Escalate import warning to an import error +* `#26169 `__: BUG,MAINT: Fix __array__ bugs and simplify code +* `#26170 `__: DOC: mention np.lib.NumPyVersion in the 2.0 migration guide +* `#26171 `__: ENH: inherit numerical dtypes from abstract ones. +* `#26173 `__: DOC, TST: make ``numpy.version`` officially public +* `#26186 `__: MAINT: Update Pyodide to 0.25.1 +* `#26192 `__: BUG: Infinite Loop in numpy.base_repr +* `#26193 `__: BUG: fix reference counting error in wrapping_method_resolve_descriptors +* `#26194 `__: DOC: Mention ``copy=True`` for ``__array__`` method in the migration... +* `#26205 `__: BUG: introduce PyArray_SafeCast to fix issues around stringdtype... +* `#26231 `__: API: Readd np.bool_ typing stub +* `#26256 `__: MAINT: Update array-api-tests job +* `#26259 `__: DOC: Backport various documentation fixes +* `#26262 `__: BLD: update to OpenBLAS 0.3.27.0.1 +* `#26265 `__: MAINT: Fix some typos +* `#26272 `__: BUG: Fixes for ``np.vectorize``. 
+* `#26283 `__: DOC: correct PR referenced in __array_wraps__ change note +* `#26293 `__: BUG: Ensure seed sequences are restored through pickling (#26260) +* `#26297 `__: BUG: Workaround for Intel Compiler mask conversion bug +* `#26305 `__: DOC: Bump pydata-sphinx-theme version +* `#26306 `__: MAINT: Robust string meson template substitution +* `#26307 `__: BLD: use newer openblas wheels [wheel build] +* `#26312 `__: DOC: Follow-up fixes for new theme +* `#26330 `__: BUG: Fix invalid constructor in string_fastsearch.h with C++... +* `#26331 `__: MAINT: address improper error handling and cleanup for ``spin`` +* `#26332 `__: BUG: use PyArray_SafeCast in array_astype +* `#26334 `__: MAINT: Disable compiler sanitizer tests on 2.0.x +* `#26351 `__: ENH: introduce a notion of "compatible" stringdtype instances... +* `#26357 `__: DOC: Added small clarification note, based on discussion in issue... +* `#26358 `__: BUG: Fix rfft for even input length. +* `#26360 `__: MAINT: Simplify bugfix for even rfft +* `#26373 `__: DOC: fix np.unique release notes [skip cirrus] +* `#26374 `__: ENH: add support for nan-like null strings in string replace +* `#26393 `__: BUG: Make sure that NumPy scalars are supported by can_cast +* `#26400 `__: MNT: more gracefully handle spin adding arguments to functions... +* `#26402 `__: DOC: Add missing methods to numpy.strings docs +* `#26403 `__: DOC: Fix links in random documentation. +* `#26417 `__: BUG: support nan-like null strings in [l,r]strip +* `#26423 `__: DOC: Fix some typos and incorrect markups +* `#26424 `__: DOC: add reference docs for NpyString C API +* `#26425 `__: REL: Prepare for the NumPy 2.0.0rc2 release [wheel build] +* `#26427 `__: TYP: Fix ``fromrecords`` type hint and bump mypy to 1.10.0. +* `#26457 `__: MAINT: Various CI fixes +* `#26458 `__: BUG: Use Python pickle protocol version 4 for np.save (#26388) +* `#26459 `__: BUG: fixes for three related stringdtype issues (#26436) +* `#26460 `__: MAINT: Bump pypa/cibuildwheel from 2.17.0 to 2.18.0 +* `#26461 `__: BUG: int32 and intc should both appear in sctypes +* `#26482 `__: DOC: Skip API documentation for numpy.distutils with Python 3.12... +* `#26527 `__: DOC: fix NEP 50 reference +* `#26536 `__: BUG: cast missing in PyPy-specific f2py code, pin spin in CI... +* `#26539 `__: ENH: improve the error raised by ``numpy.isdtype`` +* `#26540 `__: BLD: Make NumPy build reproducibly +* `#26541 `__: BUG: fix incorrect error handling for dtype('a') deprecation +* `#26543 `__: BUG: fix assert in PyArry_ConcatenateArrays with StringDType +* `#26544 `__: BUG: Fix handling of size=() in Generator.choice when a.ndim... +* `#26554 `__: BUG: Fix in1d fast-path range +* `#26555 `__: BUG: Fix typo in array-wrap code that lead to memory leak +* `#26569 `__: MAINT: Avoid by-pointer parameter passing for LINEARIZE_DATA_t... +* `#26583 `__: BUG: Fix memory leaks found with valgrind +* `#26584 `__: MAINT: Unpin pydata-sphinx-theme +* `#26587 `__: DOC: Added web docs for missing ma and strings routines +* `#26591 `__: BUG: Fix memory leaks found by valgrind +* `#26592 `__: DOC: Various documentation updates +* `#26635 `__: DOC: update 2.0 docs +* `#26651 `__: DOC: Update 2.0 migration guide +* `#26652 `__: BUG: Disallow string inputs for copy keyword in np.array and... +* `#26653 `__: BUG: Fix F77 ! comment handling +* `#26654 `__: DOC: Set default as ``-j 1`` for spin docs and move ``-W`` to... 
+* `#26657 `__: BUG: fix memory leaks found with valgrind (next) +* `#26659 `__: BUG: Replace dots with underscores in f2py meson backend for... +* `#26673 `__: CI: upgrade FreeBSD Cirrus job from FreeBSD 13.2 to 14.0 +* `#26674 `__: MNT: catch invalid fixed-width dtype sizes +* `#26677 `__: CI: Use default llvm on Windows. +* `#26694 `__: DOC: document workaround for deprecation of dim-2 inputs to `cross` +* `#26695 `__: BUG: Adds asanyarray to start of linalg.cross (#26667) +* `#26696 `__: BUG: weighted nanpercentile, nanquantile and multi-dim q +* `#26697 `__: BUG: Fix bug in numpy.pad() + diff --git a/doc/changelog/2.0.1-changelog.rst b/doc/changelog/2.0.1-changelog.rst new file mode 100644 index 000000000000..5a0b9dd207fc --- /dev/null +++ b/doc/changelog/2.0.1-changelog.rst @@ -0,0 +1,52 @@ + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* @vahidmech + +* Alex Herbert + +* Charles Harris +* Giovanni Del Monte + +* Leo Singer +* Lysandros Nikolaou +* Matti Picus +* Nathan Goldbaum +* Patrick J. Roddy + +* Raghuveer Devulapalli +* Ralf Gommers +* Rostan Tabet + +* Sebastian Berg +* Tyler Reddy +* Yannik Wicke + + +Pull requests merged +==================== + +A total of 24 pull requests were merged for this release. + +* `#26711 `__: MAINT: prepare 2.0.x for further development +* `#26792 `__: TYP: fix incorrect import in ``ma/extras.pyi`` stub +* `#26793 `__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions`` +* `#26794 `__: DOC: Remove mention of NaN and NAN aliases from constants +* `#26821 `__: BLD: Fix x86-simd-sort build failure on openBSD +* `#26822 `__: BUG: Ensure output order follows input in numpy.fft +* `#26823 `__: TYP: fix missing sys import in numeric.pyi +* `#26832 `__: DOC: remove hack to override _add_newdocs_scalars (#26826) +* `#26835 `__: BUG: avoid side-effect of 'include complex.h' +* `#26836 `__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` +* `#26837 `__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes +* `#26856 `__: DOC: Update some documentation +* `#26868 `__: BUG: fancy indexing copy +* `#26869 `__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar`` +* `#26870 `__: BUG: Handle --f77flags and --f90flags for meson [wheel build] +* `#26887 `__: BUG: Fix new DTypes and new string promotion when signature is... +* `#26888 `__: BUG: remove numpy.f2py from excludedimports +* `#26959 `__: BUG: Quantile closest_observation to round to nearest even order +* `#26960 `__: BUG: Fix off-by-one error in amount of characters in strip +* `#26961 `__: API: Partially revert unique with return_inverse +* `#26962 `__: BUG,MAINT: Fix utf-8 character stripping memory access +* `#26963 `__: BUG: Fix out-of-bound minimum offset for in1d table method +* `#26971 `__: BUG: fix f2py tests to work with v2 API +* `#26995 `__: BUG: Add object cast to avoid warning with limited API diff --git a/doc/changelog/2.0.2-changelog.rst b/doc/changelog/2.0.2-changelog.rst new file mode 100644 index 000000000000..6622407dd8f6 --- /dev/null +++ b/doc/changelog/2.0.2-changelog.rst @@ -0,0 +1,45 @@ + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* Bruno Oliveira + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christopher Sidebottom +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* Yair Chuchem + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#27000 `__: REL: Prepare for the NumPy 2.0.1 release [wheel build] +* `#27001 `__: MAINT: prepare 2.0.x for further development +* `#27021 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27022 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27061 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27073 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27074 `__: BUG: Bump Highway to latest master +* `#27077 `__: BUG: Off by one in memory overlap check +* `#27122 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27126 `__: BUG: Bump Highway to latest +* `#27128 `__: BUG: add missing error handling in public_dtype_api.c +* `#27129 `__: BUG: fix another cast setup in array_assign_subscript +* `#27130 `__: BUG: Fix building NumPy in FIPS mode +* `#27131 `__: BLD: update vendored Meson for cross-compilation patches +* `#27146 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27151 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27195 `__: REV: Revert undef I and document it +* `#27213 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27279 `__: BUG: Fix array_equal for numeric and non-numeric scalar types diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst index e2803c8d9d35..2e3f3cbf03c4 100644 --- a/doc/neps/nep-0055-string_dtype.rst +++ b/doc/neps/nep-0055-string_dtype.rst @@ -6,7 +6,8 @@ NEP 55 — Add a UTF-8 variable-width string DType to NumPy :Author: Nathan Goldbaum :Author: Warren Weckesser -:Status: Accepted +:Author: Marten van Kerkwijk +:Status: Final :Type: Standards Track :Created: 2023-06-29 :Updated: 2024-01-18 @@ -146,9 +147,6 @@ we propose to: types related to string support, enabling a migration path for a future deprecation of ``np.char``. -* An update to the ``npy`` and ``npz`` file formats to allow storage of - arbitrary-length sidecar data. - The following is out of scope for this work: * Changing DType inference for string data. @@ -161,6 +159,9 @@ The following is out of scope for this work: * Implement SIMD optimizations for string operations. +* An update to the ``npy`` and ``npz`` file formats to allow storage of + arbitrary-length sidecar data. + While we're explicitly ruling out implementing these items as part of this work, adding a new string DType helps set up future work that does implement some of these items. @@ -406,7 +407,7 @@ Missing data can be represented using a sentinel: >>> np.isnan(arr) array([False, True, False]) >>> np.empty(3, dtype=dt) - array([nan, nan, nan]) + array(['', '', '']) We only propose supporting user-provided sentinels. By default, empty arrays will be populated with empty strings: @@ -454,23 +455,12 @@ following the behavior of sorting an array containing ``nan``. 
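+For example, with a nan-like sentinel, missing entries sort to the end (an
+illustrative sketch assuming NumPy 2.0's ``np.dtypes.StringDType``; the exact
+repr may vary):
+
+    >>> import numpy as np
+    >>> dt = np.dtypes.StringDType(na_object=np.nan)
+    >>> np.sort(np.array(["b", np.nan, "a"], dtype=dt))
+    array(['a', 'b', nan], dtype=StringDType(na_object=nan))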
String Sentinels ++++++++++++++++ -A string missing data value is an instance of ``str`` or subtype of ``str`` and -will be used as the default value for empty arrays: - - >>> arr = np.empty(3, dtype=StringDType(na_object='missing')) - >>> arr - array(['missing', 'missing', 'missing']) - -If such an array is passed to a string operation or a cast, "missing" entries -will be treated as if they have a value given by the string sentinel: +A string missing data value is an instance of ``str`` or subtype of ``str``. - >>> np.char.upper(arr) - array(['MISSING', 'MISSING', 'MISSING']) - -Comparison operations will similarly use the sentinel value directly for missing -entries. This is the primary usage of this pattern we've found in downstream -code, where a missing data sentinel like ``"__nan__"`` is passed to a low-level -sorting or partitioning algorithm. +Operations will use the sentinel value directly for missing entries. This is the +primary usage of this pattern we've found in downstream code, where a missing +data sentinel like ``"__nan__"`` is passed to a low-level sorting or +partitioning algorithm. Other Sentinels +++++++++++++++ @@ -544,11 +534,33 @@ future NumPy or a downstream library may add locale-aware sorting, case folding, and normalization for NumPy unicode string arrays, but we are not proposing adding these features at this time. -Two ``StringDType`` instances are considered identical if they are created with -the same ``na_object`` and ``coerce`` parameter. We propose checking for unequal -``StringDType`` instances in the ``resolve_descriptors`` function of binary -ufuncs that take two string arrays and raising an error if an operation is -performed with unequal ``StringDType`` instances. +Two ``StringDType`` instances are considered equal if they are created with the +same ``na_object`` and ``coerce`` parameter. For ufuncs that accept more than +one string argument we also introduce the concept of "compatible" +``StringDType`` instances. We allow distinct DType instances to be used in ufunc +operations together if they have the same ``na_object`` or if only one +or the other DType has an ``na_object`` explicitly set. We do not consider +string coercion for determining whether instances are compatible, although if +the result of the operation is a string, the result will inherit the stricter +string coercion setting of the original operands. + +This notion of "compatible" instances will be enforced in the +``resolve_descriptors`` function of binary ufuncs. This choice makes it easier +to work with non-default ``StringDType`` instances, because Python strings are +coerced to the default ``StringDType`` instance, so the following idiomatic +expression is allowed:: + + >>> arr = np.array(["hello", "world"], dtype=StringDType(na_object=None)) + >>> arr + "!" + array(['hello!', 'world!'], dtype=StringDType(na_object=None)) + +If we only considered equality of ``StringDType`` instances, this would +be an error, making for an awkward user experience. If the operands have +distinct ``na_object`` settings, NumPy will raise an error because the choice +for the result DType is ambiguous:: + + >>> arr + np.array("!", dtype=StringDType(na_object="")) + TypeError: Cannot find common instance for incompatible dtype instances
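As an illustration of the coercion rule above, a short sketch continuing the examples in this section (the exact ``repr`` output is schematic rather than taken from the implementation)::

    >>> a = np.array(["hello", "world"], dtype=StringDType(coerce=False))
    >>> b = np.array(["!", "?"], dtype=StringDType())
    >>> (a + b).dtype
    StringDType(coerce=False)

Neither operand here sets an ``na_object``, so the instances are compatible, and the result inherits the stricter ``coerce=False`` setting from ``a``.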
``np.strings`` namespace ************************ @@ -562,14 +574,15 @@ be populated with string ufuncs: True We feel ``np.strings`` is a more intuitive name than ``np.char``, and eventually -will replace ``np.char`` once downstream libraries that conform to SPEC-0 can -safely switch to ``np.strings`` without needing any logic conditional on the NumPy -version. +will replace ``np.char`` once the minimum NumPy version supported by downstream +libraries per `SPEC-0 `_ is new +enough that they can safely switch to ``np.strings`` without needing any logic +conditional on the NumPy version.
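As a brief sketch of the new namespace in use (assuming a NumPy 2.x installation where ``np.strings`` is populated)::

    >>> import numpy as np
    >>> np.strings.upper(np.array(["hello", "world"]))
    array(['HELLO', 'WORLD'], dtype='<U5')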
Serialization ************* -Since string data are stored outside the array buffer, serialization top the +Since string data are stored outside the array buffer, serialization to the ``npy`` format would require a format revision to support storing variable-width sidecar data. Rather than doing this as part of this effort, we do not plan on supporting serialization to the ``npy`` or ``npz`` format without @@ -905,10 +918,10 @@ endian-dependent layouts of these structs is an implementation detail and is not publicly exposed in the API. Whether or not a string is stored directly on the arena buffer or in the heap is -signaled by setting the ``NPY_STRING_SHORT`` flag on the string data. Because -the maximum size of a heap-allocated string is limited to the size of the -largest 7-byte unsized integer, this flag can never be set for a valid heap -string. +signaled by setting the ``NPY_STRING_OUTSIDE_ARENA`` and ``NPY_STRING_LONG`` flags on +the string data. Because the maximum size of a heap-allocated string is limited +to the size of the largest 7-byte unsigned integer, these flags can never be set +for a valid heap string. See :ref:`memorylayoutexamples` for some visual examples of strings in each of these memory layouts. @@ -956,20 +969,29 @@ exponentially expanding buffer, with an expansion factor of 1.25. Each string entry in the arena is prepended by a size, stored either in a ``char`` or a ``size_t``, depending on the length of the string. Strings with lengths between 16 or 8 (depending on architecture) and 255 are stored with a -``char`` size. We refer to these as "medium" strings internally and strings -stored this way have the ``NPY_STRING_MEDIUM`` flag set. This choice reduces the -overhead for storing smaller strings on the heap by 7 bytes per medium-length -string. +``char`` size. We refer to these as "medium" strings internally. This choice +reduces the overhead for storing smaller strings on the heap by 7 bytes per +medium-length string. Strings in the arena with lengths longer than 255 bytes +have the ``NPY_STRING_LONG`` flag set. If the contents of a packed string are freed and then assigned to a new string with the same size or smaller than the string that was originally stored in the -packed string, the existing short string or arena allocation is re-used, with -padding zeros written to the end of the subset of the buffer reserved for the -string. If the string is enlarged, the existing space in the arena buffer cannot -be used, so instead we resort to allocating space directly on the heap via -``malloc`` and the ``NPY_STRING_ON_HEAP`` flag is set. Any pre-existing flags -are kept set to allow future use of the string to determine if there is space in -the arena buffer allocated for the string for possible re-use. +packed string, the existing short string or arena allocation is re-used. There +is one exception, however: when a string in the arena is overwritten with a short +string, the arena metadata is lost and the arena allocation cannot be re-used. + +If the string is enlarged, the existing space in the arena buffer cannot be +used, so instead we resort to allocating space directly on the heap via +``malloc`` and the ``NPY_STRING_OUTSIDE_ARENA`` and ``NPY_STRING_LONG`` flags +are set. Note that ``NPY_STRING_LONG`` can be set even for strings with lengths +less than 255 bytes in this case. Since the heap address overwrites the arena +offset, future string replacements will be stored on the heap or directly +in the array buffer as a short string. + +No matter where it is stored, once a string is initialized it is marked with the +``NPY_STRING_INITIALIZED`` flag. This lets us clearly distinguish between an +uninitialized empty string and a string that has been mutated into the empty +string. The size of the allocation is stored in the arena to allow reuse of the arena allocation if a string is mutated. In principle we could disallow re-use of the @@ -1022,13 +1044,7 @@ Freeing Strings Existing strings must be freed before discarding or re-using a packed string. The API is constructed to require this for all strings, even for short strings with no heap allocations. In all cases, all data in the packed string -are zeroed out, except for the flags, which are preserved except as noted below. - -For strings with data living in the arena allocation, the data for the string in -the arena buffer are zeroed out and the ``NPY_STRING_ARENA_FREED`` flag is set -on the packed string to indicate there is space in the arena for a later re-use -of the packed string. Heap strings have their heap allocation freed and the -``NPY_STRING_ON_HEAP`` flag removed. +are zeroed out, except for the flags, which are preserved. .. _memorylayoutexamples: Short strings store string data directly in the array buffer. On little-endian architectures, the string data appear first, followed by a single byte that allows space for four flags and stores the size of the string as an unsigned integer in the final 4 bits. In this example, the string contents are -"Hello world", with a size of 11. The only flag set indicates that this is a -short string. +"Hello world", with a size of 11. The flags indicate this string is stored +outside the arena and is initialized. .. image:: _static/nep-0055-arena-string-memory-layout.svg @@ -1058,9 +1074,8 @@ re-use of the arena allocation if a string is mutated. Also note that because the length of the string is small enough to fit in an ``unsigned char``, this is a "medium"-length string and the size requires only one byte in the arena allocation. An arena string larger than 255 bytes would need 8 bytes in the -arena to store the size in a ``size_t``. The only flag set indicates that this -is a such "medium"-length string with a size that fits in a ``unsigned -char``. Arena strings that are longer than 255 bytes have no flags set. +arena to store the size in a ``size_t``. The only flag set indicates this string +is initialized.
.. image:: _static/nep-0055-heap-string-memory-layout.svg @@ -1068,24 +1083,28 @@ Heap strings store string data in a buffer returned by ``PyMem_RawMalloc`` and instead of storing an offset into an arena buffer, directly store the heap address returned by ``malloc``. In this example, the string contents are "Numpy is a very cool library" and are stored at heap address -``0x4d3d3d3``. The string has one flag set, indicating that the allocation lives -directly on the heap rather than in the arena buffer. +``0x4d3d3d3``. The string has three flags set, indicating it is a "long" string +(i.e. not a short string) stored outside the arena, and is initialized. Note +that if this string were stored inside the arena, it would not have the long +string flag set because it requires less than 256 bytes to store. Empty Strings and Missing Data ++++++++++++++++++++++++++++++ The layout we have chosen has the benefit that a newly created array buffer returned by ``calloc`` will be an array filled with empty strings by -construction, since a string with no flags set is a heap string with size -zero. This is not the only valid representation of an empty string, since other -flags may be set to indicate that the missing string is associated with a -pre-existing short string or arena string. Missing strings will have an -identical representation, except they will always have a flag, -``NPY_STRING_MISSING`` set in the flags field. Users will need to check if a -string is null before accessing an unpacked string buffer and we have set up the -C API in such a way as to force null-checking whenever a string is -unpacked. Both missing and empty strings are stored directly in the array buffer -and do not require additional heap storage. +construction, since a string with no flags set is an uninitialized zero-length +arena string. This is not the only valid representation of an empty string, since other +flags may be set to indicate that the empty string is associated with a +pre-existing short string or arena string. + +Missing strings will have an identical representation, except they will always +have a flag, ``NPY_STRING_MISSING``, set in the flags field. Users will need to +check if a string is null before accessing an unpacked string buffer and we have +set up the C API in such a way as to force null-checking whenever a string is +unpacked. Both missing and empty strings can be detected based on data in the +packed string representation and do not require corresponding room in the arena +allocation or extra heap allocations. Related work ------------ diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 4a489474d9d7..180dec530649 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -9,132 +9,54 @@ body { font-family: 'Open Sans', sans-serif; + font-size: medium; } -pre, code { - font-size: 100%; - line-height: 155%; -} - -h1 { - font-family: "Lato", sans-serif; - color: #013243; /* warm black */ -} - -h2 { - color: #4d77cf; /* han blue */ - letter-spacing: -.03em; -} +/* Making sure the navbar shows correctly in one line + Reduces the space between the top-left logo and the navbar section titles */ -h3 { - color: #013243; /* warm black */ - letter-spacing: -.03em; +.col-lg-3 { + width: 15%; } -/* Style the active version button. - -- dev: orange -- stable: green -- old, PR: red - -Colors from: - -Wong, B. Points of view: Color blindness. -Nat Methods 8, 441 (2011).
https://doi.org/10.1038/nmeth.1618 -*/ - -/* If the active version has the name "dev", style it orange */ -#version_switcher_button[data-active-version-name*="dev"] { - background-color: #E69F00; - border-color: #E69F00; - color:#000000; -} +/* Version switcher colors from PyData Sphinx Theme */ -/* green for `stable` */ -#version_switcher_button[data-active-version-name*="stable"] { - background-color: #009E73; - border-color: #009E73; +.version-switcher__button[data-active-version-name*="devdocs"] { + background-color: var(--pst-color-warning); + border-color: var(--pst-color-warning); + opacity: 0.9; } -/* red for `old` */ -#version_switcher_button:not([data-active-version-name*="stable"], [data-active-version-name*="dev"], [data-active-version-name=""]) { - background-color: #980F0F; - border-color: #980F0F; +.version-switcher__button:not([data-active-version-name*="stable"]):not([data-active-version-name*="dev"]):not([data-active-version-name*="pull"]) { + background-color: var(--pst-color-danger); + border-color: var(--pst-color-danger); + opacity: 0.9; } -/* Main page overview cards */ - -.sd-card { - background: #fff; - border-radius: 0; - padding: 30px 10px 20px 10px; - margin: 10px 0px; +.version-switcher__menu a.list-group-item { + font-size: small; } -.sd-card .sd-card-header { - text-align: center; +button.btn.version-switcher__button, +button.btn.version-switcher__button:hover { + color: black; + font-size: small; } -.sd-card .sd-card-header .sd-card-text { - margin: 0px; -} +/* Main index page overview cards */ .sd-card .sd-card-img-top { - height: 52px; - width: 52px; + height: 60px; + width: 60px; margin-left: auto; margin-right: auto; + margin-top: 10px; } -.sd-card .sd-card-header { - border: none; - background-color: white; - color: #150458 !important; - font-size: var(--pst-font-size-h5); - font-weight: bold; - padding: 2.5rem 0rem 0.5rem 0rem; -} - -.sd-card .sd-card-footer { - border: none; - background-color: white; -} +/* Main index page overview images */ -.sd-card .sd-card-footer .sd-card-text { - max-width: 220px; - margin-left: auto; - margin-right: auto; -} - -/* Dark theme tweaking */ html[data-theme=dark] .sd-card img[src*='.svg'] { - filter: invert(0.82) brightness(0.8) contrast(1.2); -} - -/* Main index page overview cards */ -html[data-theme=dark] .sd-card { - background-color:var(--pst-color-background); -} - -html[data-theme=dark] .sd-shadow-sm { - box-shadow: 0 .1rem 1rem rgba(250, 250, 250, .6) !important -} - -html[data-theme=dark] .sd-card .sd-card-header { - background-color:var(--pst-color-background); - color: #150458 !important; -} - -html[data-theme=dark] .sd-card .sd-card-footer { - background-color:var(--pst-color-background); -} - -html[data-theme=dark] h1 { - color: var(--pst-color-primary); -} - -html[data-theme=dark] h3 { - color: #0a6774; + filter: invert(0.82) brightness(0.8) contrast(1.2); } /* Legacy admonition */ @@ -143,15 +65,10 @@ div.admonition-legacy { border-color: var(--pst-color-warning); } -.admonition-legacy.admonition>.admonition-title::before, -div.admonition>.admonition-title::before { +div.admonition-legacy>.admonition-title::after { color: var(--pst-color-warning); - content: var(--pst-icon-admonition-attention); - background-color: var(--pst-color-warning); } -.admonition-legacy.admonition>.admonition-title::after, -div.admonition>.admonition-title::after { - color: var(--pst-color-warning); - content: var(--pst-icon-admonition-default); +div.admonition-legacy>.admonition-title { + background-color: 
var(--pst-color-warning-bg); } \ No newline at end of file diff --git a/doc/source/conf.py b/doc/source/conf.py index 1e734c0134bc..4bbf84249118 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -41,10 +41,6 @@ class PyTypeObject(ctypes.Structure): ('tp_name', ctypes.c_char_p), ] - # prevent numpy attaching docstrings to the scalar types - assert 'numpy._core._add_newdocs_scalars' not in sys.modules - sys.modules['numpy._core._add_newdocs_scalars'] = object() - import numpy # change the __name__ of the scalar types @@ -58,9 +54,6 @@ class PyTypeObject(ctypes.Structure): c_typ = PyTypeObject.from_address(id(typ)) c_typ.tp_name = _name_cache[typ] = b"numpy." + name.encode('utf8') - # now generate the docstrings as usual - del sys.modules['numpy._core._add_newdocs_scalars'] - import numpy._core._add_newdocs_scalars replace_scalar_type_names() @@ -143,6 +136,10 @@ class PyTypeObject(ctypes.Structure): # for source files. exclude_dirs = [] +exclude_patterns = [] +if sys.version_info[:2] >= (3, 12): + exclude_patterns += ["reference/distutils.rst"] + # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False @@ -239,23 +236,29 @@ def setup(app): switcher_version = f"{version}" html_theme_options = { - "logo": { - "image_light": "_static/numpylogo.svg", - "image_dark": "_static/numpylogo_dark.svg", - }, - "github_url": "https://github.com/numpy/numpy", - "collapse_navigation": True, - "external_links": [ - {"name": "Learn", "url": "https://numpy.org/numpy-tutorials/"}, - {"name": "NEPs", "url": "https://numpy.org/neps"} - ], - "header_links_before_dropdown": 6, - # Add light/dark mode and documentation version switcher: - "navbar_end": ["theme-switcher", "version-switcher", "navbar-icon-links"], - "switcher": { - "version_match": switcher_version, - "json_url": "https://numpy.org/doc/_static/versions.json", - }, + "logo": { + "image_light": "_static/numpylogo.svg", + "image_dark": "_static/numpylogo_dark.svg", + }, + "github_url": "https://github.com/numpy/numpy", + "collapse_navigation": True, + "external_links": [ + {"name": "Learn", "url": "https://numpy.org/numpy-tutorials/"}, + {"name": "NEPs", "url": "https://numpy.org/neps"}, + ], + "header_links_before_dropdown": 6, + # Add light/dark mode and documentation version switcher: + "navbar_end": [ + "search-button", + "theme-switcher", + "version-switcher", + "navbar-icon-links" + ], + "navbar_persistent": [], + "switcher": { + "version_match": switcher_version, + "json_url": "https://numpy.org/doc/_static/versions.json", + }, } html_title = "%s v%s Manual" % (project, version) @@ -585,3 +588,12 @@ class NumPyLexer(CLexer): breathe_projects = dict(numpy=os.path.join("..", "build", "doxygen", "xml")) breathe_default_project = "numpy" breathe_default_members = ("members", "undoc-members", "protected-members") + +# See https://github.com/breathe-doc/breathe/issues/696 +nitpick_ignore = [ + ('c:identifier', 'FILE'), + ('c:identifier', 'size_t'), + ('c:identifier', 'PyHeapTypeObject'), +] + + diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index 6164eef4db26..b1cc7d96ffe2 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -247,7 +247,10 @@ of Python is encouraged, see :ref:`advanced_debugging`. In terms of debugging, NumPy also needs to be built in a debug mode. You need to use ``debug`` build type and disable optimizations to make sure ``-O0`` flag is used -during object building. 
To generate source-level debug information within the build process run:: +during object building. Note that NumPy should NOT be installed in your environment +before you build with the ``spin build`` command. + +To generate source-level debug information within the build process run:: $ spin build --clean -- -Dbuildtype=debug -Ddisable-optimization=true @@ -271,13 +274,14 @@ you want to debug. For instance ``mytest.py``:: x = np.arange(5) np.empty_like(x) -Now, you can run:: +Note that your test file needs to be outside your NumPy repository clone. Now, you can +run:: - $ spin gdb mytest.py + $ spin gdb /path/to/mytest.py In case you are using the clang toolchain:: - $ lldb python mytest.py + $ spin lldb /path/to/mytest.py And then in the debugger:: diff --git a/doc/source/dev/howto-docs.rst b/doc/source/dev/howto-docs.rst index 5f23d544145f..097456fad0b4 100644 --- a/doc/source/dev/howto-docs.rst +++ b/doc/source/dev/howto-docs.rst @@ -376,7 +376,7 @@ membergroups and members-only options: :outline: :no-link: -Checkout the `doxygenclass documentation _` +Check out the `doxygenclass documentation `__ for more details and to see it in action. ``doxygennamespace`` diff --git a/doc/source/index.rst b/doc/source/index.rst index b80d65ce2c4e..02f3a8dc12b0 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -21,10 +21,10 @@ NumPy documentation `Historical versions of documentation `_ **Useful links**: -`Installation `_ | -`Source Repository `_ | -`Issue Tracker `_ | -`Q&A Support `_ | +`Installation `_ | +`Source Repository `_ | +`Issue Tracker `_ | +`Q&A Support `_ | `Mailing List `_ NumPy is the fundamental package for scientific computing in Python. It is a @@ -36,13 +36,15 @@ basic statistical operations, random simulation and much more. -.. grid:: 2 +.. grid:: 1 1 2 2 + :gutter: 2 3 4 4 .. grid-item-card:: :img-top: ../source/_static/index-images/getting_started.svg + :text-align: center Getting started - ^^^^^^^^^^^^^^^ + ^^^ New to NumPy? Check out the Absolute Beginner's Guide. It contains an introduction to NumPy's main concepts and links to additional tutorials. @@ -58,9 +60,10 @@ basic statistical operations, random simulation and much more. .. grid-item-card:: :img-top: ../source/_static/index-images/user_guide.svg + :text-align: center User guide - ^^^^^^^^^^ + ^^^ The user guide provides in-depth information on the key concepts of NumPy with useful background information and explanation. @@ -76,9 +79,10 @@ basic statistical operations, random simulation and much more. .. grid-item-card:: :img-top: ../source/_static/index-images/api.svg + :text-align: center API reference - ^^^^^^^^^^^^^ + ^^^ The reference guide contains a detailed description of the functions, modules, and objects included in NumPy. The reference describes how the @@ -96,9 +100,10 @@ basic statistical operations, random simulation and much more. .. grid-item-card:: :img-top: ../source/_static/index-images/contributor.svg + :text-align: center Contributor's guide - ^^^^^^^^^^^^^^^^^^^ + ^^^ Want to add to the codebase? Can you help add a translation or a flowchart to the documentation?
The contributing guidelines will guide you through the diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 0bbd68242524..ac113bff0a7f 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -19,11 +19,11 @@ Ruff plugin =========== Many of the changes covered in the 2.0 release notes and in this migration -guide can be automatically adapted to in downstream code with a dedicated +guide can be automatically adapted in downstream code with a dedicated `Ruff `__ rule, namely rule `NPY201 `__. -You should install ``ruff>=0.2.0`` and add the ``NPY201`` rule to your +You should install ``ruff>=0.4.8`` and add the ``NPY201`` rule to your ``pyproject.toml``:: [tool.ruff.lint] @@ -43,9 +43,8 @@ NumPy 2.0 changes promotion (the result of combining dissimilar data types) as per :ref:`NEP 50 `. Please see the NEP for details on this change. It includes a table of example changes and a backwards compatibility section. -The largest backwards compatibility change of this is that it means that -the precision of scalars is now preserved consistently. -Two examples are: +The largest backwards compatibility change is that the precision of scalars +is now preserved consistently. Two examples are: * ``np.float32(3) + 3.`` now returns a float32 when it previously returned a float64. @@ -97,7 +96,7 @@ C-API Changes ============= Some definitions were removed or replaced due to being outdated or -unmaintainable. Some new API definition will evaluate differently at +unmaintainable. Some new API definitions will evaluate differently at runtime between NumPy 2.0 and NumPy 1.x. Some are defined in ``numpy/_core/include/numpy/npy_2_compat.h`` (for example ``NPY_DEFAULT_INT``) which can be vendored in full or part @@ -116,10 +115,10 @@ The ``PyArray_Descr`` struct has been changed One of the most impactful C-API changes is that the ``PyArray_Descr`` struct is now more opaque to allow us to add additional flags and have itemsizes not limited by the size of ``int`` as well as allow improving -structured dtypes in the future and not burdon new dtypes with their fields. +structured dtypes in the future and not burden new dtypes with their fields. Code which only uses the type number and other initial fields is unaffected. -Most code will hopefull mainly access the ``->elsize`` field, when the +Most code will hopefully mainly access the ``->elsize`` field, when the dtype/descriptor itself is attached to an array (e.g. ``arr->descr->elsize``) this is best replaced with ``PyArray_ITEMSIZE(arr)``. @@ -127,7 +126,7 @@ Where not possible, new accessor functions are required: * ``PyDataType_ELSIZE`` and ``PyDataType_SET_ELSIZE`` (note that the result is now ``npy_intp`` and not ``int``). -* ``PyDataType_ALIGNENT`` +* ``PyDataType_ALIGNMENT`` * ``PyDataType_FIELDS``, ``PyDataType_NAMES``, ``PyDataType_SUBARRAY`` * ``PyDataType_C_METADATA`` @@ -146,12 +145,12 @@ or adding ``npy2_compat.h`` into your code base and explicitly include it when compiling with NumPy 1.x (as they are new API). Including the file has no effect on NumPy 2. -Please do not hesitate to open a NumPy issue, if you require assistence or +Please do not hesitate to open a NumPy issue, if you require assistance or the provided functions are not sufficient. **Custom User DTypes:** -Existing user dtypes must now use ``PyArray_DescrProto`` to define their -dtype and slightly modify the code. See note in `PyArray_RegisterDataType`. 
+Existing user dtypes must now use :c:type:`PyArray_DescrProto` to define +their dtype and slightly modify the code. See note in :c:func:`PyArray_RegisterDataType`. Functionality moved to headers requiring ``import_array()`` ----------------------------------------------------------- @@ -180,7 +179,7 @@ Functionality which previously did not require import includes: Increased maximum number of dimensions -------------------------------------- -The maximum number of dimensions (and arguments) was increased to 64, this +The maximum number of dimensions (and arguments) was increased to 64. This affects the ``NPY_MAXDIMS`` and ``NPY_MAXARGS`` macros. It may be good to review their use, and we generally encourage you to not use these macros (especially ``NPY_MAXARGS``), so that a future version of @@ -203,17 +202,37 @@ native C99 types. While the memory layout of those types remains identical to the types used in NumPy 1.x, the API is slightly different, since direct field access (like ``c.real`` or ``c.imag``) is no longer possible. -It is recommended to use the functions `npy_creal` and `npy_cimag` (and the -corresponding float and long double variants) to retrieve +It is recommended to use the functions ``npy_creal`` and ``npy_cimag`` +(and the corresponding float and long double variants) to retrieve the real or imaginary part of a complex number, as these will work with both -NumPy 1.x and with NumPy 2.x. New functions `npy_csetreal` and `npy_csetimag`, -along with compatibility macros `NPY_CSETREAL` and `NPY_CSETIMAG` (and the -corresponding float and long double variants), have been -added for setting the real or imaginary part. +NumPy 1.x and with NumPy 2.x. New functions ``npy_csetreal`` and +``npy_csetimag``, along with compatibility macros ``NPY_CSETREAL`` and +``NPY_CSETIMAG`` (and the corresponding float and long double variants), +have been added for setting the real or imaginary part. The underlying type remains a struct under C++ (all of the above still remains valid). +This has implications for Cython. It is recommended to always use the native +typedefs ``cfloat_t``, ``cdouble_t``, ``clongdouble_t`` rather than the NumPy +types ``npy_cfloat``, etc., unless you have to interface with C code written +using the NumPy types. You can still write Cython code using the ``c.real`` and +``c.imag`` attributes (using the native typedefs), but you can no longer use +in-place operators like ``c.imag += 1`` in Cython's C++ mode. + +Because NumPy 2 now includes ``complex.h``, code that uses a variable named +``I`` may see an error such as + +.. code-block:: c + + error: expected ‘)’ before ‘__extension__’ + double I, + +Using the name ``I`` now requires an ``#undef I``. + +.. note:: + NumPy 2.0.1 briefly included the ``#undef I`` to help users not already + including ``complex.h``. + Changes to namespaces ===================== @@ -230,7 +249,7 @@ Please refer to `NEP 52 ` in `~numpy.asarray`, `~numpy.array` and `ndarray.__array__ ` may require these changes: -1. Code using ``np.array(..., copy=False)`` can in most cases be changed to - ``np.asarray(...)``. Older code tended to use ``np.array`` like this because - it had less overhead than the default ``np.asarray`` copy-if-needed - behavior. This is no longer true, and ``np.asarray`` is the preferred function. -2. For code that explicitly needs to pass ``None``/``False`` meaning "copy if - needed" in a way that's compatible with NumPy 1.x and 2.x, see - `scipy#20172 `__ for an example - of how to do so. -3. For any ``__array__`` method on a non-NumPy array-like object, a - ``copy=None`` keyword can be added to the signature - this will work with - older NumPy versions as well. +* Code using ``np.array(..., copy=False)`` can in most cases be changed to + ``np.asarray(...)``. Older code tended to use ``np.array`` like this because + it had less overhead than the default ``np.asarray`` copy-if-needed + behavior. This is no longer true, and ``np.asarray`` is the preferred function. +* For code that explicitly needs to pass ``None``/``False`` meaning "copy if + needed" in a way that's compatible with NumPy 1.x and 2.x, see + `scipy#20172 `__ for an example + of how to do so. +* For any ``__array__`` method on a non-NumPy array-like object, ``dtype=None`` + and ``copy=None`` keywords must be added to the signature - this will work with older + NumPy versions as well (although older NumPy versions will never pass in the ``copy`` keyword). + If the keywords are added to the ``__array__`` signature, then for: + + * ``copy=True`` and any ``dtype`` value always return a new copy, + * ``copy=None`` create a copy if required (for example by ``dtype``), + * ``copy=False`` a copy must never be made. If a copy is needed to return a NumPy array + or satisfy ``dtype``, then raise an exception (``ValueError``).
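To make the last rule concrete, here is a minimal sketch of a NumPy-2-compatible ``__array__`` method for a hypothetical array-like class (the class and attribute names are illustrative only, not part of NumPy)::

    import numpy as np

    class MyArrayWrapper:
        """Hypothetical array-like wrapping a NumPy array."""

        def __init__(self, data):
            self._data = np.asarray(data)

        def __array__(self, dtype=None, copy=None):
            if copy is False:
                # A copy must never be made; raise if one would be needed.
                if dtype is not None and np.dtype(dtype) != self._data.dtype:
                    raise ValueError(
                        "a copy is required to satisfy dtype, but copy=False"
                    )
                return self._data
            if copy is True:
                # Always return a new copy (np.array copies by default).
                return np.array(self._data, dtype=dtype)
            # copy=None, or an older NumPy that does not pass copy at all:
            # copy only if required (for example by a dtype change).
            return np.asarray(self._data, dtype=dtype)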
+ +Writing numpy-version-dependent code +------------------------------------ + +It should be fairly rare to have to write code that explicitly branches on the +``numpy`` version - in most cases, code can be rewritten to be compatible with +1.x and 2.0 at the same time. However, if it is necessary, here is a suggested +code pattern to use, using `numpy.lib.NumpyVersion`:: + + # example with AxisError, which is no longer available in + # the main namespace in 2.0, and not available in the + # `exceptions` namespace in <1.25.0 (example uses <2.0.0b1 + # for illustrative purposes): + if np.lib.NumpyVersion(np.__version__) >= '2.0.0b1': + from numpy.exceptions import AxisError + else: + from numpy import AxisError + +This pattern will work correctly, including with NumPy release candidates, which +is important during the 2.0.0 release period.
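Where only the presence of a name is at issue, plain feature detection is an equivalent and sometimes simpler alternative to comparing version strings; a sketch of the same ``AxisError`` case::

    try:
        # numpy.exceptions exists on NumPy >= 1.25, including 2.x
        from numpy.exceptions import AxisError
    except ImportError:
        # fall back for older NumPy versions
        from numpy import AxisError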
diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index 7789a47221b5..11b3bdc16c6c 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -37,9 +37,8 @@ of the flexible itemsize array types (:class:`str_`, **Figure:** Hierarchy of type objects representing the array data types. Not shown are the two integer types :class:`intp` and - :class:`uintp` which just point to the integer type that holds a - pointer for the platform. All the number types can be obtained - using bit-width names as well. + :class:`uintp` which are used for indexing (the same as the + default integer since NumPy 2). .. TODO - use something like this instead of the diagram above, as it generates @@ -377,21 +376,29 @@ are also provided. Alias for the signed integer type (one of `numpy.byte`, `numpy.short`, `numpy.intc`, `numpy.int_`, `numpy.long` and `numpy.longlong`) - that is the same size as a pointer. + that is used as a default integer and for indexing. - Compatible with the C ``intptr_t``. + Compatible with the C ``Py_ssize_t``. - :Character code: ``'p'`` + :Character code: ``'n'`` + + .. versionchanged:: 2.0 + Before NumPy 2, this had the same size as a pointer. In practice this + is almost always identical, but the character code ``'p'`` maps to the C + ``intptr_t``. The character code ``'n'`` was added in NumPy 2.0. .. attribute:: uintp - Alias for the unsigned integer type (one of `numpy.ubyte`, `numpy.ushort`, - `numpy.uintc`, `numpy.uint`, `numpy.ulong` and `numpy.ulonglong`) - that is the same size as a pointer. + Alias for the unsigned integer type that is the same size as ``intp``. + + Compatible with the C ``size_t``. - Compatible with the C ``uintptr_t``. + :Character code: ``'N'`` - :Character code: ``'P'`` + .. versionchanged:: 2.0 + Before NumPy 2, this had the same size as a pointer. In practice this + is almost always identical, but the character code ``'P'`` maps to the C + ``uintptr_t``. The character code ``'N'`` was added in NumPy 2.0. .. autoclass:: numpy.float16 diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 84549012e95b..eded7e184e04 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1682,8 +1682,8 @@ the functions that must be implemented for each slot. .. c:type:: NPY_CASTING (PyArrayMethod_ResolveDescriptors)( \ struct PyArrayMethodObject_tag *method, \ - PyArray_DTypeMeta **dtypes, \ - PyArray_Descr **given_descrs, \ + PyArray_DTypeMeta *const *dtypes, \ + PyArray_Descr *const *given_descrs, \ PyArray_Descr **loop_descrs, \ npy_intp *view_offset) @@ -1802,14 +1802,14 @@ the functions that must be implemented for each slot. "default" value that may differ from the "identity" value normally used. For example: - - ``0.0`` is the default for ``sum([])``. But ``-0.0`` is the correct - identity otherwise as it preserves the sign for ``sum([-0.0])``. - - We use no identity for object, but return the default of ``0`` and - ``1`` for the empty ``sum([], dtype=object)`` and - ``prod([], dtype=object)``. - This allows ``np.sum(np.array(["a", "b"], dtype=object))`` to work. - - ``-inf`` or ``INT_MIN`` for ``max`` is an identity, but at least - ``INT_MIN`` not a good *default* when there are no items. + - ``0.0`` is the default for ``sum([])``. But ``-0.0`` is the correct + identity otherwise as it preserves the sign for ``sum([-0.0])``. + - We use no identity for object, but return the default of ``0`` and + ``1`` for the empty ``sum([], dtype=object)`` and + ``prod([], dtype=object)``. + This allows ``np.sum(np.array(["a", "b"], dtype=object))`` to work. + - ``-inf`` or ``INT_MIN`` for ``max`` is an identity, but at least + ``INT_MIN`` is not a good *default* when there are no items. *initial* is a pointer to the data for the initial value, which should be filled in. Returns -1, 0, or 1 indicating error, no initial value, and the @@ -1857,7 +1857,7 @@ Typedefs for functions that users of the ArrayMethod API can implement are described below. .. c:type:: int (PyArrayMethod_TraverseLoop)( \ - void *traverse_context, PyArray_Descr *descr, char *data, \ + void *traverse_context, const PyArray_Descr *descr, char *data, \ npy_intp size, npy_intp stride, NpyAuxData *auxdata) A traverse loop working on a single array. This is similar to the general ... passed through in the future (for structured dtypes). .. c:type:: int (PyArrayMethod_GetTraverseLoop)( \ - void *traverse_context, PyArray_Descr *descr, \ + void *traverse_context, const PyArray_Descr *descr, \ int aligned, npy_intp fixed_stride, \ PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata, \ NPY_ARRAYMETHOD_FLAGS *flags) @@ -1920,7 +1920,8 @@ with the rest of the ArrayMethod API. attempt a new search for a matching loop/promoter.
.. c:type:: int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, \ - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], \ + PyArray_DTypeMeta *const op_dtypes[], \ + PyArray_DTypeMeta *const signature[], \ PyArray_DTypeMeta *new_op_dtypes[]) Type of the promoter function, which must be wrapped into a @@ -3386,7 +3387,7 @@ Data Type Promotion and Inspection ---------------------------------- .. c:function:: PyArray_DTypeMeta *PyArray_CommonDType( \ - PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) + const PyArray_DTypeMeta *dtype1, const PyArray_DTypeMeta *dtype2) This function defines the common DType operator. Note that the common DType will not be ``object`` (unless one of the DTypes is ``object``). Similar to @@ -3413,7 +3414,7 @@ Data Type Promotion and Inspection For example promoting ``float16`` with any other float, integer, or unsigned integer again gives a floating point number. -.. c:function:: PyArray_Descr *PyArray_GetDefaultDescr(PyArray_DTypeMeta *DType) +.. c:function:: PyArray_Descr *PyArray_GetDefaultDescr(const PyArray_DTypeMeta *DType) Given a DType class, returns the default instance (descriptor). This checks for a ``singleton`` first and only calls the ``default_descr`` function if @@ -3814,13 +3815,118 @@ Other conversions in the *vals* array. The sequence can be smaller than *maxvals* as the number of converted objects is returned. +.. _including-the-c-api: -Miscellaneous ------------- +Including and importing the C API +--------------------------------- +To use the NumPy C-API, you typically need to include the +``numpy/ndarrayobject.h`` header and ``numpy/ufuncobject.h`` for some ufunc +related functionality (``arrayobject.h`` is an alias for ``ndarrayobject.h``). -Importing the API -~~~~~~~~~~~~~~~~~ +These two headers export most relevant functionality. In general any project +which uses the NumPy API must import NumPy using one of the functions +``PyArray_ImportNumPyAPI()`` or ``import_array()``. +In some places, functionality which requires ``import_array()`` is not +needed, because you only need type definitions. In this case, it is +sufficient to include ``numpy/ndarraytypes.h``. + +For the typical Python project, multiple C or C++ files will be compiled into +a single shared object (the Python C-module) and ``PyArray_ImportNumPyAPI()`` +should be called inside its module initialization. + +When you have a single C-file, this will consist of: + +.. code-block:: c + + #include "numpy/ndarrayobject.h" + + PyMODINIT_FUNC PyInit_my_module(void) + { + if (PyArray_ImportNumPyAPI() < 0) { + return NULL; + } + /* Other initialization code. */ + } + +However, most projects will have additional C files which are all +linked together into a single Python module. +In this case, the helper C files typically do not have a canonical place +where ``PyArray_ImportNumPyAPI`` should be called (although it is OK and +fast to call it often). + +To solve this, NumPy provides the following pattern: the main +file is modified to define ``PY_ARRAY_UNIQUE_SYMBOL`` before the include: + +.. code-block:: c + + /* Main module file */ + #define PY_ARRAY_UNIQUE_SYMBOL MyModule + #include "numpy/ndarrayobject.h" + + PyMODINIT_FUNC PyInit_my_module(void) + { + if (PyArray_ImportNumPyAPI() < 0) { + return NULL; + } + /* Other initialization code. */ + } + +while the other files use:
.. code-block:: C + + /* Second file without any import */ + #define NO_IMPORT_ARRAY + #define PY_ARRAY_UNIQUE_SYMBOL MyModule + #include "numpy/ndarrayobject.h" + +You can of course add the defines to a local header used throughout. +You just have to make sure that the main file does *not* define +``NO_IMPORT_ARRAY``. + +For ``numpy/ufuncobject.h`` the same logic applies, but the unique symbol +mechanism is ``#define PY_UFUNC_UNIQUE_SYMBOL`` (both can match). + +Additionally, you will probably wish to add a +``#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION`` +to avoid warnings about possible use of old API. + +.. note:: + If you are experiencing access violations, make sure that the NumPy API + was properly imported and the symbol ``PyArray_API`` is not ``NULL``. + When in a debugger, this symbol's actual name will be + ``PY_ARRAY_UNIQUE_SYMBOL``+``PyArray_API``, so for example + ``MyModulePyArray_API`` in the above. + (E.g. print it with ``printf("%p\n", PyArray_API);`` just before the crash.) + + +Mechanism details and dynamic linking +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The main part of the mechanism is that NumPy needs to define +a ``void **PyArray_API`` table for you to look up all functions. +This takes different routes depending on +whether :c:macro:`NO_IMPORT_ARRAY` and :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` +are defined: + +* If neither is defined, the C-API is declared as + ``static void **PyArray_API``, so it is only visible within the + compilation unit/file using ``#include <numpy/ndarrayobject.h>``. +* If only ``PY_ARRAY_UNIQUE_SYMBOL`` is defined (it could be empty) then + it is declared as a non-static ``void **``, allowing it to be used + by other files which are linked. +* If ``NO_IMPORT_ARRAY`` is defined, the table is declared as + ``extern void **``, meaning that it must be linked to a file which does not + use ``NO_IMPORT_ARRAY``. + +The ``PY_ARRAY_UNIQUE_SYMBOL`` mechanism additionally mangles the names to +avoid conflicts. + +.. versionchanged:: + NumPy 2.1 changed the headers to avoid sharing the table outside of a + single shared object/dll (this was always the case on Windows). + Please see :c:macro:`NPY_API_SYMBOL_ATTRIBUTE` for details. In order to make use of the C-API from another extension module, the :c:func:`import_array` function must be called. If the extension module is @@ -3844,31 +3950,46 @@ the C-API is needed then some additional steps must be taken. module that will make use of the C-API. It imports the module where the function-pointer table is stored and points the correct variable to it. + This macro includes a ``return NULL;`` on error, so that + ``PyArray_ImportNumPyAPI()`` is preferable for custom error checking. + You may also see use of ``_import_array()`` (a function, not + a macro, but you may want to raise a better error if it fails) and + the variation ``import_array1(ret)``, which customizes the return value. .. c:macro:: PY_ARRAY_UNIQUE_SYMBOL +.. c:macro:: NPY_API_SYMBOL_ATTRIBUTE + + .. versionadded:: 2.1 + + An additional symbol which can be used to share e.g. visibility beyond + shared object boundaries. + By default, NumPy adds the C visibility hidden attribute (if available): + ``void __attribute__((visibility("hidden"))) **PyArray_API;``. + You can change this by defining ``NPY_API_SYMBOL_ATTRIBUTE``, which will + make this: + ``void NPY_API_SYMBOL_ATTRIBUTE **PyArray_API;`` (with additional + name mangling via the unique symbol).
+ + Adding an empty ``#define NPY_API_SYMBOL_ATTRIBUTE`` will have the same + behavior as NumPy 1.x. + + .. note:: + Windows never had shared visibility, although you can use this macro + to achieve it. We generally discourage sharing beyond shared boundary + lines since importing the array API includes NumPy version checks. + .. c:macro:: NO_IMPORT_ARRAY - Using these #defines you can use the C-API in multiple files for a - single extension module. In each file you must define - :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` to some name that will hold the - C-API (*e.g.* myextension_ARRAY_API). This must be done **before** - including the numpy/arrayobject.h file. In the module - initialization routine you call :c:func:`import_array`. In addition, - in the files that do not have the module initialization - sub_routine define :c:macro:`NO_IMPORT_ARRAY` prior to including - numpy/arrayobject.h. - - Suppose I have two files coolmodule.c and coolhelper.c which need - to be compiled and linked into a single extension module. Suppose - coolmodule.c contains the required initcool module initialization - function (with the import_array() function called). Then, - coolmodule.c would have at the top: + Defining ``NO_IMPORT_ARRAY`` before the ``ndarrayobject.h`` include + indicates that the NumPy C API import is handled in a different file + and the include mechanism will not be added here. + You must have one file without ``NO_IMPORT_ARRAY`` defined. .. code-block:: c #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API - #include numpy/arrayobject.h + #include <numpy/arrayobject.h> On the other hand, coolhelper.c would contain at the top: @@ -3876,7 +3997,7 @@ the C-API is needed then some additional steps must be taken. #define NO_IMPORT_ARRAY #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API - #include numpy/arrayobject.h + #include <numpy/arrayobject.h> You can also put the common two last lines into an extension-local header file as long as you make sure that NO_IMPORT_ARRAY is @@ -3900,6 +4021,7 @@ the C-API is needed then some additional steps must be taken. defaults to ``PyArray_API``, to whatever the macro is #defined to. + Checking the API Version ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -4032,15 +4154,12 @@ variables), the GIL should be released so that other Python threads can run while the time-consuming calculations are performed. This can be accomplished using two groups of macros. Typically, if one macro in a group is used in a code block, all of them must be used in the same -code block. Currently, :c:data:`NPY_ALLOW_THREADS` is defined to the -python-defined :c:data:`WITH_THREADS` constant unless the environment -variable ``NPY_NOSMP`` is set in which case -:c:data:`NPY_ALLOW_THREADS` is defined to be 0. +code block. :c:data:`NPY_ALLOW_THREADS` is true (defined as ``1``) unless the +build option ``-Ddisable-threading`` is set to ``true`` - in which case +:c:data:`NPY_ALLOW_THREADS` is false (``0``). .. c:macro:: NPY_ALLOW_THREADS -.. c:macro:: WITH_THREADS - Group 1 ^^^^^^^ diff --git a/doc/source/reference/c-api/coremath.rst b/doc/source/reference/c-api/coremath.rst index ef91ab28e6aa..f8e0efb34d24 100644 --- a/doc/source/reference/c-api/coremath.rst +++ b/doc/source/reference/c-api/coremath.rst @@ -1,7 +1,7 @@ NumPy core math library ======================= -The numpy core math library ('npymath') is a first step in this direction. This +The numpy core math library (``npymath``) is a first step in this direction. This
The core math functions have the same API as the C99 ones, except for the ``npy_*`` prefix. @@ -304,7 +304,7 @@ Linking against the core math library in an extension ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To use the core math library that NumPy ships as a static library in your own -Python extension, you need to add the npymath compile and link options to your +Python extension, you need to add the ``npymath`` compile and link options to your extension. The exact steps to take will depend on the build system you are using. The generic steps to take are: diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst index 1d521e39a832..ce23c51aa9ea 100644 --- a/doc/source/reference/c-api/dtype.rst +++ b/doc/source/reference/c-api/dtype.rst @@ -170,14 +170,26 @@ Enumerated types .. c:enumerator:: NPY_INTP - The enumeration value for a signed integer type which is the same - size as a (void \*) pointer. This is the type used by all + The enumeration value for a signed integer of type ``Py_ssize_t`` + (same as ``ssize_t`` if defined). This is the type used by all arrays of indices. + .. versionchanged:: 2.0 + Previously, this was the same as ``intptr_t`` (same size as a + pointer). In practice, this is identical except on very niche + platforms. + You can use the ``'p'`` character code for the pointer meaning. + .. c:enumerator:: NPY_UINTP - The enumeration value for an unsigned integer type which is the - same size as a (void \*) pointer. + The enumeration value for an unsigned integer type that is identical + to a ``size_t``. + + .. versionchanged:: 2.0 + Previously, this was the same as ``uintptr_t`` (same size as a + pointer). In practice, this is identical except on very niche + platforms. + You can use the ``'P'`` character code for the pointer meaning. .. c:enumerator:: NPY_MASK @@ -287,14 +299,20 @@ all platforms for all the kinds of numeric types. Commonly 8-, 16-, types are available. -Integer that can hold a pointer -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Further integer aliases +~~~~~~~~~~~~~~~~~~~~~~~ -The constants **NPY_INTP** and **NPY_UINTP** refer to an -enumerated integer type that is large enough to hold a pointer on the -platform. Index arrays should always be converted to **NPY_INTP** -, because the dimension of the array is of type npy_intp. +The constants **NPY_INTP** and **NPY_UINTP** refer to an ``Py_ssize_t`` +and ``size_t``. +Although in practice normally true, these types are strictly speaking not +pointer sized and the character codes ``'p'`` and ``'P'`` can be used for +pointer sized integers. +(Before NumPy 2, ``intp`` was pointer size, but this almost never matched +the actual use, which is the reason for the name.) +Since NumPy 2, **NPY_DEFAULT_INT** is additionally defined. +The value of the macro is runtime dependent: Since NumPy 2, it maps to +``NPY_INTP`` while on earlier versions it maps to ``NPY_LONG``. C-type names ------------ @@ -390,7 +408,7 @@ to the front of the integer name. This is the correct integer for lengths or indexing. In practice this is normally the size of a pointer, but this is not guaranteed. - ..note:: + .. note:: Before NumPy 2.0, this was the same as ``Py_intptr_t``. While a better match, this did not match actual usage in practice. 
C-type names ------------ @@ -390,7 +408,7 @@ to the front of the integer name. This is the correct integer for lengths or indexing. In practice this is normally the size of a pointer, but this is not guaranteed. - ..note:: + .. note:: Before NumPy 2.0, this was the same as ``Py_intptr_t``. While a better match, this did not match actual usage in practice. On the Python side, we still support ``np.dtype('p')`` to fetch a dtype diff --git a/doc/source/reference/c-api/index.rst b/doc/source/reference/c-api/index.rst index e7f86d3ff7a8..2a7a627fde3e 100644 --- a/doc/source/reference/c-api/index.rst +++ b/doc/source/reference/c-api/index.rst @@ -47,6 +47,7 @@ code. iterator ufunc generalized-ufuncs + strings coremath datetimes deprecations diff --git a/doc/source/reference/c-api/strings.rst b/doc/source/reference/c-api/strings.rst new file mode 100644 index 000000000000..43d280d14e09 --- /dev/null +++ b/doc/source/reference/c-api/strings.rst @@ -0,0 +1,268 @@ +NpyString API +============= + +.. sectionauthor:: Nathan Goldbaum + +.. versionadded:: 2.0 + +This API allows access to the UTF-8 string data stored in NumPy StringDType +arrays. See `NEP-55 `_ for +more in-depth details on the design of StringDType. + +Examples +-------- + +Loading a String +^^^^^^^^^^^^^^^^ + +Say we are writing a ufunc implementation for ``StringDType``. If we are given +a ``const char *buf`` pointer to the beginning of a ``StringDType`` array entry, and a +``PyArray_Descr *`` pointer to the array descriptor, one can +access the underlying string data like so: + +.. code-block:: C + + npy_string_allocator *allocator = NpyString_acquire_allocator( + (PyArray_StringDTypeObject *)descr); + + npy_static_string sdata = {0, NULL}; + npy_packed_static_string *packed_string = (npy_packed_static_string *)buf; + int is_null = 0; + + is_null = NpyString_load(allocator, packed_string, &sdata); + + if (is_null == -1) { + // failed to load string, set error + return -1; + } + else if (is_null) { + // handle missing string + // sdata.buf is NULL + // sdata.size is 0 + } + else { + // sdata.buf is a pointer to the beginning of a string + // sdata.size is the size of the string + } + NpyString_release_allocator(allocator); + +Packing a String +^^^^^^^^^^^^^^^^ + +This example shows how to pack a new string entry into an array: + +.. code-block:: C + + char *str = "Hello world"; + size_t size = 11; + npy_packed_static_string *packed_string = (npy_packed_static_string *)buf; + + npy_string_allocator *allocator = NpyString_acquire_allocator( + (PyArray_StringDTypeObject *)descr); + + // copy contents of str into packed_string + if (NpyString_pack(allocator, packed_string, str, size) == -1) { + // string packing failed, set error + return -1; + } + + // packed_string contains a copy of "Hello world" + + NpyString_release_allocator(allocator); + +Types +----- + +.. c:type:: npy_packed_static_string + + An opaque struct that represents "packed" encoded strings. Individual + entries in array buffers are instances of this struct. Direct access + to the data in the struct is undefined and future versions of the library may + change the packed representation of strings. + +.. c:type:: npy_static_string + + An unpacked string allowing access to the UTF-8 string data. + + .. code-block:: c + + typedef struct npy_unpacked_static_string { + size_t size; + const char *buf; + } npy_static_string; + + .. c:member:: size_t size + + The size of the string, in bytes. + + .. c:member:: const char *buf + + The string buffer. Holds UTF-8-encoded bytes. Does not currently end in + a null character but we may decide to add null termination in the + future, so do not rely on the presence or absence of null-termination. + + Note that this is a ``const`` buffer. If you want to alter an + entry in an array, you should create a new string and pack it + into the array entry.
.. c:type:: npy_string_allocator + + An opaque pointer to an object that handles string allocation. + Before using the allocator, you must acquire the allocator lock and release + the lock after you are done interacting with strings managed by the + allocator. + +.. c:type:: PyArray_StringDTypeObject + + The C struct backing instances of StringDType in Python. Attributes store + the settings the object was created with, an instance of + ``npy_string_allocator`` that manages string allocations for arrays + associated with the DType instance, and several attributes caching + information about the missing string object that is commonly needed in cast + and ufunc loop implementations. + + .. code-block:: c + + typedef struct { + PyArray_Descr base; + PyObject *na_object; + char coerce; + char has_nan_na; + char has_string_na; + char array_owned; + npy_static_string default_string; + npy_static_string na_name; + npy_string_allocator *allocator; + } PyArray_StringDTypeObject; + + .. c:member:: PyArray_Descr base + + The base object. Use this member to access fields common to all + descriptor objects. + + .. c:member:: PyObject *na_object + + A reference to the object representing the null value. If there is no + null value (the default), this will be NULL. + + .. c:member:: char coerce + + 1 if string coercion is enabled, 0 otherwise. + + .. c:member:: char has_nan_na + + 1 if the missing string object (if any) is NaN-like, 0 otherwise. + + .. c:member:: char has_string_na + + 1 if the missing string object (if any) is a string, 0 otherwise. + + .. c:member:: char array_owned + + 1 if an array owns the StringDType instance, 0 otherwise. + + .. c:member:: npy_static_string default_string + + The default string to use in operations. If the missing string object + is a string, this will contain the string data for the missing string. + + .. c:member:: npy_static_string na_name + + The name of the missing string object, if any. An empty string + otherwise. + + .. c:member:: npy_string_allocator *allocator + + The allocator instance associated with the array that owns this + descriptor instance. The allocator should only be directly accessed + after acquiring the allocator lock, and the lock should be released + immediately after the allocator is no longer needed. + + +Functions +--------- + +.. c:function:: npy_string_allocator *NpyString_acquire_allocator( \ + const PyArray_StringDTypeObject *descr) + + Acquire the mutex locking the allocator attached to + ``descr``. ``NpyString_release_allocator`` must be called on the allocator + returned by this function exactly once. Note that functions requiring the + GIL should not be called while the allocator mutex is held, as doing so may + cause deadlocks. + +.. c:function:: void NpyString_acquire_allocators( \ + size_t n_descriptors, PyArray_Descr *const descrs[], \ + npy_string_allocator *allocators[]) + + Simultaneously acquire the mutexes locking the allocators attached to + multiple descriptors. Writes a pointer to the associated allocator in the + allocators array for each StringDType descriptor in the array. If any of + the descriptors are not StringDType instances, write NULL to the allocators + array for that entry. + + ``n_descriptors`` is the number of descriptors in the ``descrs`` array that + should be examined. Any descriptor after ``n_descriptors`` elements is + ignored. A buffer overflow will happen if the ``descrs`` array does not + contain ``n_descriptors`` elements.
+ + If pointers to the same descriptor are passed multiple times, only acquires + the allocator mutex once and sets the corresponding allocator pointers to the same value. + The allocator mutexes must be released after this function returns, see + ``NpyString_release_allocators``. + + Note that functions requiring the GIL should not be called while the + allocator mutex is held, as doing so may cause deadlocks. + +.. c:function:: void NpyString_release_allocator( \ npy_string_allocator *allocator) + + Release the mutex locking an allocator. This must be called exactly once + after acquiring the allocator mutex and all operations requiring the + allocator are done. + + If you need to release multiple allocators, see + ``NpyString_release_allocators``, which can correctly handle releasing the + allocator once when given several references to the same allocator. + +.. c:function:: void NpyString_release_allocators( \ size_t length, npy_string_allocator *allocators[]) + + Release the mutexes locking N allocators. ``length`` is the length of the + allocators array. NULL entries are ignored. + + If pointers to the same allocator are passed multiple times, only releases + the allocator mutex once. + +.. c:function:: int NpyString_load(npy_string_allocator *allocator, \ const npy_packed_static_string *packed_string, \ npy_static_string *unpacked_string) + + Extract the packed contents of ``packed_string`` into ``unpacked_string``. + + The ``unpacked_string`` is a read-only view onto the ``packed_string`` data + and should not be used to modify the string data. If ``packed_string`` is + the null string, sets ``unpacked_string.buf`` to the NULL + pointer. Returns -1 if unpacking the string fails, returns 1 if + ``packed_string`` is the null string, and returns 0 otherwise. + + A useful pattern is to define a stack-allocated ``npy_static_string`` instance + initialized to ``{0, NULL}`` and pass a pointer to the stack-allocated + unpacked string to this function. This function can be used to + simultaneously unpack a string and determine if it is a null string. + +.. c:function:: int NpyString_pack_null( \ npy_string_allocator *allocator, \ npy_packed_static_string *packed_string) + + Pack the null string into ``packed_string``. Returns 0 on success and -1 on + failure. + +.. c:function:: int NpyString_pack( \ npy_string_allocator *allocator, \ npy_packed_static_string *packed_string, \ const char *buf, \ size_t size) + + Copy and pack the first ``size`` bytes of the buffer pointed to by ``buf`` + into the ``packed_string``. Returns 0 on success and -1 on failure. diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index df32b3dfcd60..9514130598b3 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -728,7 +728,7 @@ PyArrayMethod_Context and PyArrayMethod_Spec typedef struct { PyObject *caller; struct PyArrayMethodObject_tag *method; - PyArray_Descr **descriptors; + PyArray_Descr *const *descriptors; } PyArrayMethod_Context .. c:member:: PyObject *caller @@ -906,6 +906,30 @@ PyArray_DTypeMeta and PyArrayDTypeMeta_Spec of functions in the DType API. Slot IDs must be one of the DType slot IDs enumerated in :ref:`dtype-slots`.
+Exposed DType classes (``PyArray_DTypeMeta`` objects) +------------------------------------------------------ + +For use with promoters, NumPy exposes a number of DTypes following the +pattern ``PyArray_<Name>DType`` corresponding to those found in `np.dtypes`. + +Additionally, the three DTypes ``PyArray_PyLongDType``, +``PyArray_PyFloatDType``, and ``PyArray_PyComplexDType`` correspond to the +Python scalar values. These cannot be used in all places, but they do support +the common-DType operation, and implementing promotion with them +may be necessary. + +Further, the following abstract DTypes are defined, covering both the +builtin NumPy DTypes and the Python ones; users can in principle subclass +them (subclassing does not inherit any DType-specific functionality): +* ``PyArray_IntAbstractDType`` +* ``PyArray_FloatAbstractDType`` +* ``PyArray_ComplexAbstractDType`` + +.. warning:: + As of NumPy 2.0, the *only* valid use for these DTypes is to register a + promoter, e.g. to conveniently match "any integers" (and for subclass checks). + Because of this, they are not exposed to Python. + PyUFunc_Type and PyUFuncObject ------------------------------ @@ -1286,7 +1310,7 @@ PyArrayMultiIter_Type and PyArrayMultiIterObject npy_intp index; int nd; npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS]; - PyArrayIterObject *iters[NPY_MAXDIMS_LEGACY_ITERS]; + PyArrayIterObject *iters[]; } PyArrayMultiIterObject; .. c:macro: PyObject_HEAD @@ -1588,3 +1612,29 @@ for completeness and assistance in understanding the code. ``arrayobject.h`` header. This type is not exposed to Python and could be replaced with a C-structure. As a Python type it takes advantage of reference- counted memory management. + + +NumPy C-API and C complex +========================= +When you use the NumPy C-API, you will have access to the complex number declarations +``npy_cdouble`` and ``npy_cfloat``, which are declared in terms of the C +standard types from ``complex.h``. Unfortunately, ``complex.h`` contains +``#define I ...`` (where the actual definition depends on the compiler), which +means that any downstream user that does ``#include <complex.h>`` +could get ``I`` defined, and declaring something like ``double I;`` in +their code will result in an obscure compiler error like + +.. code-block:: c + error: expected ‘)’ before ‘__extension__’ + double I, + +This error can be avoided by adding:: + + #undef I + +to your code. + +.. versionchanged:: 2.0 + The inclusion of ``complex.h`` was new in NumPy 2, so that code defining + a different ``I`` may not have required the ``#undef I`` on older versions. + NumPy 2.0.1 briefly included the ``#undef I`` \ No newline at end of file diff --git a/doc/source/reference/constants.rst b/doc/source/reference/constants.rst index 9db0da787712..303327314f00 100644 --- a/doc/source/reference/constants.rst +++ b/doc/source/reference/constants.rst @@ -88,8 +88,6 @@ NumPy includes several constants: NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. - `NaN` and `NAN` are aliases of `nan`. - .. rubric:: Examples >>> np.nan diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst index d4640e65456f..72b61e3a94db 100644 --- a/doc/source/reference/distutils.rst +++ b/doc/source/reference/distutils.rst @@ -14,7 +14,7 @@ Packaging (:mod:`numpy.distutils`) ..
warning:: Note that ``setuptools`` does major releases often and those may contain - changes that break ``numpy.distutils``, which will *not* be updated anymore + changes that break :mod:`numpy.distutils`, which will *not* be updated anymore for new ``setuptools`` versions. It is therefore recommended to set an upper version bound in your build configuration for the last known version of ``setuptools`` that works with your build. diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 2db9de7f03a8..01a5bcff7fbc 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -35,6 +35,7 @@ Special-purpose namespaces - :ref:`numpy.emath ` - mathematical functions with automatic domain - :ref:`numpy.lib ` - utilities & functionality which do not fit the main namespace - :ref:`numpy.rec ` - record arrays (largely superseded by dataframe libraries) +- :ref:`numpy.version ` - small module with more detailed version info Legacy namespaces ================= @@ -67,6 +68,7 @@ and/or this code is deprecated or isn't reliable. numpy.emath numpy.lib numpy.rec + numpy.version numpy.char numpy.distutils numpy.f2py <../f2py/index> diff --git a/doc/source/reference/random/compatibility.rst b/doc/source/reference/random/compatibility.rst index b45e195fbd71..455a2485ea4a 100644 --- a/doc/source/reference/random/compatibility.rst +++ b/doc/source/reference/random/compatibility.rst @@ -22,7 +22,7 @@ outside of NumPy's control that limit our ability to guarantee much more than this. For example, different CPUs implement floating point arithmetic differently, and this can cause differences in certain edge cases that cascade to the rest of the stream. `Generator.multivariate_normal`, for another -example, uses a matrix decomposition from ``numpy.linalg``. Even on the same +example, uses a matrix decomposition from `numpy.linalg`. Even on the same platform, a different build of ``numpy`` may use a different version of this matrix decomposition algorithm from the LAPACK that it links to, causing `Generator.multivariate_normal` to return completely different (but equally diff --git a/doc/source/reference/random/extending.rst b/doc/source/reference/random/extending.rst index 26407bb2a3fa..9c7dc86b2825 100644 --- a/doc/source/reference/random/extending.rst +++ b/doc/source/reference/random/extending.rst @@ -4,15 +4,15 @@ Extending ========= -The BitGenerators have been designed to be extendable using standard tools for -high-performance Python -- numba and Cython. The `~Generator` object can also -be used with user-provided BitGenerators as long as these export a small set of -required functions. +The `BitGenerator`\ s have been designed to be extendable using standard tools +for high-performance Python -- numba and Cython. The `Generator` object can +also be used with user-provided `BitGenerator`\ s as long as these export a +small set of required functions. Numba ----- Numba can be used with either CTypes or CFFI. The current iteration of the -BitGenerators all export a small set of functions through both interfaces. +`BitGenerator`\ s all export a small set of functions through both interfaces. This example shows how numba can be used to produce gaussian samples using a pure Python implementation which is then compiled. The random numbers are @@ -32,7 +32,7 @@ the `Examples`_ section below. Cython ------ -Cython can be used to unpack the ``PyCapsule`` provided by a BitGenerator. 
+Cython can be used to unpack the ``PyCapsule`` provided by a `BitGenerator`. This example uses `PCG64` and the example from above. The usual caveats for writing high-performance code using Cython -- removing bounds checks and wrap around, providing array alignment information -- still apply. @@ -41,7 +41,7 @@ wrap around, providing array alignment information -- still apply. :language: cython :end-before: example 2 -The BitGenerator can also be directly accessed using the members of the ``bitgen_t`` +The `BitGenerator` can also be directly accessed using the members of the ``bitgen_t`` struct. .. literalinclude:: ../../../../numpy/random/_examples/cython/extending_distributions.pyx @@ -81,9 +81,9 @@ directly from the ``_generator`` shared object, using the `BitGenerator.cffi` in New BitGenerators ----------------- -`~Generator` can be used with user-provided `~BitGenerator`\ s. The simplest -way to write a new BitGenerator is to examine the pyx file of one of the -existing BitGenerators. The key structure that must be provided is the +`Generator` can be used with user-provided `BitGenerator`\ s. The simplest +way to write a new `BitGenerator` is to examine the pyx file of one of the +existing `BitGenerator`\ s. The key structure that must be provided is the ``capsule`` which contains a ``PyCapsule`` to a struct pointer of type ``bitgen_t``, @@ -98,11 +98,11 @@ existing BitGenerators. The key structure that must be provided is the } bitgen_t; which provides 5 pointers. The first is an opaque pointer to the data structure -used by the BitGenerators. The next three are function pointers which return -the next 64- and 32-bit unsigned integers, the next random double and the next -raw value. This final function is used for testing and so can be set to -the next 64-bit unsigned integer function if not needed. Functions inside -``Generator`` use this structure as in +used by the `BitGenerator`\ s. The next four are function pointers which +return the next 64- and 32-bit unsigned integers, the next random double and +the next raw value. This final function is used for testing and so can be set +to the next 64-bit unsigned integer function if not needed. Functions inside +`Generator` use this structure as in .. code-block:: c diff --git a/doc/source/reference/random/generator.rst b/doc/source/reference/random/generator.rst index c8662c56a788..eaa29feae57e 100644 --- a/doc/source/reference/random/generator.rst +++ b/doc/source/reference/random/generator.rst @@ -2,14 +2,14 @@ Random ``Generator`` ==================== -The `~Generator` provides access to +The `Generator` provides access to a wide range of distributions, and served as a replacement for :class:`~numpy.random.RandomState`. The main difference between -the two is that ``Generator`` relies on an additional BitGenerator to +the two is that `Generator` relies on an additional BitGenerator to manage state and generate the random bits, which are then transformed into random values from useful distributions. The default BitGenerator used by -``Generator`` is `~PCG64`. The BitGenerator -can be changed by passing an instantized BitGenerator to ``Generator``. +`Generator` is `PCG64`. The BitGenerator +can be changed by passing an instantiated BitGenerator to `Generator`. ..
autofunction:: default_rng diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index 682d02c31cd2..a2f508c58bbf 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -56,6 +56,8 @@ pseudo-randomness was good for in the first place. or cryptographic purposes. See the :py:mod:`secrets` module from the standard library for such use cases. +.. _recommend-secrets-randbits: + Seeds should be large positive integers. `default_rng` can take positive integers of any size. We recommend using very large, unique numbers to ensure that your seed is different from anyone else's. This is good practice to ensure diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst index 09a048561e25..99b7ec781b55 100644 --- a/doc/source/reference/random/multithreading.rst +++ b/doc/source/reference/random/multithreading.rst @@ -104,8 +104,8 @@ that does not use an existing array due to array creation overhead. Out[6]: 125 ms ± 309 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) -Note that if `threads` is not set by the user, it will be determined by -`multiprocessing.cpu_count()`. +Note that if ``threads`` is not set by the user, it will be determined by +``multiprocessing.cpu_count()``. .. code-block:: ipython diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst index 0fcd6f4c9dd3..44cf7aa11013 100644 --- a/doc/source/reference/random/new-or-different.rst +++ b/doc/source/reference/random/new-or-different.rst @@ -9,38 +9,40 @@ NumPy 1.17.0 introduced `Generator` as an improved replacement for the :ref:`legacy ` `RandomState`. Here is a quick comparison of the two implementations. -================== ==================== ============= -Feature Older Equivalent Notes ------------------- -------------------- ------------- -`~.Generator` `~.RandomState` ``Generator`` requires a stream - source, called a `BitGenerator` - A number of these are provided. - ``RandomState`` uses - the Mersenne Twister `~.MT19937` by - default, but can also be instantiated - with any BitGenerator. ------------------- -------------------- ------------- -``random`` ``random_sample``, Access the values in a BitGenerator, - ``rand`` convert them to ``float64`` in the - interval ``[0.0.,`` `` 1.0)``. - In addition to the ``size`` kwarg, now - supports ``dtype='d'`` or ``dtype='f'``, - and an ``out`` kwarg to fill a user- - supplied array. - - Many other distributions are also - supported. ------------------- -------------------- ------------- -``integers`` ``randint``, Use the ``endpoint`` kwarg to adjust - ``random_integers`` the inclusion or exclusion of the - ``high`` interval endpoint -================== ==================== ============= +======================= ================== ============= +Feature Older Equivalent Notes +----------------------- ------------------ ------------- +`Generator` `RandomState` `Generator` requires a stream + source, called a `BitGenerator`. + A number of these are provided. + `RandomState` uses the Mersenne + Twister `MT19937` by default, + but can also be instantiated + with any BitGenerator. +----------------------- ------------------ ------------- +`~.Generator.random` `random_sample`, Access the values in a + `rand` BitGenerator, convert them to + ``float64`` in the interval + ``[0.0, 1.0)``.
In addition + to the ``size`` kwarg, now + supports ``dtype='d'`` or + ``dtype='f'``, and an ``out`` + kwarg to fill a user-supplied + array. + + Many other distributions are also + supported. +----------------------- ------------------ ------------- +`~.Generator.integers` `randint`, Use the ``endpoint`` kwarg to + `random_integers` adjust the inclusion or exclusion + of the ``high`` interval endpoint. +======================= ================== ============= * The normal, exponential and gamma generators use 256-step Ziggurat methods which are 2-10 times faster than NumPy's default implementation in `~.Generator.standard_normal`, `~.Generator.standard_exponential` or `~.Generator.standard_gamma`. Because of the change in algorithms, it is not - possible to reproduce the exact random values using ``Generator`` for these + possible to reproduce the exact random values using `Generator` for these distributions or any distribution method that relies on them. .. ipython:: python @@ -63,8 +65,8 @@ Feature Older Equivalent Notes * `~.Generator.integers` is now the canonical way to generate integer random numbers from a discrete uniform distribution. This replaces both - ``randint`` and the deprecated ``random_integers``. -* The ``rand`` and ``randn`` methods are only available through the legacy + `randint` and the deprecated `random_integers`. +* The `rand` and `randn` methods are only available through the legacy `~.RandomState`. * `Generator.random` is now the canonical way to generate floating-point random numbers, which replaces `RandomState.random_sample`, diff --git a/doc/source/reference/random/parallel.rst b/doc/source/reference/random/parallel.rst index af2aac82f480..892ceb3d1698 100644 --- a/doc/source/reference/random/parallel.rst +++ b/doc/source/reference/random/parallel.rst @@ -13,38 +13,42 @@ or distributed). ------------------------ NumPy allows you to spawn new (with very high probability) independent -`~BitGenerator` and `~Generator` instances via their ``spawn()`` method. -This spawning is implemented by the `~SeedSequence` used for initializing +`BitGenerator` and `Generator` instances via their ``spawn()`` method. +This spawning is implemented by the `SeedSequence` used for initializing the bit generator's random stream. -`~SeedSequence` `implements an algorithm`_ to process a user-provided seed, +`SeedSequence` `implements an algorithm`_ to process a user-provided seed, typically as an integer of some size, and to convert it into an initial state for -a `~BitGenerator`. It uses hashing techniques to ensure that low-quality seeds +a `BitGenerator`. It uses hashing techniques to ensure that low-quality seeds are turned into high quality initial states (at least, with very high probability). -For example, `MT19937` has a state consisting of 624 -`uint32` integers. A naive way to take a 32-bit integer seed would be to just set +For example, `MT19937` has a state consisting of 624 ``uint32`` +integers. A naive way to take a 32-bit integer seed would be to just set the last element of the state to the 32-bit seed and leave the rest 0s. This is a valid state for `MT19937`, but not a good one. The Mersenne Twister algorithm `suffers if there are too many 0s`_. Similarly, two adjacent 32-bit integer seeds (i.e. ``12345`` and ``12346``) would produce very similar streams.
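As a quick illustration of the contrast (a minimal sketch using only the public `SeedSequence` API; ``generate_state`` is simply used here to inspect the derived state words):

.. code-block:: python

    import numpy as np

    # Two adjacent, low-quality integer seeds...
    near1 = np.random.SeedSequence(12345)
    near2 = np.random.SeedSequence(12346)

    # ...are hashed into thoroughly scrambled, unrelated state words.
    print(near1.generate_state(4))
    print(near2.generate_state(4))

The hashing that makes this work is described next.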
-`~SeedSequence` avoids these problems by using successions of integer hashes +`SeedSequence` avoids these problems by using successions of integer hashes with good `avalanche properties`_ to ensure that flipping any bit in the input has about a 50% chance of flipping any bit in the output. Two input seeds that are very close to each other will produce initial states that are very far from each other (with very high probability). It is also constructed in such a way that you can provide arbitrary-sized integers or lists of integers. -`~SeedSequence` will take all of the bits that you provide and mix them -together to produce however many bits the consuming `~BitGenerator` needs to +`SeedSequence` will take all of the bits that you provide and mix them +together to produce however many bits the consuming `BitGenerator` needs to initialize itself. These properties together mean that we can safely mix together the usual -user-provided seed with simple incrementing counters to get `~BitGenerator` +user-provided seed with simple incrementing counters to get `BitGenerator` states that are (with very high probability) independent of each other. We can wrap this together into an API that is easy to use and difficult to misuse. +Note that while `SeedSequence` attempts to solve many of the issues related to +user-provided small seeds, we still :ref:`recommend <recommend-secrets-randbits>` +using :py:func:`secrets.randbits` to generate seeds with 128 bits of entropy to +avoid the remaining biases introduced by human-chosen seeds. .. code-block:: python @@ -58,7 +62,7 @@ wrap this together into an API that is easy to use and difficult to misuse. .. end_block -For convenience the direct use of `~SeedSequence` is not necessary. +For convenience the direct use of `SeedSequence` is not necessary. The above ``streams`` can be spawned directly from a parent generator via `~Generator.spawn`: @@ -70,7 +74,7 @@ via `~Generator.spawn`: .. end_block Child objects can also spawn to make grandchildren, and so on. -Each child has a `~SeedSequence` with its position in the tree of spawned +Each child has a `SeedSequence` with its position in the tree of spawned child objects mixed in with the user-provided seed to generate independent (with very high probability) streams. @@ -88,7 +92,7 @@ Python has increasingly-flexible mechanisms for parallelization available, and this scheme fits in very well with that kind of use. Using this scheme, an upper bound on the probability of a collision can be -estimated if one knows the number of streams that you derive. `~SeedSequence` +estimated if one knows the number of streams that you derive. `SeedSequence` hashes its inputs, both the seed and the spawn-tree-path, down to a 128-bit pool by default. The probability that there is a collision in that pool, pessimistically-estimated ([1]_), will be about :math:`n^2*2^{-128}` where @@ -106,7 +110,7 @@ territory ([2]_). .. [2] In this calculation, we can mostly ignore the amount of numbers drawn from each stream. See :ref:`upgrading-pcg64` for the technical details about `PCG64`. The other PRNGs we provide have some extra protection built in - that avoids overlaps if the `~SeedSequence` pools differ in the + that avoids overlaps if the `SeedSequence` pools differ in the slightest bit. `PCG64DXSM` has :math:`2^{127}` separate cycles determined by the seed in addition to the position in the :math:`2^{128}` long period for each cycle, so one has to both get on or
Sequence of integer seeds ------------------------- -As discussed in the previous section, `~SeedSequence` can not only take an +As discussed in the previous section, `SeedSequence` can not only take an integer seed, it can also take an arbitrary-length sequence of (non-negative) integers. If one exercises a little care, one can use this feature to design *ad hoc* schemes for getting safe parallel PRNG streams with similar safety guarantees as spawning. @@ -160,7 +164,7 @@ integer in a list. This can be used to replace a number of unsafe strategies that have been used in the past which try to combine the root seed and the ID back into a single integer seed value. For example, it is common to see users add the worker ID to -the root seed, especially with the legacy `~RandomState` code. +the root seed, especially with the legacy `RandomState` code. .. code-block:: python @@ -249,13 +253,13 @@ are listed below. +-----------------+-------------------------+-------------------------+-------------------------+ | BitGenerator | Period | Jump Size | Bits per Draw | +=================+=========================+=========================+=========================+ -| MT19937 | :math:`2^{19937}-1` | :math:`2^{128}` | 32 | +| `MT19937` | :math:`2^{19937}-1` | :math:`2^{128}` | 32 | +-----------------+-------------------------+-------------------------+-------------------------+ -| PCG64 | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | +| `PCG64` | :math:`2^{128}` | :math:`\sim 2^{127}` ([3]_) | 64 | +-----------------+-------------------------+-------------------------+-------------------------+ -| PCG64DXSM | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | +| `PCG64DXSM` | :math:`2^{128}` | :math:`\sim 2^{127}` ([3]_) | 64 | +-----------------+-------------------------+-------------------------+-------------------------+ -| Philox | :math:`2^{256}` | :math:`2^{128}` | 64 | +| `Philox` | :math:`2^{256}` | :math:`2^{128}` | 64 | +-----------------+-------------------------+-------------------------+-------------------------+ .. [3] The jump size is :math:`(\phi-1)*2^{128}` where :math:`\phi` is the diff --git a/doc/source/reference/random/performance.rst b/doc/source/reference/random/performance.rst index 7fe383f24bdd..7043734f24c8 100644 --- a/doc/source/reference/random/performance.rst +++ b/doc/source/reference/random/performance.rst @@ -24,7 +24,7 @@ even on 32-bit processes, this is your choice. `MT19937` `fails some statistical tests`_ and is not especially fast compared to modern PRNGs. For these reasons, we mostly do not recommend -using it on its own, only through the legacy `~.RandomState` for +using it on its own, only through the legacy `RandomState` for reproducing old results. That said, it has a very long history as a default in many systems. diff --git a/doc/source/reference/random/upgrading-pcg64.rst b/doc/source/reference/random/upgrading-pcg64.rst index 79be8440ef5c..79432ac578f1 100644 --- a/doc/source/reference/random/upgrading-pcg64.rst +++ b/doc/source/reference/random/upgrading-pcg64.rst @@ -2,7 +2,7 @@ ..
currentmodule:: numpy.random -Upgrading ``PCG64`` with ``PCG64DXSM`` +Upgrading `PCG64` with `PCG64DXSM` ====================================== Uses of the `PCG64` `BitGenerator` in a massively-parallel context have been diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst index 607a15b91f1e..2b1b5dac1710 100644 --- a/doc/source/reference/routines.ma.rst +++ b/doc/source/reference/routines.ma.rst @@ -416,10 +416,25 @@ Miscellanea ma.allequal ma.allclose + ma.amax + ma.amin ma.apply_along_axis ma.apply_over_axes ma.arange ma.choose + ma.compress_nd + ma.convolve + ma.correlate ma.ediff1d + ma.flatten_mask + ma.flatten_structured_array + ma.fromflex ma.indices + ma.left_shift + ma.ndim + ma.put + ma.putmask + ma.right_shift + ma.round_ + ma.take ma.where diff --git a/doc/source/reference/routines.strings.rst b/doc/source/reference/routines.strings.rst index 635a01fa1254..52bc210e3b86 100644 --- a/doc/source/reference/routines.strings.rst +++ b/doc/source/reference/routines.strings.rst @@ -31,9 +31,24 @@ String operations :toctree: generated/ add + center + capitalize + decode + encode + ljust + lower lstrip + mod + multiply + replace + rjust rstrip strip + swapcase + title + translate + upper + zfill Comparison ---------- @@ -60,11 +75,17 @@ String information count endswith find + index + isalnum isalpha isdecimal isdigit + islower isnumeric isspace + istitle + isupper rfind + rindex startswith str_len diff --git a/doc/source/reference/routines.version.rst b/doc/source/reference/routines.version.rst new file mode 100644 index 000000000000..72c48a752cf6 --- /dev/null +++ b/doc/source/reference/routines.version.rst @@ -0,0 +1,38 @@ +.. currentmodule:: numpy.version + +.. _routines.version: + +******************* +Version information +******************* + +The ``numpy.version`` submodule includes several constants that expose more +detailed information about the exact version of the installed ``numpy`` +package: + +.. data:: version + + Version string for the installed package - matches ``numpy.__version__``. + +.. data:: full_version + + Version string - the same as ``numpy.version.version``. + +.. data:: short_version + + Version string without any local build identifiers. + + .. rubric:: Examples + + >>> np.__version__ + '2.1.0.dev0+git20240319.2ea7ce0' # may vary + >>> np.version.short_version + '2.1.0.dev0' # may vary + +.. data:: git_revision + + String containing the git hash of the commit from which ``numpy`` was built. + +.. data:: release + + ``True`` if this version is a ``numpy`` release, ``False`` if a dev version. diff --git a/doc/source/release.rst b/doc/source/release.rst index 41eeac87bf64..155750b91d69 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,9 @@ Release notes .. toctree:: :maxdepth: 2 + 2.0.3 + 2.0.2 + 2.0.1 2.0.0 1.26.4 1.26.3 diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst index e43e54fb9cbc..929bbab4f517 100644 --- a/doc/source/release/2.0.0-notes.rst +++ b/doc/source/release/2.0.0-notes.rst @@ -12,10 +12,10 @@ NumPy 2.0.0 Release Notes and those full notes should be complete (if not copy-edited well enough yet). -NumPy 2.0.0 is the first major release since 2006. It is the result of X months -of development since the last feature release by Y contributors, and contains a -large amount of exciting new features as well as a large amount of changes to -both the Python and C APIs. +NumPy 2.0.0 is the first major release since 2006. 
It is the result of 11 + months of development since the last feature release and is the work of 212 + contributors spread over 1078 pull requests. It contains a large number of + exciting new features as well as changes to both the Python and C APIs. This major release includes breaking changes that could not happen in a regular minor (feature) release - including an ABI break, changes to type promotion @@ -50,10 +50,13 @@ Highlights of this release include: that are about 3 times smaller, - `numpy.char` fixed-length string operations have been accelerated by implementing ufuncs that also support `~numpy.dtypes.StringDType` in - addition to the the fixed-length string dtypes, + addition to the fixed-length string dtypes, - A new tracing and introspection API, `~numpy.lib.introspect.opt_func_info`, to determine which hardware-specific kernels are available and will be dispatched to. + - `numpy.save` now uses pickle protocol version 4 for saving arrays with + object dtype, which allows for pickle objects larger than 4GB and improves + saving speed by about 5% for large arrays. - Python API improvements: @@ -76,8 +79,8 @@ Highlights of this release include: - Improved behavior: - - Improvements to type promotion behavior was changed by adopting `NEP - 50 `_. This fixes many user surprises about promotions which + - Type promotion behavior was changed by adopting :ref:`NEP + 50 `. This fixes many user surprises about promotions which previously often depended on data values of input arrays rather than only their dtypes. Please see the NEP and the :ref:`numpy-2-migration-guide` for details as this change can lead to changes in output dtypes and lower @@ -88,7 +91,7 @@ Highlights of this release include: - Documentation: - - The reference guide navigation was signficantly improved, and there is now + - The reference guide navigation was significantly improved, and there is now documentation on NumPy's :ref:`module structure `, - The :ref:`building from source ` documentation was completely rewritten, @@ -112,7 +115,7 @@ API and behavior improvements and better future extensibility. This price is: 2. Breaking changes to the NumPy ABI. As a result, binaries of packages that use the NumPy C API and were built against a NumPy 1.xx release will not work with NumPy 2.0. On import, such packages will see an ``ImportError`` - with a message about binary incompatibiliy. + with a message about binary incompatibility. It is possible to build binaries against NumPy 2.0 that will work at runtime with both NumPy 2.0 and 1.x. See :ref:`numpy-2-abi-handling` for more details. @@ -149,6 +152,10 @@ NumPy 2.0 Python API removals (`gh-24321 `__) +* Warnings and exceptions present in `numpy.exceptions` (e.g., + `~numpy.exceptions.ComplexWarning`, + `~numpy.exceptions.VisibleDeprecationWarning`) are no longer exposed in the + main namespace. * Multiple niche enums, expired members and functions have been removed from the main namespace, such as: ``ERR_*``, ``SHIFT_*``, ``np.fastCopyAndTranspose``, ``np.kernel_version``, ``np.numarray``, ``np.oldnumeric`` and ``np.set_numeric_ops``. @@ -202,7 +209,8 @@ NumPy 2.0 Python API removals * ``np.tracemalloc_domain`` is now only available from ``np.lib``. -* ``np.recfromcsv`` and ``recfromtxt`` are now only available from ``np.lib.npyio``. +* ``np.recfromcsv`` and ``np.recfromtxt`` were removed from the main namespace. + Use ``np.genfromtxt`` with comma delimiter instead.
* ``np.issctype``, ``np.maximum_sctype``, ``np.obj2sctype``, ``np.sctype2char``, ``np.sctypes``, ``np.issubsctype`` were all removed from the @@ -251,9 +259,9 @@ NumPy 2.0 Python API removals (`gh-25911 `__) + ``__array_prepare__`` is removed -------------------------------- - UFuncs called ``__array_prepare__`` before running computations for normal ufunc calls (not generalized ufuncs, reductions, etc.). The function was also called instead of ``__array_wrap__`` on the @@ -272,6 +280,15 @@ Deprecations * ``np.compat`` has been deprecated, as Python 2 is no longer supported. +* ``numpy.int8`` and similar classes will no longer support conversion of + out-of-bounds Python integers to integer arrays. For example, + converting 255 to ``int8`` will no longer return -1. + ``numpy.iinfo(dtype)`` can be used to check the machine limits for data types. + For example, ``np.iinfo(np.uint16)`` returns min = 0 and max = 65535. + + ``np.array(value).astype(dtype)`` will give the desired result. + + * ``np.safe_eval`` has been deprecated. ``ast.literal_eval`` should be used instead. (`gh-23830 `__) @@ -294,7 +311,7 @@ Deprecations support for implementations not accepting all three are deprecated. Its signature should be ``__array_wrap__(self, arr, context=None, return_scalar=False)`` - (`gh-25408 `__) + (`gh-25409 `__) * Arrays of 2-dimensional vectors for ``np.cross`` have been deprecated. Use arrays of 3-dimensional vectors instead. @@ -312,9 +329,9 @@ Deprecations (`gh-24978 `__) -`numpy.fft` deprecations for n-D transforms with None values in arguments -------------------------------------------------------------------------- +``numpy.fft`` deprecations for n-D transforms with None values in arguments +--------------------------------------------------------------------------- Using ``fftn``, ``ifftn``, ``rfftn``, ``irfftn``, ``fft2``, ``ifft2``, ``rfft2`` or ``irfft2`` with the ``s`` parameter set to a value that is not ``None`` and the ``axes`` parameter set to ``None`` has been deprecated, in @@ -330,9 +347,9 @@ axis, the ``s`` argument can be omitted. (`gh-25495 `__) + ``np.linalg.lstsq`` now defaults to a new ``rcond`` value --------------------------------------------------------- - `~numpy.linalg.lstsq` now uses the new rcond value of the machine precision times ``max(M, N)``. Previously, the machine precision was used but a FutureWarning was given to notify that this change will happen eventually. @@ -396,7 +413,6 @@ Compatibility notes ``loadtxt`` and ``genfromtxt`` default encoding changed ------------------------------------------------------- - ``loadtxt`` and ``genfromtxt`` now both default to ``encoding=None`` which may mainly modify how ``converters`` work. These will now be passed ``str`` rather than ``bytes``. Pass the @@ -406,48 +422,39 @@ unicode strings rather than bytes. (`gh-25158 `__) + ``f2py`` compatibility notes ---------------------------- +* ``f2py`` will no longer accept ambiguous ``-m`` and ``.pyf`` CLI + combinations. When more than one ``.pyf`` file is passed, an error is + raised. When both ``-m`` and a ``.pyf`` file are passed, a warning is emitted and + the ``-m`` provided name is ignored. -``f2py`` will no longer accept ambiguous ``-m`` and ``.pyf`` CLI combinations. When more than one ``.pyf`` file is passed, an error is raised. When both ``-m`` and a ``.pyf`` is passed, a warning is emitted and the ``-m`` provided name is ignored.
+ + (`gh-25181 `__) -(`gh-25181 `__) +* The ``f2py.compile()`` helper has been removed because it leaked memory, has + been marked as experimental for several years now, and was implemented as a + thin ``subprocess.run`` wrapper. It was also one of the test bottlenecks. See + `gh-25122 `_ for the full + rationale. It also used several ``np.distutils`` features which are too + fragile to be ported to work with ``meson``. -The ``f2py.compile()`` helper has been removed because it leaked memory, has -been marked as experimental for several years now, and was implemented as a thin -``subprocess.run`` wrapper. It is also one of the test bottlenecks. See -`gh-25122 `_ for the full -rationale. It also used several ``np.distutils`` features which are too fragile -to be ported to work with ``meson``. +* Users are urged to replace calls to ``f2py.compile`` with calls to + ``subprocess.run(["python", "-m", "numpy.f2py", ...])`` instead, and to use + environment variables to interact with ``meson``. `Native files + `_ are also an option. -Users are urged to replace calls to ``f2py.compile`` with calls to -``subprocess.run("python", "-m", "numpy.f2py",...`` instead, and to use -environment variables to interact with ``meson``. `Native files -`_ are also an option. + (`gh-25193 `__) -(`gh-25193 `__) - -``arange``'s ``start`` argument is positional-only --------------------------------------------------- -The first argument of ``arange`` is now positional only. This way, -specifying a ``start`` argument as a keyword, e.g. ``arange(start=0, stop=4)``, -raises a TypeError. Other behaviors, are unchanged so ``arange(stop=4)``, -``arange(2, stop=4)`` and so on, are still valid and have the same meaning as -before. - -(`gh-25336 `__) Minor changes in behavior of sorting functions ---------------------------------------------- - Due to algorithmic changes and use of SIMD code, sorting functions with methods that aren't stable may return slightly different results in 2.0.0 compared to 1.26.x. This includes the default method of `~numpy.argsort` and `~numpy.argpartition`. + Removed ambiguity when broadcasting in ``np.linalg.solve`` ----------------------------------------------------------- The broadcasting rules for ``np.linalg.solve(a, b)`` were ambiguous when ``b`` had 1 @@ -457,6 +464,7 @@ reconstructed by using ``np.solve(a, b[..., None])[..., 0]``. (`gh-25914 `__) + Modified representation for ``Polynomial`` ------------------------------------------ The representation method for `~numpy.polynomial.polynomial.Polynomial` was @@ -473,6 +481,7 @@ C API changes * The ``PyArray_CGT``, ``PyArray_CLT``, ``PyArray_CGE``, ``PyArray_CLE``, ``PyArray_CEQ``, ``PyArray_CNE`` macros have been removed. + * ``PyArray_MIN`` and ``PyArray_MAX`` have been moved from ``ndarraytypes.h`` to ``npy_math.h``. @@ -482,6 +491,7 @@ C API changes This includes functions for acquiring and releasing mutexes which lock access to the string data, as well as packing and unpacking UTF-8 bytestreams from array entries. + * ``NPY_NTYPES`` has been renamed to ``NPY_NTYPES_LEGACY`` as it does not include new NumPy built-in DTypes. In particular the new string DType will likely not work correctly with code that handles legacy DTypes. @@ -515,6 +525,7 @@ C API changes after including ``numpy/ndarrayobject.h`` as it requires ``import_array()``. This includes ``PyDataType_FLAGCHK``, ``PyDataType_REFCHK`` and ``NPY_BEGIN_THREADS_DESCR``.
+ * The dtype flags on ``PyArray_Descr`` must now be accessed through the ``PyDataType_FLAGS`` inline function to be compatible with both 1.x and 2.x. This function is defined in ``npy_2_compat.h`` to allow backporting. @@ -525,9 +536,9 @@ C API changes (`gh-25816 `__) + Datetime functionality exposed in the C API and Cython bindings --------------------------------------------------------------- - The functions ``NpyDatetime_ConvertDatetime64ToDatetimeStruct``, ``NpyDatetime_ConvertDatetimeStructToDatetime64``, ``NpyDatetime_ConvertPyDateTimeToDatetimeStruct``, @@ -538,9 +549,9 @@ external libraries. (`gh-21199 `__) + Const correctness for the generalized ufunc C API ------------------------------------------------- - The NumPy C API's functions for constructing generalized ufuncs (``PyUFunc_FromFuncAndData``, ``PyUFunc_FromFuncAndDataAndSignature``, ``PyUFunc_FromFuncAndDataAndSignatureAndIdentity``) take ``types`` and ``data`` @@ -553,9 +564,9 @@ code may be. (`gh-23847 `__) + Larger ``NPY_MAXDIMS`` and ``NPY_MAXARGS``, ``NPY_RAVEL_AXIS`` introduced ------------------------------------------------------------------------- - ``NPY_MAXDIMS`` is now 64; you may want to review its use. This is usually used in a stack allocation, where the increase should be safe. However, we generally encourage removing any use of ``NPY_MAXDIMS`` and @@ -566,9 +577,9 @@ replaced with ``NPY_RAVEL_AXIS``. See also :ref:`migration_maxdims`. (`gh-25149 `__) + ``NPY_MAXARGS`` not constant and ``PyArrayMultiIterObject`` size change ----------------------------------------------------------------------- - Since ``NPY_MAXARGS`` was increased, it is now a runtime constant and no longer a compile-time constant. We expect almost no users to notice this. But if used for stack allocations @@ -581,9 +592,9 @@ to avoid issues with Cython. (`gh-25271 `__) + Required changes for custom legacy user dtypes ---------------------------------------------- - In order to improve our DTypes it is unfortunately necessary to break the ABI, which requires some changes for dtypes registered with ``PyArray_RegisterDataType``. @@ -592,9 +603,9 @@ to adapt your code and achieve compatibility with both 1.x and 2.x. (`gh-25792 `__) + New Public DType API -------------------- - The C implementation of the NEP 42 DType API is now public. While the DType API has shipped in NumPy for a few versions, it was only usable in sessions with a special environment variable set. It is now possible to write custom DTypes @@ -608,9 +619,9 @@ be updated to work correctly with new DTypes. (`gh-25754 `__) + New C-API import functions -------------------------- - We have now added ``PyArray_ImportNumPyAPI`` and ``PyUFunc_ImportUFuncAPI`` as static inline functions to import the NumPy C-API tables. The new functions have two advantages over ``import_array`` and @@ -659,6 +670,7 @@ NumPy 2.0 C API removals have been removed. We recommend querying ``PyErr_CheckSignals()`` or ``PyOS_InterruptOccurred()`` periodically (these do currently require holding the GIL though). + * The ``noprefix.h`` header has been removed. Replace missing symbols with their prefixed counterparts (usually an added ``NPY_`` or ``npy_``). @@ -712,56 +724,58 @@ NumPy 2.0 C API removals * ``PyArrayFlags_Type`` and ``PyArray_NewFlagsObject`` as well as ``PyArrayFlagsObject`` are private now. There is no known use-case; use the Python API if needed.
+ * ``PyArray_MoveInto``, ``PyArray_CastTo``, ``PyArray_CastAnyTo`` are removed; use ``PyArray_CopyInto`` and, if absolutely needed, ``PyArray_CopyAnyInto`` (the latter does a flat copy). -* ``PyArray_FillObjectArray`` is removed, its only true use is for + +* ``PyArray_FillObjectArray`` is removed; its only true use was for implementing ``np.empty``. Create a new empty array or use ``PyArray_FillWithScalar()`` (decrefs existing objects). + * ``PyArray_CompareUCS4`` and ``PyArray_CompareString`` are removed. Use the standard C string comparison functions. + * ``PyArray_ISPYTHON`` is removed as it is misleading, has no known use-cases, and is easy to replace. + * ``PyArray_FieldNames`` is removed, as it is unclear what it would be useful for. It also has incorrect semantics in some possible use-cases. + * ``PyArray_TypestrConvert`` is removed, since it seems a misnomer and unlikely to be used by anyone. If you know the size or are limited to few types, just use it explicitly, otherwise go via Python strings. (`gh-25292 `__) -* ``PyDataType_GetDatetimeMetaData`` has been removed, it did not actually +* ``PyDataType_GetDatetimeMetaData`` is removed; it did not actually do anything since at least NumPy 1.7. (`gh-25802 `__) -``PyArray_GetCastFunc`` was removed ----------------------------------- +* ``PyArray_GetCastFunc`` is removed. Note that custom legacy user dtypes + can still provide a castfunc as their implementation, but any access to them + is now removed. The reason for this is that NumPy never used these + internally for many years. If you use simple numeric types, please just use + C casts directly. In case you require an alternative, please let us know so + we can create new API such as ``PyArray_CastBuffer()`` which could use old or + new cast functions depending on the NumPy version. -Note that custom legacy user dtypes can still provide a castfunc -as their implementation, but any access to them is now removed. -The reason for this is that NumPy never used these internally -for many years. -If you use simple numeric types, please just use C casts directly. -In case you require an alternative, please let us know so we can -create new API such as ``PyArray_CastBuffer()`` which could -use old or new cast functions depending on the NumPy version. -(`gh-25161 `__) + (`gh-25161 `__) New Features ============ -* ``np.add`` was extended to work with ``unicode`` and ``bytes`` dtypes. +``np.add`` was extended to work with ``unicode`` and ``bytes`` dtypes. +---------------------------------------------------------------------- (`gh-24858 `__) + A new ``bitwise_count`` function -------------------------------- - This new function counts the number of 1-bits in a number. `~numpy.bitwise_count` works on all the numpy integer types and integer-like objects. @@ -775,9 +789,9 @@ integer-like objects. (`gh-19355 `__) + macOS Accelerate support, including the ILP64 --------------------------------------------- - Support for the updated Accelerate BLAS/LAPACK library, including ILP64 (64-bit integer) support, in macOS 13.3 has been added. This brings arm64 support, and significant performance improvements of up to 10x for commonly used linear @@ -792,18 +806,18 @@ PyPI will get wheels built against Accelerate rather than OpenBLAS. (`gh-25255 `__) + Option to use weights for quantile and percentile functions ----------------------------------------------------------- - A ``weights`` keyword is now available for `~numpy.quantile`, `~numpy.percentile`, `~numpy.nanquantile` and `~numpy.nanpercentile`.
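For example, a minimal sketch of the new keyword (note the ``method`` restriction stated just below):

.. code-block:: python

    import numpy as np

    a = np.array([1.0, 2.0, 3.0, 4.0])
    w = np.array([1.0, 1.0, 1.0, 5.0])   # weight the last sample heavily

    # Weighted median; weights require the inverted CDF method.
    np.quantile(a, 0.5, weights=w, method="inverted_cdf")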
Only ``method="inverted_cdf"`` supports weights. (`gh-24254 `__) + Improved CPU optimization tracking ---------------------------------- - A new tracer mechanism is available which enables tracking of the enabled targets for each optimized function (i.e., that uses hardware-specific SIMD instructions) in the NumPy library. With this enhancement, it becomes possible @@ -817,9 +831,9 @@ and data type signatures. (`gh-24420 `__) + A new Meson backend for ``f2py`` -------------------------------- - ``f2py`` in compile mode (i.e. ``f2py -c``) now accepts the ``--backend meson`` option. This is the default option for Python >=3.12. For older Python versions, ``f2py`` will still default to ``--backend distutils``. @@ -832,9 +846,9 @@ There are no changes for users of ``f2py`` only as a code generator, i.e. withou (`gh-24532 `__) + ``bind(c)`` support for ``f2py`` -------------------------------- - Both functions and subroutines can be annotated with ``bind(c)``. ``f2py`` will both handle the correct type mapping and preserve the unique label for other C interfaces. @@ -846,9 +860,9 @@ Fortran. (`gh-24555 `__) + A new ``strict`` option for several testing functions ----------------------------------------------------- - The ``strict`` keyword is now available for `~numpy.testing.assert_allclose`, `~numpy.testing.assert_equal`, and `~numpy.testing.assert_array_less`. Setting ``strict=True`` will disable the broadcasting behaviour for scalars and ensure that input arrays have the same data type. `gh-24770 `__, `gh-24775 `__) + Add ``np.core.umath.find`` and ``np.core.umath.rfind`` UFuncs ------------------------------------------------------------- Add two ``find`` and ``rfind`` UFuncs that operate on unicode or byte strings and are used in ``np.char``. They operate similarly to ``str.find`` and ``str.rfind``. (`gh-24868 `__) -``diagonal`` and ``trace`` for `numpy.linalg` ---------------------------------------------- +``diagonal`` and ``trace`` for ``numpy.linalg`` ----------------------------------------------- `numpy.linalg.diagonal` and `numpy.linalg.trace` have been added, which are array API standard-compatible variants of `numpy.diagonal` and `numpy.trace`. They differ in the default axis selection which defines 2-D sub-arrays. (`gh-24887 `__) + New ``long`` and ``ulong`` dtypes --------------------------------- - `numpy.long` and `numpy.ulong` have been added as NumPy integers mapping to C's ``long`` and ``unsigned long``. Prior to NumPy 1.24, ``numpy.long`` was an alias to Python's ``int``. (`gh-24922 `__) -``svdvals`` for `numpy.linalg` ------------------------------ +``svdvals`` for ``numpy.linalg`` -------------------------------- `numpy.linalg.svdvals` has been added. It computes singular values for (a stack of) matrices. Executing ``np.linalg.svdvals(x)`` is the same as calling ``np.linalg.svd(x, compute_uv=False, hermitian=False)``. This function is compatible with the array API standard. (`gh-24940 `__) + A new ``isdtype`` function -------------------------- - `numpy.isdtype` was added to provide a canonical way to classify NumPy's dtypes in compliance with the array API standard. (`gh-25054 `__) + A new ``astype`` function ------------------------- - `numpy.astype` was added to provide an array API standard-compatible alternative to the `numpy.ndarray.astype` method.
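A minimal sketch of the function-style spelling next to the familiar method:

.. code-block:: python

    import numpy as np

    arr = np.array([1, 2, 3])
    np.astype(arr, np.float64)    # array([1., 2., 3.])
    arr.astype(np.float64)        # same result via the ndarray method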
(`gh-25079 `__) + Array API compatible functions' aliases --------------------------------------- - 13 aliases for existing functions were added to improve compatibility with the array API standard: * Trigonometry: ``acos``, ``acosh``, ``asin``, ``asinh``, ``atan``, ``atanh``, ``atan2``. @@ -926,9 +941,9 @@ Array API compatible functions' aliases (`gh-25086 `__) + New ``unique_*`` functions -------------------------- - The `~numpy.unique_all`, `~numpy.unique_counts`, `~numpy.unique_inverse`, and `~numpy.unique_values` functions have been added. They provide functionality of `~numpy.unique` with different sets of flags. They are array API @@ -938,9 +953,9 @@ compilation. (`gh-25088 `__) + Matrix transpose support for ndarrays ------------------------------------- - NumPy now offers support for calculating the matrix transpose of an array (or stack of arrays). The matrix transpose is equivalent to swapping the last two axes of an array. Both ``np.ndarray`` and ``np.ma.MaskedArray`` now expose a @@ -949,9 +964,9 @@ function. (`gh-23762 `__) + Array API compatible functions for ``numpy.linalg`` --------------------------------------------------- - Six new functions and two aliases were added to improve compatibility with the Array API standard for `numpy.linalg`: @@ -980,18 +995,18 @@ the Array API standard for `numpy.linalg`: (`gh-25145 `__) + A ``correction`` argument for ``var`` and ``std`` ------------------------------------------------- - A ``correction`` argument was added to `~numpy.var` and `~numpy.std`, which is an array API standard compatible alternative to ``ddof``. As both arguments serve a similar purpose, only one of them can be provided at the same time. (`gh-25169 `__) + ``ndarray.device`` and ``ndarray.to_device`` -------------------------------------------- - An ``ndarray.device`` attribute and ``ndarray.to_device`` method were added to ``numpy.ndarray`` for array API standard compatibility. @@ -1004,9 +1019,9 @@ For all these new arguments, only ``device="cpu"`` is supported. (`gh-25233 `__) + StringDType has been added to NumPy ----------------------------------- - We have added a new variable-width UTF-8 encoded string data type, implementing a "NumPy array of Python strings", including support for a user-provided missing data sentinel. It is intended as a drop-in replacement for arrays of Python @@ -1016,9 +1031,9 @@ documentation ` for more details. (`gh-25347 `__) + New keywords for ``cholesky`` and ``pinv`` ------------------------------------------ - The ``upper`` and ``rtol`` keywords were added to `numpy.linalg.cholesky` and `numpy.linalg.pinv`, respectively, to improve array API standard compatibility. @@ -1028,9 +1043,9 @@ the future. (`gh-25388 `__) + New keywords for ``sort``, ``argsort`` and ``linalg.matrix_rank`` ----------------------------------------------------------------- - New keyword parameters were added to improve array API standard compatibility: * ``rtol`` was added to `~numpy.linalg.matrix_rank`. @@ -1039,9 +1054,9 @@ New keyword parameters were added to improve array API standard compatibility: (`gh-25437 `__) + New ``numpy.strings`` namespace for string ufuncs ------------------------------------------------- - NumPy now implements some string operations as ufuncs. The old ``np.char`` namespace is still available, and where possible the string manipulation functions in that namespace have been updated to use the new ufuncs, @@ -1053,9 +1068,9 @@ instead of ``np.char``. 
In the future we may deprecate ``np.char`` in favor of (`gh-25463 `__) -`numpy.fft` support for different precisions and in-place calculations ----------------------------------------------------------------------- +``numpy.fft`` support for different precisions and in-place calculations +------------------------------------------------------------------------ The various FFT routines in `numpy.fft` now do their calculations natively in float, double, or long double precision, depending on the input precision, instead of always calculating in double precision. Hence, the calculation will @@ -1067,9 +1082,9 @@ for in-place calculations. (`gh-25536 `__) + configtool and pkg-config support --------------------------------- - A new ``numpy-config`` CLI script is available that can be queried for the NumPy version and for compile flags needed to use the NumPy C API. This will allow build systems to better support the use of NumPy as a dependency. @@ -1079,9 +1094,9 @@ find its location for use with ``PKG_CONFIG_PATH``, use (`gh-25730 `__) + Array API standard support in the main namespace ------------------------------------------------ - The main ``numpy`` namespace now supports the array API standard. See :ref:`array-api-standard-compatibility` for details. @@ -1090,40 +1105,41 @@ The main ``numpy`` namespace now supports the array API standard. See Improvements ============ -* Strings are now supported by ``any``, ``all``, and the logical ufuncs. +Strings are now supported by ``any``, ``all``, and the logical ufuncs. +---------------------------------------------------------------------- (`gh-25651 `__) + Integer sequences as the shape argument for ``memmap`` ------------------------------------------------------ - `numpy.memmap` can now be created with any integer sequence as the ``shape`` argument, such as a list or numpy array of integers. Previously, only the types of tuple and int could be used without raising an error. (`gh-23729 `__) + ``errstate`` is now faster and context safe ------------------------------------------- - The `numpy.errstate` context manager/decorator is now faster and safer. Previously, it was not context safe and had (rare) issues with thread-safety. (`gh-23936 `__) + AArch64 quicksort speed improved by using Highway's VQSort ---------------------------------------------------------- - The first introduction of the Google Highway library, using VQSort on AArch64. Execution time is improved by up to 16x in some cases, see the PR for benchmark results. Extensions to other platforms will be done in the future. (`gh-24018 `__) + Complex types - underlying C type changes ----------------------------------------- - * The underlying C types for all of NumPy's complex types have been changed to use C99 complex types. @@ -1149,9 +1165,9 @@ Complex types - underlying C type changes (`gh-24085 `__) + ``iso_c_binding`` support and improved common blocks for ``f2py`` ----------------------------------------------------------------- - Previously, users would have to define their own custom ``f2cmap`` file to use type mappings defined by the Fortran2003 ``iso_c_binding`` intrinsic module. These type maps are now natively supported by ``f2py`` @@ -1164,27 +1180,27 @@ modules. 
This further expands the usability of intrinsics like (`gh-25186 `__) + Call ``str`` automatically on third argument to functions like ``assert_equal`` ------------------------------------------------------------------------------- - The third argument to functions like `~numpy.testing.assert_equal` now has ``str`` called on it automatically. This way it mimics the built-in ``assert`` statement, where ``assert_equal(a, b, obj)`` works like ``assert a == b, obj``. (`gh-24877 `__) + Support for array-like ``atol``/``rtol`` in ``isclose``, ``allclose`` --------------------------------------------------------------------- - The keywords ``atol`` and ``rtol`` in `~numpy.isclose` and `~numpy.allclose` now accept both scalars and arrays. An array, if given, must broadcast to the shapes of the first two array arguments. (`gh-24878 `__) + Consistent failure messages in test functions --------------------------------------------- - Previously, some `numpy.testing` assertions printed messages that referred to the actual and desired results as ``x`` and ``y``. Now, these values are consistently referred to as ``ACTUAL`` and @@ -1192,9 +1208,9 @@ Now, these values are consistently referred to as ``ACTUAL`` and (`gh-24931 `__) + n-D FFT transforms allow ``s[i] == -1`` --------------------------------------- - The `~numpy.fft.fftn`, `~numpy.fft.ifftn`, `~numpy.fft.rfftn`, `~numpy.fft.irfftn`, `~numpy.fft.fft2`, `~numpy.fft.ifft2`, `~numpy.fft.rfft2` and `~numpy.fft.irfft2` functions now use the whole input array along the axis @@ -1202,9 +1218,9 @@ and `~numpy.fft.irfft2` functions now use the whole input array along the axis (`gh-25495 `__) + Guard PyArrayScalar_VAL and PyUnicodeScalarObject for the limited API --------------------------------------------------------------------- - ``PyUnicodeScalarObject`` holds a ``PyUnicodeObject``, which is not available when using ``Py_LIMITED_API``. Add guards to hide it and consequently also make the ``PyArrayScalar_VAL`` macro hidden. @@ -1222,6 +1238,7 @@ Changes * Being fully context and thread-safe, ``np.errstate`` can only be entered once now. + * ``np.setbufsize`` is now tied to ``np.errstate()``: leaving an ``np.errstate`` context will also reset the ``bufsize``. @@ -1248,9 +1265,9 @@ Changes (`gh-25816 `__) + Representation of NumPy scalars changed --------------------------------------- - As per :ref:`NEP 51 `, the scalar representation has been updated to include the type information to avoid confusion with Python scalars. @@ -1268,9 +1285,9 @@ to facilitate updates. (`gh-22449 `__) + Truthiness of NumPy strings changed ----------------------------------- - NumPy strings previously were inconsistent about how they defined if the string is ``True`` or ``False`` and the definition did not match the one used by Python. @@ -1298,9 +1315,9 @@ The change does affect ``np.fromregex`` as it uses direct assignments. (`gh-23871 `__) + A ``mean`` keyword was added to var and std function ---------------------------------------------------- - Often when the standard deviation is needed the mean is also needed. The same holds for the variance and the mean. Until now the mean is then calculated twice, the change introduced here for the `~numpy.var` and `~numpy.std` functions @@ -1309,18 +1326,18 @@ docstrings for details and an example illustrating the speed-up. 
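A minimal runnable sketch; per the docstrings, the precomputed mean must have
the shape it would have with ``keepdims=True``::

    import numpy as np

    a = np.random.default_rng(0).normal(size=10_000)
    mu = a.mean(keepdims=True)  # shape (1,), as the ``mean`` argument requires
    sigma = np.std(a, mean=mu)  # reuses mu instead of recomputing it
    v = np.var(a, mean=mu)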
(`gh-24126 `__) + Remove datetime64 deprecation warning when constructing with timezone --------------------------------------------------------------------- - The `numpy.datetime64` method now issues a UserWarning rather than a DeprecationWarning whenever a timezone is included in the datetime string that is provided. (`gh-24193 `__) + Default integer dtype is now 64-bit on 64-bit Windows ----------------------------------------------------- - The default NumPy integer is now 64-bit on all 64-bit systems as the historic 32-bit default on Windows was a common source of issues. Most users should not notice this. The main issues may occur with code interfacing with libraries @@ -1329,6 +1346,7 @@ written in a compiled language like C. For more information see (`gh-24224 `__) + Renamed ``numpy.core`` to ``numpy._core`` ----------------------------------------- Accessing ``numpy.core`` now emits a DeprecationWarning. In practice @@ -1349,9 +1367,9 @@ the ``NPY_RELAXED_STRIDES_DEBUG`` environment variable or the (`gh-24717 `__) + Redefinition of ``np.intp``/``np.uintp`` (almost never a change) ---------------------------------------------------------------- - Due to the actual use of these types almost always matching the use of ``size_t``/``Py_ssize_t`` this is now the definition in C. Previously, it matched ``intptr_t`` and ``uintptr_t`` which would often @@ -1371,24 +1389,25 @@ However, it means that: (`gh-24888 `__) + ``numpy.fft.helper`` made private --------------------------------- - ``numpy.fft.helper`` was renamed to ``numpy.fft._helper`` to indicate that it is a private submodule. All public functions exported by it should be accessed from `numpy.fft`. (`gh-24945 `__) + ``numpy.linalg.linalg`` made private ------------------------------------ - ``numpy.linalg.linalg`` was renamed to ``numpy.linalg._linalg`` to indicate that it is a private submodule. All public functions exported by it should be accessed from `numpy.linalg`. (`gh-24946 `__) + Out-of-bound axis not the same as ``axis=None`` ----------------------------------------------- In some cases ``axis=32`` or for concatenate any large value @@ -1401,9 +1420,9 @@ Any out of bound axis value will now error, make sure to use .. _copy-keyword-changes-2.0: + New ``copy`` keyword meaning for ``array`` and ``asarray`` constructors ----------------------------------------------------------------------- - Now `numpy.array` and `numpy.asarray` support three values for ``copy`` parameter: * ``None`` - A copy will only be made if it is necessary. @@ -1414,9 +1433,9 @@ The meaning of ``False`` changed as it now raises an exception if a copy is need (`gh-25168 `__) + The ``__array__`` special method now takes a ``copy`` keyword argument. ----------------------------------------------------------------------- - NumPy will pass ``copy`` to the ``__array__`` special method in situations where it would be set to a non-default value (e.g. in a call to ``np.asarray(some_object, copy=False)``). Currently, if an @@ -1428,9 +1447,9 @@ argument with the same meaning as when passed to `numpy.array` or (`gh-25168 `__) + Cleanup of initialization of ``numpy.dtype`` with strings with commas --------------------------------------------------------------------- - The interpretation of strings with commas is changed slightly, in that a trailing comma will now always create a structured dtype. 
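A short sketch of the new rule (outputs as comments; little-endian byte order
shown)::

    import numpy as np

    np.dtype("i4")    # dtype('int32'): a plain scalar dtype
    np.dtype("i4,")   # dtype([('f0', '<i4')]): trailing comma makes it structured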
E.g., where previously ``np.dtype("i")`` and ``np.dtype("i,")`` were treated as identical, @@ -1447,9 +1466,9 @@ case for initializations without a comma, like ``np.dtype("(2)i")``. (`gh-25434 `__) + Change in how complex sign is calculated ---------------------------------------- - Following the array API standard, the complex sign is now calculated as ``z / |z|`` (instead of the rather less logical case where the sign of the real part was taken, unless the real part was zero, in which case @@ -1458,9 +1477,9 @@ zero is returned if ``z==0``. (`gh-25441 `__) + Return types of functions that returned a list of arrays -------------------------------------------------------- - Functions that returned a list of ndarrays have been changed to return a tuple of ndarrays instead. Returning tuples consistently whenever a sequence of arrays is returned makes it easier for JIT compilers like Numba, as well as for @@ -1469,20 +1488,26 @@ functions are: `~numpy.atleast_1d`, `~numpy.atleast_2d`, `~numpy.atleast_3d`, `~numpy.broadcast_arrays`, `~numpy.meshgrid`, `~numpy.ogrid`, `~numpy.histogramdd`. + ``np.unique`` ``return_inverse`` shape for multi-dimensional inputs ------------------------------------------------------------------- - When multi-dimensional inputs are passed to ``np.unique`` with ``return_inverse=True``, the ``unique_inverse`` output is now shaped such that the input can be reconstructed directly using ``np.take(unique, unique_inverse)`` when ``axis=None``, and ``np.take_along_axis(unique, unique_inverse, axis=axis)`` otherwise. -(`gh-25553 `__, +.. note:: + This change was reverted in 2.0.1 except for ``axis=None``. The correct + reconstruction is always ``np.take(unique, unique_inverse, axis=axis)``. + When 2.0.0 needs to be supported, add ``unique_inverse.reshape(-1)`` + to code. + +(`gh-25553 `__, `gh-25570 `__) + ``any`` and ``all`` return booleans for object arrays ----------------------------------------------------- - The ``any`` and ``all`` functions and methods now return booleans also for object arrays. Previously, they did a reduction which behaved like the Python ``or`` and @@ -1492,8 +1517,16 @@ to achieve the previous behavior. (`gh-25712 `__) +``np.can_cast`` cannot be called on Python int, float, or complex +----------------------------------------------------------------- +``np.can_cast`` cannot be called with Python int, float, or complex instances +anymore. This is because NEP 50 means that the result of ``can_cast`` must +not depend on the value passed in. +Unfortunately, for Python scalars whether a cast should be considered +``"same_kind"`` or ``"safe"`` may depend on the context and value so that +this is currently not implemented. +In some cases, this means you may have to add a specific path for: +``if type(obj) in (int, float, complex): ...``. +(`gh-26393 `__) -**Content from release note snippets in doc/release/upcoming_changes:** - -.. include:: notes-towncrier.rst diff --git a/doc/source/release/2.0.1-notes.rst b/doc/source/release/2.0.1-notes.rst new file mode 100644 index 000000000000..a49f2ee36abd --- /dev/null +++ b/doc/source/release/2.0.1-notes.rst @@ -0,0 +1,74 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.0.1 Release Notes +========================== + +NumPy 2.0.1 is a maintenance release that fixes bugs and regressions +discovered after the 2.0.0 release. NumPy 2.0.1 is the last planned +release in the 2.0.x series, 2.1.0rc1 should be out shortly. + +The Python versions supported by this release are 3.9-3.12. 
+ +Improvements +============ + +``np.quantile`` with method ``closest_observation`` chooses nearest even order statistic +---------------------------------------------------------------------------------------- +This changes the definition of nearest for border cases from the nearest odd +order statistic to nearest even order statistic. The numpy implementation now +matches other reference implementations. + +(`gh-26656 `__) + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* @vahidmech + +* Alex Herbert + +* Charles Harris +* Giovanni Del Monte + +* Leo Singer +* Lysandros Nikolaou +* Matti Picus +* Nathan Goldbaum +* Patrick J. Roddy + +* Raghuveer Devulapalli +* Ralf Gommers +* Rostan Tabet + +* Sebastian Berg +* Tyler Reddy +* Yannik Wicke + + +Pull requests merged +==================== + +A total of 24 pull requests were merged for this release. + +* `#26711 `__: MAINT: prepare 2.0.x for further development +* `#26792 `__: TYP: fix incorrect import in ``ma/extras.pyi`` stub +* `#26793 `__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions`` +* `#26794 `__: DOC: Remove mention of NaN and NAN aliases from constants +* `#26821 `__: BLD: Fix x86-simd-sort build failure on openBSD +* `#26822 `__: BUG: Ensure output order follows input in numpy.fft +* `#26823 `__: TYP: fix missing sys import in numeric.pyi +* `#26832 `__: DOC: remove hack to override _add_newdocs_scalars (#26826) +* `#26835 `__: BUG: avoid side-effect of 'include complex.h' +* `#26836 `__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` +* `#26837 `__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes +* `#26856 `__: DOC: Update some documentation +* `#26868 `__: BUG: fancy indexing copy +* `#26869 `__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar`` +* `#26870 `__: BUG: Handle --f77flags and --f90flags for meson [wheel build] +* `#26887 `__: BUG: Fix new DTypes and new string promotion when signature is... +* `#26888 `__: BUG: remove numpy.f2py from excludedimports +* `#26959 `__: BUG: Quantile closest_observation to round to nearest even order +* `#26960 `__: BUG: Fix off-by-one error in amount of characters in strip +* `#26961 `__: API: Partially revert unique with return_inverse +* `#26962 `__: BUG,MAINT: Fix utf-8 character stripping memory access +* `#26963 `__: BUG: Fix out-of-bound minimum offset for in1d table method +* `#26971 `__: BUG: fix f2py tests to work with v2 API +* `#26995 `__: BUG: Add object cast to avoid warning with limited API diff --git a/doc/source/release/2.0.2-notes.rst b/doc/source/release/2.0.2-notes.rst new file mode 100644 index 000000000000..ae5c26250ba7 --- /dev/null +++ b/doc/source/release/2.0.2-notes.rst @@ -0,0 +1,58 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.0.2 Release Notes +========================== + +NumPy 2.0.2 is a maintenance release that fixes bugs and regressions +discovered after the 2.0.1 release. + +The Python versions supported by this release are 3.9-3.12. + + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* Bruno Oliveira + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christopher Sidebottom +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* Yair Chuchem + + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#27000 `__: REL: Prepare for the NumPy 2.0.1 release [wheel build] +* `#27001 `__: MAINT: prepare 2.0.x for further development +* `#27021 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27022 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27061 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27073 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27074 `__: BUG: Bump Highway to latest master +* `#27077 `__: BUG: Off by one in memory overlap check +* `#27122 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27126 `__: BUG: Bump Highway to latest +* `#27128 `__: BUG: add missing error handling in public_dtype_api.c +* `#27129 `__: BUG: fix another cast setup in array_assign_subscript +* `#27130 `__: BUG: Fix building NumPy in FIPS mode +* `#27131 `__: BLD: update vendored Meson for cross-compilation patches +* `#27146 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27151 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27195 `__: REV: Revert undef I and document it +* `#27213 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27279 `__: BUG: Fix array_equal for numeric and non-numeric scalar types + diff --git a/doc/source/release/2.0.3-notes.rst b/doc/source/release/2.0.3-notes.rst new file mode 100644 index 000000000000..6573d76291b3 --- /dev/null +++ b/doc/source/release/2.0.3-notes.rst @@ -0,0 +1,11 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.0.3 Release Notes +========================== + +NumPy 2.0.3 is a maintenance release that fixes bugs and regressions +discovered after the 2.0.2 release. + +The Python versions supported by this release are 3.9-3.12. + diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 93b009628571..730dd0ef7c1f 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -235,7 +235,7 @@ only one "data type". The data type is recorded in the ``dtype`` attribute. >>> a.dtype dtype('int64') # "int" for integer, "64" for 64-bit -ref:`Read more about array attributes here ` and learn about +:ref:`Read more about array attributes here ` and learn about :ref:`array objects here `. How to create a basic array diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index 29b9eae06481..daea7474aa1a 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -23,6 +23,10 @@ example that has rather narrow utility but illustrates the concepts involved. ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" ... def __array__(self, dtype=None, copy=None): +... if copy is False: +... raise ValueError( +... "`copy=False` isn't supported. A copy is always created." +... ) ... return self._i * np.eye(self._N, dtype=dtype) Our custom array can be instantiated like: @@ -85,6 +89,10 @@ For this example we will only handle the method ``__call__`` ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" ... 
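+...     # NumPy 2 passes the ``copy`` keyword through to ``__array__``;
+...     # this class always materializes fresh data, so a ``copy=False``
+...     # request cannot be honored and must raise.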
def __array__(self, dtype=None, copy=None): +... if copy is False: +... raise ValueError( +... "`copy=False` isn't supported. A copy is always created." +... ) ... return self._i * np.eye(self._N, dtype=dtype) ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... if method == '__call__': @@ -136,6 +144,10 @@ conveniently by inheriting from the mixin ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" ... def __array__(self, dtype=None, copy=None): +... if copy is False: +... raise ValueError( +... "`copy=False` isn't supported. A copy is always created." +... ) ... return self._i * np.eye(self._N, dtype=dtype) ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... if method == '__call__': @@ -174,6 +186,10 @@ functions to our custom variants. ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" ... def __array__(self, dtype=None, copy=None): +... if copy is False: +... raise ValueError( +... "`copy=False` isn't supported. A copy is always created." +... ) ... return self._i * np.eye(self._N, dtype=dtype) ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... if method == '__call__': diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst index e0faf0c052c9..ca0c39d7081f 100644 --- a/doc/source/user/basics.interoperability.rst +++ b/doc/source/user/basics.interoperability.rst @@ -113,6 +113,8 @@ We can check that ``arr`` and ``new_arr`` share the same data buffer: array([1000, 2, 3, 4]) +.. _dunder_array.interface: + The ``__array__()`` method ~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 38baa28c7307..6b1aca65ed00 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -182,21 +182,16 @@ site-packages directory. $python setup.py install will install the module in your site-packages file. - See the distutils section of - 'Extending and Embedding the Python Interpreter' - at docs.python.org for more information. + See the setuptools section 'Building Extension Modules' + at setuptools.pypa.io for more information. ''' + from setuptools import setup, Extension + import numpy as np - from distutils.core import setup, Extension + module1 = Extension('spam', sources=['spammodule.c']) - module1 = Extension('spam', sources=['spammodule.c'], - include_dirs=['/usr/local/lib']) - - setup(name = 'spam', - version='1.0', - description='This is my spam package', - ext_modules = [module1]) + setup(name='spam', version='1.0', ext_modules=[module1]) Once the spam module is imported into python, you can call logit @@ -355,8 +350,8 @@ using ``python setup.py build_ext --inplace``. ''' setup.py file for single_type_logit.c Note that since this is a numpy extension - we use numpy.distutils instead of - distutils from the python standard library. + we add an include_dirs=[get_include()] so that the + extension is built with numpy's C/C++ header files. Calling $python setup.py build_ext --inplace @@ -373,33 +368,26 @@ using ``python setup.py build_ext --inplace``. $python setup.py install will install the module in your site-packages file. - See the distutils section of - 'Extending and Embedding the Python Interpreter' - at docs.python.org and the documentation - on numpy.distutils for more information. + See the setuptools section 'Building Extension Modules' + at setuptools.pypa.io for more information. 
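+    Note: numpy.distutils was built on the stdlib distutils, which was
+    removed in Python 3.12, so setuptools is the supported replacement.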
''' + from setuptools import setup, Extension + from numpy import get_include - def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('npufunc_directory', - parent_package, - top_path) - config.add_extension('npufunc', ['single_type_logit.c']) + npufunc = Extension('npufunc', + sources=['single_type_logit.c'], + include_dirs=[get_include()]) - return config + setup(name='npufunc', version='1.0', ext_modules=[npufunc]) - if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) After the above has been installed, it can be imported and used as follows. >>> import numpy as np >>> import npufunc >>> npufunc.logit(0.5) -0.0 +np.float64(0.0) >>> a = np.linspace(0,1,5) >>> npufunc.logit(a) array([ -inf, -1.09861229, 0. , 1.09861229, inf]) @@ -607,8 +595,10 @@ or installed to site-packages via ``python setup.py install``. ''' setup.py file for multi_type_logit.c Note that since this is a numpy extension - we use numpy.distutils instead of - distutils from the python standard library. + we add an include_dirs=[get_include()] so that the + extension is built with numpy's C/C++ header files. + Furthermore, we also have to include the npymath + lib for half-float d-type. Calling $python setup.py build_ext --inplace @@ -625,38 +615,31 @@ or installed to site-packages via ``python setup.py install``. $python setup.py install will install the module in your site-packages file. - See the distutils section of - 'Extending and Embedding the Python Interpreter' - at docs.python.org and the documentation - on numpy.distutils for more information. + See the setuptools section 'Building Extension Modules' + at setuptools.pypa.io for more information. ''' + from setuptools import setup, Extension + from numpy import get_include + from os import path - def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration, get_info - - #Necessary for the half-float d-type. - info = get_info('npymath') + path_to_npymath = path.join(get_include(), '..', 'lib') + npufunc = Extension('npufunc', + sources=['multi_type_logit.c'], + include_dirs=[get_include()], + # Necessary for the half-float d-type. + library_dirs=[path_to_npymath], + libraries=["npymath"]) - config = Configuration('npufunc_directory', - parent_package, - top_path) - config.add_extension('npufunc', - ['multi_type_logit.c'], - extra_info=info) + setup(name='npufunc', version='1.0', ext_modules=[npufunc]) - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) After the above has been installed, it can be imported and used as follows. >>> import numpy as np >>> import npufunc >>> npufunc.logit(0.5) -0.0 +np.float64(0.0) >>> a = np.linspace(0,1,5) >>> npufunc.logit(a) array([ -inf, -1.09861229, 0. , 1.09861229, inf]) @@ -678,13 +661,17 @@ the line .. code-block:: python - config.add_extension('npufunc', ['single_type_logit.c']) + npufunc = Extension('npufunc', + sources=['single_type_logit.c'], + include_dirs=[get_include()]) is replaced with .. code-block:: python - config.add_extension('npufunc', ['multi_arg_logit.c']) + npufunc = Extension('npufunc', + sources=['multi_arg_logit.c'], + include_dirs=[get_include()]) The C file is given below. The ufunc generated takes two arguments ``A`` and ``B``. It returns a tuple whose first element is ``A * B`` and whose second @@ -809,13 +796,17 @@ the line .. 
code-block:: python

-    config.add_extension('npufunc', ['single_type_logit.c'])
+    npufunc = Extension('npufunc',
+                        sources=['single_type_logit.c'],
+                        include_dirs=[get_include()])

is replaced with

.. code-block:: python

-    config.add_extension('npufunc', ['add_triplet.c'])
+    npufunc = Extension('npufunc',
+                        sources=['add_triplet.c'],
+                        include_dirs=[get_include()])

The C file is given below.

@@ -892,7 +883,7 @@ The C file is given below.
     NULL
 };

-PyMODINIT_FUNC PyInit_struct_ufunc_test(void)
+PyMODINIT_FUNC PyInit_npufunc(void)
 {
     PyObject *m, *add_triplet, *d;
     PyObject *dtype_dict;
diff --git a/doc/source/user/how-to-io.rst b/doc/source/user/how-to-io.rst
index 9b3a71fa40bb..ca4abcd13746 100644
--- a/doc/source/user/how-to-io.rst
+++ b/doc/source/user/how-to-io.rst
@@ -327,7 +327,7 @@ created with NumPy 1.26.
 Convert from a pandas DataFrame to a NumPy array
 ================================================

-See :meth:`pandas.DataFrame.to_numpy`.
+See :meth:`pandas.Series.to_numpy`.

 Save/restore using `~numpy.ndarray.tofile` and `~numpy.fromfile`
 ================================================================
diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst
index 232f9f7e2bf2..adbc9d898846 100644
--- a/doc/source/user/troubleshooting-importerror.rst
+++ b/doc/source/user/troubleshooting-importerror.rst
@@ -183,6 +183,34 @@ that usually works is to upgrade the NumPy version::

     pip install numpy --upgrade

+
+Downstream ImportError or AttributeError
+========================================
+
+If you see a message such as::
+
+    A module that was compiled using NumPy 1.x cannot be run in
+    NumPy 2.0.0 as it may crash. To support both 1.x and 2.x
+    versions of NumPy, modules must be compiled with NumPy 2.0.
+    Some module may need to rebuild instead e.g. with 'pybind11>=2.12'.
+
+either as an ``ImportError`` or with::
+
+    AttributeError: _ARRAY_API not found
+
+then you are using NumPy 2 together with a module that was built with NumPy 1.
+NumPy 2 made some changes that require rebuilding such modules to avoid
+possibly incorrect results or crashes.
+
+As the error message suggests, the easiest solution is likely to downgrade
+NumPy to `numpy<2`.
+Alternatively, you can search the traceback (from the back) to find the first
+line that isn't inside NumPy to see which module needs to be updated.
+
+NumPy 2 was released in the first half of 2024 and especially smaller
+modules downstream are expected to need time to adapt and publish a new
+version.
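+
+A minimal sketch of one way to bridge the transition in Python code (the
+branch bodies below are placeholders)::
+
+    import numpy as np
+
+    IS_NUMPY2 = int(np.__version__.split(".")[0]) >= 2
+    if IS_NUMPY2:
+        ...  # NumPy 2 code path
+    else:
+        ...  # NumPy 1.x fallback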
+ + Segfaults or crashes ==================== diff --git a/environment.yml b/environment.yml index 0690d6fdac6c..7e347bccb6c9 100644 --- a/environment.yml +++ b/environment.yml @@ -17,7 +17,7 @@ dependencies: - pkg-config - meson-python - pip - - spin + - spin=0.8 # Unpin when spin 0.9.1 is released - ccache # For testing - pytest @@ -26,7 +26,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.2.0 # needed for python < 3.10 - - mypy=1.7.1 + - mypy=1.10.0 # For building docs - sphinx>=4.5.0 - sphinx-design @@ -35,8 +35,9 @@ dependencies: - scipy - pandas - matplotlib - - pydata-sphinx-theme=0.13.3 + - pydata-sphinx-theme>=0.15.2 - doxygen + - towncrier # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 1afbe3d8ebd0..0270f0ee988f 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -68,36 +68,28 @@ cdef extern from "numpy/arrayobject.h": ctypedef long double npy_float128 ctypedef struct npy_cfloat: - float real - float imag + pass ctypedef struct npy_cdouble: - double real - double imag + pass ctypedef struct npy_clongdouble: - long double real - long double imag + pass ctypedef struct npy_complex64: - float real - float imag + pass ctypedef struct npy_complex128: - double real - double imag + pass ctypedef struct npy_complex160: - long double real - long double imag + pass ctypedef struct npy_complex192: - long double real - long double imag + pass ctypedef struct npy_complex256: - long double real - long double imag + pass ctypedef struct PyArray_Dims: npy_intp *ptr @@ -291,11 +283,11 @@ cdef extern from "numpy/arrayobject.h": cdef int type_num @property - cdef inline npy_intp itemsize(self) nogil: + cdef inline npy_intp itemsize(self) noexcept nogil: return PyDataType_ELSIZE(self) @property - cdef inline npy_intp alignment(self) nogil: + cdef inline npy_intp alignment(self) noexcept nogil: return PyDataType_ALIGNMENT(self) # Use fields/names with care as they may be NULL. You must check @@ -312,11 +304,11 @@ cdef extern from "numpy/arrayobject.h": # valid (the pointer can be NULL). Most users should access # this field via the inline helper method PyDataType_SHAPE. 
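+    # Cython 3 assumes cdef functions may raise unless marked ``noexcept``;
+    # propagating an exception requires the GIL, so these trivial accessors
+    # are declared ``noexcept nogil``.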
@property - cdef inline PyArray_ArrayDescr* subarray(self) nogil: + cdef inline PyArray_ArrayDescr* subarray(self) noexcept nogil: return PyDataType_SUBARRAY(self) @property - cdef inline npy_uint64 flags(self) nogil: + cdef inline npy_uint64 flags(self) noexcept nogil: """The data types flags.""" return PyDataType_FLAGS(self) @@ -328,32 +320,32 @@ cdef extern from "numpy/arrayobject.h": ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: @property - cdef inline int numiter(self) nogil: + cdef inline int numiter(self) noexcept nogil: """The number of arrays that need to be broadcast to the same shape.""" return PyArray_MultiIter_NUMITER(self) @property - cdef inline npy_intp size(self) nogil: + cdef inline npy_intp size(self) noexcept nogil: """The total broadcasted size.""" return PyArray_MultiIter_SIZE(self) @property - cdef inline npy_intp index(self) nogil: + cdef inline npy_intp index(self) noexcept nogil: """The current (1-d) index into the broadcasted result.""" return PyArray_MultiIter_INDEX(self) @property - cdef inline int nd(self) nogil: + cdef inline int nd(self) noexcept nogil: """The number of dimensions in the broadcasted result.""" return PyArray_MultiIter_NDIM(self) @property - cdef inline npy_intp* dimensions(self) nogil: + cdef inline npy_intp* dimensions(self) noexcept nogil: """The shape of the broadcasted result.""" return PyArray_MultiIter_DIMS(self) @property - cdef inline void** iters(self) nogil: + cdef inline void** iters(self) noexcept nogil: """An array of iterator objects that holds the iterators for the arrays to be broadcast together. On return, the iterators are adjusted for broadcasting.""" return PyArray_MultiIter_ITERS(self) @@ -371,7 +363,7 @@ cdef extern from "numpy/arrayobject.h": # Instead, we use properties that map to the corresponding C-API functions. @property - cdef inline PyObject* base(self) nogil: + cdef inline PyObject* base(self) noexcept nogil: """Returns a borrowed reference to the object owning the data/memory. """ return PyArray_BASE(self) @@ -383,13 +375,13 @@ cdef extern from "numpy/arrayobject.h": return PyArray_DESCR(self) @property - cdef inline int ndim(self) nogil: + cdef inline int ndim(self) noexcept nogil: """Returns the number of dimensions in the array. """ return PyArray_NDIM(self) @property - cdef inline npy_intp *shape(self) nogil: + cdef inline npy_intp *shape(self) noexcept nogil: """Returns a pointer to the dimensions/shape of the array. The number of elements matches the number of dimensions of the array (ndim). Can return NULL for 0-dimensional arrays. @@ -397,20 +389,20 @@ cdef extern from "numpy/arrayobject.h": return PyArray_DIMS(self) @property - cdef inline npy_intp *strides(self) nogil: + cdef inline npy_intp *strides(self) noexcept nogil: """Returns a pointer to the strides of the array. The number of elements matches the number of dimensions of the array (ndim). """ return PyArray_STRIDES(self) @property - cdef inline npy_intp size(self) nogil: + cdef inline npy_intp size(self) noexcept nogil: """Returns the total size (in number of elements) of the array. """ return PyArray_SIZE(self) @property - cdef inline char* data(self) nogil: + cdef inline char* data(self) noexcept nogil: """The pointer to the data buffer as a char*. This is provided for legacy reasons to avoid direct struct field access. 
For new code that needs this access, you probably want to cast the result @@ -562,7 +554,6 @@ cdef extern from "numpy/arrayobject.h": object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) void PyArray_FILLWBYTE(ndarray, int val) - npy_intp PyArray_REFCOUNT(object) object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) bint PyArray_EquivByteorders(int b1, int b2) nogil @@ -808,11 +799,10 @@ ctypedef npy_double float_t ctypedef npy_double double_t ctypedef npy_longdouble longdouble_t -ctypedef npy_cfloat cfloat_t -ctypedef npy_cdouble cdouble_t -ctypedef npy_clongdouble clongdouble_t - -ctypedef npy_cdouble complex_t +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t cdef inline object PyArray_MultiIterNew1(a): return PyArray_MultiIterNew(1, a) @@ -851,6 +841,7 @@ cdef extern from "numpy/ndarraytypes.h": int64_t year int32_t month, day, hour, min, sec, us, ps, as + cdef extern from "numpy/arrayscalars.h": # abstract types @@ -1016,7 +1007,7 @@ cdef extern from "numpy/ufuncobject.h": int _import_umath() except -1 -cdef inline void set_array_base(ndarray arr, object base): +cdef inline void set_array_base(ndarray arr, object base) except *: Py_INCREF(base) # important to do this before stealing the reference below! PyArray_SetBaseObject(arr, base) @@ -1047,7 +1038,7 @@ cdef inline int import_ufunc() except -1: raise ImportError("numpy._core.umath failed to import") -cdef inline bint is_timedelta64_object(object obj): +cdef inline bint is_timedelta64_object(object obj) noexcept: """ Cython equivalent of `isinstance(obj, np.timedelta64)` @@ -1062,7 +1053,7 @@ cdef inline bint is_timedelta64_object(object obj): return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) -cdef inline bint is_datetime64_object(object obj): +cdef inline bint is_datetime64_object(object obj) noexcept: """ Cython equivalent of `isinstance(obj, np.datetime64)` @@ -1077,7 +1068,7 @@ cdef inline bint is_datetime64_object(object obj): return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) -cdef inline npy_datetime get_datetime64_value(object obj) nogil: +cdef inline npy_datetime get_datetime64_value(object obj) noexcept nogil: """ returns the int64 value underlying scalar numpy datetime64 object @@ -1087,14 +1078,14 @@ cdef inline npy_datetime get_datetime64_value(object obj) nogil: return (obj).obval -cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: +cdef inline npy_timedelta get_timedelta64_value(object obj) noexcept nogil: """ returns the int64 value underlying scalar numpy timedelta64 object """ return (obj).obval -cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil: """ returns the unit part of the dtype for a numpy datetime64 object. """ diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index 096714f6d7cd..aebb71fffa9c 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -16,13 +16,27 @@ from cpython.buffer cimport PyObject_GetBuffer from cpython.type cimport type cimport libc.stdio as stdio + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. 
+ # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.pxd" */ + """ + + cdef extern from "Python.h": ctypedef int Py_intptr_t bint PyObject_TypeCheck(object obj, PyTypeObject* type) cdef extern from "numpy/arrayobject.h": - ctypedef Py_intptr_t npy_intp - ctypedef size_t npy_uintp + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) + ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp ctypedef unsigned char npy_bool @@ -63,36 +77,28 @@ cdef extern from "numpy/arrayobject.h": ctypedef long double npy_float128 ctypedef struct npy_cfloat: - float real - float imag + pass ctypedef struct npy_cdouble: - double real - double imag + pass ctypedef struct npy_clongdouble: - long double real - long double imag + pass ctypedef struct npy_complex64: - float real - float imag + pass ctypedef struct npy_complex128: - double real - double imag + pass ctypedef struct npy_complex160: - long double real - long double imag + pass ctypedef struct npy_complex192: - long double real - long double imag + pass ctypedef struct npy_complex256: - long double real - long double imag + pass ctypedef struct PyArray_Dims: npy_intp *ptr @@ -154,7 +160,7 @@ cdef extern from "numpy/arrayobject.h": NPY_COMPLEX512 NPY_INTP - NPY_DEFAULT_INT + NPY_DEFAULT_INT # Not a compile time constant (normally)! ctypedef enum NPY_ORDER: NPY_ANYORDER @@ -350,7 +356,10 @@ cdef extern from "numpy/arrayobject.h": PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. 
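+    # Note: the references returned by PyArray_DESCR/PyArray_DTYPE are
+    # borrowed; do not Py_DECREF them, the array owns its descriptor.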
int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 npy_intp PyArray_ITEMSIZE(ndarray) nogil int PyArray_TYPE(ndarray arr) nogil @@ -371,7 +380,6 @@ cdef extern from "numpy/arrayobject.h": bint PyTypeNum_ISOBJECT(int) nogil npy_intp PyDataType_ELSIZE(dtype) nogil - void PyDataType_SET_ELSIZE(dtype, npy_intp) nogil npy_intp PyDataType_ALIGNMENT(dtype) nogil PyObject* PyDataType_METADATA(dtype) nogil PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil @@ -501,6 +509,12 @@ cdef extern from "numpy/arrayobject.h": void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil # Functions from __multiarray_api.h @@ -700,11 +714,10 @@ ctypedef npy_double float_t ctypedef npy_double double_t ctypedef npy_longdouble longdouble_t -ctypedef npy_cfloat cfloat_t -ctypedef npy_cdouble cdouble_t -ctypedef npy_clongdouble clongdouble_t - -ctypedef npy_cdouble complex_t +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t cdef inline object PyArray_MultiIterNew1(a): return PyArray_MultiIterNew(1, a) @@ -939,13 +952,6 @@ cdef inline int import_ufunc() except -1: except Exception: raise ImportError("numpy._core.umath failed to import") -cdef extern from *: - # Leave a marker that the NumPy declarations came from this file - # See https://github.com/cython/cython/issues/3573 - """ - /* NumPy API declarations from "numpy/__init__.pxd" */ - """ - cdef inline bint is_timedelta64_object(object obj): """ @@ -999,3 +1005,137 @@ cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: returns the unit part of the dtype for a numpy datetime64 object. 
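    (e.g. ``NPY_FR_s`` for a ``datetime64[s]`` scalar)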
""" return (obj).obmeta.base + + +# Iterator API added in v1.6 +ctypedef int (*NpyIter_IterNextFunc)(NpyIter* it) noexcept nogil +ctypedef void (*NpyIter_GetMultiIndexFunc)(NpyIter* it, npy_intp* outcoords) noexcept nogil + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it, 
npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 51103aaa991f..fb29a758dce5 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2817,6 +2817,8 @@ class bool(generic): __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] +bool_ = bool + class object_(generic): def __init__(self, value: object = ..., /) -> None: ... @property diff --git a/numpy/_build_utils/gitversion.py b/numpy/_build_utils/gitversion.py index 4ee6e00bbd65..defc704c41eb 100644 --- a/numpy/_build_utils/gitversion.py +++ b/numpy/_build_utils/gitversion.py @@ -70,6 +70,9 @@ def git_version(version): # For NumPy 2.0, this should only have one field: `version` template = textwrap.dedent(f''' + """ + Module to expose more detailed version info for the installed `numpy` + """ version = "{version}" __version__ = version full_version = version diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 0a0322ae292a..df1eed2f1fb8 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -948,6 +948,8 @@ the other requirements (``dtype``, ``order``, etc.). 
For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. Default: ``None``. + + .. versionadded:: 2.0.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 @@ -2943,16 +2945,20 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array__', """ - a.__array__([dtype], /, *, copy=None) + a.__array__([dtype], *, copy=None) - For ``dtype`` parameter it returns either a new reference to self if - ``dtype`` is not given or a new array of provided data type if ``dtype`` + For ``dtype`` parameter it returns a new reference to self if + ``dtype`` is not given or it matches array's data type. + A new array of provided data type is returned if ``dtype`` is different from the current data type of the array. For ``copy`` parameter it returns a new reference to self if ``copy=False`` or ``copy=None`` and copying isn't enforced by ``dtype`` parameter. The method returns a new array for ``copy=True``, regardless of ``dtype`` parameter. + A more detailed explanation of the ``__array__`` interface + can be found in :ref:`dunder_array.interface`. + """)) diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index 8d6dc04851b5..058e93644dec 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -560,7 +560,7 @@ def _view_is_safe(oldtype, newtype): return if newtype.hasobject or oldtype.hasobject: - raise TypeError("Cannot change data-type for object array.") + raise TypeError("Cannot change data-type for array of references.") return diff --git a/numpy/_core/_type_aliases.py b/numpy/_core/_type_aliases.py index 13f39a11cb9b..80a59e7b3f52 100644 --- a/numpy/_core/_type_aliases.py +++ b/numpy/_core/_type_aliases.py @@ -93,9 +93,10 @@ # Building `sctypes` #################### -sctypes = {"int": [], "uint": [], "float": [], "complex": [], "others": []} +sctypes = {"int": set(), "uint": set(), "float": set(), + "complex": set(), "others": set()} -for type_info in set(typeinfo.values()): +for type_info in typeinfo.values(): if type_info.kind in ["M", "m"]: # exclude timedelta and datetime continue @@ -108,9 +109,11 @@ ("others", ma.generic) ]: if issubclass(concrete_type, abstract_type): - sctypes[type_group].append(concrete_type) + sctypes[type_group].add(concrete_type) break # sort sctype groups by bitsize -for sctype_list in sctypes.values(): +for sctype_key in sctypes.keys(): + sctype_list = list(sctypes[sctype_key]) sctype_list.sort(key=lambda x: dtype(x).itemsize) + sctypes[sctype_key] = sctype_list diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index ec11beae3f58..fbc19f3f2a6e 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -217,6 +217,10 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, by not inserting spaces after commas that separate fields and after colons. + If set to ``'1.25'`` approximates printing of 1.25 which mainly means + that numeric scalars are printed without their type information, e.g. + as ``3.0`` rather than ``np.float64(3.0)``. + If set to `False`, disables legacy mode. Unrecognized strings will be ignored with a warning for forward @@ -224,6 +228,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, .. versionadded:: 1.14.0 .. versionchanged:: 1.22.0 + .. 
versionchanged:: 2.0 See Also -------- diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index 27e42bcb4c14..6c348c3e0c6a 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -159,7 +159,7 @@ def __str__(self): return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr) def api_hash(self): - m = hashlib.md5() + m = hashlib.md5(usedforsecurity=False) m.update(remove_whitespace(self.return_type)) m.update('\000') m.update(self.name) @@ -532,7 +532,9 @@ def fullapi_hash(api_dicts): a.extend(name) a.extend(','.join(map(str, data))) - return hashlib.md5(''.join(a).encode('ascii')).hexdigest() + return hashlib.md5( + ''.join(a).encode('ascii'), usedforsecurity=False + ).hexdigest() # To parse strings like 'hex = checksum' where hex is e.g. 0x1234567F and # checksum a 128 bits md5 checksum (hex format as well) @@ -554,7 +556,7 @@ def main(): tagname = sys.argv[1] order_file = sys.argv[2] functions = get_api_functions(tagname, order_file) - m = hashlib.md5(tagname) + m = hashlib.md5(tagname, usedforsecurity=False) for func in functions: print(func) ah = func.api_hash() diff --git a/numpy/_core/code_generators/generate_numpy_api.py b/numpy/_core/code_generators/generate_numpy_api.py index d69725e581aa..9e86c9499546 100644 --- a/numpy/_core/code_generators/generate_numpy_api.py +++ b/numpy/_core/code_generators/generate_numpy_api.py @@ -227,6 +227,7 @@ def do_generate_api(targets, sources): # Check multiarray api indexes multiarray_api_index = genapi.merge_api_dicts(multiarray_api) + unused_index_max = max(multiarray_api_index.get("__unused_indices__", 0)) genapi.check_api_dict(multiarray_api_index) numpyapi_list = genapi.get_api_functions('NUMPY_API', @@ -278,6 +279,10 @@ def do_generate_api(targets, sources): init_list.append(api_item.array_api_define()) module_list.append(api_item.internal_define()) + # In case we end with a "hole", append more NULLs + while len(init_list) <= unused_index_max: + init_list.append(" NULL") + # Write to header s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) genapi.write_file(header_file, s) diff --git a/numpy/_core/code_generators/numpy_api.py b/numpy/_core/code_generators/numpy_api.py index 7dbaeff4940b..30e2222e557e 100644 --- a/numpy/_core/code_generators/numpy_api.py +++ b/numpy/_core/code_generators/numpy_api.py @@ -94,6 +94,7 @@ def get_annotations(): # NOTE: The Slots 320-360 are defined in `_experimental_dtype_api.h` # and filled explicitly outside the code generator as the metaclass # makes them tricky to expose. (This may be refactored.) + # Slot 366, 367, 368 are the abstract DTypes # End 2.0 API } @@ -107,7 +108,8 @@ def get_annotations(): 103, 115, 117, 122, 163, 164, 171, 173, 197, 201, 202, 208, 219, 220, 221, 222, 223, 278, 291, 293, 294, 295, 301] - + list(range(320, 361)) # range reserved DType class slots + # range/slots reserved DType classes (see _public_dtype_api_table.h): + + list(range(320, 361)) + [366, 367, 368] ), 'PyArray_GetNDArrayCVersion': (0,), # Unused slot 40, was `PyArray_SetNumericOps` diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 6a8946be3dee..864bc4420451 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -3618,8 +3618,10 @@ def add_newdoc(place, name, doc): Notes ----- There is more than one definition of sign in common use for complex - numbers. 
The definition used here is equivalent to :math:`x/\\sqrt{x*x}` - which is different from a common alternative, :math:`x/|x|`. + numbers. The definition used here, :math:`x/|x|`, is the more common + and useful one, but is different from the one used in numpy prior to + version 2.0, :math:`x/\\sqrt{x*x}`, which is equivalent to + ``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``. Examples -------- @@ -4448,7 +4450,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x : array_like, with `np.bytes_` or `np.str_` dtype + x : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype $PARAMS Returns @@ -4477,7 +4479,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x : array_like, with `np.bytes_` or `np.str_` dtype + x : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype $PARAMS Returns @@ -4507,7 +4509,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x : array_like, with `np.bytes_` or `np.str_` dtype + x : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype $PARAMS Returns @@ -4537,7 +4539,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x : array_like, with `np.bytes_` or `np.str_` dtype + x : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype $PARAMS Returns diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 96dec7543101..44754a747cec 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -24,6 +24,14 @@ from numpy._core import overrides from numpy.strings import * from numpy.strings import multiply as strings_multiply +from numpy._core.strings import ( + _partition as partition, + _rpartition as rpartition, + _split as split, + _rsplit as rsplit, + _splitlines as splitlines, + _join as join, +) __all__ = [ 'equal', 'not_equal', 'greater_equal', 'less_equal', diff --git a/numpy/_core/include/numpy/_numpyconfig.h.in b/numpy/_core/include/numpy/_numpyconfig.h.in index 0491877e3164..79b2ee3449a5 100644 --- a/numpy/_core/include/numpy/_numpyconfig.h.in +++ b/numpy/_core/include/numpy/_numpyconfig.h.in @@ -17,9 +17,11 @@ #mesondefine NPY_SIZEOF_PY_LONG_LONG #mesondefine NPY_SIZEOF_LONGLONG -#mesondefine NPY_USE_C99_FORMATS - -#mesondefine NPY_NO_SIGNAL +/* + * Defined to 1 or 0. Note that Pyodide hardcodes NPY_NO_SMP (and other defines + * in this header) for better cross-compilation, so don't rename them without a + * good reason. + */ #mesondefine NPY_NO_SMP #mesondefine NPY_VISIBILITY_HIDDEN diff --git a/numpy/_core/include/numpy/_public_dtype_api_table.h b/numpy/_core/include/numpy/_public_dtype_api_table.h index 5fbbdd785e4e..51f390540627 100644 --- a/numpy/_core/include/numpy/_public_dtype_api_table.h +++ b/numpy/_core/include/numpy/_public_dtype_api_table.h @@ -4,6 +4,9 @@ * * These definitions are only relevant for the public API and we reserve * the slots 320-360 in the API table generation for this (currently). + * + * TODO: This file should be consolidated with the API table generation + * (although not sure the current generation is worth preserving). 
*/ #ifndef NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ #define NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ @@ -61,17 +64,21 @@ /* Object/Void */ #define PyArray_ObjectDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[33]) #define PyArray_VoidDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[34]) -/* Abstract */ -#define PyArray_PyIntAbstractDType \ - (*(PyArray_DTypeMeta *)(PyArray_API + 320)[35]) -#define PyArray_PyFloatAbstractDType \ - (*(PyArray_DTypeMeta *)(PyArray_API + 320)[36]) -#define PyArray_PyComplexAbstractDType \ - (*(PyArray_DTypeMeta *)(PyArray_API + 320)[37]) +/* Python types (used as markers for scalars) */ +#define PyArray_PyLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[35]) +#define PyArray_PyFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[36]) +#define PyArray_PyComplexDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[37]) +/* Default integer type */ #define PyArray_DefaultIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[38]) /* New non-legacy DTypes follow in the order they were added */ #define PyArray_StringDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[39]) -/* NOTE: offset 40 is free, after that a new range will need to be used */ + +/* NOTE: offset 40 is free */ + +/* Need to start with a larger offset again for the abstract classes: */ +#define PyArray_IntAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[366]) +#define PyArray_FloatAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[367]) +#define PyArray_ComplexAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[368]) #endif /* NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION */ diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index f21d0e6558f3..9dd3effa3a80 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -106,7 +106,7 @@ typedef struct PyArrayMethod_Context_tag { struct PyArrayMethodObject_tag *method; /* Operand descriptors, filled in by resolve_descriptors */ - PyArray_Descr **descriptors; + PyArray_Descr *const *descriptors; /* Structure may grow (this is harmless for DType authors) */ } PyArrayMethod_Context; @@ -159,9 +159,9 @@ typedef NPY_CASTING (PyArrayMethod_ResolveDescriptors)( /* "method" is currently opaque (necessary e.g. to wrap Python) */ struct PyArrayMethodObject_tag *method, /* DTypes the method was created for */ - PyArray_DTypeMeta **dtypes, + PyArray_DTypeMeta *const *dtypes, /* Input descriptors (instances). Outputs may be NULL. */ - PyArray_Descr **given_descrs, + PyArray_Descr *const *given_descrs, /* Exact loop descriptors to use, must not hold references on error */ PyArray_Descr **loop_descrs, npy_intp *view_offset); @@ -177,9 +177,9 @@ typedef NPY_CASTING (PyArrayMethod_ResolveDescriptors)( */ typedef NPY_CASTING (PyArrayMethod_ResolveDescriptorsWithScalar)( struct PyArrayMethodObject_tag *method, - PyArray_DTypeMeta **dtypes, + PyArray_DTypeMeta *const *dtypes, /* Unlike above, these can have any DType and we may allow NULL. */ - PyArray_Descr **given_descrs, + PyArray_Descr *const *given_descrs, /* * Input scalars or NULL. Only ever passed for python scalars. 
* WARNING: In some cases, a loop may be explicitly selected and the @@ -227,7 +227,7 @@ typedef int (PyArrayMethod_GetLoop)( */ typedef int (PyArrayMethod_GetReductionInitial)( PyArrayMethod_Context *context, npy_bool reduction_is_empty, - char *initial); + void *initial); /* * The following functions are only used by the wrapping array method defined @@ -256,8 +256,8 @@ typedef int (PyArrayMethod_GetReductionInitial)( * `resolve_descriptors`, so that it can be filled there if not NULL.) */ typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout, - PyArray_DTypeMeta *wrapped_dtypes[], - PyArray_Descr *given_descrs[], PyArray_Descr *new_descrs[]); + PyArray_DTypeMeta *const wrapped_dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *new_descrs[]); /** * The function to convert the actual loop descriptors (as returned by the @@ -278,7 +278,7 @@ typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout, * @returns 0 on success, -1 on failure. */ typedef int (PyArrayMethod_TranslateLoopDescriptors)(int nin, int nout, - PyArray_DTypeMeta *new_dtypes[], PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const new_dtypes[], PyArray_Descr *const given_descrs[], PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]); @@ -303,7 +303,7 @@ typedef int (PyArrayMethod_TranslateLoopDescriptors)(int nin, int nout, * */ typedef int (PyArrayMethod_TraverseLoop)( - void *traverse_context, PyArray_Descr *descr, char *data, + void *traverse_context, const PyArray_Descr *descr, char *data, npy_intp size, npy_intp stride, NpyAuxData *auxdata); @@ -317,7 +317,7 @@ typedef int (PyArrayMethod_TraverseLoop)( * */ typedef int (PyArrayMethod_GetTraverseLoop)( - void *traverse_context, PyArray_Descr *descr, + void *traverse_context, const PyArray_Descr *descr, int aligned, npy_intp fixed_stride, PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags); @@ -334,7 +334,7 @@ typedef int (PyArrayMethod_GetTraverseLoop)( * (There are potential use-cases, these are currently unsupported.) */ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]); /* @@ -449,7 +449,7 @@ typedef PyArray_DTypeMeta *(PyArrayDTypeMeta_CommonDType)( static inline PyArray_DTypeMeta * NPY_DT_NewRef(PyArray_DTypeMeta *o) { - Py_INCREF(o); + Py_INCREF((PyObject *)o); return o; } diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 31aa3e4d330e..95821b0baff2 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -8,8 +8,8 @@ #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP +/* Always allow threading unless it was explicitly disabled at build time */ +#if !NPY_NO_SMP #define NPY_ALLOW_THREADS 1 #else #define NPY_ALLOW_THREADS 0 @@ -1298,9 +1298,16 @@ typedef struct { * growing structs (as of Cython 3.0.6). It also allows NPY_MAXARGS * to be runtime dependent. 
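 * (Correspondingly, the npy_2_compat.h header below defines NPY_MAXARGS
 * as 64 when running on NumPy 2.x and as 32 on older versions.)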
 */
-#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
-    PyArrayIterObject *iters[64]; /* 64 is NPY_MAXARGS */
-#else /* not internal build */
+#if (defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
+    PyArrayIterObject *iters[64];
+#elif defined(__cplusplus)
+    /*
+     * C++ doesn't strictly support flexible members and gives compiler
+     * warnings (pedantic only), so we lie. We can't make it 64 because
+     * then Cython is unhappy (a larger struct at runtime is OK, a smaller
+     * one is not).
+     */
+    PyArrayIterObject *iters[32];
+#else
     PyArrayIterObject *iters[];
 #endif
 } PyArrayMultiIterObject;
diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h
index 1d6d512f95b5..2f1555397faf 100644
--- a/numpy/_core/include/numpy/npy_2_compat.h
+++ b/numpy/_core/include/numpy/npy_2_compat.h
@@ -46,7 +46,7 @@
 #error "The NumPy 2 compat header requires `import_array()` for which " \
        "the `ndarraytypes.h` header include is not sufficient. Please " \
        "include it after `numpy/ndarrayobject.h` or similar.\n" \
-       "To simplify includsion, you may use `PyArray_ImportNumPy()` " \
+       "To simplify inclusion, you may use `PyArray_ImportNumPy()` " \
        "which is defined in the compat header and is lightweight (can be)."
 #endif

@@ -74,7 +74,7 @@
 #ifdef import_array1

 static inline int
-PyArray_ImportNumPyAPI()
+PyArray_ImportNumPyAPI(void)
 {
     if (NPY_UNLIKELY(PyArray_API == NULL)) {
         import_array1(-1);
@@ -125,7 +125,7 @@ PyArray_ImportNumPyAPI()
 #define NPY_DEFAULT_INT \
     (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_INTP : NPY_LONG)
 #define NPY_RAVEL_AXIS \
-    (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? -1 : 32)
+    (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32)
 #define NPY_MAXARGS \
     (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ?
64 : 32) #endif @@ -220,19 +220,19 @@ DESCR_ACCESSOR(C_METADATA, c_metadata, NpyAuxData *, 1) #if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION static inline PyArray_ArrFuncs * - PyDataType_GetArrFuncs(PyArray_Descr *descr) + PyDataType_GetArrFuncs(const PyArray_Descr *descr) { return _PyDataType_GetArrFuncs(descr); } #elif NPY_ABI_VERSION < 0x02000000 static inline PyArray_ArrFuncs * - PyDataType_GetArrFuncs(PyArray_Descr *descr) + PyDataType_GetArrFuncs(const PyArray_Descr *descr) { return descr->f; } #else static inline PyArray_ArrFuncs * - PyDataType_GetArrFuncs(PyArray_Descr *descr) + PyDataType_GetArrFuncs(const PyArray_Descr *descr) { if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { return _PyDataType_GetArrFuncs(descr); diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index 9fb3f6b3f51f..d11bee123286 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -375,6 +375,7 @@ typedef struct #include + #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) typedef _Dcomplex npy_cdouble; typedef _Fcomplex npy_cfloat; diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 5679b826aa6e..d5b2c6bed41b 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -105,7 +105,7 @@ if use_highway highway_lib = static_library('highway', [ # required for hwy::Abort symbol - 'src/highway/hwy/targets.cc' + 'src/highway/hwy/abort.cc' ], cpp_args: '-DTOOLCHAIN_MISS_ASM_HWCAP_H', include_directories: ['src/highway'], @@ -488,18 +488,10 @@ endif if cc.has_header('sys/endian.h') cdata.set10('NPY_HAVE_SYS_ENDIAN_H', true) endif -if is_windows - cdata.set10('NPY_NO_SIGNAL', true) -endif -# Command-line switch; distutils build checked for `NPY_NOSMP` env var instead -# TODO: document this (search for NPY_NOSMP in C API docs) +# Build-time option to disable threading is stored and exposed in numpyconfig.h +# Note: SMP is an old acronym for threading (Symmetric/Shared-memory MultiProcessing) cdata.set10('NPY_NO_SMP', get_option('disable-threading')) -# Check whether we can use inttypes (C99) formats -if cc.has_header_symbol('inttypes.h', 'PRIdPTR') - cdata.set10('NPY_USE_C99_FORMATS', true) -endif - visibility_hidden = '' if cc.has_function_attribute('visibility:hidden') and host_machine.system() != 'cygwin' visibility_hidden = '__attribute__((visibility("hidden")))' @@ -1203,8 +1195,7 @@ py.extension_module('_multiarray_umath', src_numpy_api[1], # __multiarray_api.h src_umath_doc_h, npy_math_internal_h, - ], - objects: svml_objects, + ] + svml_objects, c_args: c_args_common, cpp_args: cpp_args_common, include_directories: [ @@ -1213,6 +1204,7 @@ py.extension_module('_multiarray_umath', 'src/multiarray', 'src/npymath', 'src/umath', + 'src/highway' ], dependencies: [blas_dep], link_with: [npymath_lib, multiarray_umath_mtargets.static_lib('_multiarray_umath_mtargets')] + highway_lib, diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 429620da5359..d310b12d143f 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -1565,6 +1565,12 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): Supports full broadcasting of the inputs. + Dimension-2 input arrays were deprecated in 2.0.0. If you do need this + functionality, you can use:: + + def cross2d(x, y): + return x[..., 0] * y[..., 1] - x[..., 1] * y[..., 0] + Examples -------- Vector cross-product. 
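For a quick sanity check of the ``cross2d`` workaround shown above (illustrative only, not part of the patch; assumes the usual ``import numpy as np``)::

    >>> import numpy as np
    >>> def cross2d(x, y):
    ...     return x[..., 0] * y[..., 1] - x[..., 1] * y[..., 0]
    >>> float(cross2d(np.array([1.0, 2.0]), np.array([3.0, 4.0])))
    -2.0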
@@ -2142,7 +2148,7 @@ def base_repr(number, base=2, padding=0): elif base < 2: raise ValueError("Bases less than 2 not handled in base_repr.") - num = abs(number) + num = abs(int(number)) res = [] while num: res.append(digits[num % base]) @@ -2497,17 +2503,17 @@ def array_equal(a1, a2, equal_nan=False): if a1.shape != a2.shape: return False if not equal_nan: - return builtins.bool((a1 == a2).all()) - cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) - and _dtype_cannot_hold_nan(a2.dtype)) - if cannot_have_nan: - if a1 is a2: - return True - return builtins.bool((a1 == a2).all()) + return builtins.bool((asanyarray(a1 == a2)).all()) if a1 is a2: # nan will compare equal so an array will compare equal to itself. return True + + cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) + and _dtype_cannot_hold_nan(a2.dtype)) + if cannot_have_nan: + return builtins.bool(asarray(a1 == a2).all()) + # Handling NaN values if equal_nan is True a1nan, a2nan = isnan(a1), isnan(a2) # NaN's occur at different locations @@ -2566,7 +2572,7 @@ def array_equiv(a1, a2): except Exception: return False - return builtins.bool((a1 == a2).all()) + return builtins.bool(asanyarray(a1 == a2).all()) def _astype_dispatcher(x, dtype, /, *, copy=None): diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index a24c368cbd08..0ad6b8fcc71a 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1,3 +1,4 @@ +import sys from collections.abc import Callable, Sequence from typing import ( Any, diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index 602ba9a051dd..ac52cff49db2 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -360,7 +360,11 @@ def issubsctype(arg1, arg2): return issubclass(obj2sctype(arg1), obj2sctype(arg2)) -def _preprocess_dtype(dtype, err_msg): +class _PreprocessDTypeError(Exception): + pass + + +def _preprocess_dtype(dtype): """ Preprocess dtype argument by: 1. fetching type from a data type @@ -369,7 +373,7 @@ def _preprocess_dtype(dtype, err_msg): if isinstance(dtype, ma.dtype): dtype = dtype.type if isinstance(dtype, ndarray) or dtype not in allTypes.values(): - raise TypeError(f"{err_msg}, but it is a {type(dtype)}.") + raise _PreprocessDTypeError() return dtype @@ -414,9 +418,13 @@ def isdtype(dtype, kind): True """ - dtype = _preprocess_dtype( - dtype, err_msg="dtype argument must be a NumPy dtype" - ) + try: + dtype = _preprocess_dtype(dtype) + except _PreprocessDTypeError: + raise TypeError( + "dtype argument must be a NumPy dtype, " + f"but it is a {type(dtype)}." + ) from None input_kinds = kind if isinstance(kind, tuple) else (kind,) @@ -440,12 +448,20 @@ def isdtype(dtype, kind): sctypes["int"] + sctypes["uint"] + sctypes["float"] + sctypes["complex"] ) - else: - kind = _preprocess_dtype( - kind, - err_msg="kind argument must be comprised of " - "NumPy dtypes or strings only" + elif isinstance(kind, str): + raise ValueError( + "kind argument is a string, but" + f" {repr(kind)} is not a known kind name." ) + else: + try: + kind = _preprocess_dtype(kind) + except _PreprocessDTypeError: + raise TypeError( + "kind argument must be comprised of " + "NumPy dtypes or strings only, " + f"but is a {type(kind)}." 
+ ) from None processed_kinds.add(kind) return dtype in processed_kinds diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 79755e09bb40..8bdeec15c6d2 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -583,7 +583,7 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None, ``arrayList[0]``. formats, names, titles, aligned, byteorder : If `dtype` is ``None``, these arguments are passed to - `numpy.format_parser` to construct a dtype. See that function for + `numpy.rec.format_parser` to construct a dtype. See that function for detailed documentation. Returns diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 6424f100f8c5..e7de3d10c521 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -174,7 +174,7 @@ def fromrecords( dtype: None = ..., shape: None | _ShapeLike = ..., *, - formats: DTypeLike, + formats: DTypeLike = ..., names: None | str | Sequence[str] = ..., titles: None | str | Sequence[str] = ..., aligned: bool = ..., diff --git a/numpy/_core/src/common/python_xerbla.c b/numpy/_core/src/common/python_xerbla.c index 37a41408be22..71a4c81edbf1 100644 --- a/numpy/_core/src/common/python_xerbla.c +++ b/numpy/_core/src/common/python_xerbla.c @@ -28,22 +28,16 @@ CBLAS_INT BLAS_FUNC(xerbla)(char *srname, CBLAS_INT *info) char buf[sizeof(format) + 6 + 4]; /* 6 for name, 4 for param. num. */ int len = 0; /* length of subroutine name*/ -#ifdef WITH_THREAD PyGILState_STATE save; -#endif while( len<6 && srname[len]!='\0' ) len++; while( len && srname[len-1]==' ' ) len--; -#ifdef WITH_THREAD save = PyGILState_Ensure(); -#endif PyOS_snprintf(buf, sizeof(buf), format, len, srname, (int)*info); PyErr_SetString(PyExc_ValueError, buf); -#ifdef WITH_THREAD PyGILState_Release(save); -#endif return 0; } diff --git a/numpy/_core/src/common/simd/avx512/avx512.h b/numpy/_core/src/common/simd/avx512/avx512.h index aa6abe256424..2a4a20b2970d 100644 --- a/numpy/_core/src/common/simd/avx512/avx512.h +++ b/numpy/_core/src/common/simd/avx512/avx512.h @@ -11,6 +11,8 @@ // Enough limit to allow us to use _mm512_i32gather_* and _mm512_i32scatter_* #define NPY_SIMD_MAXLOAD_STRIDE32 (0x7fffffff / 16) #define NPY_SIMD_MAXSTORE_STRIDE32 (0x7fffffff / 16) +#define NPY_SIMD_MAXLOAD_STRIDE64 (0x7fffffff / 16) +#define NPY_SIMD_MAXSTORE_STRIDE64 (0x7fffffff / 16) typedef __m512i npyv_u8; typedef __m512i npyv_s8; diff --git a/numpy/_core/src/common/simd/avx512/conversion.h b/numpy/_core/src/common/simd/avx512/conversion.h index 474aee446b6a..3b29b6729f20 100644 --- a/numpy/_core/src/common/simd/avx512/conversion.h +++ b/numpy/_core/src/common/simd/avx512/conversion.h @@ -131,20 +131,44 @@ npyv_pack_b8_b64(npyv_b64 a, npyv_b64 b, npyv_b64 c, npyv_b64 d, __mmask16 gh = _mm512_kunpackb((__mmask16)h, (__mmask16)g); return npyv_pack_b8_b32(ab, cd, ef, gh); } - +/* + * A compiler bug workaround on Intel Compiler Classic. + * The bug manifests specifically when the + * scalar result of _cvtmask64_u64 is compared against the constant -1. This + * comparison uniquely triggers a bug under conditions of equality (==) or + * inequality (!=) checks, which are typically used in reduction operations like + * np.logical_or. + * + * The underlying issue arises from the compiler's optimizer. When the last + * vector comparison instruction operates on zmm, the optimizer erroneously + * emits a duplicate of this instruction but on the lower half register ymm. 
It + * then performs a bitwise XOR operation between the mask produced by this + * duplicated instruction and the mask from the original comparison instruction. + * This erroneous behavior leads to incorrect results. + * + * See https://github.com/numpy/numpy/issues/26197#issuecomment-2056750975 + */ +#ifdef __INTEL_COMPILER +#define NPYV__VOLATILE_CVTMASK64 volatile +#else +#define NPYV__VOLATILE_CVTMASK64 +#endif // convert boolean vectors to integer bitfield -NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) -{ +NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) { #ifdef NPY_HAVE_AVX512BW_MASK - return (npy_uint64)_cvtmask64_u64(a); + npy_uint64 NPYV__VOLATILE_CVTMASK64 t = (npy_uint64)_cvtmask64_u64(a); + return t; #elif defined(NPY_HAVE_AVX512BW) - return (npy_uint64)a; + npy_uint64 NPYV__VOLATILE_CVTMASK64 t = (npy_uint64)a; + return t; #else int mask_lo = _mm256_movemask_epi8(npyv512_lower_si256(a)); int mask_hi = _mm256_movemask_epi8(npyv512_higher_si256(a)); return (unsigned)mask_lo | ((npy_uint64)(unsigned)mask_hi << 32); #endif } +#undef NPYV__VOLATILE_CVTMASK64 + NPY_FINLINE npy_uint64 npyv_tobits_b16(npyv_b16 a) { #ifdef NPY_HAVE_AVX512BW_MASK diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 58b52a717469..5975f5ef76c3 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 58b52a717469e62b2d9b8eaa2f5dddb44d4a4cbf +Subproject commit 5975f5ef76c3e4364844d869454046f0f8420ef8 diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c index 3142411b2b61..8d00084f0efe 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -21,7 +21,7 @@ int_default_descriptor(PyArray_DTypeMeta* NPY_UNUSED(cls)) } static PyArray_Descr * -discover_descriptor_from_pyint( +discover_descriptor_from_pylong( PyArray_DTypeMeta *NPY_UNUSED(cls), PyObject *obj) { assert(PyLong_Check(obj)); @@ -85,33 +85,49 @@ discover_descriptor_from_pycomplex( NPY_NO_EXPORT int initialize_and_map_pytypes_to_dtypes() { - ((PyTypeObject *)&PyArray_PyIntAbstractDType)->tp_base = &PyArrayDescr_Type; - PyArray_PyIntAbstractDType.scalar_type = &PyLong_Type; - if (PyType_Ready((PyTypeObject *)&PyArray_PyIntAbstractDType) < 0) { + if (PyType_Ready((PyTypeObject *)&PyArray_IntAbstractDType) < 0) { return -1; } - ((PyTypeObject *)&PyArray_PyFloatAbstractDType)->tp_base = &PyArrayDescr_Type; - PyArray_PyFloatAbstractDType.scalar_type = &PyFloat_Type; - if (PyType_Ready((PyTypeObject *)&PyArray_PyFloatAbstractDType) < 0) { + if (PyType_Ready((PyTypeObject *)&PyArray_FloatAbstractDType) < 0) { return -1; } - ((PyTypeObject *)&PyArray_PyComplexAbstractDType)->tp_base = &PyArrayDescr_Type; - PyArray_PyComplexAbstractDType.scalar_type = &PyComplex_Type; - if (PyType_Ready((PyTypeObject *)&PyArray_PyComplexAbstractDType) < 0) { + if (PyType_Ready((PyTypeObject *)&PyArray_ComplexAbstractDType) < 0) { + return -1; + } + /* + * Delayed assignments to avoid "error C2099: initializer is not a constant" + * in windows compilers. Can hopefully be done in structs in the future. 
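+ * Concretely, the static initializers further down leave these slots as
+ * `.tp_base = NULL` / `.scalar_type = NULL`, and assignments such as
+ * `PyArray_PyLongDType.scalar_type = &PyLong_Type;` are made here instead.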
+ */ + ((PyTypeObject *)&PyArray_PyLongDType)->tp_base = + (PyTypeObject *)&PyArray_IntAbstractDType; + PyArray_PyLongDType.scalar_type = &PyLong_Type; + if (PyType_Ready((PyTypeObject *)&PyArray_PyLongDType) < 0) { + return -1; + } + ((PyTypeObject *)&PyArray_PyFloatDType)->tp_base = + (PyTypeObject *)&PyArray_FloatAbstractDType; + PyArray_PyFloatDType.scalar_type = &PyFloat_Type; + if (PyType_Ready((PyTypeObject *)&PyArray_PyFloatDType) < 0) { + return -1; + } + ((PyTypeObject *)&PyArray_PyComplexDType)->tp_base = + (PyTypeObject *)&PyArray_ComplexAbstractDType; + PyArray_PyComplexDType.scalar_type = &PyComplex_Type; + if (PyType_Ready((PyTypeObject *)&PyArray_PyComplexDType) < 0) { return -1; } /* Register the new DTypes for discovery */ if (_PyArray_MapPyTypeToDType( - &PyArray_PyIntAbstractDType, &PyLong_Type, NPY_FALSE) < 0) { + &PyArray_PyLongDType, &PyLong_Type, NPY_FALSE) < 0) { return -1; } if (_PyArray_MapPyTypeToDType( - &PyArray_PyFloatAbstractDType, &PyFloat_Type, NPY_FALSE) < 0) { + &PyArray_PyFloatDType, &PyFloat_Type, NPY_FALSE) < 0) { return -1; } if (_PyArray_MapPyTypeToDType( - &PyArray_PyComplexAbstractDType, &PyComplex_Type, NPY_FALSE) < 0) { + &PyArray_PyComplexDType, &PyComplex_Type, NPY_FALSE) < 0) { return -1; } @@ -205,7 +221,7 @@ float_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) return NPY_DT_NewRef(&PyArray_DoubleDType); } } - else if (other == &PyArray_PyIntAbstractDType) { + else if (other == &PyArray_PyLongDType) { Py_INCREF(cls); return cls; } @@ -261,8 +277,8 @@ complex_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) return res; } - else if (other == &PyArray_PyIntAbstractDType || - other == &PyArray_PyFloatAbstractDType) { + else if (other == &PyArray_PyLongDType || + other == &PyArray_PyFloatDType) { Py_INCREF(cls); return cls; } @@ -272,59 +288,93 @@ complex_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) /* - * TODO: These abstract DTypes also carry the dual role of representing - * `Floating`, `Complex`, and `Integer` (both signed and unsigned). - * They will have to be renamed and exposed in that capacity. + * Define abstract numerical DTypes that all regular ones can inherit from + * (in arraytypes.c.src). + * Here, also define types corresponding to the python scalars. 
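+ * The hierarchy assembled at runtime is, e.g.:
+ *   PyArray_PyLongDType -> PyArray_IntAbstractDType -> PyArrayDescr_Type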
*/ -NPY_DType_Slots pyintabstractdtype_slots = { - .discover_descr_from_pyobject = discover_descriptor_from_pyint, +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_IntAbstractDType = {{{ + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_name = "numpy.dtypes._IntegerAbstractDType", + .tp_base = &PyArrayDescr_Type, + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + },}, + .type_num = -1, + .flags = NPY_DT_ABSTRACT, +}; + +NPY_DType_Slots pylongdtype_slots = { + .discover_descr_from_pyobject = discover_descriptor_from_pylong, .default_descr = int_default_descriptor, .common_dtype = int_common_dtype, }; -NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyIntAbstractDType = {{{ +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyLongDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._IntegerAbstractDType", + .tp_name = "numpy.dtypes._PyLongDType", + .tp_base = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, - .flags = NPY_DT_ABSTRACT, - .dt_slots = &pyintabstractdtype_slots, + .dt_slots = &pylongdtype_slots, + .scalar_type = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ }; +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_FloatAbstractDType = {{{ + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_name = "numpy.dtypes._FloatAbstractDType", + .tp_base = &PyArrayDescr_Type, + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + },}, + .type_num = -1, + .flags = NPY_DT_ABSTRACT, +}; -NPY_DType_Slots pyfloatabstractdtype_slots = { +NPY_DType_Slots pyfloatdtype_slots = { .discover_descr_from_pyobject = discover_descriptor_from_pyfloat, .default_descr = float_default_descriptor, .common_dtype = float_common_dtype, }; -NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatAbstractDType = {{{ +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._FloatAbstractDType", + .tp_name = "numpy.dtypes._PyFloatDType", + .tp_base = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, - .flags = NPY_DT_ABSTRACT, - .dt_slots = &pyfloatabstractdtype_slots, + .dt_slots = &pyfloatdtype_slots, + .scalar_type = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ }; +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_ComplexAbstractDType = {{{ + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_name = "numpy.dtypes._ComplexAbstractDType", + .tp_base = &PyArrayDescr_Type, + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + },}, + .type_num = -1, + .flags = NPY_DT_ABSTRACT, +}; -NPY_DType_Slots pycomplexabstractdtype_slots = { +NPY_DType_Slots pycomplexdtype_slots = { .discover_descr_from_pyobject = discover_descriptor_from_pycomplex, .default_descr = complex_default_descriptor, .common_dtype = complex_common_dtype, }; -NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexAbstractDType = {{{ +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._ComplexAbstractDType", + .tp_name = "numpy.dtypes._PyComplexDType", + .tp_base = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, - .flags = NPY_DT_ABSTRACT, - .dt_slots = &pycomplexabstractdtype_slots, + .dt_slots = &pycomplexdtype_slots, + .scalar_type = NULL, /* 
set in initialize_and_map_pytypes_to_dtypes */ }; diff --git a/numpy/_core/src/multiarray/abstractdtypes.h b/numpy/_core/src/multiarray/abstractdtypes.h index 212994a422ea..b4cf1a13f673 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.h +++ b/numpy/_core/src/multiarray/abstractdtypes.h @@ -14,9 +14,12 @@ extern "C" { * may be necessary to make them (partially) public, to allow user-defined * dtypes to perform value based casting. */ -NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyIntAbstractDType; -NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyFloatAbstractDType; -NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyComplexAbstractDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_IntAbstractDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_FloatAbstractDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_ComplexAbstractDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyLongDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyFloatDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyComplexDType; NPY_NO_EXPORT int initialize_and_map_pytypes_to_dtypes(void); @@ -44,12 +47,14 @@ npy_mark_tmp_array_if_pyscalar( * a custom DType registered, and then we should use that. * Further, `np.float64` is a double subclass, so must reject it. */ + // TODO,NOTE: This function should be changed to do exact long checks + // For NumPy 2.1! if (PyLong_Check(obj) && (PyArray_ISINTEGER(arr) || PyArray_ISOBJECT(arr))) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_INT; if (dtype != NULL) { - Py_INCREF(&PyArray_PyIntAbstractDType); - Py_SETREF(*dtype, &PyArray_PyIntAbstractDType); + Py_INCREF(&PyArray_PyLongDType); + Py_SETREF(*dtype, &PyArray_PyLongDType); } return 1; } @@ -57,8 +62,8 @@ npy_mark_tmp_array_if_pyscalar( && PyArray_TYPE(arr) == NPY_DOUBLE) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_FLOAT; if (dtype != NULL) { - Py_INCREF(&PyArray_PyFloatAbstractDType); - Py_SETREF(*dtype, &PyArray_PyFloatAbstractDType); + Py_INCREF(&PyArray_PyFloatDType); + Py_SETREF(*dtype, &PyArray_PyFloatDType); } return 1; } @@ -66,8 +71,8 @@ npy_mark_tmp_array_if_pyscalar( && PyArray_TYPE(arr) == NPY_CDOUBLE) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_COMPLEX; if (dtype != NULL) { - Py_INCREF(&PyArray_PyComplexAbstractDType); - Py_SETREF(*dtype, &PyArray_PyComplexAbstractDType); + Py_INCREF(&PyArray_PyComplexDType); + Py_SETREF(*dtype, &PyArray_PyComplexDType); } return 1; } diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index c2b924e093b5..f63dbbc77e1f 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -226,10 +226,10 @@ npy_discover_dtype_from_pytype(PyTypeObject *pytype) DType = Py_None; } else if (pytype == &PyFloat_Type) { - DType = (PyObject *)&PyArray_PyFloatAbstractDType; + DType = (PyObject *)&PyArray_PyFloatDType; } else if (pytype == &PyLong_Type) { - DType = (PyObject *)&PyArray_PyIntAbstractDType; + DType = (PyObject *)&PyArray_PyLongDType; } else { DType = PyDict_GetItem(_global_pytype_to_type_dict, diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index fd7ccd767056..8d31a8ac2a06 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -351,8 +351,8 @@ array_converter_result_type(PyArrayArrayConverterObject *self, "extra_dtype and ensure_inexact are mutually exclusive."); goto finish; } - 
Py_INCREF(&PyArray_PyFloatAbstractDType); - dt_info.dtype = &PyArray_PyFloatAbstractDType; + Py_INCREF(&PyArray_PyFloatDType); + dt_info.dtype = &PyArray_PyFloatDType; } if (dt_info.dtype != NULL) { diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index b262c1f263c2..e12a5d615792 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -59,8 +59,8 @@ static NPY_CASTING default_resolve_descriptors( PyArrayMethodObject *method, - PyArray_DTypeMeta **dtypes, - PyArray_Descr **input_descrs, + PyArray_DTypeMeta *const *dtypes, + PyArray_Descr *const *input_descrs, PyArray_Descr **output_descrs, npy_intp *view_offset) { @@ -139,7 +139,7 @@ npy_default_get_strided_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; PyArrayMethodObject *meth = context->method; *flags = meth->flags & NPY_METH_RUNTIME_FLAGS; *out_transferdata = NULL; diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index c266979c6f6f..12beace71529 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -18,6 +18,7 @@ #include "npy_config.h" #include "npy_sort.h" +#include "abstractdtypes.h" #include "common.h" #include "ctors.h" #include "convert_datatype.h" @@ -4221,6 +4222,7 @@ NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType(int type) { PyArray_Descr *ret = NULL; + npy_bool is_stringdtype = (type == NPY_VSTRING || type == NPY_VSTRINGLTR); if (type < 0) { /* @@ -4232,7 +4234,7 @@ PyArray_DescrFromType(int type) */ ret = NULL; } - else if (type == NPY_VSTRING || type == NPY_VSTRINGLTR) { + else if (is_stringdtype) { ret = (PyArray_Descr *)new_stringdtype_instance(NULL, 1); } // builtin legacy dtypes @@ -4279,7 +4281,7 @@ PyArray_DescrFromType(int type) PyErr_SetString(PyExc_ValueError, "Invalid data-type for array"); } - else { + else if (!is_stringdtype) { Py_INCREF(ret); } @@ -4351,10 +4353,16 @@ set_typeinfo(PyObject *dict) * CFloat, CDouble, CLongDouble, * Object, String, Unicode, Void, * Datetime, Timedelta# + * #scls = PyArrayDescr_Type, + * PyArray_IntAbstractDType*10, + * PyArray_FloatAbstractDType*4, + * PyArray_ComplexAbstractDType*3, + * PyArrayDescr_Type*6 # */ if (dtypemeta_wrap_legacy_descriptor( _builtin_descrs[NPY_@NAME@], &_Py@Name@_ArrFuncs, + (PyTypeObject *)&@scls@, "numpy.dtypes." NPY_@NAME@_Name "DType", #ifdef NPY_@NAME@_alias "numpy.dtypes." NPY_@NAME@_Alias "DType" diff --git a/numpy/_core/src/multiarray/arraywrap.c b/numpy/_core/src/multiarray/arraywrap.c index d72fab0e4c98..8b37798f983b 100644 --- a/numpy/_core/src/multiarray/arraywrap.c +++ b/numpy/_core/src/multiarray/arraywrap.c @@ -159,7 +159,7 @@ npy_apply_wrap( } else { /* Replace passed wrap/wrap_type (borrowed refs) with new_wrap/type. */ - PyObject *new_wrap = PyArray_LookupSpecial_OnInstance( + new_wrap = PyArray_LookupSpecial_OnInstance( original_out, npy_ma_str_array_wrap); if (new_wrap != NULL) { wrap = new_wrap; @@ -177,11 +177,13 @@ npy_apply_wrap( */ if (!return_scalar && !force_wrap && (PyObject *)Py_TYPE(obj) == wrap_type) { + Py_XDECREF(new_wrap); Py_INCREF(obj); return obj; } if (wrap == Py_None) { + Py_XDECREF(new_wrap); Py_INCREF(obj); if (return_scalar) { /* @@ -239,8 +241,9 @@ npy_apply_wrap( wrap, arr, py_context, (return_scalar && PyArray_NDIM(arr) == 0) ? 
Py_True : Py_False, NULL); - if (res != NULL) + if (res != NULL) { goto finish; + } else if (!PyErr_ExceptionMatches(PyExc_TypeError)) { goto finish; } diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 5804c9cc9148..6759fbf19b53 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -48,7 +48,7 @@ _array_find_python_scalar_type(PyObject *op) } else if (PyLong_Check(op)) { return NPY_DT_CALL_discover_descr_from_pyobject( - &PyArray_PyIntAbstractDType, op); + &PyArray_PyLongDType, op); } return NULL; } diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index d58fee3823ee..da80d8b258a1 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -249,6 +249,12 @@ PyArray_CopyConverter(PyObject *obj, NPY_COPYMODE *copymode) { return NPY_FAIL; } } + else if(PyUnicode_Check(obj)) { + PyErr_SetString(PyExc_ValueError, + "strings are not allowed for 'copy' keyword. " + "Use True/False/None instead."); + return NPY_FAIL; + } else { npy_bool bool_copymode; if (!PyArray_BoolConverter(obj, &bool_copymode)) { @@ -1341,13 +1347,19 @@ PyArray_TypestrConvert(int itemsize, int gentype) break; case NPY_DEPRECATED_STRINGLTR2: - DEPRECATE( - "Data type alias `a` was removed in NumPy 2.0. " - "Use `S` alias instead." - ); - newtype = NPY_STRING; + { + /* + * raise a deprecation warning, which might be an exception + * if warnings are errors, so leave newtype unset in that + * case + */ + int ret = DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " + "Use the 'S' alias instead."); + if (ret == 0) { + newtype = NPY_STRING; + } break; - + } case NPY_UNICODELTR: newtype = NPY_UNICODE; break; diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index c6b164d7f4e9..2b6f46d0b566 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -408,9 +408,9 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) char *value = (char *)value_buffer_stack; PyArray_Descr *descr = PyArray_DESCR(arr); - if ((size_t)descr->elsize > sizeof(value_buffer_stack)) { + if (PyDataType_ELSIZE(descr) > sizeof(value_buffer_stack)) { /* We need a large temporary buffer... */ - value_buffer_heap = PyObject_Calloc(1, descr->elsize); + value_buffer_heap = PyMem_Calloc(1, PyDataType_ELSIZE(descr)); if (value_buffer_heap == NULL) { PyErr_NoMemory(); return -1; diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 2225ee94859c..f6f06ef4fb37 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -465,14 +465,17 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl, /* * Check for less harmful non-standard returns. The following two returns * should never happen: - * 1. No-casting must imply a view offset of 0. + * 1. No-casting must imply a view offset of 0 unless the DType + defines a finalization function, which implies it stores data + on the descriptor * 2. Equivalent-casting + 0 view offset is (usually) the definition * of a "no" cast. However, changing the order of fields can also * create descriptors that are not equivalent but views. * Note that unsafe casts can have a view offset. 
For example, in * principle, casting `finalize_descr == NULL)) { assert(casting != NPY_NO_CASTING); } else { @@ -648,6 +651,35 @@ PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) } +/* + * This function returns true if the two types can be safely cast at + * *minimum_safety* casting level. Sets the *view_offset* if that is set + * for the cast. If ignore_error is set, the error indicator is cleared + * if there are any errors in cast setup and returns false, otherwise + * the error indicator is left set and returns -1. + */ +NPY_NO_EXPORT npy_intp +PyArray_SafeCast(PyArray_Descr *type1, PyArray_Descr *type2, + npy_intp* view_offset, NPY_CASTING minimum_safety, + npy_intp ignore_error) +{ + if (type1 == type2) { + *view_offset = 0; + return 1; + } + + NPY_CASTING safety = PyArray_GetCastInfo(type1, type2, NULL, view_offset); + if (safety < 0) { + if (ignore_error) { + PyErr_Clear(); + return 0; + } + return -1; + } + return PyArray_MinCastSafety(safety, minimum_safety) == minimum_safety; +} + + /* Provides an ordering for the dtype 'kind' character codes */ NPY_NO_EXPORT int dtype_kind_to_ordering(char kind) @@ -1796,17 +1828,17 @@ PyArray_ResultType( all_descriptors[i_all] = NULL; /* no descriptor for py-scalars */ if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_INT) { /* This could even be an object dtype here for large ints */ - all_DTypes[i_all] = &PyArray_PyIntAbstractDType; + all_DTypes[i_all] = &PyArray_PyLongDType; if (PyArray_TYPE(arrs[i]) != NPY_LONG) { /* Not a "normal" scalar, so we cannot avoid the legacy path */ all_pyscalar = 0; } } else if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_FLOAT) { - all_DTypes[i_all] = &PyArray_PyFloatAbstractDType; + all_DTypes[i_all] = &PyArray_PyFloatDType; } else if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_COMPLEX) { - all_DTypes[i_all] = &PyArray_PyComplexAbstractDType; + all_DTypes[i_all] = &PyArray_PyComplexDType; } else { all_descriptors[i_all] = PyArray_DTYPE(arrs[i]); @@ -2440,8 +2472,8 @@ PyArray_AddCastingImplementation_FromSpec(PyArrayMethod_Spec *spec, int private) NPY_NO_EXPORT NPY_CASTING legacy_same_dtype_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[2]), + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -2483,7 +2515,7 @@ legacy_cast_get_strided_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; int out_needs_api = 0; *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; @@ -2507,8 +2539,8 @@ legacy_cast_get_strided_loop( NPY_NO_EXPORT NPY_CASTING simple_cast_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -2548,7 +2580,7 @@ get_byteswap_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; assert(descrs[0]->kind == descrs[1]->kind); assert(descrs[0]->elsize == descrs[1]->elsize); int itemsize = descrs[0]->elsize; @@ -2727,8 +2759,8 @@ PyArray_InitializeNumericCasts(void) static int 
cast_to_string_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *NPY_UNUSED(view_offset)) { @@ -2879,8 +2911,8 @@ add_other_to_and_from_string_cast( NPY_NO_EXPORT NPY_CASTING string_to_string_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[2]), + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -2932,7 +2964,7 @@ string_to_string_get_loop( NPY_ARRAYMETHOD_FLAGS *flags) { int unicode_swap = 0; - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; assert(NPY_DTYPE(descrs[0]) == NPY_DTYPE(descrs[1])); *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; @@ -3033,7 +3065,7 @@ PyArray_InitializeStringCasts(void) */ static NPY_CASTING cast_to_void_dtype_class( - PyArray_Descr **given_descrs, PyArray_Descr **loop_descrs, + PyArray_Descr *const *given_descrs, PyArray_Descr **loop_descrs, npy_intp *view_offset) { /* `dtype="V"` means unstructured currently (compare final path) */ @@ -3058,8 +3090,8 @@ cast_to_void_dtype_class( static NPY_CASTING nonstructured_to_structured_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[2]), + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -3251,8 +3283,8 @@ PyArray_GetGenericToVoidCastingImpl(void) static NPY_CASTING structured_to_nonstructured_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -3521,8 +3553,8 @@ can_cast_fields_safety( static NPY_CASTING void_to_void_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -3720,8 +3752,8 @@ PyArray_InitializeVoidToVoidCast(void) static NPY_CASTING object_to_any_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *NPY_UNUSED(view_offset)) { @@ -3794,8 +3826,8 @@ PyArray_GetObjectToGenericCastingImpl(void) static NPY_CASTING any_to_object_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *NPY_UNUSED(view_offset)) { diff --git a/numpy/_core/src/multiarray/convert_datatype.h b/numpy/_core/src/multiarray/convert_datatype.h index b32b637d8e55..d1493e6997bd 100644 --- a/numpy/_core/src/multiarray/convert_datatype.h +++ b/numpy/_core/src/multiarray/convert_datatype.h @@ -102,6 +102,11 @@ PyArray_GetCastInfo( PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype, npy_intp *view_offset); +NPY_NO_EXPORT npy_intp 
+PyArray_SafeCast(PyArray_Descr *type1, PyArray_Descr *type2, + npy_intp* view_offset, NPY_CASTING minimum_safety, + npy_intp ignore_errors); + NPY_NO_EXPORT int PyArray_CheckCastSafety(NPY_CASTING casting, PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype); @@ -109,8 +114,8 @@ PyArray_CheckCastSafety(NPY_CASTING casting, NPY_NO_EXPORT NPY_CASTING legacy_same_dtype_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset); @@ -124,8 +129,8 @@ legacy_cast_get_strided_loop( NPY_NO_EXPORT NPY_CASTING simple_cast_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *input_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const input_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset); diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index a475f3986759..58bc6b8b6aa2 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -39,6 +39,13 @@ #include "umathmodule.h" + +NPY_NO_EXPORT const char *npy_no_copy_err_msg = ( + "Unable to avoid copy while creating an array as requested.\n" + "If using `np.array(obj, copy=False)` replace it with `np.asarray(obj)` " + "to allow a copy when needed (no behavior change in NumPy 1.x).\n" + "For more details, see https://numpy.org/devdocs/numpy_2_0_migration_guide.html#adapting-to-changes-in-the-copy-keyword."); + /* * Reading from a file or a string. * @@ -1637,9 +1644,8 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * If we got this far, we definitely have to create a copy, since we are * converting either from a scalar (cache == NULL) or a (nested) sequence. 
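 * (Under NumPy 2.0 semantics this is where, e.g., `np.array(obj, copy=False)`
 * for a list `obj` fails; npy_no_copy_err_msg then points the user at
 * `np.asarray(obj)` instead.)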
 */
-    if (flags & NPY_ARRAY_ENSURENOCOPY ) {
-        PyErr_SetString(PyExc_ValueError,
-                "Unable to avoid copy while creating an array.");
+    if (flags & NPY_ARRAY_ENSURENOCOPY) {
+        PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg);
         Py_DECREF(dtype);
         npy_free_coercion_cache(cache);
         return NULL;
@@ -1847,8 +1853,7 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr,
             && !PyArray_ElementStrides(obj)) {
         PyObject *ret;
         if (requires & NPY_ARRAY_ENSURENOCOPY) {
-            PyErr_SetString(PyExc_ValueError,
-                    "Unable to avoid copy while creating a new array.");
+            PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg);
             return NULL;
         }
         ret = PyArray_NewCopy((PyArrayObject *)obj, NPY_ANYORDER);
@@ -1908,8 +1913,10 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
     }

     arrflags = PyArray_FLAGS(arr);
-    /* If a guaranteed copy was requested */
-    copy = (flags & NPY_ARRAY_ENSURECOPY) ||
+
+
+    copy = /* If a guaranteed copy was requested */
+           (flags & NPY_ARRAY_ENSURECOPY) ||
           /* If C contiguous was requested, and arr is not */
           ((flags & NPY_ARRAY_C_CONTIGUOUS) &&
                   (!(arrflags & NPY_ARRAY_C_CONTIGUOUS))) ||
@@ -1921,13 +1928,17 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
                   (!(arrflags & NPY_ARRAY_F_CONTIGUOUS))) ||
           /* If a writeable array was requested, and arr is not */
           ((flags & NPY_ARRAY_WRITEABLE) &&
-                  (!(arrflags & NPY_ARRAY_WRITEABLE))) ||
-          !PyArray_EquivTypes(oldtype, newtype);
+                  (!(arrflags & NPY_ARRAY_WRITEABLE)));
+
+    if (!copy) {
+        npy_intp view_offset;
+        npy_intp is_safe = PyArray_SafeCast(oldtype, newtype, &view_offset, NPY_NO_CASTING, 1);
+        copy = !(is_safe && (view_offset != NPY_MIN_INTP));
+    }

     if (copy) {
         if (flags & NPY_ARRAY_ENSURENOCOPY ) {
-            PyErr_SetString(PyExc_ValueError,
-                    "Unable to avoid copy while creating an array from given array.");
+            PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg);
             Py_DECREF(newtype);
             return NULL;
         }
@@ -2405,6 +2416,63 @@ PyArray_FromInterface(PyObject *origin)
 }
+
+
+/*
+ * Returns -1 with the error still set, or 0 with the original error
+ * cleared; must be called with an error set.
+ */
+static inline int
+check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames)
+{
+    if (kwnames == NULL) {
+        return -1;  /* didn't pass kwnames, can't possibly be the reason */
+    }
+    if (!PyErr_ExceptionMatches(PyExc_TypeError)) {
+        return -1;
+    }
+
+    /*
+     * In most cases, if we fail, we assume the error was unrelated to the
+     * copy kwarg and simply restore the original one.
+     */
+    PyObject *type, *value, *traceback;
+    PyErr_Fetch(&type, &value, &traceback);
+    if (value == NULL) {
+        goto restore_error;
+    }
+
+    PyObject *str_value = PyObject_Str(value);
+    if (str_value == NULL) {
+        goto restore_error;
+    }
+    int copy_kwarg_unsupported = PyUnicode_Contains(
+            str_value, npy_ma_str_array_err_msg_substr);
+    Py_DECREF(str_value);
+    if (copy_kwarg_unsupported == -1) {
+        goto restore_error;
+    }
+    if (copy_kwarg_unsupported) {
+        /*
+         * TODO: As of NumPy 2.0, this warning is only triggered with
+         * `copy=False`, allowing downstream to not notice it.
+         */
+        Py_DECREF(type);
+        Py_DECREF(value);
+        Py_XDECREF(traceback);
+        if (DEPRECATE("__array__ implementation doesn't accept a copy keyword, "
+                      "so passing copy=False failed. __array__ must implement "
+                      "'dtype' and 'copy' keyword arguments.") < 0) {
+            return -1;
+        }
+        return 0;
+    }
+
+  restore_error:
+    PyErr_Restore(type, value, traceback);
+    return -1;
+}
+
+
 /**
  * Check for an __array__ attribute and call it when it exists.
* @@ -2447,67 +2515,60 @@ PyArray_FromArrayAttr_int( return Py_NotImplemented; } - PyObject *kwargs = PyDict_New(); + static PyObject *kwnames_is_copy = NULL; + if (kwnames_is_copy == NULL) { + kwnames_is_copy = Py_BuildValue("(s)", "copy"); + if (kwnames_is_copy == NULL) { + Py_DECREF(array_meth); + return NULL; + } + } + + Py_ssize_t nargs = 0; + PyObject *arguments[2]; + PyObject *kwnames = NULL; + + if (descr != NULL) { + arguments[0] = (PyObject *)descr; + nargs++; + } /* * Only if the value of `copy` isn't the default one, we try to pass it * along; for backwards compatibility we then retry if it fails because the * signature of the __array__ method being called does not have `copy`. */ - int copy_passed = 0; if (copy != -1) { - copy_passed = 1; - PyObject *copy_obj = copy == 1 ? Py_True : Py_False; - PyDict_SetItemString(kwargs, "copy", copy_obj); + kwnames = kwnames_is_copy; + arguments[nargs] = copy == 1 ? Py_True : Py_False; } - PyObject *args = descr != NULL ? PyTuple_Pack(1, descr) : PyTuple_New(0); - - new = PyObject_Call(array_meth, args, kwargs); + int must_copy_but_copy_kwarg_unimplemented = 0; + new = PyObject_Vectorcall(array_meth, arguments, nargs, kwnames); if (new == NULL) { - if (npy_ma_str_array_err_msg_substr == NULL) { + if (check_or_clear_and_warn_error_if_due_to_copy_kwarg(kwnames) < 0) { + /* Error was not cleared (or a new error set) */ + Py_DECREF(array_meth); return NULL; } - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); - if (value != NULL) { - PyObject *str_value = PyObject_Str(value); - if (PyUnicode_Contains( - str_value, npy_ma_str_array_err_msg_substr) > 0) { - Py_DECREF(type); - Py_DECREF(value); - Py_XDECREF(traceback); - if (PyErr_WarnEx(PyExc_UserWarning, - "__array__ should implement 'dtype' and " - "'copy' keywords", 1) < 0) { - Py_DECREF(str_value); - Py_DECREF(args); - Py_DECREF(kwargs); - return NULL; - } - if (copy_passed) { /* try again */ - PyDict_DelItemString(kwargs, "copy"); - new = PyObject_Call(array_meth, args, kwargs); - if (new == NULL) { - Py_DECREF(str_value); - Py_DECREF(args); - Py_DECREF(kwargs); - return NULL; - } - } - } - Py_DECREF(str_value); + if (copy == 0) { + /* Cannot possibly avoid a copy, so error out. */ + PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); + Py_DECREF(array_meth); + return NULL; } + /* + * The error seems to have been due to passing copy. We try to see + * more precisely what the message is and may try again. + */ + must_copy_but_copy_kwarg_unimplemented = 1; + new = PyObject_Vectorcall(array_meth, arguments, nargs, NULL); if (new == NULL) { - PyErr_Restore(type, value, traceback); - Py_DECREF(args); - Py_DECREF(kwargs); + Py_DECREF(array_meth); return NULL; } } - Py_DECREF(args); - Py_DECREF(kwargs); Py_DECREF(array_meth); if (!PyArray_Check(new)) { @@ -2517,6 +2578,10 @@ PyArray_FromArrayAttr_int( Py_DECREF(new); return NULL; } + if (must_copy_but_copy_kwarg_unimplemented) { + /* TODO: As of NumPy 2.0 this path is only reachable by C-API. 
*/ + Py_SETREF(new, PyArray_NewCopy((PyArrayObject *)new, NPY_KEEPORDER)); + } return new; } diff --git a/numpy/_core/src/multiarray/ctors.h b/numpy/_core/src/multiarray/ctors.h index d2577f83ef96..fa1cd72e1478 100644 --- a/numpy/_core/src/multiarray/ctors.h +++ b/numpy/_core/src/multiarray/ctors.h @@ -1,6 +1,9 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_CTORS_H_ #define NUMPY_CORE_SRC_MULTIARRAY_CTORS_H_ +extern NPY_NO_EXPORT const char *npy_no_copy_err_msg; + + NPY_NO_EXPORT PyObject * PyArray_NewFromDescr( PyTypeObject *subtype, PyArray_Descr *descr, int nd, diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 7397381daf91..b340cf6cf496 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -3780,7 +3780,7 @@ time_to_time_get_loop( { int requires_wrap = 0; int inner_aligned = aligned; - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; *flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; PyArray_DatetimeMetaData *meta1 = get_datetime_metadata_from_dtype(descrs[0]); @@ -3929,7 +3929,7 @@ datetime_to_string_get_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; if (descrs[1]->type_num == NPY_STRING) { @@ -3989,7 +3989,7 @@ string_to_datetime_cast_get_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; if (descrs[0]->type_num == NPY_STRING) { diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index c77b380e9386..f7524473c633 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -6,6 +6,8 @@ #include #include +#include + #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" #include "numpy/npy_math.h" @@ -13,13 +15,14 @@ #include "npy_config.h" #include "npy_ctypes.h" #include "npy_import.h" -#include "npy_pycompat.h" + #include "_datetime.h" #include "common.h" #include "conversion_utils.h" /* for PyArray_TypestrConvert */ #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ #include "descriptor.h" +#include "multiarraymodule.h" #include "alloc.h" #include "assert.h" #include "npy_buffer.h" @@ -1806,14 +1809,27 @@ _convert_from_str(PyObject *obj, int align) /* Python byte string characters are unsigned */ check_num = (unsigned char) type[0]; } - /* A kind + size like 'f8' */ + /* Possibly a kind + size like 'f8' but also could be 'bool' */ else { char *typeend = NULL; int kind; - /* Parse the integer, make sure it's the rest of the string */ - elsize = (int)strtol(type + 1, &typeend, 10); - if (typeend - type == len) { + /* Attempt to parse the integer, make sure it's the rest of the string */ + errno = 0; + long result = strtol(type + 1, &typeend, 10); + npy_bool some_parsing_happened = !(type == typeend); + npy_bool entire_string_consumed = *typeend == '\0'; + npy_bool parsing_succeeded = + (errno == 0) && some_parsing_happened && entire_string_consumed; + // make sure it doesn't overflow or go negative + if (result > INT_MAX || result < 0) { + goto fail; + } + + elsize = (int)result; + + + if (parsing_succeeded && typeend - type 
== len) { kind = type[0]; switch (kind) { @@ -1822,10 +1838,10 @@ _convert_from_str(PyObject *obj, int align) break; case NPY_DEPRECATED_STRINGLTR2: - DEPRECATE( - "Data type alias `a` was removed in NumPy 2.0. " - "Use `S` alias instead." - ); + if (DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " + "Use the 'S' alias instead.") < 0) { + return NULL; + } check_num = NPY_STRING; break; @@ -1859,6 +1875,9 @@ _convert_from_str(PyObject *obj, int align) } } } + else if (parsing_succeeded) { + goto fail; + } } if (PyErr_Occurred()) { @@ -1894,10 +1913,10 @@ _convert_from_str(PyObject *obj, int align) } if (strcmp(type, "a") == 0) { - DEPRECATE( - "Data type alias `a` was removed in NumPy 2.0. " - "Use `S` alias instead." - ); + if (DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " + "Use the 'S' alias instead.") < 0) { + return NULL; + } } /* @@ -2019,6 +2038,7 @@ arraydescr_dealloc(PyArray_Descr *self) { Py_XDECREF(self->typeobj); if (!PyDataType_ISLEGACY(self)) { + Py_TYPE(self)->tp_free((PyObject *)self); return; } _PyArray_LegacyDescr *lself = (_PyArray_LegacyDescr *)self; @@ -2696,7 +2716,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) Py_DECREF(ret); return NULL; } - obj = PyObject_GetAttrString(mod, "dtype"); + obj = PyObject_GetAttr(mod, npy_ma_str_dtype); Py_DECREF(mod); if (obj == NULL) { Py_DECREF(ret); diff --git a/numpy/_core/src/multiarray/dtype_traversal.c b/numpy/_core/src/multiarray/dtype_traversal.c index 0402ad2c084d..91b1889b7d1f 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.c +++ b/numpy/_core/src/multiarray/dtype_traversal.c @@ -32,7 +32,7 @@ typedef int get_traverse_func_function( - void *traverse_context, PyArray_Descr *dtype, int aligned, + void *traverse_context, const PyArray_Descr *dtype, int aligned, npy_intp stride, NPY_traverse_info *clear_info, NPY_ARRAYMETHOD_FLAGS *flags); @@ -42,7 +42,7 @@ typedef int get_traverse_func_function( static int get_clear_function( - void *traverse_context, PyArray_Descr *dtype, int aligned, + void *traverse_context, const PyArray_Descr *dtype, int aligned, npy_intp stride, NPY_traverse_info *clear_info, NPY_ARRAYMETHOD_FLAGS *flags) { @@ -99,7 +99,7 @@ PyArray_GetClearFunction( static int get_zerofill_function( - void *traverse_context, PyArray_Descr *dtype, int aligned, + void *traverse_context, const PyArray_Descr *dtype, int aligned, npy_intp stride, NPY_traverse_info *zerofill_info, NPY_ARRAYMETHOD_FLAGS *flags) { @@ -136,7 +136,7 @@ get_zerofill_function( static int clear_object_strided_loop( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), char *data, npy_intp size, npy_intp stride, NpyAuxData *NPY_UNUSED(auxdata)) { @@ -156,7 +156,7 @@ clear_object_strided_loop( NPY_NO_EXPORT int npy_get_clear_object_strided_loop( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), int NPY_UNUSED(aligned), npy_intp NPY_UNUSED(fixed_stride), PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags) @@ -171,7 +171,7 @@ npy_get_clear_object_strided_loop( static int fill_zero_object_strided_loop( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), char *data, npy_intp size, npy_intp stride, NpyAuxData *NPY_UNUSED(auxdata)) { @@ -188,7 +188,7 @@ 
fill_zero_object_strided_loop( NPY_NO_EXPORT int npy_object_get_fill_zero_loop(void *NPY_UNUSED(traverse_context), - PyArray_Descr *NPY_UNUSED(descr), + const PyArray_Descr *NPY_UNUSED(descr), int NPY_UNUSED(aligned), npy_intp NPY_UNUSED(fixed_stride), PyArrayMethod_TraverseLoop **out_loop, @@ -275,7 +275,7 @@ fields_traverse_data_clone(NpyAuxData *data) static int traverse_fields_function( - void *traverse_context, _PyArray_LegacyDescr *NPY_UNUSED(descr), + void *traverse_context, const _PyArray_LegacyDescr *NPY_UNUSED(descr), char *data, npy_intp N, npy_intp stride, NpyAuxData *auxdata) { @@ -315,7 +315,7 @@ traverse_fields_function( static int get_fields_traverse_function( - void *traverse_context, _PyArray_LegacyDescr *dtype, int NPY_UNUSED(aligned), + void *traverse_context, const _PyArray_LegacyDescr *dtype, int NPY_UNUSED(aligned), npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags, get_traverse_func_function *get_traverse_func) @@ -398,13 +398,6 @@ subarray_traverse_data_free(NpyAuxData *data) } -/* - * We seem to be neither using nor exposing this right now, so leave it NULL. - * (The implementation below should be functional.) - */ -#define subarray_traverse_data_clone NULL - -#ifndef subarray_traverse_data_clone /* traverse data copy function */ static NpyAuxData * subarray_traverse_data_clone(NpyAuxData *data) @@ -426,19 +419,18 @@ subarray_traverse_data_clone(NpyAuxData *data) return (NpyAuxData *)newdata; } -#endif static int traverse_subarray_func( - void *traverse_context, PyArray_Descr *NPY_UNUSED(descr), + void *traverse_context, const PyArray_Descr *NPY_UNUSED(descr), char *data, npy_intp N, npy_intp stride, NpyAuxData *auxdata) { subarray_traverse_data *subarr_data = (subarray_traverse_data *)auxdata; PyArrayMethod_TraverseLoop *func = subarr_data->info.func; - PyArray_Descr *sub_descr = subarr_data->info.descr; + const PyArray_Descr *sub_descr = subarr_data->info.descr; npy_intp sub_N = subarr_data->count; NpyAuxData *sub_auxdata = subarr_data->info.auxdata; npy_intp sub_stride = sub_descr->elsize; @@ -456,7 +448,7 @@ traverse_subarray_func( static int get_subarray_traverse_func( - void *traverse_context, PyArray_Descr *dtype, int aligned, + void *traverse_context, const PyArray_Descr *dtype, int aligned, npy_intp size, npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags, get_traverse_func_function *get_traverse_func) @@ -469,7 +461,7 @@ get_subarray_traverse_func( auxdata->count = size; auxdata->base.free = &subarray_traverse_data_free; - auxdata->base.clone = subarray_traverse_data_clone; + auxdata->base.clone = &subarray_traverse_data_clone; if (get_traverse_func( traverse_context, dtype, aligned, @@ -493,7 +485,7 @@ get_subarray_traverse_func( static int clear_no_op( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), char *NPY_UNUSED(data), npy_intp NPY_UNUSED(size), npy_intp NPY_UNUSED(stride), NpyAuxData *NPY_UNUSED(auxdata)) { @@ -502,7 +494,7 @@ clear_no_op( NPY_NO_EXPORT int npy_get_clear_void_and_legacy_user_dtype_loop( - void *traverse_context, _PyArray_LegacyDescr *dtype, int aligned, + void *traverse_context, const _PyArray_LegacyDescr *dtype, int aligned, npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags) { @@ -569,7 +561,7 @@ npy_get_clear_void_and_legacy_user_dtype_loop( static 
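/*
 * The const-qualification sweep through `dtype_traversal.c` here changes
 * every traverse-loop signature from `PyArray_Descr *` to
 * `const PyArray_Descr *`: the loop bodies only ever read the descriptor,
 * and callers that hold a const pointer no longer need a cast. A toy version
 * of the pattern with made-up names:
 */
struct toy_descr { int elsize; };

/* Loop type whose descriptor parameter is read-only by contract. */
typedef int toy_traverse_loop(void *ctx, const struct toy_descr *d,
                              char *data, long n, long stride);

static int
toy_clear_loop(void *ctx, const struct toy_descr *d,
               char *data, long n, long stride)
{
    (void)ctx; (void)data; (void)stride;
    /* Reads such as d->elsize are fine; `d->elsize = 0` would not compile. */
    return (n >= 0 && d->elsize > 0) ? 0 : -1;
}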
int zerofill_fields_function( - void *traverse_context, _PyArray_LegacyDescr *descr, + void *traverse_context, const _PyArray_LegacyDescr *descr, char *data, npy_intp N, npy_intp stride, NpyAuxData *auxdata) { @@ -598,7 +590,7 @@ zerofill_fields_function( */ NPY_NO_EXPORT int npy_get_zerofill_void_and_legacy_user_dtype_loop( - void *traverse_context, _PyArray_LegacyDescr *dtype, int aligned, + void *traverse_context, const _PyArray_LegacyDescr *dtype, int aligned, npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags) { diff --git a/numpy/_core/src/multiarray/dtype_traversal.h b/numpy/_core/src/multiarray/dtype_traversal.h index bd3918ba4b65..5e915ba4d40e 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.h +++ b/numpy/_core/src/multiarray/dtype_traversal.h @@ -7,14 +7,14 @@ NPY_NO_EXPORT int npy_get_clear_object_strided_loop( - void *traverse_context, PyArray_Descr *descr, int aligned, + void *traverse_context, const PyArray_Descr *descr, int aligned, npy_intp fixed_stride, PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_traversedata, NPY_ARRAYMETHOD_FLAGS *flags); NPY_NO_EXPORT int npy_get_clear_void_and_legacy_user_dtype_loop( - void *traverse_context, _PyArray_LegacyDescr *descr, int aligned, + void *traverse_context, const _PyArray_LegacyDescr *descr, int aligned, npy_intp fixed_stride, PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_traversedata, NPY_ARRAYMETHOD_FLAGS *flags); @@ -23,14 +23,14 @@ npy_get_clear_void_and_legacy_user_dtype_loop( NPY_NO_EXPORT int npy_object_get_fill_zero_loop( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), int NPY_UNUSED(aligned), npy_intp NPY_UNUSED(fixed_stride), PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **NPY_UNUSED(out_auxdata), NPY_ARRAYMETHOD_FLAGS *flags); NPY_NO_EXPORT int npy_get_zerofill_void_and_legacy_user_dtype_loop( - void *traverse_context, _PyArray_LegacyDescr *dtype, int aligned, + void *traverse_context, const _PyArray_LegacyDescr *dtype, int aligned, npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags); @@ -40,7 +40,7 @@ npy_get_zerofill_void_and_legacy_user_dtype_loop( typedef struct { PyArrayMethod_TraverseLoop *func; NpyAuxData *auxdata; - PyArray_Descr *descr; + const PyArray_Descr *descr; } NPY_traverse_info; @@ -69,18 +69,22 @@ static inline int NPY_traverse_info_copy( NPY_traverse_info *traverse_info, NPY_traverse_info *original) { - traverse_info->func = NULL; + /* Note that original may be identical to traverse_info! 
*/ if (original->func == NULL) { /* Allow copying also of unused clear info */ + traverse_info->func = NULL; return 0; } - traverse_info->auxdata = NULL; if (original->auxdata != NULL) { traverse_info->auxdata = NPY_AUXDATA_CLONE(original->auxdata); if (traverse_info->auxdata == NULL) { + traverse_info->func = NULL; return -1; } } + else { + traverse_info->auxdata = NULL; + } Py_INCREF(original->descr); traverse_info->descr = original->descr; traverse_info->func = original->func; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 626b3bde1032..a4c28088ee08 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -444,12 +444,6 @@ string_unicode_new(PyArray_DTypeMeta *self, PyObject *args, PyObject *kwargs) return NULL; } - PyArray_Descr *res = PyArray_DescrNewFromType(self->type_num); - - if (res == NULL) { - return NULL; - } - if (self->type_num == NPY_UNICODE) { // unicode strings are 4 bytes per character if (npy_mul_sizes_with_overflow(&size, size, 4)) { @@ -466,6 +460,12 @@ string_unicode_new(PyArray_DTypeMeta *self, PyObject *args, PyObject *kwargs) return NULL; } + PyArray_Descr *res = PyArray_DescrNewFromType(self->type_num); + + if (res == NULL) { + return NULL; + } + res->elsize = (int)size; return (PyObject *)res; } @@ -838,22 +838,13 @@ python_builtins_are_known_scalar_types( * This is necessary only for python scalar classes which we discover * as valid DTypes. */ - if (pytype == &PyFloat_Type) { - return 1; - } - if (pytype == &PyLong_Type) { - return 1; - } - if (pytype == &PyBool_Type) { - return 1; - } - if (pytype == &PyComplex_Type) { - return 1; - } - if (pytype == &PyUnicode_Type) { - return 1; - } - if (pytype == &PyBytes_Type) { + if (pytype == &PyFloat_Type || + pytype == &PyLong_Type || + pytype == &PyBool_Type || + pytype == &PyComplex_Type || + pytype == &PyUnicode_Type || + pytype == &PyBytes_Type) + { return 1; } return 0; @@ -920,12 +911,15 @@ static PyArray_DTypeMeta * default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) { assert(cls->type_num < NPY_NTYPES_LEGACY); - if (NPY_UNLIKELY(NPY_DT_is_abstract(other))) { + if (NPY_UNLIKELY(!NPY_DT_is_legacy(other))) { /* - * The abstract complex has a lower priority than the concrete inexact - * types to ensure the correct promotion with integers. + * Deal with the non-legacy types we understand: python scalars. + * These may have lower priority than the concrete inexact types, + * but can change the type of the result (complex, float, int). + * If our own DType is not numerical or has lower priority (e.g. + * integer but abstract one is float), signal not implemented. 
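/*
 * The rewritten `NPY_traverse_info_copy` above is ordered so that nothing in
 * the destination is clobbered before the source has been fully read (per
 * the new comment, `original` may alias `traverse_info`). A simplified,
 * self-contained illustration of an alias-safe copy; the struct and names
 * are hypothetical, and releasing any buffer previously owned by a
 * non-aliased dst is assumed to be the caller's job, as in the hunk above:
 */
#include <stdlib.h>
#include <string.h>

struct blob { char *buf; size_t len; };

static int
blob_copy(struct blob *dst, const struct blob *src)
{
    char *copy = NULL;
    size_t len = src->len;           /* snapshot src before touching dst */
    if (src->buf != NULL) {
        copy = malloc(len);
        if (copy == NULL) {
            return -1;               /* dst is left untouched on failure */
        }
        memcpy(copy, src->buf, len);
    }
    dst->len = len;                  /* only now write dst; safe if dst == src */
    dst->buf = copy;
    return 0;
}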
*/ - if (other == &PyArray_PyComplexAbstractDType) { + if (other == &PyArray_PyComplexDType) { if (PyTypeNum_ISCOMPLEX(cls->type_num)) { Py_INCREF(cls); return cls; @@ -940,14 +934,14 @@ default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) return NPY_DT_NewRef(&PyArray_CLongDoubleDType); } } - else if (other == &PyArray_PyFloatAbstractDType) { + else if (other == &PyArray_PyFloatDType) { if (PyTypeNum_ISCOMPLEX(cls->type_num) || PyTypeNum_ISFLOAT(cls->type_num)) { Py_INCREF(cls); return cls; } } - else if (other == &PyArray_PyIntAbstractDType) { + else if (other == &PyArray_PyLongDType) { if (PyTypeNum_ISCOMPLEX(cls->type_num) || PyTypeNum_ISFLOAT(cls->type_num) || PyTypeNum_ISINTEGER(cls->type_num) @@ -956,8 +950,10 @@ default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) return cls; } } + Py_INCREF(Py_NotImplemented); + return (PyArray_DTypeMeta *)Py_NotImplemented; } - if (!NPY_DT_is_legacy(other) || other->type_num > cls->type_num) { + if (other->type_num > cls->type_num) { /* * Let the more generic (larger type number) DType handle this * (note that half is after all others, which works out here.) @@ -1072,8 +1068,9 @@ object_common_dtype( * @returns 0 on success, -1 on failure. */ NPY_NO_EXPORT int -dtypemeta_wrap_legacy_descriptor(_PyArray_LegacyDescr *descr, - PyArray_ArrFuncs *arr_funcs, const char *name, const char *alias) +dtypemeta_wrap_legacy_descriptor( + _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, + PyTypeObject *dtype_super_class, const char *name, const char *alias) { int has_type_set = Py_TYPE(descr) == &PyArrayDescr_Type; @@ -1127,7 +1124,7 @@ dtypemeta_wrap_legacy_descriptor(_PyArray_LegacyDescr *descr, .tp_name = NULL, /* set below */ .tp_basicsize = sizeof(_PyArray_LegacyDescr), .tp_flags = Py_TPFLAGS_DEFAULT, - .tp_base = &PyArrayDescr_Type, + .tp_base = NULL, /* set below */ .tp_new = (newfunc)legacy_dtype_default_new, .tp_doc = ( "DType class corresponding to the scalar type and dtype of " @@ -1140,11 +1137,12 @@ dtypemeta_wrap_legacy_descriptor(_PyArray_LegacyDescr *descr, /* Further fields are not common between DTypes */ }; memcpy(dtype_class, &prototype, sizeof(PyArray_DTypeMeta)); - /* Fix name of the Type*/ + /* Fix name and superclass of the Type*/ ((PyTypeObject *)dtype_class)->tp_name = name; + ((PyTypeObject *)dtype_class)->tp_base = dtype_super_class, dtype_class->dt_slots = dt_slots; - /* Let python finish the initialization (probably unnecessary) */ + /* Let python finish the initialization */ if (PyType_Ready((PyTypeObject *)dtype_class) < 0) { Py_DECREF(dtype_class); return -1; @@ -1409,7 +1407,7 @@ PyArray_DTypeMeta *_Void_dtype = NULL; * for convenience as we are allowed to access the `DType` slots directly. 
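/*
 * In `default_builtin_common_dtype` above, failing to promote with a
 * non-legacy partner now returns `Py_NotImplemented` (as a new reference)
 * instead of falling through, so the other DType's handler gets a chance;
 * this is the same two-sided protocol Python uses for binary operators.
 * A generic sketch of that convention:
 */
#include <Python.h>

static PyObject *
common_handler(PyObject *self, int understood)
{
    if (!understood) {
        /* Defer: the dispatcher will try the partner's handler and only
         * raise if both sides return NotImplemented. */
        Py_INCREF(Py_NotImplemented);
        return Py_NotImplemented;
    }
    Py_INCREF(self);
    return self;
}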
*/ NPY_NO_EXPORT PyArray_ArrFuncs * -_PyDataType_GetArrFuncs(PyArray_Descr *descr) +_PyDataType_GetArrFuncs(const PyArray_Descr *descr) { return PyDataType_GetArrFuncs(descr); } diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index 667f9280eb13..344b440b38e8 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -155,8 +155,8 @@ python_builtins_are_known_scalar_types( NPY_NO_EXPORT int dtypemeta_wrap_legacy_descriptor( - _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, - const char *name, const char *alias); + _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, + PyTypeObject *dtype_super_class, const char *name, const char *alias); NPY_NO_EXPORT void initialize_legacy_dtypemeta_aliases(_PyArray_LegacyDescr **_builtin_descrs); @@ -261,7 +261,7 @@ extern PyArray_DTypeMeta PyArray_StringDType; /* Internal version see dtypmeta.c for more information. */ static inline PyArray_ArrFuncs * -PyDataType_GetArrFuncs(PyArray_Descr *descr) +PyDataType_GetArrFuncs(const PyArray_Descr *descr) { return &NPY_DT_SLOTS(NPY_DTYPE(descr))->f; } diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 7f7d8394d6f3..b6c7031f9321 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -394,6 +394,11 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, goto fail; } ni = PyArray_SIZE(indices); + if ((ni > 0) && (PyArray_Size((PyObject *)self) == 0)) { + PyErr_SetString(PyExc_IndexError, + "cannot replace elements of an empty array"); + goto fail; + } Py_INCREF(PyArray_DESCR(self)); values = (PyArrayObject *)PyArray_FromAny(values0, PyArray_DESCR(self), 0, 0, NPY_ARRAY_DEFAULT | NPY_ARRAY_FORCECAST, NULL); @@ -414,9 +419,8 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, Py_INCREF(PyArray_DESCR(self)); obj = (PyArrayObject *)PyArray_FromArray(self, PyArray_DESCR(self), flags); - if (obj != self) { - copied = 1; - } + copied = 1; + assert(self != obj); self = obj; } max_item = PyArray_SIZE(self); diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index f17e4ffa65c1..4a6c1f093769 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1580,7 +1580,7 @@ array_subscript(PyArrayObject *self, PyObject *op) if (PyArray_GetDTypeTransferFunction(is_aligned, itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + PyArray_DESCR(self), PyArray_DESCR((PyArrayObject *)result), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto finish; } @@ -1960,6 +1960,10 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) tmp_arr = (PyArrayObject *)op; } + if (tmp_arr && solve_may_share_memory(self, tmp_arr, 1) != 0) { + Py_SETREF(tmp_arr, (PyArrayObject *)PyArray_NewCopy(tmp_arr, NPY_ANYORDER)); + } + /* * Special case for very simple 1-d fancy indexing, which however * is quite common. 
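/*
 * The `array_assign_subscript` hunk above defensively copies the value array
 * whenever `solve_may_share_memory` cannot prove the operands are disjoint,
 * so aliased fancy assignment cannot read values it has already overwritten.
 * The conservative scatter pattern, reduced to plain C (function and
 * parameter names are illustrative):
 */
#include <stdlib.h>
#include <string.h>

static int
scatter(double *dst, const long *idx, const double *src, size_t n,
        int may_overlap)
{
    const double *s = src;
    double *tmp = NULL;
    if (may_overlap) {
        /* Work from a private snapshot of the right-hand side. */
        tmp = malloc(n * sizeof(*tmp));
        if (tmp == NULL) {
            return -1;
        }
        memcpy(tmp, src, n * sizeof(*tmp));
        s = tmp;
    }
    for (size_t i = 0; i < n; i++) {
        dst[idx[i]] = s[i];
    }
    free(tmp);
    return 0;
}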
This saves not only a lot of setup time in the @@ -1992,9 +1996,9 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) npy_intp itemsize = PyArray_ITEMSIZE(self); int is_aligned = IsUintAligned(self) && IsUintAligned(tmp_arr); - if (PyArray_GetDTypeTransferFunction(is_aligned, - itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + if (PyArray_GetDTypeTransferFunction( + is_aligned, itemsize, itemsize, + PyArray_DESCR(tmp_arr), PyArray_DESCR(self), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } @@ -2030,6 +2034,7 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) goto fail; } + int allocated_array = 0; if (tmp_arr == NULL) { /* Fill extra op, need to swap first */ tmp_arr = mit->extra_op; @@ -2043,6 +2048,7 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) if (PyArray_CopyObject(tmp_arr, op) < 0) { goto fail; } + allocated_array = 1; } if (PyArray_MapIterCheckIndices(mit) < 0) { @@ -2086,10 +2092,12 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) /* May need a generic copy function (only for refs and odd sizes) */ NPY_ARRAYMETHOD_FLAGS transfer_flags; npy_intp itemsize = PyArray_ITEMSIZE(self); - - if (PyArray_GetDTypeTransferFunction(1, - itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + // TODO: the heuristic used here to determine the src_dtype might be subtly wrong + // for non-REFCHK user DTypes. See gh-27057 for the prior discussion about this. + if (PyArray_GetDTypeTransferFunction( + 1, itemsize, itemsize, + allocated_array ? PyArray_DESCR(mit->extra_op) : PyArray_DESCR(self), + PyArray_DESCR(self), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index b61cbed4c957..adc1e53e24ab 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -811,8 +811,8 @@ array_astype(PyArrayObject *self, /* * If the memory layout matches and, data types are equivalent, - * and it's not a subtype if subok is False, then we - * can skip the copy. + * it's not a subtype if subok is False, and if the cast says + * view are possible, we can skip the copy. 
*/ if (forcecopy != NPY_AS_TYPE_COPY_ALWAYS && (order == NPY_KEEPORDER || @@ -823,11 +823,15 @@ array_astype(PyArrayObject *self, PyArray_IS_C_CONTIGUOUS(self)) || (order == NPY_FORTRANORDER && PyArray_IS_F_CONTIGUOUS(self))) && - (subok || PyArray_CheckExact(self)) && - PyArray_EquivTypes(dtype, PyArray_DESCR(self))) { - Py_DECREF(dtype); - Py_INCREF(self); - return (PyObject *)self; + (subok || PyArray_CheckExact(self))) { + npy_intp view_offset; + npy_intp is_safe = PyArray_SafeCast(dtype, PyArray_DESCR(self), + &view_offset, NPY_NO_CASTING, 1); + if (is_safe && (view_offset != NPY_MIN_INTP)) { + Py_DECREF(dtype); + Py_INCREF(self); + return (PyObject *)self; + } } if (!PyArray_CanCastArrayTo(self, dtype, casting)) { @@ -980,8 +984,7 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) Py_DECREF(self); return ret; } else { // copy == NPY_COPY_NEVER - PyErr_SetString(PyExc_ValueError, - "Unable to avoid copy while creating an array from given array."); + PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); Py_DECREF(self); return NULL; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 3c153adb83a8..9816fb5bc0a2 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -467,7 +467,6 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, if (ret == NULL) { return NULL; } - assert(PyArray_DESCR(ret) == descr); } /* @@ -1449,23 +1448,6 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) return 1; } - if (Py_TYPE(Py_TYPE(type1)) == &PyType_Type) { - /* - * 2021-12-17: This case is nonsense and should be removed eventually! - * - * boost::python has/had a bug effectively using EquivTypes with - * `type(arbitrary_obj)`. That is clearly wrong as that cannot be a - * `PyArray_Descr *`. We assume that `type(type(type(arbitrary_obj))` - * is always in practice `type` (this is the type of the metaclass), - * but for our descriptors, `type(type(descr))` is DTypeMeta. - * - * In that case, we just return False. There is a possibility that - * this actually _worked_ effectively (returning 1 sometimes). - * We ignore that possibility for simplicity; it really is not our bug. - */ - return 0; - } - /* * Do not use PyArray_CanCastTypeTo because it supports legacy flexible * dtypes as input. @@ -1586,8 +1568,7 @@ _array_fromobject_generic( } else { if (copy == NPY_COPY_NEVER) { - PyErr_SetString(PyExc_ValueError, - "Unable to avoid copy while creating a new array."); + PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); goto finish; } ret = (PyArrayObject *)PyArray_NewCopy(oparr, order); @@ -1609,7 +1590,10 @@ _array_fromobject_generic( /* One more chance for faster exit if user specified the dtype. 
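/*
 * `array_astype` above no longer short-circuits on `PyArray_EquivTypes`
 * alone: it additionally asks the casting machinery whether a zero-copy view
 * is legal (`PyArray_SafeCast` with `NPY_NO_CASTING` succeeds and
 * `view_offset` is not the `NPY_MIN_INTP` sentinel). That distinction
 * matters for dtypes such as StringDType, where "equivalent" instances can
 * still own different allocators. The gate reduced to its two conditions
 * (the sentinel name below is illustrative):
 */
#include <limits.h>

#define NO_VIEW_OFFSET LONG_MIN  /* "cast is fine, but not as a view" */

static int
can_return_self(int cast_is_safe, long view_offset)
{
    /* Skip the copy only when the cast is lossless AND the bytes can be
     * reinterpreted in place at some offset. */
    return cast_is_safe && view_offset != NO_VIEW_OFFSET;
}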
*/ oldtype = PyArray_DESCR(oparr); - if (PyArray_EquivTypes(oldtype, dtype)) { + npy_intp view_offset; + npy_intp is_safe = PyArray_SafeCast(oldtype, dtype, &view_offset, NPY_NO_CASTING, 1); + npy_intp view_safe = (is_safe && (view_offset != NPY_MIN_INTP)); + if (view_safe) { if (copy != NPY_COPY_ALWAYS && STRIDING_OK(oparr, order)) { if (oldtype == dtype) { Py_INCREF(op); @@ -2013,14 +1997,11 @@ array_empty(PyObject *NPY_UNUSED(ignored), ret = (PyArrayObject *)PyArray_Empty_int( shape.len, shape.ptr, dt_info.descr, dt_info.dtype, is_f_order); - npy_free_cache_dim_obj(shape); - return (PyObject *)ret; - fail: Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); npy_free_cache_dim_obj(shape); - return NULL; + return (PyObject *)ret; } static PyObject * @@ -3273,7 +3254,8 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) return NULL; } - NPY_cast_info cast_info = {.func = NULL}; + NPY_cast_info x_cast_info = {.func = NULL}; + NPY_cast_info y_cast_info = {.func = NULL}; ax = (PyArrayObject*)PyArray_FROM_O(x); if (ax == NULL) { @@ -3297,13 +3279,33 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) NPY_ITER_READONLY | NPY_ITER_ALIGNED, NPY_ITER_READONLY | NPY_ITER_ALIGNED }; + common_dt = PyArray_ResultType(2, &op_in[2], 0, NULL); if (common_dt == NULL) { goto fail; } + npy_intp itemsize = common_dt->elsize; + + // If x and y don't have references, we ask the iterator to create buffers + // using the common data type of x and y and then do fast trivial copies + // in the loop below. + // Otherwise trivial copies aren't possible and we handle the cast item by item + // in the loop. + PyArray_Descr *x_dt, *y_dt; + int trivial_copy_loop = !PyDataType_REFCHK(common_dt) && + ((itemsize == 16) || (itemsize == 8) || (itemsize == 4) || + (itemsize == 2) || (itemsize == 1)); + if (trivial_copy_loop) { + x_dt = common_dt; + y_dt = common_dt; + } + else { + x_dt = PyArray_DESCR(op_in[2]); + y_dt = PyArray_DESCR(op_in[3]); + } /* `PyArray_DescrFromType` cannot fail for simple builtin types: */ - PyArray_Descr * op_dt[4] = {common_dt, PyArray_DescrFromType(NPY_BOOL), - common_dt, common_dt}; + PyArray_Descr * op_dt[4] = {common_dt, PyArray_DescrFromType(NPY_BOOL), x_dt, y_dt}; + NpyIter * iter; NPY_BEGIN_THREADS_DEF; @@ -3317,26 +3319,27 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) /* Get the result from the iterator object array */ ret = (PyObject*)NpyIter_GetOperandArray(iter)[0]; - - npy_intp itemsize = common_dt->elsize; - - int has_ref = PyDataType_REFCHK(common_dt); + PyArray_Descr *ret_dt = PyArray_DESCR((PyArrayObject *)ret); NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; - npy_intp transfer_strides[2] = {itemsize, itemsize}; + npy_intp x_strides[2] = {x_dt->elsize, itemsize}; + npy_intp y_strides[2] = {y_dt->elsize, itemsize}; npy_intp one = 1; - if (has_ref || ((itemsize != 16) && (itemsize != 8) && (itemsize != 4) && - (itemsize != 2) && (itemsize != 1))) { + if (!trivial_copy_loop) { // The iterator has NPY_ITER_ALIGNED flag so no need to check alignment // of the input arrays. - // - // There's also no need to set up a cast for y, since the iterator - // ensures both casts are identical. 
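/*
 * The `PyArray_Where` rework around this point splits the single `cast_info`
 * into per-operand `x_cast_info`/`y_cast_info` with their own strides, and
 * takes the memcpy fast path only for reference-free dtypes of size
 * 1/2/4/8/16, where a constant length lets the compiler inline the copy.
 * The size dispatch in isolation (names are illustrative):
 */
#include <string.h>

#define WHERE_LOOP(SIZE)                                         \
    for (size_t i = 0; i < n; i++) {                             \
        memcpy(dst + i * (SIZE),                                 \
               cond[i] ? x + i * (SIZE) : y + i * (SIZE),        \
               (SIZE));  /* constant size -> plain load/store */ \
    }

static void
where_bytes(char *dst, const unsigned char *cond, const char *x,
            const char *y, size_t n, size_t itemsize)
{
    switch (itemsize) {
        case 1:  WHERE_LOOP(1);  break;
        case 2:  WHERE_LOOP(2);  break;
        case 4:  WHERE_LOOP(4);  break;
        case 8:  WHERE_LOOP(8);  break;
        case 16: WHERE_LOOP(16); break;
        default:                      /* generic, non-inlined fallback */
            for (size_t i = 0; i < n; i++) {
                memcpy(dst + i * itemsize,
                       cond[i] ? x + i * itemsize : y + i * itemsize,
                       itemsize);
            }
    }
}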
if (PyArray_GetDTypeTransferFunction( - 1, itemsize, itemsize, common_dt, common_dt, 0, - &cast_info, &transfer_flags) != NPY_SUCCEED) { + 1, x_strides[0], x_strides[1], + PyArray_DESCR(op_in[2]), ret_dt, 0, + &x_cast_info, &transfer_flags) != NPY_SUCCEED) { + goto fail; + } + if (PyArray_GetDTypeTransferFunction( + 1, y_strides[0], y_strides[1], + PyArray_DESCR(op_in[3]), ret_dt, 0, + &y_cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } } @@ -3368,19 +3371,19 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) npy_intp ystride = strides[3]; /* constant sizes so compiler replaces memcpy */ - if (!has_ref && itemsize == 16) { + if (trivial_copy_loop && itemsize == 16) { INNER_WHERE_LOOP(16); } - else if (!has_ref && itemsize == 8) { + else if (trivial_copy_loop && itemsize == 8) { INNER_WHERE_LOOP(8); } - else if (!has_ref && itemsize == 4) { + else if (trivial_copy_loop && itemsize == 4) { INNER_WHERE_LOOP(4); } - else if (!has_ref && itemsize == 2) { + else if (trivial_copy_loop && itemsize == 2) { INNER_WHERE_LOOP(2); } - else if (!has_ref && itemsize == 1) { + else if (trivial_copy_loop && itemsize == 1) { INNER_WHERE_LOOP(1); } else { @@ -3389,18 +3392,18 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) if (*csrc) { char *args[2] = {xsrc, dst}; - if (cast_info.func( - &cast_info.context, args, &one, - transfer_strides, cast_info.auxdata) < 0) { + if (x_cast_info.func( + &x_cast_info.context, args, &one, + x_strides, x_cast_info.auxdata) < 0) { goto fail; } } else { char *args[2] = {ysrc, dst}; - if (cast_info.func( - &cast_info.context, args, &one, - transfer_strides, cast_info.auxdata) < 0) { + if (y_cast_info.func( + &y_cast_info.context, args, &one, + y_strides, y_cast_info.auxdata) < 0) { goto fail; } } @@ -3420,7 +3423,8 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) Py_DECREF(ax); Py_DECREF(ay); Py_DECREF(common_dt); - NPY_cast_info_xfree(&cast_info); + NPY_cast_info_xfree(&x_cast_info); + NPY_cast_info_xfree(&y_cast_info); if (NpyIter_Deallocate(iter) != NPY_SUCCEED) { Py_DECREF(ret); @@ -3434,7 +3438,8 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) Py_XDECREF(ax); Py_XDECREF(ay); Py_XDECREF(common_dt); - NPY_cast_info_xfree(&cast_info); + NPY_cast_info_xfree(&x_cast_info); + NPY_cast_info_xfree(&y_cast_info); return NULL; } @@ -3503,6 +3508,36 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), if (PyArray_Check(from_obj)) { ret = PyArray_CanCastArrayTo((PyArrayObject *)from_obj, d2, casting); } + else if (PyArray_IsScalar(from_obj, Generic)) { + /* + * TODO: `PyArray_IsScalar` should not be required for new dtypes. + * weak-promotion branch is in practice identical to dtype one. 
+ */ + if (npy_promotion_state == NPY_USE_WEAK_PROMOTION) { + PyObject *descr = PyObject_GetAttr(from_obj, npy_ma_str_dtype); + if (descr == NULL) { + goto finish; + } + if (!PyArray_DescrCheck(descr)) { + Py_DECREF(descr); + PyErr_SetString(PyExc_TypeError, + "numpy_scalar.dtype did not return a dtype instance."); + goto finish; + } + ret = PyArray_CanCastTypeTo((PyArray_Descr *)descr, d2, casting); + Py_DECREF(descr); + } + else { + /* need to convert to object to consider old value-based logic */ + PyArrayObject *arr; + arr = (PyArrayObject *)PyArray_FROM_O(from_obj); + if (arr == NULL) { + goto finish; + } + ret = PyArray_CanCastArrayTo(arr, d2, casting); + Py_DECREF(arr); + } + } else if (PyArray_IsPythonNumber(from_obj)) { PyErr_SetString(PyExc_TypeError, "can_cast() does not support Python ints, floats, and " @@ -3511,15 +3546,6 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), "explicitly allow them again in the future."); goto finish; } - else if (PyArray_IsScalar(from_obj, Generic)) { - PyArrayObject *arr; - arr = (PyArrayObject *)PyArray_FROM_O(from_obj); - if (arr == NULL) { - goto finish; - } - ret = PyArray_CanCastArrayTo(arr, d2, casting); - Py_DECREF(arr); - } /* Otherwise use CanCastTypeTo */ else { if (!PyArray_DescrConverter2(from_obj, &d1) || d1 == NULL) { @@ -4787,6 +4813,7 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_convert = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_preserve = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_convert_if_no_array = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_cpu = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_err_msg_substr = NULL; static int @@ -4865,6 +4892,10 @@ intern_strings(void) if (npy_ma_str_cpu == NULL) { return -1; } + npy_ma_str_dtype = PyUnicode_InternFromString("dtype"); + if (npy_ma_str_dtype == NULL) { + return -1; + } npy_ma_str_array_err_msg_substr = PyUnicode_InternFromString( "__array__() got an unexpected keyword argument 'copy'"); if (npy_ma_str_array_err_msg_substr == NULL) { diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index ba03d367eeb8..52ca654804d0 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -19,6 +19,7 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_preserve; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert_if_no_array; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_cpu; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_err_msg_substr; #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/src/multiarray/public_dtype_api.c b/numpy/_core/src/multiarray/public_dtype_api.c index 73ab8a6b9f92..9b2d7a393842 100644 --- a/numpy/_core/src/multiarray/public_dtype_api.c +++ b/numpy/_core/src/multiarray/public_dtype_api.c @@ -71,7 +71,9 @@ PyArrayInitDTypeMeta_FromSpec( return -1; } - dtypemeta_initialize_struct_from_spec(DType, spec, 0); + if (dtypemeta_initialize_struct_from_spec(DType, spec, 0) < 0) { + return -1; + } if (NPY_DT_SLOTS(DType)->setitem == NULL || NPY_DT_SLOTS(DType)->getitem == NULL) { @@ -169,10 +171,15 @@ _fill_dtype_api(void *full_api_table[]) api_table[33] = &PyArray_ObjectDType; api_table[34] = &PyArray_VoidDType; /* Abstract */ - api_table[35] = &PyArray_PyIntAbstractDType; - api_table[36] = 
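/*
 * The hunks above intern `"dtype"` once at module set-up
 * (`npy_ma_str_dtype`) and switch call sites from `PyObject_GetAttrString`
 * to `PyObject_GetAttr`, avoiding a temporary `PyUnicode` allocation on each
 * lookup. The pattern, with illustrative names:
 */
#include <Python.h>

static PyObject *str_dtype = NULL;  /* created once, kept for module lifetime */

static int
intern_strings_once(void)
{
    str_dtype = PyUnicode_InternFromString("dtype");
    return (str_dtype == NULL) ? -1 : 0;
}

static PyObject *
get_dtype_attr(PyObject *obj)
{
    /* No per-call string construction; interning also makes the attribute
     * lookup effectively a pointer comparison in the dict probe. */
    return PyObject_GetAttr(obj, str_dtype);
}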
&PyArray_PyFloatAbstractDType; - api_table[37] = &PyArray_PyComplexAbstractDType; + api_table[35] = &PyArray_PyLongDType; + api_table[36] = &PyArray_PyFloatDType; + api_table[37] = &PyArray_PyComplexDType; api_table[38] = &PyArray_DefaultIntDType; /* Non-legacy DTypes that are built in to NumPy */ api_table[39] = &PyArray_StringDType; + + /* Abstract ones added directly: */ + full_api_table[366] = &PyArray_IntAbstractDType; + full_api_table[367] = &PyArray_FloatAbstractDType; + full_api_table[368] = &PyArray_ComplexAbstractDType; } diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 17098af5d3a6..a5185cba60aa 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -1201,7 +1201,9 @@ halftype_@kind@(PyObject *self) if (string == NULL || npy_legacy_print_mode <= 125) { return string; } - return PyUnicode_FromFormat("np.float16(%S)", string); + PyObject *res = PyUnicode_FromFormat("np.float16(%S)", string); + Py_DECREF(string); + return res; #endif } diff --git a/numpy/_core/src/multiarray/stringdtype/casts.c b/numpy/_core/src/multiarray/stringdtype/casts.c index e6819e3212dd..44ae6c92d128 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.c +++ b/numpy/_core/src/multiarray/stringdtype/casts.c @@ -79,7 +79,10 @@ string_to_string_resolve_descriptors(PyObject *NPY_UNUSED(self), return NPY_UNSAFE_CASTING; } - *view_offset = 0; + // views are only legal between descriptors that share allocators (e.g. the same object) + if (descr0->allocator == descr1->allocator) { + *view_offset = 0; + }; return NPY_NO_CASTING; } @@ -156,7 +159,7 @@ unicode_to_string(PyArrayMethod_Context *context, char *const data[], npy_intp const dimensions[], npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; PyArray_StringDTypeObject *sdescr = (PyArray_StringDTypeObject *)descrs[1]; npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); @@ -392,6 +395,7 @@ string_to_bool(PyArrayMethod_Context *context, char *const data[], npy_string_allocator *allocator = NpyString_acquire_allocator(descr); int has_null = descr->na_object != NULL; int has_string_na = descr->has_string_na; + int has_nan_na = descr->has_nan_na; const npy_static_string *default_string = &descr->default_string; npy_intp N = dimensions[0]; @@ -412,8 +416,13 @@ string_to_bool(PyArrayMethod_Context *context, char *const data[], } else if (is_null) { if (has_null && !has_string_na) { - // numpy treats NaN as truthy, following python - *out = NPY_TRUE; + if (has_nan_na) { + // numpy treats NaN as truthy, following python + *out = NPY_TRUE; + } + else { + *out = NPY_FALSE; + } } else { *out = (npy_bool)(default_string->size == 0); @@ -571,6 +580,9 @@ string_to_pylong(char *in, int has_null, { PyObject *val_obj = non_nullable_string_to_pystring( in, has_null, default_string, allocator); + if (val_obj == NULL) { + return NULL; + } // interpret as an integer in base 10 PyObject *pylong_value = PyLong_FromUnicodeObject(val_obj, 10); Py_DECREF(val_obj); @@ -882,6 +894,7 @@ string_to_pyfloat(char *in, int has_null, goto fail; \ } \ double dval = PyFloat_AS_DOUBLE(pyfloat_value); \ + Py_DECREF(pyfloat_value); \ npy_##typename fval = (double_to_float)(dval); \ \ if (NPY_UNLIKELY(isinf_name(fval) && !(npy_isinf(dval)))) { \ @@ -1669,7 +1682,7 @@ void_to_string(PyArrayMethod_Context *context, char *const data[], npy_intp const 
dimensions[], npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)descrs[1]; npy_string_allocator *allocator = NpyString_acquire_allocator(descr); @@ -1799,7 +1812,7 @@ bytes_to_string(PyArrayMethod_Context *context, char *const data[], npy_intp const dimensions[], npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)descrs[1]; npy_string_allocator *allocator = NpyString_acquire_allocator(descr); diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 807184c3c26a..fc5212e85987 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -185,6 +185,30 @@ _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona) return na_eq_cmp(sna, ona); } +// Currently this can only return 0 or -1, the latter indicating that the +// error indicator is set. Pass in out_na if you want to figure out which +// na is valid. +NPY_NO_EXPORT int +stringdtype_compatible_na(PyObject *na1, PyObject *na2, PyObject **out_na) { + if ((na1 != NULL) && (na2 != NULL)) { + int na_eq = na_eq_cmp(na1, na2); + + if (na_eq < 0) { + return -1; + } + else if (na_eq == 0) { + PyErr_Format(PyExc_TypeError, + "Cannot find a compatible null string value for " + "null strings '%R' and '%R'", na1, na2); + return -1; + } + } + if (out_na != NULL) { + *out_na = na1 ? na1 : na2; + } + return 0; +} + /* * This is used to determine the correct dtype to return when dealing * with a mix of different dtypes (for example when creating an array @@ -193,18 +217,18 @@ _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona) static PyArray_StringDTypeObject * common_instance(PyArray_StringDTypeObject *dtype1, PyArray_StringDTypeObject *dtype2) { - int eq = _eq_comparison(dtype1->coerce, dtype2->coerce, dtype1->na_object, - dtype2->na_object); + PyObject *out_na_object = NULL; - if (eq <= 0) { - PyErr_SetString( - PyExc_ValueError, - "Cannot find common instance for unequal dtype instances"); + if (stringdtype_compatible_na( + dtype1->na_object, dtype2->na_object, &out_na_object) == -1) { + PyErr_Format(PyExc_TypeError, + "Cannot find common instance for incompatible dtypes " + "'%R' and '%R'", (PyObject *)dtype1, (PyObject *)dtype2); return NULL; } return (PyArray_StringDTypeObject *)new_stringdtype_instance( - dtype1->na_object, dtype1->coerce); + out_na_object, dtype1->coerce && dtype1->coerce); } /* @@ -280,30 +304,22 @@ stringdtype_setitem(PyArray_StringDTypeObject *descr, PyObject *obj, char **data { npy_packed_static_string *sdata = (npy_packed_static_string *)dataptr; - int is_cmp = 0; - // borrow reference PyObject *na_object = descr->na_object; - // Note there are two different na_object != NULL checks here. - // - // Do not refactor this! - // // We need the result of the comparison after acquiring the allocator, but // cannot use functions requiring the GIL when the allocator is acquired, // so we do the comparison before acquiring the allocator. 
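/*
 * `stringdtype_compatible_na` above decides which NA sentinel two dtype
 * instances can share: when both define one they must compare equal,
 * otherwise whichever is set wins. A simplified sketch using the generic
 * CPython comparison (the real helper goes through `na_eq_cmp`, which also
 * treats NaN-like objects specially):
 */
#include <Python.h>

static int
compatible_na(PyObject *na1, PyObject *na2, PyObject **out)
{
    if (na1 != NULL && na2 != NULL) {
        int eq = PyObject_RichCompareBool(na1, na2, Py_EQ);
        if (eq < 0) {
            return -1;             /* the comparison itself raised */
        }
        if (eq == 0) {
            PyErr_SetString(PyExc_TypeError,
                            "no compatible null value for these dtypes");
            return -1;
        }
    }
    if (out != NULL) {
        *out = (na1 != NULL) ? na1 : na2;  /* borrowed reference */
    }
    return 0;
}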
- if (na_object != NULL) { - is_cmp = na_eq_cmp(obj, na_object); - if (is_cmp == -1) { - return -1; - } + int na_cmp = na_eq_cmp(obj, na_object); + if (na_cmp == -1) { + return -1; } npy_string_allocator *allocator = NpyString_acquire_allocator(descr); if (na_object != NULL) { - if (is_cmp) { + if (na_cmp) { if (NpyString_pack_null(allocator, sdata) < 0) { PyErr_SetString(PyExc_MemoryError, "Failed to pack null string during StringDType " @@ -400,8 +416,23 @@ stringdtype_getitem(PyArray_StringDTypeObject *descr, char **dataptr) // PyArray_NonzeroFunc // Unicode strings are nonzero if their length is nonzero. npy_bool -nonzero(void *data, void *NPY_UNUSED(arr)) +nonzero(void *data, void *arr) { + PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)PyArray_DESCR(arr); + int has_null = descr->na_object != NULL; + int has_nan_na = descr->has_nan_na; + int has_string_na = descr->has_string_na; + if (has_null && NpyString_isnull((npy_packed_static_string *)data)) { + if (!has_string_na) { + if (has_nan_na) { + // numpy treats NaN as truthy, following python + return 1; + } + else { + return 0; + } + } + } return NpyString_size((npy_packed_static_string *)data) != 0; } @@ -517,7 +548,7 @@ stringdtype_ensure_canonical(PyArray_StringDTypeObject *self) static int stringdtype_clear_loop(void *NPY_UNUSED(traverse_context), - PyArray_Descr *descr, char *data, npy_intp size, + const PyArray_Descr *descr, char *data, npy_intp size, npy_intp stride, NpyAuxData *NPY_UNUSED(auxdata)) { PyArray_StringDTypeObject *sdescr = (PyArray_StringDTypeObject *)descr; @@ -554,88 +585,36 @@ stringdtype_get_clear_loop(void *NPY_UNUSED(traverse_context), } static int -stringdtype_is_known_scalar_type(PyArray_DTypeMeta *NPY_UNUSED(cls), +stringdtype_is_known_scalar_type(PyArray_DTypeMeta *cls, PyTypeObject *pytype) { - if (pytype == &PyFloat_Type) { - return 1; - } - if (pytype == &PyLong_Type) { - return 1; - } - if (pytype == &PyBool_Type) { - return 1; - } - if (pytype == &PyComplex_Type) { - return 1; - } - if (pytype == &PyUnicode_Type) { - return 1; - } - if (pytype == &PyBytes_Type) { - return 1; - } - if (pytype == &PyBoolArrType_Type) { - return 1; - } - if (pytype == &PyByteArrType_Type) { - return 1; - } - if (pytype == &PyShortArrType_Type) { - return 1; - } - if (pytype == &PyIntArrType_Type) { - return 1; - } - if (pytype == &PyLongArrType_Type) { - return 1; - } - if (pytype == &PyLongLongArrType_Type) { - return 1; - } - if (pytype == &PyUByteArrType_Type) { - return 1; - } - if (pytype == &PyUShortArrType_Type) { - return 1; - } - if (pytype == &PyUIntArrType_Type) { - return 1; - } - if (pytype == &PyULongArrType_Type) { - return 1; - } - if (pytype == &PyULongLongArrType_Type) { - return 1; - } - if (pytype == &PyHalfArrType_Type) { - return 1; - } - if (pytype == &PyFloatArrType_Type) { - return 1; - } - if (pytype == &PyDoubleArrType_Type) { - return 1; - } - if (pytype == &PyLongDoubleArrType_Type) { - return 1; - } - if (pytype == &PyCFloatArrType_Type) { - return 1; - } - if (pytype == &PyCDoubleArrType_Type) { - return 1; - } - if (pytype == &PyCLongDoubleArrType_Type) { - return 1; - } - if (pytype == &PyIntpArrType_Type) { - return 1; - } - if (pytype == &PyUIntpArrType_Type) { + if (python_builtins_are_known_scalar_types(cls, pytype)) { return 1; } - if (pytype == &PyDatetimeArrType_Type) { + // accept every built-in numpy dtype + else if (pytype == &PyBoolArrType_Type || + pytype == &PyByteArrType_Type || + pytype == &PyShortArrType_Type || + pytype == &PyIntArrType_Type || + pytype 
== &PyLongArrType_Type || + pytype == &PyLongLongArrType_Type || + pytype == &PyUByteArrType_Type || + pytype == &PyUShortArrType_Type || + pytype == &PyUIntArrType_Type || + pytype == &PyULongArrType_Type || + pytype == &PyULongLongArrType_Type || + pytype == &PyHalfArrType_Type || + pytype == &PyFloatArrType_Type || + pytype == &PyDoubleArrType_Type || + pytype == &PyLongDoubleArrType_Type || + pytype == &PyCFloatArrType_Type || + pytype == &PyCDoubleArrType_Type || + pytype == &PyCLongDoubleArrType_Type || + pytype == &PyIntpArrType_Type || + pytype == &PyUIntpArrType_Type || + pytype == &PyDatetimeArrType_Type || + pytype == &PyTimedeltaArrType_Type) + { return 1; } return 0; @@ -922,8 +901,7 @@ load_new_string(npy_packed_static_string *out, npy_static_string *out_ss, "Failed to allocate string in %s", err_context); return -1; } - int is_null = NpyString_load(allocator, out_pss, out_ss); - if (is_null == -1) { + if (NpyString_load(allocator, out_pss, out_ss) == -1) { npy_gil_error(PyExc_MemoryError, "Failed to load string in %s", err_context); return -1; diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.h b/numpy/_core/src/multiarray/stringdtype/dtype.h index 278513fe8f12..2c2719602c32 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.h +++ b/numpy/_core/src/multiarray/stringdtype/dtype.h @@ -49,6 +49,9 @@ stringdtype_finalize_descr(PyArray_Descr *dtype); NPY_NO_EXPORT int _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona); +NPY_NO_EXPORT int +stringdtype_compatible_na(PyObject *na1, PyObject *na2, PyObject **out_na); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index 85f499c3c3ae..c9b5620211dc 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -286,7 +286,7 @@ NpyString_free_allocator(npy_string_allocator *allocator) * allocator mutex is held, as doing so may cause deadlocks. 
*/ NPY_NO_EXPORT npy_string_allocator * -NpyString_acquire_allocator(PyArray_StringDTypeObject *descr) +NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) { if (!PyThread_acquire_lock(descr->allocator->allocator_lock, NOWAIT_LOCK)) { PyThread_acquire_lock(descr->allocator->allocator_lock, WAIT_LOCK); @@ -318,7 +318,7 @@ NpyString_acquire_allocator(PyArray_StringDTypeObject *descr) */ NPY_NO_EXPORT void NpyString_acquire_allocators(size_t n_descriptors, - PyArray_Descr *descrs[], + PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) { for (size_t i=0; itype_num = typenum; /* update prototype to notice duplicate registration */ descr_proto->type_num = typenum; - if (dtypemeta_wrap_legacy_descriptor(descr, descr_proto->f, name, NULL) < 0) { + if (dtypemeta_wrap_legacy_descriptor( + descr, descr_proto->f, &PyArrayDescr_Type, name, NULL) < 0) { descr->type_num = -1; NPY_NUMUSERTYPES--; /* Override the type, it might be wrong and then decref crashes */ diff --git a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp index 1c06eb5755c7..194a81e2d7e9 100644 --- a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp @@ -2,31 +2,23 @@ #define VQSORT_ONLY_STATIC 1 #include "hwy/contrib/sort/vqsort-inl.h" +#if VQSORT_ENABLED + +#define DISPATCH_VQSORT(TYPE) \ +template<> void NPY_CPU_DISPATCH_CURFX(QSort)(TYPE *arr, intptr_t size) \ +{ \ + hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); \ +} \ + namespace np { namespace highway { namespace qsort_simd { -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int32_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint32_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int64_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint64_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(float *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(double *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} + DISPATCH_VQSORT(int32_t) + DISPATCH_VQSORT(uint32_t) + DISPATCH_VQSORT(int64_t) + DISPATCH_VQSORT(uint64_t) + DISPATCH_VQSORT(double) + DISPATCH_VQSORT(float) } } } // np::highway::qsort_simd + +#endif // VQSORT_ENABLED diff --git a/numpy/_core/src/npysort/highway_qsort.hpp b/numpy/_core/src/npysort/highway_qsort.hpp index e08fb3629ec8..ba3fe4920594 100644 --- a/numpy/_core/src/npysort/highway_qsort.hpp +++ b/numpy/_core/src/npysort/highway_qsort.hpp @@ -1,8 +1,22 @@ #ifndef NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP #define NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP +#include "hwy/highway.h" + #include "common.hpp" +// This replicates VQSORT_ENABLED from hwy/contrib/sort/shared-inl.h +// without checking the scalar target as this is not built within the dynamic +// dispatched sources. 
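/*
 * The `DISPATCH_VQSORT` rewrite above replaces six hand-written template
 * specializations with a single macro stamped once per element type. The
 * same token-pasting technique in plain C, with illustrative names:
 */
#include <stddef.h>

#define DEFINE_SUM(TYPE)                                       \
    static double sum_##TYPE(const TYPE *a, size_t n)          \
    {                                                          \
        double s = 0.0;                                        \
        for (size_t i = 0; i < n; i++) {                       \
            s += (double)a[i];  /* one body, many instances */ \
        }                                                      \
        return s;                                              \
    }

DEFINE_SUM(int)     /* expands to sum_int    */
DEFINE_SUM(float)   /* expands to sum_float  */
DEFINE_SUM(double)  /* expands to sum_double */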
+#if (HWY_COMPILER_MSVC && !HWY_IS_DEBUG_BUILD) || \ + (HWY_ARCH_ARM_V7 && HWY_IS_DEBUG_BUILD) || \ + (HWY_ARCH_ARM_A64 && HWY_COMPILER_GCC_ACTUAL && HWY_IS_ASAN) || \ + (HWY_ARCH_ARM_A64 && HWY_COMPILER_CLANG && \ + (HWY_IS_HWASAN || HWY_IS_MSAN || HWY_IS_TSAN || HWY_IS_ASAN)) +#define NPY_DISABLE_HIGHWAY_SORT +#endif + +#ifndef NPY_DISABLE_HIGHWAY_SORT namespace np { namespace highway { namespace qsort_simd { #ifndef NPY_DISABLE_OPTIMIZATION @@ -21,3 +35,4 @@ NPY_CPU_DISPATCH_DECLARE(template void QSelect, (T* arr, npy_intp n } } } // np::highway::qsort_simd #endif // NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP +#endif // NPY_DISABLE_HIGHWAY_SORT diff --git a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp index 35b6cc58c7e8..d069cb6373d0 100644 --- a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp @@ -4,6 +4,8 @@ #include "quicksort.hpp" +#if VQSORT_ENABLED + namespace np { namespace highway { namespace qsort_simd { template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, intptr_t size) @@ -24,3 +26,5 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int16_t *arr, intptr_t size) } } } } // np::highway::qsort_simd + +#endif // VQSORT_ENABLED diff --git a/numpy/_core/src/npysort/quicksort.cpp b/numpy/_core/src/npysort/quicksort.cpp index 15e5668f599d..aca748056f39 100644 --- a/numpy/_core/src/npysort/quicksort.cpp +++ b/numpy/_core/src/npysort/quicksort.cpp @@ -84,7 +84,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); - #else + #elif !defined(NPY_DISABLE_HIGHWAY_SORT) #include "highway_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); #endif @@ -95,7 +95,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); - #else + #elif !defined(NPY_DISABLE_HIGHWAY_SORT) #include "highway_qsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); #endif diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index 868696d22ad8..9a1b616d5cd4 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit 868696d22ad84c5cd46bf9c2a4dac65e60a9213a +Subproject commit 9a1b616d5cd4eaf49f7664fb86ccc1d18bad2b8d diff --git a/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp index 3083d6a8bf23..04bb03532719 100644 --- a/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp @@ -1,87 +1,26 @@ #include "x86_simd_qsort.hpp" #ifndef __CYGWIN__ -#if defined(NPY_HAVE_AVX512_SKX) -#include "x86-simd-sort/src/avx512-64bit-argsort.hpp" -#elif defined(NPY_HAVE_AVX2) -#include "x86-simd-sort/src/avx2-32bit-half.hpp" -#include "x86-simd-sort/src/avx2-32bit-qsort.hpp" -#include "x86-simd-sort/src/avx2-64bit-qsort.hpp" -#include "x86-simd-sort/src/xss-common-argsort.h" -#endif +#include "x86-simd-sort/src/x86simdsort-static-incl.h" -namespace { -template -void x86_argsort(T* arr, size_t* arg, npy_intp 
num) -{ -#if defined(NPY_HAVE_AVX512_SKX) - avx512_argsort(arr, arg, num, true); -#elif defined(NPY_HAVE_AVX2) - avx2_argsort(arr, arg, num, true); -#endif -} - -template -void x86_argselect(T* arr, size_t* arg, npy_intp kth, npy_intp num) -{ -#if defined(NPY_HAVE_AVX512_SKX) - avx512_argselect(arr, arg, kth, num, true); -#elif defined(NPY_HAVE_AVX2) - avx2_argselect(arr, arg, kth, num, true); -#endif -} -} // anonymous +#define DISPATCH_ARG_METHODS(TYPE) \ +template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(TYPE* arr, npy_intp* arg, npy_intp num, npy_intp kth) \ +{ \ + x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num, true); \ +} \ +template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(TYPE* arr, npy_intp *arg, npy_intp size) \ +{ \ + x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size, true); \ +} \ namespace np { namespace qsort_simd { -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(int32_t *arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86_argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(uint32_t *arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86_argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(int64_t*arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86_argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(uint64_t*arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86_argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(float *arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86_argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(double *arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86_argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(int32_t *arr, npy_intp *arg, npy_intp size) -{ - x86_argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(uint32_t *arr, npy_intp *arg, npy_intp size) -{ - x86_argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(int64_t *arr, npy_intp *arg, npy_intp size) -{ - x86_argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(uint64_t *arr, npy_intp *arg, npy_intp size) -{ - x86_argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(float *arr, npy_intp *arg, npy_intp size) -{ - x86_argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(double *arr, npy_intp *arg, npy_intp size) -{ - x86_argsort(arr, reinterpret_cast(arg), size); -} + DISPATCH_ARG_METHODS(uint32_t) + DISPATCH_ARG_METHODS(int32_t) + DISPATCH_ARG_METHODS(float) + DISPATCH_ARG_METHODS(uint64_t) + DISPATCH_ARG_METHODS(int64_t) + DISPATCH_ARG_METHODS(double) }} // namespace np::simd diff --git a/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp index ea4516408c56..c4505f058857 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp @@ -1,89 +1,25 @@ #include "x86_simd_qsort.hpp" #ifndef __CYGWIN__ -#if defined(NPY_HAVE_AVX512_SKX) - #include "x86-simd-sort/src/avx512-32bit-qsort.hpp" - #include "x86-simd-sort/src/avx512-64bit-qsort.hpp" - #include "x86-simd-sort/src/avx512-64bit-argsort.hpp" -#elif 
defined(NPY_HAVE_AVX2) - #include "x86-simd-sort/src/avx2-32bit-qsort.hpp" - #include "x86-simd-sort/src/avx2-64bit-qsort.hpp" -#endif +#include "x86-simd-sort/src/x86simdsort-static-incl.h" -namespace { -template -void x86_qsort(T* arr, npy_intp num) -{ -#if defined(NPY_HAVE_AVX512_SKX) - avx512_qsort(arr, num, true); -#elif defined(NPY_HAVE_AVX2) - avx2_qsort(arr, num, true); -#endif -} - -template -void x86_qselect(T* arr, npy_intp num, npy_intp kth) -{ -#if defined(NPY_HAVE_AVX512_SKX) - avx512_qselect(arr, kth, num, true); -#elif defined(NPY_HAVE_AVX2) - avx2_qselect(arr, kth, num, true); -#endif -} -} // anonymous +#define DISPATCH_SORT_METHODS(TYPE) \ +template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(TYPE *arr, npy_intp num, npy_intp kth) \ +{ \ + x86simdsortStatic::qselect(arr, kth, num, true); \ +} \ +template<> void NPY_CPU_DISPATCH_CURFX(QSort)(TYPE *arr, npy_intp num) \ +{ \ + x86simdsortStatic::qsort(arr, num, true); \ +} \ namespace np { namespace qsort_simd { -#if defined(NPY_HAVE_AVX512_SKX) || defined(NPY_HAVE_AVX2) -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int32_t *arr, npy_intp num, npy_intp kth) -{ - x86_qselect(arr, num, kth); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint32_t *arr, npy_intp num, npy_intp kth) -{ - x86_qselect(arr, num, kth); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int64_t*arr, npy_intp num, npy_intp kth) -{ - x86_qselect(arr, num, kth); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint64_t*arr, npy_intp num, npy_intp kth) -{ - x86_qselect(arr, num, kth); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(float *arr, npy_intp num, npy_intp kth) -{ - x86_qselect(arr, num, kth); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(double *arr, npy_intp num, npy_intp kth) -{ - x86_qselect(arr, num, kth); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int32_t *arr, npy_intp num) -{ - x86_qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint32_t *arr, npy_intp num) -{ - x86_qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int64_t *arr, npy_intp num) -{ - x86_qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint64_t *arr, npy_intp num) -{ - x86_qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(float *arr, npy_intp num) -{ - x86_qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(double *arr, npy_intp num) -{ - x86_qsort(arr, num); -} -#endif // NPY_HAVE_AVX512_SKX || NPY_HAVE_AVX2 - + DISPATCH_SORT_METHODS(uint32_t) + DISPATCH_SORT_METHODS(int32_t) + DISPATCH_SORT_METHODS(float) + DISPATCH_SORT_METHODS(uint64_t) + DISPATCH_SORT_METHODS(int64_t) + DISPATCH_SORT_METHODS(double) }} // namespace np::qsort_simd #endif // __CYGWIN__ diff --git a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp index 8222fc77cae3..063e713c5256 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp @@ -1,11 +1,13 @@ #include "x86_simd_qsort.hpp" #ifndef __CYGWIN__ -#if defined(NPY_HAVE_AVX512_SPR) - #include "x86-simd-sort/src/avx512fp16-16bit-qsort.hpp" - #include "x86-simd-sort/src/avx512-16bit-qsort.hpp" -#elif defined(NPY_HAVE_AVX512_ICL) - #include "x86-simd-sort/src/avx512-16bit-qsort.hpp" +#include "x86-simd-sort/src/x86simdsort-static-incl.h" +/* + * MSVC doesn't set the macro __AVX512VBMI2__ which is required for the 16-bit + * functions and therefore we need to manually include this file here + */ +#ifdef 
_MSC_VER +#include "x86-simd-sort/src/avx512-16bit-qsort.hpp" #endif namespace np { namespace qsort_simd { @@ -13,24 +15,23 @@ namespace np { namespace qsort_simd { /* * QSelect dispatch functions: */ -#if defined(NPY_HAVE_AVX512_ICL) || defined(NPY_HAVE_AVX512_SPR) template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(Half *arr, npy_intp num, npy_intp kth) { #if defined(NPY_HAVE_AVX512_SPR) - avx512_qselect(reinterpret_cast<_Float16*>(arr), kth, num, true); + x86simdsortStatic::qselect(reinterpret_cast<_Float16*>(arr), kth, num, true); #else - avx512_qselect_fp16(reinterpret_cast(arr), kth, num, true); + avx512_qselect_fp16(reinterpret_cast(arr), kth, num, true, false); #endif } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint16_t *arr, npy_intp num, npy_intp kth) { - avx512_qselect(arr, kth, num); + x86simdsortStatic::qselect(arr, kth, num); } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int16_t *arr, npy_intp num, npy_intp kth) { - avx512_qselect(arr, kth, num); + x86simdsortStatic::qselect(arr, kth, num); } /* @@ -39,20 +40,19 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int16_t *arr, npy_intp num, npy_ template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, npy_intp size) { #if defined(NPY_HAVE_AVX512_SPR) - avx512_qsort(reinterpret_cast<_Float16*>(arr), size, true); + x86simdsortStatic::qsort(reinterpret_cast<_Float16*>(arr), size, true); #else - avx512_qsort_fp16(reinterpret_cast(arr), size, true); + avx512_qsort_fp16(reinterpret_cast(arr), size, true, false); #endif } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t *arr, npy_intp size) { - avx512_qsort(arr, size); + x86simdsortStatic::qsort(arr, size); } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int16_t *arr, npy_intp size) { - avx512_qsort(arr, size); + x86simdsortStatic::qsort(arr, size); } -#endif // NPY_HAVE_AVX512_ICL || SPR }} // namespace np::qsort_simd diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 2c7d231b3695..02278806751f 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -653,8 +653,8 @@ add_sfloats_resolve_descriptors( */ static int translate_given_descrs_to_double( - int nin, int nout, PyArray_DTypeMeta *wrapped_dtypes[], - PyArray_Descr *given_descrs[], PyArray_Descr *new_descrs[]) + int nin, int nout, PyArray_DTypeMeta *const wrapped_dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *new_descrs[]) { assert(nin == 2 && nout == 1); for (int i = 0; i < 3; i++) { @@ -671,8 +671,8 @@ translate_given_descrs_to_double( static int translate_loop_descrs( - int nin, int nout, PyArray_DTypeMeta *new_dtypes[], - PyArray_Descr *given_descrs[], + int nin, int nout, PyArray_DTypeMeta *const new_dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *NPY_UNUSED(original_descrs[]), PyArray_Descr *loop_descrs[]) { diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index 26cc66c3a898..aecd2613a9c7 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -46,6 +46,7 @@ #include "numpy/npy_3kcompat.h" #include "common.h" +#include "arrayobject.h" #include "dispatching.h" #include "dtypemeta.h" #include "npy_hashtable.h" @@ -63,7 +64,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], - npy_bool allow_legacy_promotion); + npy_bool legacy_promotion_is_possible); /** @@ -274,21 +275,20 @@ 
resolve_implementation_info(PyUFuncObject *ufunc, /* Unspecified out always matches (see below for inputs) */ continue; } + assert(i == 0); /* - * This is a reduce-like operation, which always have the form - * `(res_DType, op_DType, res_DType)`. If the first and last - * dtype of the loops match, this should be reduce-compatible. + * This is a reduce-like operation, we enforce that these + * register with None as the first DType. If a reduction + * uses the same DType, we will do that promotion. + * A `(res_DType, op_DType, res_DType)` pattern can make sense + * in other context as well and could be confusing. */ - if (PyTuple_GET_ITEM(curr_dtypes, 0) - == PyTuple_GET_ITEM(curr_dtypes, 2)) { + if (PyTuple_GET_ITEM(curr_dtypes, 0) == Py_None) { continue; } - /* - * This should be a reduce, but doesn't follow the reduce - * pattern. So (for now?) consider this not a match. - */ + /* Otherwise, this is not considered a match */ matches = NPY_FALSE; - continue; + break; } if (resolver_dtype == (PyArray_DTypeMeta *)Py_None) { @@ -488,7 +488,7 @@ resolve_implementation_info(PyUFuncObject *ufunc, * those defined by the `signature` unmodified). */ static PyObject * -call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *promoter, +call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *info, PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], PyArrayObject *const operands[]) { @@ -498,37 +498,51 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *promoter, int promoter_result; PyArray_DTypeMeta *new_op_dtypes[NPY_MAXARGS]; - if (PyCapsule_CheckExact(promoter)) { - /* We could also go the other way and wrap up the python function... */ - PyArrayMethod_PromoterFunction *promoter_function = PyCapsule_GetPointer( - promoter, "numpy._ufunc_promoter"); - if (promoter_function == NULL) { + if (info != NULL) { + PyObject *promoter = PyTuple_GET_ITEM(info, 1); + if (PyCapsule_CheckExact(promoter)) { + /* We could also go the other way and wrap up the python function... */ + PyArrayMethod_PromoterFunction *promoter_function = PyCapsule_GetPointer( + promoter, "numpy._ufunc_promoter"); + if (promoter_function == NULL) { + return NULL; + } + promoter_result = promoter_function((PyObject *)ufunc, + op_dtypes, signature, new_op_dtypes); + } + else { + PyErr_SetString(PyExc_NotImplementedError, + "Calling python functions for promotion is not implemented."); return NULL; } - promoter_result = promoter_function((PyObject *)ufunc, - op_dtypes, signature, new_op_dtypes); - } - else { - PyErr_SetString(PyExc_NotImplementedError, - "Calling python functions for promotion is not implemented."); - return NULL; - } - if (promoter_result < 0) { - return NULL; - } - /* - * If none of the dtypes changes, we would recurse infinitely, abort. - * (Of course it is nevertheless possible to recurse infinitely.) - */ - int dtypes_changed = 0; - for (int i = 0; i < nargs; i++) { - if (new_op_dtypes[i] != op_dtypes[i]) { - dtypes_changed = 1; - break; + if (promoter_result < 0) { + return NULL; + } + /* + * If none of the dtypes changes, we would recurse infinitely, abort. + * (Of course it is nevertheless possible to recurse infinitely.) + * + * TODO: We could allow users to signal this directly and also move + * the call to be (almost immediate). That would call it + * unnecessarily sometimes, but may allow additional flexibility. 
+ */ + int dtypes_changed = 0; + for (int i = 0; i < nargs; i++) { + if (new_op_dtypes[i] != op_dtypes[i]) { + dtypes_changed = 1; + break; + } + } + if (!dtypes_changed) { + goto finish; } } - if (!dtypes_changed) { - goto finish; + else { + /* Reduction special path */ + new_op_dtypes[0] = NPY_DT_NewRef(op_dtypes[1]); + new_op_dtypes[1] = NPY_DT_NewRef(op_dtypes[1]); + Py_XINCREF(op_dtypes[2]); + new_op_dtypes[2] = op_dtypes[2]; } /* @@ -576,8 +590,7 @@ _make_new_typetup( none_count++; } else { - if (!NPY_DT_is_legacy(signature[i]) - || NPY_DT_is_abstract(signature[i])) { + if (!NPY_DT_is_legacy(signature[i])) { /* * The legacy type resolution can't deal with these. * This path will return `None` or so in the future to @@ -745,7 +758,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], - npy_bool allow_legacy_promotion) + npy_bool legacy_promotion_is_possible) { /* * Fetch the dispatching info which consists of the implementation and @@ -788,13 +801,13 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, /* * At this point `info` is NULL if there is no matching loop, or it is - * a promoter that needs to be used/called: + * a promoter that needs to be used/called. + * TODO: It may be nice to find a better reduce-solution, but this way + * it is a True fallback (not registered so lowest priority) */ - if (info != NULL) { - PyObject *promoter = PyTuple_GET_ITEM(info, 1); - + if (info != NULL || op_dtypes[0] == NULL) { info = call_promoter_and_recurse(ufunc, - promoter, op_dtypes, signature, ops); + info, op_dtypes, signature, ops); if (info == NULL && PyErr_Occurred()) { return NULL; } @@ -814,7 +827,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * However, we need to give the legacy implementation a chance here. * (it will modify `op_dtypes`). */ - if (!allow_legacy_promotion || ufunc->type_resolver == NULL || + if (!legacy_promotion_is_possible || ufunc->type_resolver == NULL || (ufunc->ntypes == 0 && ufunc->userloops == NULL)) { /* Already tried or not a "legacy" ufunc (no loop found, return) */ return NULL; @@ -921,11 +934,11 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], npy_bool force_legacy_promotion, - npy_bool allow_legacy_promotion, npy_bool promoting_pyscalars, npy_bool ensure_reduce_compatible) { int nin = ufunc->nin, nargs = ufunc->nargs; + npy_bool legacy_promotion_is_possible = NPY_TRUE; /* * Get the actual DTypes we operate with by setting op_dtypes[i] from @@ -950,10 +963,21 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, */ Py_CLEAR(op_dtypes[i]); } + /* + * If the op_dtype ends up being a non-legacy one, then we cannot use + * legacy promotion (unless this is a python scalar). + */ + if (op_dtypes[i] != NULL && !NPY_DT_is_legacy(op_dtypes[i]) && ( + signature[i] != NULL || // signature cannot be a pyscalar + !(PyArray_FLAGS(ops[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL))) { + legacy_promotion_is_possible = NPY_FALSE; + } } - if (force_legacy_promotion - && npy_promotion_state == NPY_USE_LEGACY_PROMOTION + int current_promotion_state = npy_promotion_state; + + if (force_legacy_promotion && legacy_promotion_is_possible + && current_promotion_state == NPY_USE_LEGACY_PROMOTION && (ufunc->ntypes != 0 || ufunc->userloops != NULL)) { /* * We must use legacy promotion for value-based logic. 
Call the old @@ -968,11 +992,10 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } /* Pause warnings and always use "new" path */ - int old_promotion_state = npy_promotion_state; npy_promotion_state = NPY_USE_WEAK_PROMOTION; PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, - ops, signature, op_dtypes, allow_legacy_promotion); - npy_promotion_state = old_promotion_state; + ops, signature, op_dtypes, legacy_promotion_is_possible); + npy_promotion_state = current_promotion_state; if (info == NULL) { goto handle_error; @@ -1017,7 +1040,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, Py_INCREF(signature[0]); return promote_and_get_ufuncimpl(ufunc, ops, signature, op_dtypes, - force_legacy_promotion, allow_legacy_promotion, + force_legacy_promotion, promoting_pyscalars, NPY_FALSE); } diff --git a/numpy/_core/src/umath/dispatching.h b/numpy/_core/src/umath/dispatching.h index c711a66688c6..9bb5fbd9b013 100644 --- a/numpy/_core/src/umath/dispatching.h +++ b/numpy/_core/src/umath/dispatching.h @@ -22,7 +22,6 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], npy_bool force_legacy_promotion, - npy_bool allow_legacy_promotion, npy_bool promote_pyscalars, npy_bool ensure_reduce_compatible); diff --git a/numpy/_core/src/umath/fast_loop_macros.h b/numpy/_core/src/umath/fast_loop_macros.h index b8c1926b2f7e..ab830d52e9ab 100644 --- a/numpy/_core/src/umath/fast_loop_macros.h +++ b/numpy/_core/src/umath/fast_loop_macros.h @@ -323,34 +323,6 @@ abs_ptrdiff(char *a, char *b) ((abs_ptrdiff(args[1], args[0]) >= (vsize)) || \ ((abs_ptrdiff(args[1], args[0]) == 0)))) -/* - * Avoid using SIMD for very large step sizes for several reasons: - * 1) Supporting large step sizes requires use of i64gather/scatter_ps instructions, - * in which case we need two i64gather instructions and an additional vinsertf32x8 - * instruction to load a single zmm register (since one i64gather instruction - * loads into a ymm register). This is not ideal for performance. - * 2) Gather and scatter instructions can be slow when the loads/stores - * cross page boundaries. - * - * We instead rely on i32gather/scatter_ps instructions which use a 32-bit index - * element. The index needs to be < INT_MAX to avoid overflow. MAX_STEP_SIZE - * ensures this. The condition also requires that the input and output arrays - * should have no overlap in memory. 
- */ -#define IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP \ - ((labs(steps[0]) < MAX_STEP_SIZE) && \ - (labs(steps[1]) < MAX_STEP_SIZE) && \ - (labs(steps[2]) < MAX_STEP_SIZE) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \ - (nomemoverlap(args[1], steps[1] * dimensions[0], args[2], steps[2] * dimensions[0]))) - -#define IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP \ - ((labs(steps[0]) < MAX_STEP_SIZE) && \ - (labs(steps[1]) < MAX_STEP_SIZE) && \ - (labs(steps[2]) < MAX_STEP_SIZE) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[1], steps[1] * dimensions[0]))) - /* * 1) Output should be contiguous, can handle strided input data * 2) Input step should be smaller than MAX_STEP_SIZE for performance @@ -359,7 +331,7 @@ abs_ptrdiff(char *a, char *b) #define IS_OUTPUT_BLOCKABLE_UNARY(esizein, esizeout, vsize) \ ((steps[0] & (esizein-1)) == 0 && \ steps[1] == (esizeout) && llabs(steps[0]) < MAX_STEP_SIZE && \ - (nomemoverlap(args[1], steps[1] * dimensions[0], args[0], steps[0] * dimensions[0]))) + (nomemoverlap(args[1], steps[1], args[0], steps[0], dimensions[0]))) #define IS_BLOCKABLE_REDUCE(esize, vsize) \ (steps[1] == (esize) && abs_ptrdiff(args[1], args[0]) >= (vsize) && \ diff --git a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 6e90d55225b5..681cbadadb07 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -104,8 +104,8 @@ generic_wrapped_legacy_loop(PyArrayMethod_Context *NPY_UNUSED(context), */ NPY_NO_EXPORT NPY_CASTING wrapped_legacy_resolve_descriptors(PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *NPY_UNUSED(given_descrs[]), + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const NPY_UNUSED(given_descrs[]), PyArray_Descr *NPY_UNUSED(loop_descrs[]), npy_intp *NPY_UNUSED(view_offset)) { @@ -123,8 +123,8 @@ wrapped_legacy_resolve_descriptors(PyArrayMethodObject *NPY_UNUSED(self), static NPY_CASTING simple_legacy_resolve_descriptors( PyArrayMethodObject *method, - PyArray_DTypeMeta **dtypes, - PyArray_Descr **given_descrs, + PyArray_DTypeMeta *const *dtypes, + PyArray_Descr *const *given_descrs, PyArray_Descr **output_descrs, npy_intp *NPY_UNUSED(view_offset)) { @@ -246,7 +246,7 @@ get_wrapped_legacy_ufunc_loop(PyArrayMethod_Context *context, static int copy_cached_initial( PyArrayMethod_Context *context, npy_bool NPY_UNUSED(reduction_is_empty), - char *initial) + void *initial) { memcpy(initial, context->method->legacy_initial, context->descriptors[0]->elsize); @@ -266,7 +266,7 @@ copy_cached_initial( static int get_initial_from_ufunc( PyArrayMethod_Context *context, npy_bool reduction_is_empty, - char *initial) + void *initial) { if (context->caller == NULL || !PyObject_TypeCheck(context->caller, &PyUFunc_Type)) { diff --git a/numpy/_core/src/umath/legacy_array_method.h b/numpy/_core/src/umath/legacy_array_method.h index 750de06c7992..82eeb04a0a15 100644 --- a/numpy/_core/src/umath/legacy_array_method.h +++ b/numpy/_core/src/umath/legacy_array_method.h @@ -28,7 +28,7 @@ get_wrapped_legacy_ufunc_loop(PyArrayMethod_Context *context, NPY_NO_EXPORT NPY_CASTING wrapped_legacy_resolve_descriptors(PyArrayMethodObject *, - PyArray_DTypeMeta **, PyArray_Descr **, PyArray_Descr **, npy_intp *); + PyArray_DTypeMeta *const *, PyArray_Descr *const *, PyArray_Descr **, npy_intp *); 
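Aside on the recurring signature change: the `PyArray_DTypeMeta *const *` / `PyArray_Descr *const *` edits scattered through the hunks above and below are one const-correctness fix. Resolve-descriptors-style callbacks receive an array of descriptor pointers that they may read but must not reseat, and in C a `T **` argument still converts implicitly to a `T *const *` parameter, so existing callers keep compiling while the compiler now rejects accidental writes inside the callee. Below is a minimal standalone sketch of that idea; `Descr` and `resolve` are hypothetical stand-ins for illustration, not NumPy's real API.

    #include <stdio.h>

    /* Hypothetical stand-in for PyArray_Descr; illustration only. */
    typedef struct { const char *name; } Descr;

    /* Element-const parameter: the callee may read `given`, but cannot
       overwrite its slots. */
    static int
    resolve(int nargs, Descr *const given[], Descr *out[])
    {
        for (int i = 0; i < nargs; i++) {
            /* given[i] = NULL;   <- would not compile: elements are const */
            out[i] = given[i];    /* reading and copying is still fine */
        }
        return 0;
    }

    int main(void)
    {
        Descr d0 = {"float64"}, d1 = {"float64"};
        Descr *given[2] = {&d0, &d1};  /* mutable array at the call site... */
        Descr *out[2];
        resolve(2, given, out);        /* ...converts implicitly to `*const[]` */
        printf("%s, %s\n", out[0]->name, out[1]->name);
        return 0;
    }

The same reasoning explains the `(PyArray_Descr **)context->descriptors` casts later in this patch: once the context's descriptor array is const-qualified, the few places that legitimately fill it in must cast the qualifier away explicitly.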
#ifdef __cplusplus } diff --git a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src index 159e275bd45e..b548fdf980d9 100644 --- a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src +++ b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src @@ -1350,12 +1350,17 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@) * #TYPE = FLOAT, DOUBLE# * #c = f, # * #C = F, # + * #suffix = f32, f64# */ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_frexp) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { #ifdef SIMD_AVX512_SKX - if (IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP) { + if ((npyv_loadable_stride_@suffix@(steps[0])) && + (npyv_storable_stride_@suffix@(steps[1])) && + (npyv_storable_stride_@suffix@(steps[2])) && + (!is_mem_overlap(args[0], steps[0], args[2], steps[2], dimensions[0])) && + (!is_mem_overlap(args[0], steps[0], args[1], steps[1], dimensions[0]))) { AVX512_SKX_frexp_@TYPE@(args, dimensions, steps); return; } @@ -1370,7 +1375,11 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_ldexp) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { #ifdef SIMD_AVX512_SKX - if (IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP) { + if ((npyv_loadable_stride_@suffix@(steps[0])) && + (npyv_storable_stride_@suffix@(steps[1])) && + (npyv_storable_stride_@suffix@(steps[2])) && + (!is_mem_overlap(args[0], steps[0], args[2], steps[2], dimensions[0])) && + (!is_mem_overlap(args[1], steps[1], args[2], steps[2], dimensions[0]))) { AVX512_SKX_ldexp_@TYPE@(args, dimensions, steps); return; } diff --git a/numpy/_core/src/umath/loops_utils.h.src b/numpy/_core/src/umath/loops_utils.h.src index 5640a1f0b646..828d16ee635c 100644 --- a/numpy/_core/src/umath/loops_utils.h.src +++ b/numpy/_core/src/umath/loops_utils.h.src @@ -16,28 +16,31 @@ #endif /* * nomemoverlap - returns false if two strided arrays have an overlapping - * region in memory. ip_size/op_size = size of the arrays which can be negative - * indicating negative steps. + * region in memory. */ NPY_FINLINE npy_bool -nomemoverlap(char *ip, npy_intp ip_size, char *op, npy_intp op_size) +nomemoverlap(char *ip, npy_intp ip_step, char *op, npy_intp op_step, npy_intp len) { + // Calculate inclusive ranges for offsets of items in arrays. + // The end pointer points to address of the last item. 
+ const npy_intp ip_offset = ip_step * (len - 1); + const npy_intp op_offset = op_step * (len - 1); char *ip_start, *ip_end, *op_start, *op_end; - if (ip_size < 0) { - ip_start = ip + ip_size; + if (ip_step < 0) { + ip_start = ip + ip_offset; ip_end = ip; } else { ip_start = ip; - ip_end = ip + ip_size; + ip_end = ip + ip_offset; } - if (op_size < 0) { - op_start = op + op_size; + if (op_step < 0) { + op_start = op + op_offset; op_end = op; } else { op_start = op; - op_end = op + op_size; + op_end = op + op_offset; } return (ip_start == op_start && op_end == ip_end) || (ip_start > op_end) || (op_start > ip_end); @@ -48,7 +51,7 @@ nomemoverlap(char *ip, npy_intp ip_size, char *op, npy_intp op_size) NPY_FINLINE npy_bool is_mem_overlap(const void *src, npy_intp src_step, const void *dst, npy_intp dst_step, npy_intp len) { - return !(nomemoverlap((char*)src, src_step*len, (char*)dst, dst_step*len)); + return !(nomemoverlap((char*)src, src_step, (char*)dst, dst_step, len)); } /* diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index 5a938eaedb85..46466418e417 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -21,6 +21,7 @@ #include "array_coercion.h" #include "array_method.h" #include "ctors.h" +#include "refcount.h" #include "numpy/ufuncobject.h" #include "lowlevel_strided_loops.h" @@ -438,7 +439,7 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, Py_INCREF(result); if (initial_buf != NULL && PyDataType_REFCHK(PyArray_DESCR(result))) { - PyArray_Item_XDECREF(initial_buf, PyArray_DESCR(result)); + PyArray_ClearBuffer(PyArray_DESCR(result), initial_buf, 0, 1, 1); } PyMem_FREE(initial_buf); NPY_AUXDATA_FREE(auxdata); @@ -450,7 +451,7 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, fail: if (initial_buf != NULL && PyDataType_REFCHK(op_dtypes[0])) { - PyArray_Item_XDECREF(initial_buf, op_dtypes[0]); + PyArray_ClearBuffer(op_dtypes[0], initial_buf, 0, 1, 1); } PyMem_FREE(initial_buf); NPY_AUXDATA_FREE(auxdata); diff --git a/numpy/_core/src/umath/special_integer_comparisons.cpp b/numpy/_core/src/umath/special_integer_comparisons.cpp index 9e0c9481960b..05026be96e67 100644 --- a/numpy/_core/src/umath/special_integer_comparisons.cpp +++ b/numpy/_core/src/umath/special_integer_comparisons.cpp @@ -177,7 +177,7 @@ resolve_descriptors_with_scalars( { int value_range = 0; - npy_bool first_is_pyint = dtypes[0] == &PyArray_PyIntAbstractDType; + npy_bool first_is_pyint = dtypes[0] == &PyArray_PyLongDType; int arr_idx = first_is_pyint? 1 : 0; int scalar_idx = first_is_pyint? 0 : 1; PyObject *scalar = input_scalars[scalar_idx]; @@ -327,7 +327,7 @@ template static int add_dtype_loops(PyObject *umath, PyArrayMethod_Spec *spec, PyObject *info) { - PyArray_DTypeMeta *PyInt = &PyArray_PyIntAbstractDType; + PyArray_DTypeMeta *PyInt = &PyArray_PyLongDType; PyObject *name = PyUnicode_FromString(comp_name(comp)); if (name == nullptr) { @@ -441,7 +441,7 @@ init_special_int_comparisons(PyObject *umath) * `np.equal(2, 4)` (with two python integers) use an object loop. 
*/ PyObject *dtype_tuple = PyTuple_Pack(3, - &PyArray_PyIntAbstractDType, &PyArray_PyIntAbstractDType, Bool); + &PyArray_PyLongDType, &PyArray_PyLongDType, Bool); if (dtype_tuple == NULL) { goto finish; } diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 02c2c82c4ac1..ce6377590d15 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -1149,49 +1149,54 @@ string_lrstrip_whitespace(Buffer<enc> buf, Buffer<enc> out, STRIPTYPE striptype) return 0; } - size_t i = 0; + size_t new_start = 0; size_t num_bytes = (buf.after - buf.buf); Buffer<enc> traverse_buf = Buffer<enc>(buf.buf, num_bytes); if (striptype != STRIPTYPE::RIGHTSTRIP) { - while (i < len) { + while (new_start < len) { if (!traverse_buf.first_character_isspace()) { break; } num_bytes -= traverse_buf.num_bytes_next_character(); - traverse_buf++; - i++; + new_start++; + traverse_buf++; // may go one beyond buffer } } - npy_intp j = len - 1; // Could also turn negative if we're stripping the whole string + size_t new_stop = len; // New stop is a range (beyond last char) if (enc == ENCODING::UTF8) { traverse_buf = Buffer<enc>(buf.after, 0) - 1; } else { - traverse_buf = buf + j; + traverse_buf = buf + (new_stop - 1); } if (striptype != STRIPTYPE::LEFTSTRIP) { - while (j >= static_cast<npy_intp>(i)) { + while (new_stop > new_start) { if (*traverse_buf != 0 && !traverse_buf.first_character_isspace()) { break; } + num_bytes -= traverse_buf.num_bytes_next_character(); - traverse_buf--; - j--; + new_stop--; + + // Do not step to character -1: can't find its start for utf-8. + if (new_stop > 0) { + traverse_buf--; + } } } - Buffer<enc> offset_buf = buf + i; + Buffer<enc> offset_buf = buf + new_start; if (enc == ENCODING::UTF8) { offset_buf.buffer_memcpy(out, num_bytes); return num_bytes; } - offset_buf.buffer_memcpy(out, j - i + 1); - out.buffer_fill_with_zeros_after_index(j - i + 1); - return j - i + 1; + offset_buf.buffer_memcpy(out, new_stop - new_start); + out.buffer_fill_with_zeros_after_index(new_stop - new_start); + return new_stop - new_start; } @@ -1218,13 +1223,13 @@ string_lrstrip_chars(Buffer<enc> buf1, Buffer<enc> buf2, Buffer<enc> out, STRIPT return len1; } - size_t i = 0; + size_t new_start = 0; size_t num_bytes = (buf1.after - buf1.buf); Buffer<enc> traverse_buf = Buffer<enc>(buf1.buf, num_bytes); if (striptype != STRIPTYPE::RIGHTSTRIP) { - while (i < len1) { + for (; new_start < len1; traverse_buf++) { Py_ssize_t res; switch (enc) { case ENCODING::ASCII: @@ -1245,21 +1250,20 @@ string_lrstrip_chars(Buffer<enc> buf1, Buffer<enc> buf2, Buffer<enc> out, STRIPT break; } num_bytes -= traverse_buf.num_bytes_next_character(); - traverse_buf++; - i++; + new_start++; } } - npy_intp j = len1 - 1; + size_t new_stop = len1; // New stop is a range (beyond last char) if (enc == ENCODING::UTF8) { traverse_buf = Buffer<enc>(buf1.after, 0) - 1; } else { - traverse_buf = buf1 + j; + traverse_buf = buf1 + (new_stop - 1); } if (striptype != STRIPTYPE::LEFTSTRIP) { - while (j >= static_cast<npy_intp>(i)) { + while (new_stop > new_start) { Py_ssize_t res; switch (enc) { case ENCODING::ASCII: @@ -1280,21 +1284,22 @@ string_lrstrip_chars(Buffer<enc> buf1, Buffer<enc> buf2, Buffer<enc> out, STRIPT break; } num_bytes -= traverse_buf.num_bytes_next_character(); - j--; - if (j > 0) { + new_stop--; + // Do not step to character -1: can't find its start for utf-8. 
+ if (new_stop > 0) { + traverse_buf--; + } } } - Buffer<enc> offset_buf = buf1 + i; + Buffer<enc> offset_buf = buf1 + new_start; if (enc == ENCODING::UTF8) { offset_buf.buffer_memcpy(out, num_bytes); return num_bytes; } - offset_buf.buffer_memcpy(out, j - i + 1); - out.buffer_fill_with_zeros_after_index(j - i + 1); - return j - i + 1; + offset_buf.buffer_memcpy(out, new_stop - new_start); + out.buffer_fill_with_zeros_after_index(new_stop - new_start); + return new_stop - new_start; } template <ENCODING enc> @@ -1462,7 +1467,7 @@ string_expandtabs_length(Buffer<enc> buf, npy_int64 tabsize) line_pos = 0; } } - if (new_len == PY_SSIZE_T_MAX || new_len < 0) { + if (new_len > INT_MAX || new_len < 0) { npy_gil_error(PyExc_OverflowError, "new string is too long"); return -1; } diff --git a/numpy/_core/src/umath/string_fastsearch.h b/numpy/_core/src/umath/string_fastsearch.h index 33563b7007c2..61abdcb5ad19 100644 --- a/numpy/_core/src/umath/string_fastsearch.h +++ b/numpy/_core/src/umath/string_fastsearch.h @@ -60,13 +60,13 @@ struct CheckedIndexer { char_type *buffer; size_t length; - CheckedIndexer() + CheckedIndexer() { buffer = NULL; length = 0; } - CheckedIndexer(char_type *buf, size_t len) + CheckedIndexer(char_type *buf, size_t len) { buffer = buf; length = len; } diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index a5686c884fc3..34c356a4eedd 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -512,8 +512,8 @@ string_expandtabs_loop(PyArrayMethod_Context *context, static NPY_CASTING string_addition_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { @@ -540,8 +540,8 @@ string_addition_resolve_descriptors( static NPY_CASTING string_multiply_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { @@ -574,8 +574,8 @@ string_multiply_resolve_descriptors( static NPY_CASTING string_strip_whitespace_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[2]), + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *NPY_UNUSED(view_offset)) { @@ -594,8 +594,8 @@ string_strip_whitespace_resolve_descriptors( static NPY_CASTING string_strip_chars_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { @@ -618,7 +618,7 @@ string_strip_chars_resolve_descriptors( static int string_findlike_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -634,7 +634,7 @@ string_findlike_promoter(PyObject *NPY_UNUSED(ufunc), static int string_replace_promoter(PyObject *NPY_UNUSED(ufunc), -
PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -657,8 +657,8 @@ string_replace_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_replace_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[5]), - PyArray_Descr *given_descrs[5], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[5]), + PyArray_Descr *const given_descrs[5], PyArray_Descr *loop_descrs[5], npy_intp *NPY_UNUSED(view_offset)) { @@ -694,7 +694,7 @@ string_replace_resolve_descriptors( static int string_startswith_endswith_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -710,7 +710,7 @@ string_startswith_endswith_promoter(PyObject *NPY_UNUSED(ufunc), static int string_expandtabs_length_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -723,7 +723,7 @@ string_expandtabs_length_promoter(PyObject *NPY_UNUSED(ufunc), static int string_expandtabs_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -738,8 +738,8 @@ string_expandtabs_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_expandtabs_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 340079a197d8..3f46cdce7cef 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -43,7 +43,7 @@ static NPY_CASTING multiply_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *dtypes[], PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const dtypes[], PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { PyArray_Descr *ldescr = given_descrs[0]; @@ -239,27 +239,18 @@ static int multiply_left_strided_loop( static NPY_CASTING binary_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; + int out_coerce = descr1->coerce && descr2->coerce; + PyObject *out_na_object = NULL; - // _eq_comparison has a short-circuit pointer comparison fast path, - // so no need to check here - int eq_res = _eq_comparison(descr1->coerce, descr2->coerce, - descr1->na_object, descr2->na_object); - - if (eq_res < 0) { 
- return (NPY_CASTING)-1; - } - - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "Can only do binary operations with equal StringDType " - "instances."); + if (stringdtype_compatible_na( + descr1->na_object, descr2->na_object, &out_na_object) == -1) { return (NPY_CASTING)-1; } @@ -272,8 +263,7 @@ binary_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), if (given_descrs[2] == NULL) { out_descr = (PyArray_Descr *)new_stringdtype_instance( - ((PyArray_StringDTypeObject *)given_descrs[1])->na_object, - ((PyArray_StringDTypeObject *)given_descrs[1])->coerce); + out_na_object, out_coerce); if (out_descr == NULL) { return (NPY_CASTING)-1; @@ -429,7 +419,7 @@ minimum_maximum_strided_loop(PyArrayMethod_Context *context, char *const data[], npy_packed_static_string *sout = (npy_packed_static_string *)out; int cmp = _compare(in1, in2, in1_descr, in2_descr); if (cmp == 0 && (in1 == out || in2 == out)) { - continue; + goto next_step; } if ((cmp < 0) ^ invert) { // if in and out are the same address, do nothing to avoid a @@ -449,6 +439,8 @@ minimum_maximum_strided_loop(PyArrayMethod_Context *context, char *const data[], } } } + + next_step: in1 += in1_stride; in2 += in2_stride; out += out_stride; @@ -556,9 +548,17 @@ string_comparison_strided_loop(PyArrayMethod_Context *context, char *const data[ static NPY_CASTING string_comparison_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { + PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; + PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; + + if (stringdtype_compatible_na(descr1->na_object, descr2->na_object, NULL) == -1) { + return (NPY_CASTING)-1; + } + Py_INCREF(given_descrs[0]); loop_descrs[0] = given_descrs[0]; Py_INCREF(given_descrs[1]); @@ -602,7 +602,8 @@ string_isnan_strided_loop(PyArrayMethod_Context *context, char *const data[], static NPY_CASTING string_bool_output_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { Py_INCREF(given_descrs[0]); @@ -615,7 +616,8 @@ string_bool_output_resolve_descriptors( static NPY_CASTING string_intp_output_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { Py_INCREF(given_descrs[0]); @@ -761,7 +763,8 @@ string_strlen_strided_loop(PyArrayMethod_Context *context, char *const data[], static int string_findlike_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); @@ -775,27 +778,15 @@ string_findlike_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_findlike_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta 
*NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - // _eq_comparison has a short-circuit pointer comparison fast path, - // so no need to check here - int eq_res = _eq_comparison(descr1->coerce, descr2->coerce, - descr1->na_object, descr2->na_object); - - if (eq_res < 0) { - return (NPY_CASTING)-1; - } - - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "Can only do binary operations with equal StringDType " - "instances."); + if (stringdtype_compatible_na(descr1->na_object, descr2->na_object, NULL) == -1) { return (NPY_CASTING)-1; } @@ -821,7 +812,8 @@ string_findlike_resolve_descriptors( static int string_startswith_endswith_promoter( PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); @@ -835,27 +827,15 @@ string_startswith_endswith_promoter( static NPY_CASTING string_startswith_endswith_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - // _eq_comparison has a short-circuit pointer comparison fast path, so - // no need to do it here - int eq_res = _eq_comparison(descr1->coerce, descr2->coerce, - descr1->na_object, descr2->na_object); - - if (eq_res < 0) { - return (NPY_CASTING)-1; - } - - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "Can only do binary operations with equal StringDType " - "instances."); + if (stringdtype_compatible_na(descr1->na_object, descr2->na_object, NULL) == -1) { return (NPY_CASTING)-1; } @@ -1043,56 +1023,36 @@ string_startswith_endswith_strided_loop(PyArrayMethod_Context *context, } static int -strip_chars_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], - PyArray_DTypeMeta *new_op_dtypes[]) +all_strings_promoter(PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) { + if ((op_dtypes[0] != &PyArray_StringDType && + op_dtypes[1] != &PyArray_StringDType && + op_dtypes[2] != &PyArray_StringDType)) { + /* + * This promoter was triggered with only unicode arguments, so use + * unicode. This can happen due to `dtype=` support which sets the + * output DType/signature. 
+ */ + new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_UnicodeDType); + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_UnicodeDType); + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_UnicodeDType); + return 0; + } + if ((signature[0] == &PyArray_UnicodeDType && + signature[1] == &PyArray_UnicodeDType && + signature[2] == &PyArray_UnicodeDType)) { + /* Unicode forced, but didn't override a string input: invalid */ + return -1; + } new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_StringDType); new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_StringDType); return 0; } -static NPY_CASTING -strip_chars_resolve_descriptors( - struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], - PyArray_Descr *loop_descrs[], - npy_intp *NPY_UNUSED(view_offset)) -{ - Py_INCREF(given_descrs[0]); - loop_descrs[0] = given_descrs[0]; - - // we don't actually care about the null behavior of the second argument, - // so no need to check if the first two descrs are equal like in - // binary_resolve_descriptors - - Py_INCREF(given_descrs[1]); - loop_descrs[1] = given_descrs[1]; - - PyArray_Descr *out_descr = NULL; - - if (given_descrs[2] == NULL) { - out_descr = (PyArray_Descr *)new_stringdtype_instance( - ((PyArray_StringDTypeObject *)given_descrs[0])->na_object, - ((PyArray_StringDTypeObject *)given_descrs[0])->coerce); - - if (out_descr == NULL) { - return (NPY_CASTING)-1; - } - } - else { - Py_INCREF(given_descrs[2]); - out_descr = given_descrs[2]; - } - - loop_descrs[2] = out_descr; - - return NPY_NO_CASTING; -} - - NPY_NO_EXPORT int string_lrstrip_chars_strided_loop( PyArrayMethod_Context *context, char *const data[], @@ -1105,6 +1065,7 @@ string_lrstrip_chars_strided_loop( PyArray_StringDTypeObject *s1descr = (PyArray_StringDTypeObject *)context->descriptors[0]; int has_null = s1descr->na_object != NULL; int has_string_na = s1descr->has_string_na; + int has_nan_na = s1descr->has_nan_na; const npy_static_string *default_string = &s1descr->default_string; npy_intp N = dimensions[0]; @@ -1131,28 +1092,47 @@ string_lrstrip_chars_strided_loop( s2 = *default_string; } } + else if (has_nan_na) { + if (s2_isnull) { + npy_gil_error(PyExc_ValueError, + "Cannot use a null string that is not a " + "string as the %s delimiter", ufunc_name); + } + if (s1_isnull) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in %s", + ufunc_name); + goto fail; + } + goto next_step; + } + } else { npy_gil_error(PyExc_ValueError, - "Cannot strip null values that are not strings"); + "Can only strip null values that are strings " + "or NaN-like values"); goto fail; } } + { + char *new_buf = (char *)PyMem_RawCalloc(s1.size, 1); + Buffer buf1((char *)s1.buf, s1.size); + Buffer buf2((char *)s2.buf, s2.size); + Buffer outbuf(new_buf, s1.size); + size_t new_buf_size = string_lrstrip_chars + (buf1, buf2, outbuf, striptype); + + if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { + npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", + ufunc_name); + PyMem_RawFree(new_buf); + goto fail; + } - - char *new_buf = (char *)PyMem_RawCalloc(s1.size, 1); - Buffer buf1((char *)s1.buf, s1.size); - Buffer buf2((char *)s2.buf, s2.size); - Buffer outbuf(new_buf, s1.size); - size_t new_buf_size = string_lrstrip_chars - (buf1, buf2, outbuf, striptype); - - if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { - npy_gil_error(PyExc_MemoryError, "Failed to pack 
string in %s", - ufunc_name); - goto fail; + PyMem_RawFree(new_buf); } - - PyMem_RawFree(new_buf); + next_step: in1 += strides[0]; in2 += strides[1]; @@ -1171,8 +1151,8 @@ string_lrstrip_chars_strided_loop( static NPY_CASTING strip_whitespace_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { @@ -1209,8 +1189,9 @@ string_lrstrip_whitespace_strided_loop( const char *ufunc_name = ((PyUFuncObject *)context->caller)->name; STRIPTYPE striptype = *(STRIPTYPE *)context->method->static_data; PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)context->descriptors[0]; - int has_string_na = descr->has_string_na; int has_null = descr->na_object != NULL; + int has_string_na = descr->has_string_na; + int has_nan_na = descr->has_nan_na; const npy_static_string *default_string = &descr->default_string; npy_string_allocator *allocators[2] = {}; @@ -1240,26 +1221,39 @@ string_lrstrip_whitespace_strided_loop( if (has_string_na || !has_null) { s = *default_string; } + else if (has_nan_na) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in %s", + ufunc_name); + goto fail; + } + goto next_step; + } else { npy_gil_error(PyExc_ValueError, - "Cannot strip null values that are not strings"); + "Can only strip null values that are strings or " + "NaN-like values"); goto fail; } } + { + char *new_buf = (char *)PyMem_RawCalloc(s.size, 1); + Buffer buf((char *)s.buf, s.size); + Buffer outbuf(new_buf, s.size); + size_t new_buf_size = string_lrstrip_whitespace( + buf, outbuf, striptype); + + if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { + npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", + ufunc_name); + goto fail; + } - char *new_buf = (char *)PyMem_RawCalloc(s.size, 1); - Buffer buf((char *)s.buf, s.size); - Buffer outbuf(new_buf, s.size); - size_t new_buf_size = string_lrstrip_whitespace( - buf, outbuf, striptype); - - if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { - npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", - ufunc_name); - goto fail; + PyMem_RawFree(new_buf); } - PyMem_RawFree(new_buf); + next_step: in += strides[0]; out += strides[1]; @@ -1278,7 +1272,8 @@ string_lrstrip_whitespace_strided_loop( static int string_replace_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); @@ -1291,30 +1286,24 @@ string_replace_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING replace_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; PyArray_StringDTypeObject *descr3 = (PyArray_StringDTypeObject *)given_descrs[2]; + int out_coerce = descr1->coerce && descr2->coerce && descr3->coerce; + 
PyObject *out_na_object = NULL; - // _eq_comparison has a short-circuit pointer comparison fast path, so - // no need to do it here - int eq_res = (_eq_comparison(descr1->coerce, descr2->coerce, - descr1->na_object, descr2->na_object) && - _eq_comparison(descr1->coerce, descr3->coerce, - descr1->na_object, descr3->na_object)); - - if (eq_res < 0) { + if (stringdtype_compatible_na( + descr1->na_object, descr2->na_object, &out_na_object) == -1) { return (NPY_CASTING)-1; } - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "String replace is only supported with equal StringDType " - "instances."); + if (stringdtype_compatible_na( + out_na_object, descr3->na_object, &out_na_object) == -1) { return (NPY_CASTING)-1; } @@ -1331,8 +1320,7 @@ replace_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), if (given_descrs[4] == NULL) { out_descr = (PyArray_Descr *)new_stringdtype_instance( - ((PyArray_StringDTypeObject *)given_descrs[0])->na_object, - ((PyArray_StringDTypeObject *)given_descrs[0])->coerce); + out_na_object, out_coerce); if (out_descr == NULL) { return (NPY_CASTING)-1; @@ -1365,7 +1353,9 @@ string_replace_strided_loop( PyArray_StringDTypeObject *descr0 = (PyArray_StringDTypeObject *)context->descriptors[0]; + int has_null = descr0->na_object != NULL; int has_string_na = descr0->has_string_na; + int has_nan_na = descr0->has_nan_na; const npy_static_string *default_string = &descr0->default_string; @@ -1395,11 +1385,29 @@ string_replace_strided_loop( goto fail; } else if (i1_isnull || i2_isnull || i3_isnull) { - if (!has_string_na) { - npy_gil_error(PyExc_ValueError, - "Null values are not supported as replacement arguments " - "for replace"); - goto fail; + if (has_null && !has_string_na) { + if (i2_isnull || i3_isnull) { + npy_gil_error(PyExc_ValueError, + "Null values are not supported as search " + "patterns or replacement strings for " + "replace"); + goto fail; + } + else if (i1_isnull) { + if (has_nan_na) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in replace"); + goto fail; + } + goto next_step; + } + else { + npy_gil_error(PyExc_ValueError, + "Only string or NaN-like null strings can " + "be used as search strings for replace"); + } + } } else { if (i1_isnull) { @@ -1414,32 +1422,51 @@ string_replace_strided_loop( } } - // conservatively overallocate - // TODO check overflow - size_t max_size; - if (i2s.size == 0) { - // interleaving - max_size = i1s.size + (i1s.size + 1)*(i3s.size); - } - else { - // replace i2 with i3 - max_size = i1s.size * (i3s.size/i2s.size + 1); - } - char *new_buf = (char *)PyMem_RawCalloc(max_size, 1); - Buffer buf1((char *)i1s.buf, i1s.size); - Buffer buf2((char *)i2s.buf, i2s.size); - Buffer buf3((char *)i3s.buf, i3s.size); - Buffer outbuf(new_buf, max_size); + { + Buffer buf1((char *)i1s.buf, i1s.size); + Buffer buf2((char *)i2s.buf, i2s.size); - size_t new_buf_size = string_replace( - buf1, buf2, buf3, *(npy_int64 *)in4, outbuf); + npy_int64 in_count = *(npy_int64*)in4; + if (in_count == -1) { + in_count = NPY_MAX_INT64; + } - if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { - npy_gil_error(PyExc_MemoryError, "Failed to pack string in replace"); - goto fail; - } + npy_int64 found_count = string_count( + buf1, buf2, 0, NPY_MAX_INT64); + if (found_count < 0) { + goto fail; + } - PyMem_RawFree(new_buf); + npy_intp count = Py_MIN(in_count, found_count); + + Buffer buf3((char *)i3s.buf, i3s.size); + + // conservatively overallocate + // 
TODO check overflow + size_t max_size; + if (i2s.size == 0) { + // interleaving + max_size = i1s.size + (i1s.size + 1)*(i3s.size); + } + else { + // replace i2 with i3 + size_t change = i2s.size >= i3s.size ? 0 : i3s.size - i2s.size; + max_size = i1s.size + count * change; + } + char *new_buf = (char *)PyMem_RawCalloc(max_size, 1); + Buffer outbuf(new_buf, max_size); + + size_t new_buf_size = string_replace( + buf1, buf2, buf3, count, outbuf); + + if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { + npy_gil_error(PyExc_MemoryError, "Failed to pack string in replace"); + goto fail; + } + + PyMem_RawFree(new_buf); + } + next_step: in1 += strides[0]; in2 += strides[1]; @@ -1459,8 +1486,8 @@ string_replace_strided_loop( static NPY_CASTING expandtabs_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { @@ -1572,11 +1599,10 @@ string_expandtabs_strided_loop(PyArrayMethod_Context *context, } - NPY_NO_EXPORT int string_inputs_promoter( - PyObject *ufunc_obj, PyArray_DTypeMeta *op_dtypes[], - PyArray_DTypeMeta *signature[], + PyObject *ufunc_obj, PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[], PyArray_DTypeMeta *final_dtype, PyArray_DTypeMeta *result_dtype) @@ -1608,8 +1634,8 @@ string_inputs_promoter( static int string_object_bool_output_promoter( - PyObject *ufunc, PyArray_DTypeMeta *op_dtypes[], - PyArray_DTypeMeta *signature[], + PyObject *ufunc, PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { return string_inputs_promoter( @@ -1619,8 +1645,8 @@ string_object_bool_output_promoter( static int string_unicode_bool_output_promoter( - PyObject *ufunc, PyArray_DTypeMeta *op_dtypes[], - PyArray_DTypeMeta *signature[], + PyObject *ufunc, PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { return string_inputs_promoter( @@ -1631,7 +1657,7 @@ string_unicode_bool_output_promoter( static int is_integer_dtype(PyArray_DTypeMeta *DType) { - if (DType == &PyArray_PyIntAbstractDType) { + if (DType == &PyArray_PyLongDType) { return 1; } else if (DType == &PyArray_Int8DType) { @@ -1701,8 +1727,9 @@ is_integer_dtype(PyArray_DTypeMeta *DType) static int -string_multiply_promoter(PyObject *ufunc_obj, PyArray_DTypeMeta *op_dtypes[], - PyArray_DTypeMeta *signature[], +string_multiply_promoter(PyObject *ufunc_obj, + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { PyUFuncObject *ufunc = (PyUFuncObject *)ufunc_obj; @@ -1794,16 +1821,16 @@ add_promoter(PyObject *numpy, const char *ufunc_name, PyObject *DType_tuple = PyTuple_New(n_dtypes); - for (size_t i=0; iresolve_descriptors == &wrapped_legacy_resolve_descriptors) { /* * In this case the legacy type resolution was definitely called @@ -1091,7 +1087,7 @@ execute_ufunc_loop(PyArrayMethod_Context *context, int masked, NpyIter *iter = NpyIter_AdvancedNew(nop + masked, op, iter_flags, order, NPY_UNSAFE_CASTING, - op_flags, context->descriptors, + op_flags, (PyArray_Descr **)context->descriptors, -1, NULL, NULL, buffersize); if (iter == NULL) { return -1; @@ -2364,8 +2360,7 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, } PyArrayMethodObject 
*ufuncimpl = promote_and_get_ufuncimpl(ufunc, - ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE, - NPY_FALSE, NPY_TRUE); + ops, signature, operation_DTypes, NPY_FALSE, NPY_FALSE, NPY_TRUE); if (evil_ndim_mutating_hack) { ((PyArrayObject_fields *)out)->nd = 0; } @@ -2407,6 +2402,13 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, out_descrs[0], out_descrs[1], out_descrs[2]); goto fail; } + /* + * After checking that they are equivalent, we enforce the use of the out + * one (which the user should have defined). (Needed by string dtype) + */ + Py_INCREF(out_descrs[2]); + Py_SETREF(out_descrs[0], out_descrs[2]); + /* TODO: This really should _not_ be unsafe casting (same above)! */ if (validate_casting(ufuncimpl, ufunc, ops, out_descrs, casting) < 0) { goto fail; @@ -4056,17 +4058,15 @@ resolve_descriptors(int nop, original_dtypes[i] = PyArray_DTYPE(operands[i]); Py_INCREF(original_dtypes[i]); } - if (i < nin - && NPY_DT_is_abstract(signature[i]) - && inputs_tup != NULL) { - /* - * TODO: We may wish to allow any scalar here. Checking for - * abstract assumes this works out for Python scalars, - * which is the important case (especially for now). - * - * One possible check would be `DType->type == type(obj)`. - */ - input_scalars[i] = PyTuple_GET_ITEM(inputs_tup, i); + /* + * Check whether something is a scalar of the given type. + * We leave it to resolve_descriptors_with_scalars to deal + * with, e.g., only doing something special for python scalars. + */ + if (i < nin && inputs_tup != NULL) { + PyObject *input = PyTuple_GET_ITEM(inputs_tup, i); + input_scalars[i] = signature[i]->scalar_type == Py_TYPE(input) ? + input : NULL; } else { input_scalars[i] = NULL; @@ -4441,13 +4441,12 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, npy_bool subok = NPY_TRUE; int keepdims = -1; /* We need to know if it was passed */ npy_bool force_legacy_promotion; - npy_bool allow_legacy_promotion; npy_bool promoting_pyscalars; if (convert_ufunc_arguments(ufunc, /* extract operand related information: */ full_args, operands, operand_DTypes, - &force_legacy_promotion, &allow_legacy_promotion, + &force_legacy_promotion, &promoting_pyscalars, /* extract general information: */ order_obj, &order, @@ -4468,7 +4467,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, */ PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc, operands, signature, - operand_DTypes, force_legacy_promotion, allow_legacy_promotion, + operand_DTypes, force_legacy_promotion, promoting_pyscalars, NPY_FALSE); if (ufuncimpl == NULL) { goto fail; @@ -5799,22 +5798,20 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) operand_DTypes[0] = NPY_DTYPE(PyArray_DESCR(op1_array)); Py_INCREF(operand_DTypes[0]); int force_legacy_promotion = 0; - int allow_legacy_promotion = NPY_DT_is_legacy(operand_DTypes[0]); if (op2_array != NULL) { tmp_operands[1] = op2_array; operand_DTypes[1] = NPY_DTYPE(PyArray_DESCR(op2_array)); Py_INCREF(operand_DTypes[1]); - allow_legacy_promotion &= NPY_DT_is_legacy(operand_DTypes[1]); tmp_operands[2] = tmp_operands[0]; operand_DTypes[2] = operand_DTypes[0]; Py_INCREF(operand_DTypes[2]); - if (allow_legacy_promotion && ((PyArray_NDIM(op1_array) == 0) - != (PyArray_NDIM(op2_array) == 0))) { - /* both are legacy and only one is 0-D: force legacy */ - force_legacy_promotion = should_use_min_scalar(2, tmp_operands, 0, NULL); - } + if ((PyArray_NDIM(op1_array) == 0) + != (PyArray_NDIM(op2_array) == 0)) { + /* both are legacy and only one is 0-D: force legacy */ + force_legacy_promotion = should_use_min_scalar(2, 
tmp_operands, 0, NULL); + } } else { tmp_operands[1] = tmp_operands[0]; @@ -5825,7 +5822,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) ufuncimpl = promote_and_get_ufuncimpl(ufunc, tmp_operands, signature, operand_DTypes, force_legacy_promotion, - allow_legacy_promotion, NPY_FALSE, NPY_FALSE); + NPY_FALSE, NPY_FALSE); if (ufuncimpl == NULL) { for (int i = 0; i < 3; i++) { Py_XDECREF(signature[i]); @@ -6067,7 +6064,6 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, npy_promotion_state = NPY_USE_WEAK_PROMOTION; npy_bool promoting_pyscalars = NPY_FALSE; - npy_bool allow_legacy_promotion = NPY_TRUE; if (_get_fixed_signature(ufunc, NULL, signature_obj, signature) < 0) { goto finish; @@ -6100,9 +6096,6 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, } DTypes[i] = NPY_DTYPE(descr); Py_INCREF(DTypes[i]); - if (!NPY_DT_is_legacy(DTypes[i])) { - allow_legacy_promotion = NPY_FALSE; - } } /* Explicitly allow int, float, and complex for the "weak" types. */ else if (descr_obj == (PyObject *)&PyLong_Type) { @@ -6112,8 +6105,8 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, goto finish; } PyArray_ENABLEFLAGS(dummy_arrays[i], NPY_ARRAY_WAS_PYTHON_INT); - Py_INCREF(&PyArray_PyIntAbstractDType); - DTypes[i] = &PyArray_PyIntAbstractDType; + Py_INCREF(&PyArray_PyLongDType); + DTypes[i] = &PyArray_PyLongDType; promoting_pyscalars = NPY_TRUE; } else if (descr_obj == (PyObject *)&PyFloat_Type) { @@ -6123,8 +6116,8 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, goto finish; } PyArray_ENABLEFLAGS(dummy_arrays[i], NPY_ARRAY_WAS_PYTHON_FLOAT); - Py_INCREF(&PyArray_PyFloatAbstractDType); - DTypes[i] = &PyArray_PyFloatAbstractDType; + Py_INCREF(&PyArray_PyFloatDType); + DTypes[i] = &PyArray_PyFloatDType; promoting_pyscalars = NPY_TRUE; } else if (descr_obj == (PyObject *)&PyComplex_Type) { @@ -6134,8 +6127,8 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, goto finish; } PyArray_ENABLEFLAGS(dummy_arrays[i], NPY_ARRAY_WAS_PYTHON_COMPLEX); - Py_INCREF(&PyArray_PyComplexAbstractDType); - DTypes[i] = &PyArray_PyComplexAbstractDType; + Py_INCREF(&PyArray_PyComplexDType); + DTypes[i] = &PyArray_PyComplexDType; promoting_pyscalars = NPY_TRUE; } else if (descr_obj == Py_None) { @@ -6158,7 +6151,7 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, if (!reduction) { ufuncimpl = promote_and_get_ufuncimpl(ufunc, dummy_arrays, signature, DTypes, NPY_FALSE, - allow_legacy_promotion, promoting_pyscalars, NPY_FALSE); + promoting_pyscalars, NPY_FALSE); if (ufuncimpl == NULL) { goto finish; } @@ -6243,7 +6236,7 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, context->descriptors = call_info->_descrs; for (int i=0; i < ufunc->nargs; i++) { Py_INCREF(operation_descrs[i]); - context->descriptors[i] = operation_descrs[i]; + ((PyArray_Descr **)context->descriptors)[i] = operation_descrs[i]; } result = PyTuple_Pack(2, result_dtype_tuple, capsule); diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index 4975d41147ea..f9962a9b4e32 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -226,7 +226,7 @@ NPY_NO_EXPORT int PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING casting, PyArrayObject **operands, - PyArray_Descr **dtypes) + PyArray_Descr *const *dtypes) { int i, nin = ufunc->nin, nop = nin + ufunc->nout; @@ -1471,7 +1471,7 
@@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, static int find_userloop(PyUFuncObject *ufunc, - PyArray_Descr **dtypes, + PyArray_Descr *const *dtypes, PyUFuncGenericFunction *out_innerloop, void **out_innerloopdata) { @@ -1535,7 +1535,7 @@ find_userloop(PyUFuncObject *ufunc, NPY_NO_EXPORT int PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc, - PyArray_Descr **dtypes, + PyArray_Descr *const *dtypes, PyUFuncGenericFunction *out_innerloop, void **out_innerloopdata, int *out_needs_api) diff --git a/numpy/_core/src/umath/ufunc_type_resolution.h b/numpy/_core/src/umath/ufunc_type_resolution.h index 84a2593f44c4..3f8e7505ea39 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.h +++ b/numpy/_core/src/umath/ufunc_type_resolution.h @@ -134,7 +134,7 @@ type_tuple_type_resolver(PyUFuncObject *self, NPY_NO_EXPORT int PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc, - PyArray_Descr **dtypes, + PyArray_Descr *const *dtypes, PyUFuncGenericFunction *out_innerloop, void **out_innerloopdata, int *out_needs_api); diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index 3f3228237c21..ddfe56b687a7 100644 --- a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -36,8 +36,8 @@ static NPY_CASTING wrapping_method_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[], - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *view_offset) { @@ -54,7 +54,7 @@ wrapping_method_resolve_descriptors( self->wrapped_meth, self->wrapped_dtypes, orig_given_descrs, orig_loop_descrs, view_offset); for (int i = 0; i < nargs; i++) { - Py_XDECREF(orig_given_descrs); + Py_XDECREF(orig_given_descrs[i]); } if (casting < 0) { return -1; @@ -62,7 +62,7 @@ wrapping_method_resolve_descriptors( int res = self->translate_loop_descrs( nin, nout, dtypes, given_descrs, orig_loop_descrs, loop_descrs); for (int i = 0; i < nargs; i++) { - Py_DECREF(orig_given_descrs); + Py_DECREF(orig_loop_descrs[i]); } if (res < 0) { return -1; @@ -95,6 +95,7 @@ wrapping_auxdata_free(wrapping_auxdata *wrapping_auxdata) if (wrapping_auxdata_freenum < WRAPPING_AUXDATA_FREELIST_SIZE) { wrapping_auxdata_freelist[wrapping_auxdata_freenum] = wrapping_auxdata; + wrapping_auxdata_freenum++; } else { PyMem_Free(wrapping_auxdata); @@ -158,8 +159,8 @@ wrapping_method_get_loop( auxdata->orig_context.caller = context->caller; if (context->method->translate_given_descrs( - nin, nout, context->method->wrapped_dtypes, - context->descriptors, auxdata->orig_context.descriptors) < 0) { + nin, nout, context->method->wrapped_dtypes, context->descriptors, + (PyArray_Descr **)auxdata->orig_context.descriptors) < 0) { NPY_AUXDATA_FREE((NpyAuxData *)auxdata); return -1; } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 78e39add631a..28622b6a5c3e 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -46,12 +46,17 @@ "add", "multiply", "isalpha", "isdigit", "isspace", "isalnum", "islower", "isupper", "istitle", "isdecimal", "isnumeric", "str_len", "find", "rfind", "index", "rindex", "count", "startswith", "endswith", "lstrip", - "rstrip", "strip", "replace", "expandtabs", + "rstrip", "strip", "replace", "expandtabs", "center", "ljust", "rjust", + "zfill", # _vec_string - Will gradually become ufuncs as well - "mod", "decode", "encode", "center", "ljust", "rjust", "zfill", "upper", - "lower", 
"swapcase", "capitalize", "title", "join", "split", "rsplit", - "splitlines", "partition", "rpartition", "translate", + "upper", "lower", "swapcase", "capitalize", "title", + + # _vec_string - Will probably not become ufuncs + "mod", "decode", "encode", "translate", + + # Removed from namespace until behavior has been crystalized + # "join", "split", "rsplit", "splitlines", "partition", "rpartition", ] @@ -113,14 +118,15 @@ def multiply(a, i): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype i : array_like, with any integer dtype Returns ------- out : ndarray - Output array of str or unicode, depending on input types + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types Examples -------- @@ -172,7 +178,7 @@ def mod(a, values): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype values : array_like of values These values will be element-wise interpolated into the string. @@ -180,8 +186,9 @@ def mod(a, values): Returns ------- out : ndarray - Output array of str or unicode, depending on input types - + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + """ return _to_bytes_or_str_array( _vec_string(a, np.object_, '__mod__', (values,)), a) @@ -195,7 +202,7 @@ def find(a, sub, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype sub : array_like, with `np.bytes_` or `np.str_` dtype The substring to search for. @@ -231,9 +238,9 @@ def rfind(a, sub, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - sub : array_like, with `np.bytes_` or `np.str_` dtype + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype The substring to search for. start, end : array_like, with any integer dtype @@ -259,9 +266,9 @@ def index(a, sub, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - sub : array_like, with `np.bytes_` or `np.str_` dtype + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype start, end : array_like, with any integer dtype, optional @@ -292,16 +299,16 @@ def rindex(a, sub, start=0, end=None): Parameters ---------- - a : array-like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype - sub : array-like, with `np.bytes_` or `np.str_` dtype + sub : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype start, end : array-like, with any integer dtype, optional Returns ------- out : ndarray - Output array of ints. + Output array of ints. 
See Also -------- @@ -312,7 +319,7 @@ def rindex(a, sub, start=0, end=None): >>> a = np.array(["Computer Science"]) >>> np.strings.rindex(a, "Science", start=0, end=None) array([9]) - + """ end = end if end is not None else MAX return _rindex_ufunc(a, sub, start, end) @@ -325,9 +332,9 @@ def count(a, sub, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - sub : array_like, with `np.bytes_` or `np.str_` dtype + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype The substring to search for. start, end : array_like, with any integer dtype @@ -368,9 +375,9 @@ def startswith(a, prefix, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - prefix : array_like, with `np.bytes_` or `np.str_` dtype + prefix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype start, end : array_like, with any integer dtype With ``start``, test beginning at that position. With ``end``, @@ -397,9 +404,9 @@ def endswith(a, suffix, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - suffix : array_like, with `np.bytes_` or `np.str_` dtype + suffix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype start, end : array_like, with any integer dtype With ``start``, test beginning at that position. With ``end``, @@ -439,7 +446,7 @@ def decode(a, encoding=None, errors=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``bytes_`` dtype encoding : str, optional The name of an encoding @@ -485,7 +492,7 @@ def encode(a, encoding=None, errors=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``StringDType`` or ``str_`` dtype encoding : str, optional The name of an encoding @@ -511,7 +518,7 @@ def encode(a, encoding=None, errors=None): >>> np.strings.encode(a, encoding='cp037') array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7') - + """ return _to_bytes_or_str_array( _vec_string(a, np.object_, 'encode', _clean_args(encoding, errors)), @@ -533,7 +540,7 @@ def expandtabs(a, tabsize=8): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array tabsize : int, optional Replace tabs with `tabsize` number of spaces. 
If not given defaults @@ -542,7 +549,8 @@ def expandtabs(a, tabsize=8): Returns ------- out : ndarray - Output array of str or unicode, depending on input type + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input type See Also -------- @@ -550,7 +558,7 @@ def expandtabs(a, tabsize=8): Examples -------- - >>> a = np.array(['\t\tHello\tworld']) + >>> a = np.array(['\t\tHello\tworld']) >>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP array([' Hello world'], dtype='>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.ljust(c, width=3) array(['aAa', ' a', 'abB'], dtype='>> a = np.array(["That is a mango", "Monkeys eat mangos"]) @@ -1089,29 +1106,28 @@ def replace(a, old, new, count=-1): >>> a = np.array(["The dish is fresh", "This is it"]) >>> np.strings.replace(a, 'is', 'was') array(['The dwash was fresh', 'Thwas was it'], dtype='>> np.strings.join('-', 'osd') - array('o-s-d', dtype='>> np.strings.join('-', 'osd') # doctest: +SKIP + array('o-s-d', dtype='>> np.strings.join(['-', '.'], ['ghc', 'osd']) - array(['g-h-c', 'o.s.d'], dtype='>> np.strings.join(['-', '.'], ['ghc', 'osd']) # doctest: +SKIP + array(['g-h-c', 'o.s.d'], dtype='>> x = np.array("Numpy is nice!") - >>> np.strings.split(x, " ") - array(list(['Numpy', 'is', 'nice!']), dtype=object) + >>> np.strings.split(x, " ") # doctest: +SKIP + array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP - >>> np.strings.split(x, " ", 1) - array(list(['Numpy', 'is nice!']), dtype=object) + >>> np.strings.split(x, " ", 1) # doctest: +SKIP + array(list(['Numpy', 'is nice!']), dtype=object) # doctest: +SKIP See Also -------- @@ -1188,7 +1205,7 @@ def split(a, sep=None, maxsplit=None): a, np.object_, 'split', [sep] + _clean_args(maxsplit)) -def rsplit(a, sep=None, maxsplit=None): +def _rsplit(a, sep=None, maxsplit=None): """ For each element in `a`, return a list of the words in the string, using `sep` as the delimiter string. @@ -1200,7 +1217,7 @@ def rsplit(a, sep=None, maxsplit=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype sep : str or unicode, optional If `sep` is not specified or None, any whitespace string @@ -1212,7 +1229,7 @@ def rsplit(a, sep=None, maxsplit=None): Returns ------- out : ndarray - Array of list objects + Array of list objects See Also -------- @@ -1221,9 +1238,10 @@ def rsplit(a, sep=None, maxsplit=None): Examples -------- >>> a = np.array(['aAaAaA', 'abBABba']) - >>> np.strings.rsplit(a, 'A') - array([list(['a', 'a', 'a', '']), list(['abB', 'Bba'])], dtype=object) - + >>> np.strings.rsplit(a, 'A') # doctest: +SKIP + array([list(['a', 'a', 'a', '']), # doctest: +SKIP + list(['abB', 'Bba'])], dtype=object) # doctest: +SKIP + """ # This will return an array of lists of different sizes, so we # leave it as an object array @@ -1231,7 +1249,7 @@ def rsplit(a, sep=None, maxsplit=None): a, np.object_, 'rsplit', [sep] + _clean_args(maxsplit)) -def splitlines(a, keepends=None): +def _splitlines(a, keepends=None): """ For each element in `a`, return a list of the lines in the element, breaking at line boundaries. 
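Taken together, the numpy.strings hunks here and below track the NumPy 2.0 move of these routines onto real ufuncs that also understand the new variable-width StringDType, while the split/partition family is made private until its behavior crystalizes. A minimal usage sketch of the documented behavior (not part of the patch; assumes NumPy 2.x, where numpy.dtypes.StringDType and the numpy.strings namespace exist):

    import numpy as np

    # The ufunc-backed routines accept the variable-width StringDType as
    # well as the fixed-width bytes_ and str_ dtypes.
    a = np.array(["hello", "world"], dtype=np.dtypes.StringDType())
    print(np.strings.replace(a, "l", "L"))   # ['heLLo' 'worLd']
    print(np.strings.find(a, "o"))           # [4 1]
    print(np.strings.multiply(a, 2))         # ['hellohello' 'worldworld']

The functions that still return object arrays of lists (split, rsplit, splitlines, partition, rpartition) are exactly the ones pulled from the public namespace above.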
@@ -1240,7 +1258,7 @@
     Parameters
     ----------
-    a : array_like, with `np.bytes_` or `np.str_` dtype
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype

     keepends : bool, optional
         Line breaks are not included in the resulting list unless
@@ -1260,7 +1278,7 @@
         a, np.object_, 'splitlines', _clean_args(keepends))


-def partition(a, sep):
+def _partition(a, sep):
     """
     Partition each element in `a` around `sep`.

@@ -1274,24 +1292,24 @@
     Parameters
     ----------
-    a : array_like, with `np.bytes_` or `np.str_` dtype
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
         Input array
     sep : {str, unicode}
         Separator to split each string element in `a`.

     Returns
     -------
-    out : ndarray, {str, unicode}
-        Output array of str or unicode, depending on input type.
-        The output array will have an extra dimension with 3
-        elements per input element.
+    out : ndarray
+        Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
+        depending on input types. The output array will have an extra
+        dimension with 3 elements per input element.

     Examples
     --------
     >>> x = np.array(["Numpy is nice!"])
-    >>> np.strings.partition(x, " ")
-    array([['Numpy', ' ', 'is nice!']], dtype='<U8')
+    >>> np.strings.partition(x, " ")  # doctest: +SKIP
+    array([['Numpy', ' ', 'is nice!']], dtype='<U8')  # doctest: +SKIP

     >>> a = np.array(['aAaAaA', ' aA ', 'abBABba'])
-    >>> np.strings.rpartition(a, 'A')
-    array([['aAaAa', 'A', ''],
-           [' a', 'A', ' '],
-           ['abB', 'A', 'Bba']], dtype='<U5')
+    >>> np.strings.rpartition(a, 'A')  # doctest: +SKIP
+    array([['aAaAa', 'A', ''],  # doctest: +SKIP
+           [' a', 'A', ' '],  # doctest: +SKIP
+           ['abB', 'A', 'Bba']], dtype='<U5')  # doctest: +SKIP

     >>> deletechars = ' '
     >>> np.char.translate(a, table, deletechars)
     array(['112 3', '1231', '2311'], dtype='<U5')
diff --git a/numpy/_core/tests/examples/limited_api/limited_api_latest.c b/numpy/_core/tests/examples/limited_api/limited_api_latest.c
new file mode 100644
--- /dev/null
+++ b/numpy/_core/tests/examples/limited_api/limited_api_latest.c
@@ -0,0 +1,15 @@
+#include <Python.h>
+#include <numpy/arrayobject.h>
+#include <numpy/ufuncobject.h>
+
+static PyModuleDef moduledef = {
+    .m_base = PyModuleDef_HEAD_INIT,
+    .m_name = "limited_api_latest"
+};
+
+PyMODINIT_FUNC PyInit_limited_api_latest(void)
+{
+    import_array();
+    import_umath();
+    return PyModule_Create(&moduledef);
+}
diff --git a/numpy/_core/tests/examples/limited_api/meson.build b/numpy/_core/tests/examples/limited_api/meson.build
index a6d290304036..65287d8654f5 100644
--- a/numpy/_core/tests/examples/limited_api/meson.build
+++ b/numpy/_core/tests/examples/limited_api/meson.build
@@ -34,6 +34,16 @@ py.extension_module(
     limited_api: '3.6',
 )

+py.extension_module(
+    'limited_api_latest',
+    'limited_api_latest.c',
+    c_args: [
+        '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION',
+    ],
+    include_directories: [npy_include_path],
+    limited_api: py.language_version(),
+)
+
 py.extension_module(
     'limited_api2',
     'limited_api2.pyx',
diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py
index 3d9ac2927a33..1a82a99b6c76 100644
--- a/numpy/_core/tests/test_cython.py
+++ b/numpy/_core/tests/test_cython.py
@@ -61,9 +61,13 @@ def install_temp(tmpdir_factory):
     )
     try:
         subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir)
-    except subprocess.CalledProcessError as p:
-        print(f"{p.stdout=}")
-        print(f"{p.stderr=}")
+    except subprocess.CalledProcessError:
+        print("----------------")
+        print("meson build failed when doing")
+        print(f"'meson setup --native-file {native_file} {srcdir}'")
+        print("'meson compile -vv'")
+        print(f"in {build_dir}")
+        print("----------------")
         raise
     sys.path.append(str(build_dir))

@@ -142,6 +146,13 @@ def test_default_int(install_temp):
     assert checks.get_default_integer() is np.dtype(int)


+def test_ravel_axis(install_temp):
+
import checks + + assert checks.get_ravel_axis() == np.iinfo("intc").min + + def test_convert_datetime64_to_datetimestruct(install_temp): # GH#21199 import checks @@ -274,3 +285,11 @@ def test_fillwithbytes(install_temp): arr = checks.compile_fillwithbyte() assert_array_equal(arr, np.ones((1, 2))) + + +def test_complex(install_temp): + from checks import inc2_cfloat_struct + + arr = np.array([0, 10+10j], dtype="F") + inc2_cfloat_struct(arr) + assert arr[1] == (12 + 12j) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index f0d4d533cd92..21bf685e294f 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -677,18 +677,22 @@ def test_lib_functions_deprecation_call(self): class TestDeprecatedDTypeAliases(_DeprecationTestCase): - @staticmethod - def _check_for_warning(func): + def _check_for_warning(self, func): with warnings.catch_warnings(record=True) as caught_warnings: func() assert len(caught_warnings) == 1 w = caught_warnings[0] assert w.category is DeprecationWarning - assert "alias `a` was removed in NumPy 2.0" in str(w.message) + assert "alias 'a' was deprecated in NumPy 2.0" in str(w.message) def test_a_dtype_alias(self): - self._check_for_warning(lambda: np.dtype("a")) - self._check_for_warning(lambda: np.dtype("a10")) + for dtype in ["a", "a10"]: + f = lambda: np.dtype(dtype) + self._check_for_warning(f) + self.assert_deprecated(f) + f = lambda: np.array(["hello", "world"]).astype("a10") + self._check_for_warning(f) + self.assert_deprecated(f) class TestDeprecatedArrayWrap(_DeprecationTestCase): diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 664f4e028151..73e02a84e2e8 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -96,6 +96,11 @@ def test_invalid_types(self): assert_raises(TypeError, np.dtype, 'q8') assert_raises(TypeError, np.dtype, 'Q8') + # Make sure negative-sized dtype raises an error + assert_raises(TypeError, np.dtype, 'S-1') + assert_raises(TypeError, np.dtype, 'U-1') + assert_raises(TypeError, np.dtype, 'V-1') + def test_richcompare_invalid_dtype_equality(self): # Make sure objects that cannot be converted to valid # dtypes results in False/True when compared to valid dtypes. 
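For context on the reworked TestDeprecatedDTypeAliases above, a standalone sketch of the warning it asserts on (assuming NumPy 2.0 behavior, where the 'a' alias warns instead of being removed):

    import warnings

    import numpy as np

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        np.dtype("a10")  # 'a' is the deprecated alias for the bytes_ dtype
    assert len(caught) == 1
    assert caught[0].category is DeprecationWarning
    assert "alias 'a' was deprecated in NumPy 2.0" in str(caught[0].message)

The updated test routes the same callables through both this check and assert_deprecated, so the warning is verified to fire exactly once per call.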
@@ -231,6 +236,22 @@ def test_create_invalid_string_errors(self): with pytest.raises(ValueError): type(np.dtype("U"))(-1) + # OverflowError on 32 bit + with pytest.raises((TypeError, OverflowError)): + # see gh-26556 + type(np.dtype("S"))(2**61) + + with pytest.raises(TypeError): + np.dtype("S1234hello") + + def test_leading_zero_parsing(self): + dt1 = np.dtype('S010') + dt2 = np.dtype('S10') + + assert dt1 == dt2 + assert repr(dt1) == "dtype('S10')" + assert dt1.itemsize == 10 + class TestRecord: def test_equivalent_record(self): diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index bea1c1017fb2..9611f75221d2 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -133,6 +133,28 @@ def test_empty_fancy_index(self): b = np.array([]) assert_raises(IndexError, a.__getitem__, b) + def test_gh_26542(self): + a = np.array([0, 1, 2]) + idx = np.array([2, 1, 0]) + a[idx] = a + expected = np.array([2, 1, 0]) + assert_equal(a, expected) + + def test_gh_26542_2d(self): + a = np.array([[0, 1, 2]]) + idx_row = np.zeros(3, dtype=int) + idx_col = np.array([2, 1, 0]) + a[idx_row, idx_col] = a + expected = np.array([[2, 1, 0]]) + assert_equal(a, expected) + + def test_gh_26542_index_overlap(self): + arr = np.arange(100) + expected_vals = np.copy(arr[:-10]) + arr[10:] = arr[:-10] + actual_vals = arr[10:] + assert_equal(actual_vals, expected_vals) + def test_ellipsis_index(self): a = np.array([[1, 2, 3], [4, 5, 6], diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index c1b2cfcbaff9..9273264a867d 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -40,16 +40,18 @@ def install_temp(tmpdir_factory): pytest.skip("No usable 'meson' found") if sys.platform == "win32": subprocess.check_call(["meson", "setup", + "--werror", "--buildtype=release", "--vsenv", str(srcdir)], cwd=build_dir, ) else: - subprocess.check_call(["meson", "setup", str(srcdir)], + subprocess.check_call(["meson", "setup", "--werror", str(srcdir)], cwd=build_dir ) try: - subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir) + subprocess.check_call( + ["meson", "compile", "-vv"], cwd=build_dir) except subprocess.CalledProcessError as p: print(f"{p.stdout=}") print(f"{p.stderr=}") @@ -73,5 +75,6 @@ def test_limited_api(install_temp): and building a cython extension with the limited API """ - import limited_api1 - import limited_api2 + import limited_api1 # Earliest (3.6) + import limited_api_latest # Latest version (current Python) + import limited_api2 # cython diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 4a75d96fc06e..81123b1128c8 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -425,6 +425,18 @@ def test_fill_readonly(self): with pytest.raises(ValueError, match=".*read-only"): a.fill(0) + def test_fill_subarrays(self): + # NOTE: + # This is also a regression test for a crash with PYTHONMALLOC=debug + + dtype = np.dtype("2=2.12'. If you are a user of the module, the easiest solution will be to - either downgrade NumPy or update the failing module (if available). + downgrade to 'numpy<2' or try to upgrade the affected module. + We expect that some modules will need time to support NumPy 2. """) - if not release and short_version.startswith("2.0.0"): - # TODO: Can remove this after the release. 
- msg += textwrap.dedent("""\ - NOTE: When testing against pre-release versions of NumPy 2.0 - or building nightly wheels for it, it is necessary to ensure - the NumPy pre-release is used at build time. - The main way to ensure this is using no build isolation - and installing dependencies manually with NumPy. - - If your dependencies have the issue, check whether they - build nightly wheels build against NumPy 2.0. - - pybind11 note: If you see this message and do not see - any errors raised, it's possible this is due to a - package using an old version of pybind11 that should be - updated. - - """) - msg += "Traceback (most recent call last):" + tb_msg = "Traceback (most recent call last):" for line in traceback.format_stack()[:-1]: if "frozen importlib" in line: continue - msg += line - # Only print the message. This has two reasons (for now!): - # 1. Old NumPy replaced the error here making it never actually show - # in practice, thus raising alone would not be helpful. - # 2. pybind11 simply reaches into NumPy internals and requires a - # new release that includes the fix. That is missing as of 2023-11. - # But, it "conveniently" ignores the ABI version. - sys.stderr.write(msg) + tb_msg += line + + # Also print the message (with traceback). This is because old versions + # of NumPy unfortunately set up the import to replace (and hide) the + # error. The traceback shouldn't be needed, but e.g. pytest plugins + # seem to swallow it and we should be failing anyway... + sys.stderr.write(msg + tb_msg) + raise ImportError(msg) ret = getattr(_multiarray_umath, attr_name, None) if ret is None: diff --git a/numpy/f2py/_backends/_distutils.py b/numpy/f2py/_backends/_distutils.py index e9b22a3921a5..f2436f86a7e6 100644 --- a/numpy/f2py/_backends/_distutils.py +++ b/numpy/f2py/_backends/_distutils.py @@ -13,9 +13,9 @@ class DistutilsBackend(Backend): def __init__(sef, *args, **kwargs): warnings.warn( - "distutils has been deprecated since NumPy 1.26.x" + "\ndistutils has been deprecated since NumPy 1.26.x\n" "Use the Meson backend instead, or generate wrappers" - "without -c and use a custom build script", + " without -c and use a custom build script", VisibleDeprecationWarning, stacklevel=2, ) diff --git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py index 20df79a1c71d..b438ed223433 100644 --- a/numpy/f2py/_backends/_meson.py +++ b/numpy/f2py/_backends/_meson.py @@ -28,7 +28,7 @@ def __init__( include_dirs: list[Path], object_files: list[Path], linker_args: list[str], - c_args: list[str], + fortran_args: list[str], build_type: str, python_exe: str, ): @@ -46,12 +46,18 @@ def __init__( self.include_dirs = [] self.substitutions = {} self.objects = object_files + # Convert args to '' wrapped variant for meson + self.fortran_args = [ + f"'{x}'" if not (x.startswith("'") and x.endswith("'")) else x + for x in fortran_args + ] self.pipeline = [ self.initialize_template, self.sources_substitution, self.deps_substitution, self.include_substitution, self.libraries_substitution, + self.fortran_args_substitution, ] self.build_type = build_type self.python_exe = python_exe @@ -73,8 +79,8 @@ def initialize_template(self) -> None: self.substitutions["python"] = self.python_exe def sources_substitution(self) -> None: - self.substitutions["source_list"] = f",\n{self.indent}".join( - [f"{self.indent}'{source}'," for source in self.sources] + self.substitutions["source_list"] = ",\n".join( + [f"{self.indent}'''{source}'''," for source in self.sources] ) def deps_substitution(self) -> None: @@ 
-85,20 +91,20 @@ def deps_substitution(self) -> None: def libraries_substitution(self) -> None: self.substitutions["lib_dir_declarations"] = "\n".join( [ - f"lib_dir_{i} = declare_dependency(link_args : ['-L{lib_dir}'])" + f"lib_dir_{i} = declare_dependency(link_args : ['''-L{lib_dir}'''])" for i, lib_dir in enumerate(self.library_dirs) ] ) self.substitutions["lib_declarations"] = "\n".join( [ - f"{lib} = declare_dependency(link_args : ['-l{lib}'])" + f"{lib.replace('.','_')} = declare_dependency(link_args : ['-l{lib}'])" for lib in self.libraries ] ) self.substitutions["lib_list"] = f"\n{self.indent}".join( - [f"{self.indent}{lib}," for lib in self.libraries] + [f"{self.indent}{lib.replace('.','_')}," for lib in self.libraries] ) self.substitutions["lib_dir_list"] = f"\n{self.indent}".join( [f"{self.indent}lib_dir_{i}," for i in range(len(self.library_dirs))] @@ -106,15 +112,23 @@ def libraries_substitution(self) -> None: def include_substitution(self) -> None: self.substitutions["inc_list"] = f",\n{self.indent}".join( - [f"{self.indent}'{inc}'," for inc in self.include_dirs] + [f"{self.indent}'''{inc}'''," for inc in self.include_dirs] ) + def fortran_args_substitution(self) -> None: + if self.fortran_args: + self.substitutions["fortran_args"] = ( + f"{self.indent}fortran_args: [{', '.join([arg for arg in self.fortran_args])}]," + ) + else: + self.substitutions["fortran_args"] = "" + def generate_meson_build(self): for node in self.pipeline: node() template = Template(self.meson_build_template()) meson_build = template.substitute(self.substitutions) - meson_build = re.sub(r',,', ',', meson_build) + meson_build = re.sub(r",,", ",", meson_build) return meson_build @@ -126,6 +140,7 @@ def __init__(self, *args, **kwargs): self.build_type = ( "debug" if any("debug" in flag for flag in self.fc_flags) else "release" ) + self.fc_flags = _get_flags(self.fc_flags) def _move_exec_to_root(self, build_dir: Path): walk_dir = Path(build_dir) / self.meson_build_dir @@ -203,3 +218,17 @@ def _prepare_sources(mname, sources, bdir): if not Path(source).suffix == ".pyf" ] return extended_sources + + +def _get_flags(fc_flags): + flag_values = [] + flag_pattern = re.compile(r"--f(77|90)flags=(.*)") + for flag in fc_flags: + match_result = flag_pattern.match(flag) + if match_result: + values = match_result.group(2).strip().split() + values = [val.strip("'\"") for val in values] + flag_values.extend(values) + # Hacky way to preserve order of flags + unique_flags = list(dict.fromkeys(flag_values)) + return unique_flags diff --git a/numpy/f2py/_backends/meson.build.template b/numpy/f2py/_backends/meson.build.template index 8e34fdc8d4d6..fdcc1b17ce21 100644 --- a/numpy/f2py/_backends/meson.build.template +++ b/numpy/f2py/_backends/meson.build.template @@ -8,7 +8,7 @@ project('${modulename}', ]) fc = meson.get_compiler('fortran') -py = import('python').find_installation('${python}', pure: false) +py = import('python').find_installation('''${python}''', pure: false) py_dep = py.dependency() incdir_numpy = run_command(py, @@ -51,4 +51,5 @@ ${dep_list} ${lib_list} ${lib_dir_list} ], +${fortran_args} install : true) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 13a1074b447e..42d563b81af2 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -17,6 +17,7 @@ from . import __version__ from . 
import cfuncs +from .cfuncs import errmess __all__ = [ 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', @@ -51,7 +52,6 @@ f2py_version = __version__.version -errmess = sys.stderr.write show = pprint.pprint options = {} diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py index 721e075b6c73..faf8dd401301 100644 --- a/numpy/f2py/cb_rules.py +++ b/numpy/f2py/cb_rules.py @@ -122,7 +122,7 @@ #setdims# #ifdef PYPY_VERSION #define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) - capi_arglist_list = PySequence_List(capi_arglist); + capi_arglist_list = PySequence_List((PyObject *)capi_arglist); if (capi_arglist_list == NULL) goto capi_fail; #else #define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 4328a6e5004c..1dc3247323d5 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -16,7 +16,16 @@ from . import __version__ f2py_version = __version__.version -errmess = sys.stderr.write + + +def errmess(s: str) -> None: + """ + Write an error message to stderr. + + This indirection is needed because sys.stderr might not always be available (see #26862). + """ + if sys.stderr is not None: + sys.stderr.write(s) ##################### Definitions ################## diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 8d3fc27608bd..2c6fa83889ca 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -425,11 +425,11 @@ def readfortrancode(ffile, dowithline=show, istop=1): if l[-1] not in "\n\r\f": break l = l[:-1] - if not strictf77: - (l, rl) = split_by_unquoted(l, '!') - l += ' ' - if rl[:5].lower() == '!f2py': # f2py directive - l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') + # Unconditionally remove comments + (l, rl) = split_by_unquoted(l, '!') + l += ' ' + if rl[:5].lower() == '!f2py': # f2py directive + l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') if l.strip() == '': # Skip empty line if sourcecodeform == 'free': # In free form, a statement continues in the next line @@ -466,25 +466,13 @@ def readfortrancode(ffile, dowithline=show, istop=1): finalline = '' origfinalline = '' else: - if not strictf77: - # F90 continuation - r = cont1.match(l) - if r: - l = r.group('line') # Continuation follows .. - if cont: - ll = ll + cont2.match(l).group('line') - finalline = '' - origfinalline = '' - else: - # clean up line beginning from possible digits. - l = ' ' + l[5:] - if localdolowercase: - finalline = ll.lower() - else: - finalline = ll - origfinalline = ll - ll = l - cont = (r is not None) + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' else: # clean up line beginning from possible digits. l = ' ' + l[5:] diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index f5fab23ab867..32cfbd0a3bea 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -28,11 +28,12 @@ from . import f90mod_rules from . import __version__ from . 
import capi_maps +from .cfuncs import errmess from numpy.f2py._backends import f2py_build_generator f2py_version = __version__.version numpy_version = __version__.version -errmess = sys.stderr.write + # outmess=sys.stdout.write show = pprint.pprint outmess = auxfuncs.outmess @@ -635,10 +636,14 @@ def run_compile(): r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in flib_flags] - _reg4 = re.compile( - r'--((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') - fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in fc_flags] + # TODO: Once distutils is dropped completely, i.e. min_ver >= 3.12, unify into --fflags + reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') + reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') + fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] + distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] + if not (MESON_ONLY_VER or backend_key == 'meson'): + fc_flags.extend(distutils_flags) + sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] del_list = [] for s in flib_flags: diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index f3bffdc1c220..8c6ba1396924 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -115,7 +115,7 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, PyArray_DESCR(arr)->type, PyArray_TYPE(arr), PyArray_ITEMSIZE(arr), - PyArray_DESCR(arr)->alignment, + PyDataType_ALIGNMENT(arr), PyArray_FLAGS(arr), PyArray_ITEMSIZE(arr)); } diff --git a/numpy/f2py/tests/src/regression/f77comments.f b/numpy/f2py/tests/src/regression/f77comments.f new file mode 100644 index 000000000000..452a01a14439 --- /dev/null +++ b/numpy/f2py/tests/src/regression/f77comments.f @@ -0,0 +1,26 @@ + SUBROUTINE TESTSUB( + & INPUT1, INPUT2, !Input + & OUTPUT1, OUTPUT2) !Output + + IMPLICIT NONE + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + + OUTPUT1 = INPUT1 + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 + + RETURN + END SUBROUTINE TESTSUB + + SUBROUTINE TESTSUB2(OUTPUT) + IMPLICIT NONE + INTEGER, PARAMETER :: N = 10 ! Array dimension + REAL, INTENT(OUT) :: OUTPUT(N) + INTEGER :: I + + DO I = 1, N + OUTPUT(I) = I * 2.0 + END DO + + RETURN + END diff --git a/numpy/f2py/tests/src/regression/f77fixedform.f95 b/numpy/f2py/tests/src/regression/f77fixedform.f95 new file mode 100644 index 000000000000..e47a13f7e851 --- /dev/null +++ b/numpy/f2py/tests/src/regression/f77fixedform.f95 @@ -0,0 +1,5 @@ +C This is an invalid file, but it does compile with -ffixed-form + subroutine mwe( + & x) + real x + end subroutine mwe diff --git a/numpy/f2py/tests/src/regression/f90continuation.f90 b/numpy/f2py/tests/src/regression/f90continuation.f90 new file mode 100644 index 000000000000..879e716bbec6 --- /dev/null +++ b/numpy/f2py/tests/src/regression/f90continuation.f90 @@ -0,0 +1,9 @@ +SUBROUTINE TESTSUB(INPUT1, & ! Hello +! commenty +INPUT2, OUTPUT1, OUTPUT2) ! 
more comments
+  INTEGER, INTENT(IN) :: INPUT1, INPUT2
+  INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2
+  OUTPUT1 = INPUT1 + &
+    INPUT2
+  OUTPUT2 = INPUT1 * INPUT2
+END SUBROUTINE TESTSUB
diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py
index c0a8045d91b9..e11ed1a0efa3 100644
--- a/numpy/f2py/tests/test_regression.py
+++ b/numpy/f2py/tests/test_regression.py
@@ -1,7 +1,9 @@
 import os
 import pytest
+import platform

 import numpy as np
+import numpy.testing as npt

 from . import util

@@ -76,3 +78,64 @@ def test_gh25344(self):
         exp = 7.0
         res = self.module.add(3.0, 4.0)
         assert exp == res
+
+class TestF77Comments(util.F2PyTest):
+    # Check that comments are stripped from F77 continuation lines
+    sources = [util.getpath("tests", "src", "regression", "f77comments.f")]
+
+    @pytest.mark.slow
+    def test_gh26148(self):
+        x1 = np.array(3, dtype=np.int32)
+        x2 = np.array(5, dtype=np.int32)
+        res = self.module.testsub(x1, x2)
+        assert res[0] == 8
+        assert res[1] == 15
+
+    @pytest.mark.slow
+    def test_gh26466(self):
+        # Check that comments after PARAMETER statements are stripped
+        expected = np.arange(1, 11, dtype=np.float32) * 2
+        res = self.module.testsub2()
+        npt.assert_allclose(expected, res)
+
+class TestF90Continuation(util.F2PyTest):
+    # Check that comments are stripped from F90 continuation lines
+    sources = [util.getpath("tests", "src", "regression", "f90continuation.f90")]
+
+    @pytest.mark.slow
+    def test_gh26148b(self):
+        x1 = np.array(3, dtype=np.int32)
+        x2 = np.array(5, dtype=np.int32)
+        res = self.module.testsub(x1, x2)
+        assert res[0] == 8
+        assert res[1] == 15
+
+@pytest.mark.slow
+def test_gh26623():
+    # Including libraries with '.' in their name should not generate an
+    # incorrect meson.build
+    try:
+        aa = util.build_module(
+            [util.getpath("tests", "src", "regression", "f90continuation.f90")],
+            ["-lfoo.bar"],
+            module_name="Blah",
+        )
+    except RuntimeError as rerr:
+        assert "lparen got assign" not in str(rerr)
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(platform.system() not in ['Linux', 'Darwin'], reason='Unsupported on this platform for now')
+def test_gh25784():
+    # Compile dubious file using passed flags
+    try:
+        aa = util.build_module(
+            [util.getpath("tests", "src", "regression", "f77fixedform.f95")],
+            options=[
+                # Meson will collect and dedup these to pass to fortran_args:
+                "--f77flags='-ffixed-form -O2'",
+                "--f90flags=\"-ffixed-form -Og\"",
+            ],
+            module_name="Blah",
+        )
+    except ImportError as rerr:
+        assert "unknown_subroutine_" in str(rerr)
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index faedd4cc1597..eab827127e95 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -121,7 +121,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None):
         dst_sources.append(dst)

         base, ext = os.path.splitext(dst)
-        if ext in (".f90", ".f", ".c", ".pyf"):
+        if ext in (".f90", ".f95", ".f", ".c", ".pyf"):
             f2py_sources.append(dst)

     assert f2py_sources
@@ -332,7 +332,7 @@ def build_meson(source_files, module_name=None, **kwargs):
     # compiler stack is on the CI
     try:
         backend.compile()
-    except:
+    except subprocess.CalledProcessError:
         pytest.skip("Failed to compile module")

     # Import the compiled module
diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py
index 5972a346de20..d91b92c63f4b 100644
--- a/numpy/fft/_pocketfft.py
+++ b/numpy/fft/_pocketfft.py
@@ -34,7 +34,7 @@
 import warnings

 from numpy.lib.array_utils import normalize_axis_index
-from numpy._core import (asarray, empty, zeros, swapaxes, result_type,
+from
numpy._core import (asarray, empty_like, result_type, conjugate, take, sqrt, reciprocal) from . import _pocketfft_umath as pfu from numpy._core import overrides @@ -85,8 +85,8 @@ def _raw_fft(a, n, axis, is_real, is_forward, norm, out=None): out_dtype = real_dtype else: # Others, complex output. out_dtype = result_type(a.dtype, 1j) - out = empty(a.shape[:axis] + (n_out,) + a.shape[axis+1:], - dtype=out_dtype) + out = empty_like(a, shape=a.shape[:axis] + (n_out,) + a.shape[axis+1:], + dtype=out_dtype) elif ((shape := getattr(out, "shape", None)) is not None and (len(shape) != a.ndim or shape[axis] != n_out)): raise ValueError("output array has wrong shape.") diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index 127ebfdb6149..6d42c020ab65 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -172,6 +172,7 @@ rfft_impl(char **args, npy_intp const *dimensions, npy_intp const *steps, auto plan = pocketfft::detail::get_plan>(npts); auto buffered = (step_out != sizeof(std::complex)); pocketfft::detail::arr> buff(buffered ? nout : 0); + auto nin_used = nin <= npts ? nin : npts; for (size_t i = 0; i < n_outer; i++, ip += si, fp += sf, op += so) { std::complex *op_or_buff = buffered ? buff.data() : (std::complex *)op; /* @@ -183,10 +184,10 @@ rfft_impl(char **args, npy_intp const *dimensions, npy_intp const *steps, * Pocketfft uses FFTpack order, R0,R1,I1,...Rn-1,In-1,Rn[,In] (last * for npts odd only). To make unpacking easy, we place the real data * offset by one in the buffer, so that we just have to move R0 and - * create I0=0. Note that copy_data will zero the In component for + * create I0=0. Note that copy_input will zero the In component for * even number of points. */ - copy_input(ip, step_in, nin, &((T *)op_or_buff)[1], nout*2 - 1); + copy_input(ip, step_in, nin_used, &((T *)op_or_buff)[1], nout*2 - 1); plan->exec(&((T *)op_or_buff)[1], *(T *)fp, pocketfft::FORWARD); op_or_buff[0] = op_or_buff[0].imag(); // I0->R0, I0=0 if (buffered) { diff --git a/numpy/fft/pocketfft b/numpy/fft/pocketfft index 0f7aa1225b06..33ae5dc94c9c 160000 --- a/numpy/fft/pocketfft +++ b/numpy/fft/pocketfft @@ -1 +1 @@ -Subproject commit 0f7aa1225b065938fc263b7914df16b8c1cbc9d7 +Subproject commit 33ae5dc94c9cdc7f1c78346504a85de87cadaa12 diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index 500d97282cde..d1e4da2eb831 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -38,7 +38,7 @@ def test_identity_long_short(self, dtype): # Test with explicitly given number of points, both for n # smaller and for n larger than the input size. 
maxlen = 16 - atol = 4 * np.spacing(np.array(1., dtype=dtype)) + atol = 5 * np.spacing(np.array(1., dtype=dtype)) x = random(maxlen).astype(dtype) + 1j*random(maxlen).astype(dtype) xx = np.concatenate([x, np.zeros_like(x)]) xr = random(maxlen).astype(dtype) @@ -183,7 +183,6 @@ def test_fft_bad_out(self): with pytest.raises(TypeError, match="Cannot cast"): np.fft.fft(x, out=np.zeros_like(x, dtype=float)) - @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward')) def test_ifft(self, norm): x = random(30) + 1j*random(30) @@ -258,6 +257,17 @@ def test_rfft(self): np.fft.rfft(x, n=n) / n, np.fft.rfft(x, n=n, norm="forward"), atol=1e-6) + def test_rfft_even(self): + x = np.arange(8) + n = 4 + y = np.fft.rfft(x, n) + assert_allclose(y, np.fft.fft(x[:n])[:n//2 + 1], rtol=1e-14) + + def test_rfft_odd(self): + x = np.array([1, 0, 2, 3, -3]) + y = np.fft.rfft(x) + assert_allclose(y, np.fft.fft(x)[:3], rtol=1e-14) + def test_irfft(self): x = random(30) assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6) @@ -487,6 +497,16 @@ def test_fft_with_order(dtype, order, fft): raise ValueError() +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("n", [None, 7, 12]) +def test_fft_output_order(order, n): + rng = np.random.RandomState(42) + x = rng.rand(10) + x = np.asarray(x, dtype=np.complex64, order=order) + res = np.fft.fft(x, n=n) + assert res.flags.c_contiguous == x.flags.c_contiguous + assert res.flags.f_contiguous == x.flags.f_contiguous + @pytest.mark.skipif(IS_WASM, reason="Cannot start thread") class TestFFTThreadSafe: threads = 16 diff --git a/numpy/lib/_arraypad_impl.py b/numpy/lib/_arraypad_impl.py index 7ec52167f1c0..af6c4da4c3b7 100644 --- a/numpy/lib/_arraypad_impl.py +++ b/numpy/lib/_arraypad_impl.py @@ -293,7 +293,8 @@ def _get_stats(padded, axis, width_pair, length_pair, stat_func): return left_stat, right_stat -def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): +def _set_reflect_both(padded, axis, width_pair, method, + original_period, include_edge=False): """ Pad `axis` of `arr` with reflection. @@ -308,6 +309,8 @@ def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): dimension. method : str Controls method of reflection; options are 'even' or 'odd'. + original_period : int + Original length of data on `axis` of `arr`. include_edge : bool If true, edge value is included in reflection, otherwise the edge value forms the symmetric axis to the reflection. @@ -320,11 +323,20 @@ def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): """ left_pad, right_pad = width_pair old_length = padded.shape[axis] - right_pad - left_pad - + if include_edge: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = old_length // original_period * original_period # Edge is included, we need to offset the pad amount by 1 edge_offset = 1 else: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = ((old_length - 1) // (original_period - 1) + * (original_period - 1) + 1) edge_offset = 0 # Edge is not included, no need to offset pad amount old_length -= 1 # but must be omitted from the chunk @@ -865,7 +877,7 @@ def pad(array, pad_width, mode='constant', **kwargs): # the length of the original values in the current dimension. 
left_index, right_index = _set_reflect_both( roi, axis, (left_index, right_index), - method, include_edge + method, array.shape[axis], include_edge ) elif mode == "wrap": diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index c8e1fa888295..ebcb15f083fa 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -228,8 +228,13 @@ def unique(ar, return_index=False, return_inverse=False, .. versionchanged: 2.0 For multi-dimensional inputs, ``unique_inverse`` is reshaped such that the input can be reconstructed using - ``np.take(unique, unique_inverse)`` when ``axis = None``, and - ``np.take_along_axis(unique, unique_inverse, axis=axis)`` otherwise. + ``np.take(unique, unique_inverse, axis=axis)``. The result is + now not 1-dimensional when ``axis=None``. + + Note that in NumPy 2.0.0 a higher dimensional array was returned also + when ``axis`` was not ``None``. This was reverted, but + ``inverse.reshape(-1)`` can be used to ensure compatibility with both + versions. Examples -------- @@ -282,7 +287,7 @@ def unique(ar, return_index=False, return_inverse=False, ar = np.asanyarray(ar) if axis is None: ret = _unique1d(ar, return_index, return_inverse, return_counts, - equal_nan=equal_nan, inverse_shape=ar.shape) + equal_nan=equal_nan, inverse_shape=ar.shape, axis=None) return _unpack_tuple(ret) # axis was specified and not None @@ -328,13 +333,15 @@ def reshape_uniq(uniq): output = _unique1d(consolidated, return_index, return_inverse, return_counts, - equal_nan=equal_nan, inverse_shape=inverse_shape) + equal_nan=equal_nan, inverse_shape=inverse_shape, + axis=axis) output = (reshape_uniq(output[0]),) + output[1:] return _unpack_tuple(output) def _unique1d(ar, return_index=False, return_inverse=False, - return_counts=False, *, equal_nan=True, inverse_shape=None): + return_counts=False, *, equal_nan=True, inverse_shape=None, + axis=None): """ Find the unique elements of an array, ignoring shape. """ @@ -371,7 +378,7 @@ def _unique1d(ar, return_index=False, return_inverse=False, imask = np.cumsum(mask) - 1 inv_idx = np.empty(mask.shape, dtype=np.intp) inv_idx[perm] = imask - ret += (inv_idx.reshape(inverse_shape),) + ret += (inv_idx.reshape(inverse_shape) if axis is None else inv_idx,) if return_counts: idx = np.concatenate(np.nonzero(mask) + ([mask.size],)) ret += (np.diff(idx),) @@ -853,30 +860,16 @@ def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): if ar2.dtype == bool: ar2 = ar2.astype(np.uint8) - ar2_min = np.min(ar2) - ar2_max = np.max(ar2) + ar2_min = int(np.min(ar2)) + ar2_max = int(np.max(ar2)) - ar2_range = int(ar2_max) - int(ar2_min) + ar2_range = ar2_max - ar2_min # Constraints on whether we can actually use the table method: # 1. Assert memory usage is not too large below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size) # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max - # 3. Check overflows for (ar1 - ar2_min); dtype=ar1.dtype - if ar1.size > 0: - ar1_min = np.min(ar1) - ar1_max = np.max(ar1) - - # After masking, the range of ar1 is guaranteed to be - # within the range of ar2: - ar1_upper = min(int(ar1_max), int(ar2_max)) - ar1_lower = max(int(ar1_min), int(ar2_min)) - - range_safe_from_overflow &= all(( - ar1_upper - int(ar2_min) <= np.iinfo(ar1.dtype).max, - ar1_lower - int(ar2_min) >= np.iinfo(ar1.dtype).min - )) # Optimal performance is for approximately # log10(size) > (log10(range) - 2.27) / 0.927. 
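The constraints above decide when the table method is usable; the hunk that follows additionally has to cope with an ar2_min that does not fit in a signed intp. A small sketch of that case (mirroring the uint64 regression test added near the end of this diff):

    import numpy as np

    # ar2's minimum (2**63 + 1) cannot be represented as a signed intp, so
    # the subtraction that indexes the lookup table must be done in ar2's
    # own dtype; ar2_range is zero here, so the table constraints all pass.
    data = np.array([2**63, 2**63 + 1], dtype=np.uint64)
    print(np.isin(data, data[1], kind="table"))  # [False  True]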
@@ -906,8 +899,25 @@ def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) - outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - - ar2_min] + in_range_ar1 = ar1[basic_mask] + if in_range_ar1.size == 0: + # Nothing more to do, since all values are out of range. + return outgoing_array + + # Unfortunately, ar2_min can be out of range for `intp` even + # if the calculation result must fit in range (and be positive). + # In that case, use ar2.dtype which must work for all unmasked + # values. + try: + ar2_min = np.array(ar2_min, dtype=np.intp) + dtype = np.intp + except OverflowError: + dtype = ar2.dtype + + out = np.empty_like(in_range_ar1, dtype=np.intp) + outgoing_array[basic_mask] = isin_helper_ar[ + np.subtract(in_range_ar1, ar2_min, dtype=dtype, + out=out, casting="unsafe")] return outgoing_array elif kind == 'table': # not range_safe_from_overflow diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 65bc7c592b29..1a1507bfc10e 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2132,6 +2132,12 @@ def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes, return arrays +def _get_vectorize_dtype(dtype): + if dtype.char in "SU": + return dtype.char + return dtype + + @set_module('numpy') class vectorize: """ @@ -2330,7 +2336,7 @@ def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None, if char not in typecodes['All']: raise ValueError("Invalid otype specified: %s" % (char,)) elif iterable(otypes): - otypes = ''.join([_nx.dtype(x).char for x in otypes]) + otypes = [_get_vectorize_dtype(_nx.dtype(x)) for x in otypes] elif otypes is not None: raise ValueError("Invalid otype specification") self.otypes = otypes @@ -4394,7 +4400,66 @@ def quantile(a, Notes ----- - In general, the quantile at probability level :math:`q` of a cumulative + Given a sample `a` from an underlying distribution, `quantile` provides a + nonparametric estimate of the inverse cumulative distribution function. + + By default, this is done by interpolating between adjacent elements in + ``y``, a sorted copy of `a`:: + + (1-g)*y[j] + g*y[j+1] + + where the index ``j`` and coefficient ``g`` are the integral and + fractional components of ``q * (n-1)``, and ``n`` is the number of + elements in the sample. + + This is a special case of Equation 1 of H&F [1]_. More generally, + + - ``j = (q*n + m - 1) // 1``, and + - ``g = (q*n + m - 1) % 1``, + + where ``m`` may be defined according to several different conventions. + The preferred convention may be selected using the ``method`` parameter: + + =============================== =============== =============== + ``method`` number in H&F ``m`` + =============================== =============== =============== + ``interpolated_inverted_cdf`` 4 ``0`` + ``hazen`` 5 ``1/2`` + ``weibull`` 6 ``q`` + ``linear`` (default) 7 ``1 - q`` + ``median_unbiased`` 8 ``q/3 + 1/3`` + ``normal_unbiased`` 9 ``q/4 + 3/8`` + =============================== =============== =============== + + Note that indices ``j`` and ``j + 1`` are clipped to the range ``0`` to + ``n - 1`` when the results of the formula would be outside the allowed + range of non-negative indices. The ``- 1`` in the formulas for ``j`` and + ``g`` accounts for Python's 0-based indexing. + + The table above includes only the estimators from H&F that are continuous + functions of probability `q` (estimators 4-9). 
NumPy also provides the + three discontinuous estimators from H&F (estimators 1-3), where ``j`` is + defined as above, ``m`` is defined as follows, and ``g`` is a function + of the real-valued ``index = q*n + m - 1`` and ``j``. + + 1. ``inverted_cdf``: ``m = 0`` and ``g = int(index - j > 0)`` + 2. ``averaged_inverted_cdf``: ``m = 0`` and + ``g = (1 + int(index - j > 0)) / 2`` + 3. ``closest_observation``: ``m = -1/2`` and + ``g = 1 - int((index == j) & (j%2 == 1))`` + + For backward compatibility with previous versions of NumPy, `quantile` + provides four additional discontinuous estimators. Like + ``method='linear'``, all have ``m = 1 - q`` so that ``j = q*(n-1) // 1``, + but ``g`` is defined as follows. + + - ``lower``: ``g = 0`` + - ``midpoint``: ``g = 0.5`` + - ``higher``: ``g = 1`` + - ``nearest``: ``g = (q*(n-1) % 1) > 0.5`` + + **Weighted quantiles:** + More formally, the quantile at probability level :math:`q` of a cumulative distribution function :math:`F(y)=P(Y \\leq y)` with probability measure :math:`P` is defined as any number :math:`x` that fulfills the *coverage conditions* @@ -4742,7 +4807,9 @@ def _discret_interpolation_to_boundaries(index, gamma_condition_fun): def _closest_observation(n, quantiles): - gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 0) + # "choose the nearest even order statistic at g=0" (H&F (1996) pp. 362). + # Order is 1-based so for zero-based indexing round to nearest odd index. + gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 1) return _discret_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, gamma_fun) diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index 54788a738c7e..5b92b85fb78f 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -141,7 +141,7 @@ def _copyto(a, val, mask): return a -def _remove_nan_1d(arr1d, overwrite_input=False): +def _remove_nan_1d(arr1d, second_arr1d=None, overwrite_input=False): """ Equivalent to arr1d[~arr1d.isnan()], but in a different order @@ -151,6 +151,8 @@ def _remove_nan_1d(arr1d, overwrite_input=False): ---------- arr1d : ndarray Array to remove nans from + second_arr1d : ndarray or None + A second array which will have the same positions removed as arr1d. overwrite_input : bool True if `arr1d` can be modified in place @@ -158,6 +160,8 @@ def _remove_nan_1d(arr1d, overwrite_input=False): ------- res : ndarray Array with nan elements removed + second_res : ndarray or None + Second array with nan element positions of first array removed. 
overwrite_input : bool True if `res` can be modified in place, given the constraint on the input @@ -172,9 +176,12 @@ def _remove_nan_1d(arr1d, overwrite_input=False): if s.size == arr1d.size: warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=6) - return arr1d[:0], True + if second_arr1d is None: + return arr1d[:0], None, True + else: + return arr1d[:0], second_arr1d[:0], True elif s.size == 0: - return arr1d, overwrite_input + return arr1d, second_arr1d, overwrite_input else: if not overwrite_input: arr1d = arr1d.copy() @@ -183,7 +190,15 @@ def _remove_nan_1d(arr1d, overwrite_input=False): # fill nans in beginning of array with non-nans of end arr1d[s[:enonan.size]] = enonan - return arr1d[:-s.size], True + if second_arr1d is None: + return arr1d[:-s.size], None, True + else: + if not overwrite_input: + second_arr1d = second_arr1d.copy() + enonan = second_arr1d[-s.size:][~c[-s.size:]] + second_arr1d[s[:enonan.size]] = enonan + + return arr1d[:-s.size], second_arr1d[:-s.size], True def _divide_by_count(a, b, out=None): @@ -1061,7 +1076,7 @@ def _nanmedian1d(arr1d, overwrite_input=False): Private function for rank 1 arrays. Compute the median ignoring NaNs. See nanmedian for parameter usage """ - arr1d_parsed, overwrite_input = _remove_nan_1d( + arr1d_parsed, _, overwrite_input = _remove_nan_1d( arr1d, overwrite_input=overwrite_input, ) @@ -1646,13 +1661,36 @@ def _nanquantile_ureduce_func( wgt = None if weights is None else weights.ravel() result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt) else: - result = np.apply_along_axis(_nanquantile_1d, axis, a, q, - overwrite_input, method, weights) - # apply_along_axis fills in collapsed axis with results. - # Move that axis to the beginning to match percentile's - # convention. - if q.ndim != 0: - result = np.moveaxis(result, axis, 0) + # Note that this code could try to fill in `out` right away + if weights is None: + result = np.apply_along_axis(_nanquantile_1d, axis, a, q, + overwrite_input, method, weights) + # apply_along_axis fills in collapsed axis with results. + # Move those axes to the beginning to match percentile's + # convention. + if q.ndim != 0: + from_ax = [axis + i for i in range(q.ndim)] + result = np.moveaxis(result, from_ax, list(range(q.ndim))) + else: + # We need to apply along axis over 2 arrays, a and weights. + # move operation axes to end for simplicity: + a = np.moveaxis(a, axis, -1) + if weights is not None: + weights = np.moveaxis(weights, axis, -1) + if out is not None: + result = out + else: + # weights are limited to `inverted_cdf` so the result dtype + # is known to be identical to that of `a` here: + result = np.empty_like(a, shape=q.shape + a.shape[:-1]) + + for ii in np.ndindex(a.shape[:-1]): + result[(...,) + ii] = _nanquantile_1d( + a[ii], q, weights=weights[ii], + overwrite_input=overwrite_input, method=method, + ) + # This path dealt with `out` already... + return result if out is not None: out[...] = result @@ -1666,8 +1704,9 @@ def _nanquantile_1d( Private function for rank 1 arrays. Compute quantile ignoring NaNs. See nanpercentile for parameter usage """ - arr1d, overwrite_input = _remove_nan_1d(arr1d, - overwrite_input=overwrite_input) + # TODO: What to do when arr1d = [1, np.nan] and weights = [0, 1]? 
+ arr1d, weights, overwrite_input = _remove_nan_1d(arr1d, + second_arr1d=weights, overwrite_input=overwrite_input) if arr1d.size == 0: # convert to scalar return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()] diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 8fef65e7f6ab..9b298be08e9e 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -278,6 +278,34 @@ def __repr__(self): array_names += "..." return f"NpzFile {filename!r} with keys: {array_names}" + # Work around problems with the docstrings in the Mapping methods + # They contain a `->`, which confuses the type annotation interpretations + # of sphinx-docs. See gh-25964 + + def get(self, key, default=None, /): + """ + D.get(k,[,d]) returns D[k] if k in D, else d. d defaults to None. + """ + return Mapping.get(self, key, default) + + def items(self): + """ + D.items() returns a set-like object providing a view on the items + """ + return Mapping.items(self) + + def keys(self): + """ + D.keys() returns a set-like object providing a view on the keys + """ + return Mapping.keys(self) + + def values(self): + """ + D.values() returns a set-like object providing a view on the values + """ + return Mapping.values(self) + @set_module('numpy') def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, @@ -487,12 +515,12 @@ def save(file, arr, allow_pickle=True, fix_imports=True): arr : array_like Array data to be saved. allow_pickle : bool, optional - Allow saving object arrays using Python pickles. Reasons for + Allow saving object arrays using Python pickles. Reasons for disallowing pickles include security (loading pickled data can execute - arbitrary code) and portability (pickled objects may not be loadable + arbitrary code) and portability (pickled objects may not be loadable on different Python installations, for example if the stored objects require libraries that are not available, and not all pickled data is - compatible between Python 2 and Python 3). + compatible between different versions of Python). Default: True fix_imports : bool, optional Only useful in forcing objects in object arrays on Python 3 to be @@ -1005,6 +1033,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', # Due to chunking, certain error reports are less clear, currently. if filelike: data = iter(data) # cannot chunk when reading from file + filelike = False c_byte_converters = False if read_dtype_via_object_chunks == "S": @@ -1020,7 +1049,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', next_arr = _load_from_filelike( data, delimiter=delimiter, comment=comment, quote=quote, imaginary_unit=imaginary_unit, - usecols=usecols, skiplines=skiplines, max_rows=max_rows, + usecols=usecols, skiplines=skiplines, max_rows=chunk_size, converters=converters, dtype=dtype, encoding=encoding, filelike=filelike, byte_converters=byte_converters, @@ -1814,10 +1843,10 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, .. versionadded:: 1.10.0 encoding : str, optional Encoding used to decode the inputfile. Does not apply when `fname` - is a file object. The special value 'bytes' enables backward + is a file object. The special value 'bytes' enables backward compatibility workarounds that ensure that you receive byte arrays - when possible and passes latin1 encoded strings to converters. - Override this value to receive unicode arrays and pass strings + when possible and passes latin1 encoded strings to converters. 
+ Override this value to receive unicode arrays and pass strings as input to converters. If set to None the system default is used. The default value is 'bytes'. @@ -1854,7 +1883,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, * Individual values are not stripped of spaces by default. When using a custom converter, make sure the function does remove spaces. * Custom converters may receive unexpected values due to dtype - discovery. + discovery. References ---------- @@ -2127,7 +2156,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, except ValueError: # We couldn't find it: the name must have been dropped continue - # Redefine the key if it's a column number + # Redefine the key if it's a column number # and usecols is defined if usecols: try: @@ -2161,23 +2190,23 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, if len(dtype_flat) > 1: # Flexible type : get a converter from each dtype zipit = zip(dtype_flat, missing_values, filling_values) - converters = [StringConverter(dt, + converters = [StringConverter(dt, locked=True, - missing_values=miss, + missing_values=miss, default=fill) for (dt, miss, fill) in zipit] else: # Set to a default converter (but w/ different missing values) zipit = zip(missing_values, filling_values) - converters = [StringConverter(dtype, + converters = [StringConverter(dtype, locked=True, - missing_values=miss, + missing_values=miss, default=fill) for (miss, fill) in zipit] # Update the converters to use the user-defined ones uc_update = [] for (j, conv) in user_converters.items(): - # If the converter is specified by column names, + # If the converter is specified by column names, # use the index instead if _is_string_like(j): try: @@ -2201,8 +2230,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, if conv is bytes: user_conv = asbytes elif byte_converters: - # Converters may use decode to workaround numpy's old - # behavior, so encode the string again before passing + # Converters may use decode to workaround numpy's old + # behavior, so encode the string again before passing # to the user converter. def tobytes_first(x, conv): if type(x) is bytes: @@ -2338,7 +2367,7 @@ def tobytes_first(x, conv): "argument is deprecated. Set the encoding, use None for the " "system default.", np.exceptions.VisibleDeprecationWarning, stacklevel=2) - + def encode_unicode_cols(row_tup): row = list(row_tup) for i in strcolidx: diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 87f35a7a4f60..a90403459848 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -271,6 +271,8 @@ def dtype_to_descr(dtype): warnings.warn("metadata on a dtype is not saved to an npy/npz. " "Use another format (such as pickle) to store it.", UserWarning, stacklevel=2) + dtype = new_dtype + if dtype.names is not None: # This is a record array. The .descr is fine. 
XXX: parts of the # record array with an empty name, like padding bytes, still get @@ -741,7 +743,7 @@ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): "when allow_pickle=False") if pickle_kwargs is None: pickle_kwargs = {} - pickle.dump(array, fp, protocol=3, **pickle_kwargs) + pickle.dump(array, fp, protocol=4, **pickle_kwargs) elif array.flags.f_contiguous and not array.flags.c_contiguous: if isfileobj(fp): array.T.tofile(fp) diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index 8723f4d9ba73..ef3319e901a0 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -867,6 +867,42 @@ def test_check_03(self): a = np.pad([1, 2, 3], 4, 'reflect') b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) assert_array_equal(a, b) + + def test_check_04(self): + a = np.pad([1, 2, 3], [1, 10], 'reflect') + b = np.array([2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_05(self): + a = np.pad([1, 2, 3, 4], [45, 10], 'reflect') + b = np.array( + [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, + 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_06(self): + a = np.pad([1, 2, 3, 4], [15, 2], 'symmetric') + b = np.array( + [2, 3, 4, 4, 3, 2, 1, 1, 2, 3, + 4, 4, 3, 2, 1, 1, 2, 3, 4, 4, + 3] + ) + assert_array_equal(a, b) + + def test_check_07(self): + a = np.pad([1, 2, 3, 4, 5, 6], [45, 3], 'symmetric') + b = np.array( + [4, 5, 6, 6, 5, 4, 3, 2, 1, 1, + 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, + 1, 1, 2, 3, 4, 5, 6, 6, 5, 4, + 3, 2, 1, 1, 2, 3, 4, 5, 6, 6, + 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, + 6, 6, 5, 4]) + assert_array_equal(a, b) class TestEmptyArray: diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index f537621482c0..e19114abab6e 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -400,6 +400,7 @@ def test_isin_table_timedelta_fails(self): (np.uint16, np.uint8), (np.uint8, np.int16), (np.int16, np.uint8), + (np.uint64, np.int64), ] ) @pytest.mark.parametrize("kind", [None, "sort", "table"]) @@ -415,10 +416,8 @@ def test_isin_mixed_dtype(self, dtype1, dtype2, kind): expected = np.array([True, True, False, False]) - expect_failure = kind == "table" and any(( - dtype1 == np.int8 and dtype2 == np.int16, - dtype1 == np.int16 and dtype2 == np.int8 - )) + expect_failure = kind == "table" and ( + dtype1 == np.int16 and dtype2 == np.int8) if expect_failure: with pytest.raises(RuntimeError, match="exceed the maximum"): @@ -426,6 +425,22 @@ def test_isin_mixed_dtype(self, dtype1, dtype2, kind): else: assert_array_equal(isin(ar1, ar2, kind=kind), expected) + @pytest.mark.parametrize("data", [ + np.array([2**63, 2**63+1], dtype=np.uint64), + np.array([-2**62, -2**62-1], dtype=np.int64), + ]) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_huge_vals(self, kind, data): + """Test values outside intp range (negative ones if 32bit system)""" + query = data[1] + res = np.isin(data, query, kind=kind) + assert_array_equal(res, [False, True]) + # Also check that nothing weird happens for values can't possibly + # in range. 
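The reworked `unique` assertions above encode the 2.0 inverse convention: with an `axis` argument, `inv` is 1-D and indexes the unique slices, so plain `take` reconstructs the input. A standalone restatement, assuming that behaviour:

    import numpy as np

    x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]])
    uniq, inv = np.unique(x, return_inverse=True, axis=0)
    assert inv.ndim == 1  # 1-D again for the axis case
    np.testing.assert_array_equal(np.take(uniq, inv, axis=0), x)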
+ data = data.astype(np.int32) # clearly different values + res = np.isin(data, query, kind=kind) + assert_array_equal(res, [False, False]) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) def test_isin_mixed_boolean(self, kind): """Test that isin works as expected for bool/int input.""" @@ -814,11 +829,8 @@ def test_unique_1d_with_axis(self, axis): def test_unique_inverse_with_axis(self, axis): x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]]) uniq, inv = unique(x, return_inverse=True, axis=axis) - assert_equal(inv.ndim, x.ndim) - if axis is None: - assert_array_equal(x, np.take(uniq, inv)) - else: - assert_array_equal(x, np.take_along_axis(uniq, inv, axis=axis)) + assert_equal(inv.ndim, x.ndim if axis is None else 1) + assert_array_equal(x, np.take(uniq, inv, axis=axis)) def test_unique_axis_zeros(self): # issue 15559 @@ -830,7 +842,7 @@ def test_unique_axis_zeros(self): assert_equal(uniq.dtype, single_zero.dtype) assert_array_equal(uniq, np.empty(shape=(1, 0))) assert_array_equal(idx, np.array([0])) - assert_array_equal(inv, np.array([[0], [0]])) + assert_array_equal(inv, np.array([0, 0])) assert_array_equal(cnt, np.array([2])) # there's 0 elements of shape (2,) along axis 1 @@ -840,7 +852,7 @@ def test_unique_axis_zeros(self): assert_equal(uniq.dtype, single_zero.dtype) assert_array_equal(uniq, np.empty(shape=(2, 0))) assert_array_equal(idx, np.array([])) - assert_array_equal(inv, np.empty((1, 0))) + assert_array_equal(inv, np.array([])) assert_array_equal(cnt, np.array([])) # test a "complicated" shape @@ -909,7 +921,7 @@ def _run_axis_tests(self, dtype): msg = "Unique's return_index=True failed with axis=0" assert_array_equal(data[idx], uniq, msg) msg = "Unique's return_inverse=True failed with axis=0" - assert_array_equal(np.take_along_axis(uniq, inv, axis=0), data) + assert_array_equal(np.take(uniq, inv, axis=0), data) msg = "Unique's return_counts=True failed with axis=0" assert_array_equal(cnt, np.array([2, 2]), msg) @@ -918,7 +930,7 @@ def _run_axis_tests(self, dtype): msg = "Unique's return_index=True failed with axis=1" assert_array_equal(data[:, idx], uniq) msg = "Unique's return_inverse=True failed with axis=1" - assert_array_equal(np.take_along_axis(uniq, inv, axis=1), data) + assert_array_equal(np.take(uniq, inv, axis=1), data) msg = "Unique's return_counts=True failed with axis=1" assert_array_equal(cnt, np.array([2, 1, 1]), msg) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 31352864b7e2..bb262e048cba 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -998,32 +998,30 @@ def test_header_growth_axis(): assert len(fp.getvalue()) == expected_header_length -@pytest.mark.parametrize('dt, fail', [ - (np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3', - metadata={'some': 'stuff'})]}), True), - (np.dtype(int, metadata={'some': 'stuff'}), False), - (np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False), +@pytest.mark.parametrize('dt', [ + np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3', + metadata={'some': 'stuff'})]}), + np.dtype(int, metadata={'some': 'stuff'}), + np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), # recursive: metadata on the field of a dtype - (np.dtype({'names': ['a', 'b'], 'formats': [ + np.dtype({'names': ['a', 'b'], 'formats': [ float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]}) - ]}), False) + ]}), ]) @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), 
reason="PyPy bug in error formatting") -def test_metadata_dtype(dt, fail): +def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) buf = BytesIO() with assert_warns(UserWarning): np.save(buf, arr) buf.seek(0) - if fail: - with assert_raises(ValueError): - np.load(buf) - else: - arr2 = np.load(buf) - # BUG: assert_array_equal does not check metadata - from numpy.lib._utils_impl import drop_metadata - assert_array_equal(arr, arr2) - assert drop_metadata(arr.dtype) is not arr.dtype - assert drop_metadata(arr2.dtype) is arr2.dtype + + # Loading should work (metadata was stripped): + arr2 = np.load(buf) + # BUG: assert_array_equal does not check metadata + from numpy.lib._utils_impl import drop_metadata + assert_array_equal(arr, arr2) + assert drop_metadata(arr.dtype) is not arr.dtype + assert drop_metadata(arr2.dtype) is arr2.dtype diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index a6465019fae4..b267a879e042 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -254,8 +254,8 @@ def test_nd(self): @pytest.mark.parametrize("dtype", ["i8", "U10", "object", "datetime64[ms]"]) def test_any_and_all_result_dtype(dtype): arr = np.ones(3, dtype=dtype) - assert np.any(arr).dtype == np.bool_ - assert np.all(arr).dtype == np.bool_ + assert np.any(arr).dtype == np.bool + assert np.all(arr).dtype == np.bool class TestCopy: @@ -1901,6 +1901,13 @@ def test_positional_regression_9477(self): r = f([2]) assert_equal(r.dtype, np.dtype('float64')) + def test_datetime_conversion(self): + otype = "datetime64[ns]" + arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], + dtype='datetime64[ns]') + assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)", + otypes=[otype])(arr), arr) + class TestLeaks: class A: @@ -3987,6 +3994,20 @@ def test_weibull_fraction(self): quantile = np.quantile(arr, [Fraction(1, 2)], method='weibull') assert_equal(quantile, np.array(Fraction(1, 20))) + def test_closest_observation(self): + # Round ties to nearest even order statistic (see #26656) + m = 'closest_observation' + q = 0.5 + arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + assert_equal(2, np.quantile(arr[0:3], q, method=m)) + assert_equal(2, np.quantile(arr[0:4], q, method=m)) + assert_equal(2, np.quantile(arr[0:5], q, method=m)) + assert_equal(3, np.quantile(arr[0:6], q, method=m)) + assert_equal(4, np.quantile(arr[0:7], q, method=m)) + assert_equal(4, np.quantile(arr[0:8], q, method=m)) + assert_equal(4, np.quantile(arr[0:9], q, method=m)) + assert_equal(5, np.quantile(arr, q, method=m)) + class TestLerp: @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 44664c2df891..38ded1f26cda 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -2760,12 +2760,16 @@ def test_npzfile_dict(): assert_(f in ['x', 'y']) assert_equal(a.shape, (3, 3)) + for a in z.values(): + assert_equal(a.shape, (3, 3)) + assert_(len(z.items()) == 2) for f in z: assert_(f in ['x', 'y']) assert_('x' in z.keys()) + assert (z.get('x') == z['x']).all() @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index 78c84e491c08..0b2f4042e66d 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -970,12 +970,15 @@ def test_parametric_unit_discovery( """Check that the correct unit (e.g. 
month, day, second) is discovered from the data when a user specifies a unitless datetime.""" # Unit should be "D" (days) due to last entry - data = [generic_data] * 50000 + [long_datum] + data = [generic_data] * nrows + [long_datum] expected = np.array(data, dtype=expected_dtype) + assert len(data) == nrows+1 + assert len(data) == len(expected) # file-like path txt = StringIO("\n".join(data)) a = np.loadtxt(txt, dtype=unitless_dtype) + assert len(a) == len(expected) assert a.dtype == expected.dtype assert_equal(a, expected) @@ -983,11 +986,17 @@ def test_parametric_unit_discovery( fd, fname = mkstemp() os.close(fd) with open(fname, "w") as fh: - fh.write("\n".join(data)) + fh.write("\n".join(data)+"\n") + # loading the full file... a = np.loadtxt(fname, dtype=unitless_dtype) - os.remove(fname) + assert len(a) == len(expected) assert a.dtype == expected.dtype assert_equal(a, expected) + # loading half of the file... + a = np.loadtxt(fname, dtype=unitless_dtype, max_rows=int(nrows/2)) + os.remove(fname) + assert len(a) == int(nrows/2) + assert_equal(a, expected[:int(nrows/2)]) def test_str_dtype_unit_discovery_with_converter(): @@ -1041,5 +1050,26 @@ def test_field_growing_cases(): assert len(res) == 0 for i in range(1, 1024): - res = np.loadtxt(["," * i], delimiter=",", dtype=bytes) + res = np.loadtxt(["," * i], delimiter=",", dtype=bytes, max_rows=10) assert len(res) == i+1 + +@pytest.mark.parametrize("nmax", (10000, 50000, 55000, 60000)) +def test_maxrows_exceeding_chunksize(nmax): + # tries to read all of the file, + # or less, equal, greater than _loadtxt_chunksize + file_length = 60000 + + # file-like path + data = ["a 0.5 1"]*file_length + txt = StringIO("\n".join(data)) + res = np.loadtxt(txt, dtype=str, delimiter=" ", max_rows=nmax) + assert len(res) == nmax + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)) + res = np.loadtxt(fname, dtype=str, delimiter=" ", max_rows=nmax) + os.remove(fname) + assert len(res) == nmax diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index da3ee0f2a3dc..d196b133005f 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -1144,7 +1144,8 @@ def test_complex(self): assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) @pytest.mark.parametrize("weighted", [False, True]) - def test_result_values(self, weighted): + @pytest.mark.parametrize("use_out", [False, True]) + def test_result_values(self, weighted, use_out): if weighted: percentile = partial(np.percentile, method="inverted_cdf") nanpercentile = partial(np.nanpercentile, method="inverted_cdf") @@ -1160,13 +1161,16 @@ def gen_weights(d): return None tgt = [percentile(d, 28, weights=gen_weights(d)) for d in _rdat] - res = nanpercentile(_ndat, 28, axis=1, weights=gen_weights(_ndat)) + out = np.empty_like(tgt) if use_out else None + res = nanpercentile(_ndat, 28, axis=1, + weights=gen_weights(_ndat), out=out) assert_almost_equal(res, tgt) # Transpose the array to fit the output convention of numpy.percentile tgt = np.transpose([percentile(d, (28, 98), weights=gen_weights(d)) for d in _rdat]) + out = np.empty_like(tgt) if use_out else None res = nanpercentile(_ndat, (28, 98), axis=1, - weights=gen_weights(_ndat)) + weights=gen_weights(_ndat), out=out) assert_almost_equal(res, tgt) @pytest.mark.parametrize("axis", [None, 0, 1]) @@ -1242,6 +1246,58 @@ def test_multiple_percentiles(self): np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6) ) + 
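The parametrized tests above exercise the new combination of `weights` and `out` in `nanpercentile`. A minimal usage sketch (the data is invented here), assuming the 2.0 rule that weights require `method="inverted_cdf"`:

    import numpy as np

    x = np.array([[1.0, np.nan, 3.0, 4.0],
                  [np.nan, 6.0, 7.0, 8.0]])
    w = np.ones_like(x)

    out = np.empty(2)
    res = np.nanpercentile(x, 28, axis=1, weights=w,
                           method="inverted_cdf", out=out)
    assert res is out   # written in place, NaNs ignored per row
    print(out)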
@pytest.mark.parametrize("nan_weight", [0, 1, 2, 3, 1e200]) + def test_nan_value_with_weight(self, nan_weight): + x = [1, np.nan, 2, 3] + result = np.float64(2.0) + q_unweighted = np.nanpercentile(x, 50, method="inverted_cdf") + assert_equal(q_unweighted, result) + + # The weight value at the nan position should not matter. + w = [1.0, nan_weight, 1.0, 1.0] + q_weighted = np.nanpercentile(x, 50, weights=w, method="inverted_cdf") + assert_equal(q_weighted, result) + + @pytest.mark.parametrize("axis", [0, 1, 2]) + def test_nan_value_with_weight_ndim(self, axis): + # Create a multi-dimensional array to test + np.random.seed(1) + x_no_nan = np.random.random(size=(100, 99, 2)) + # Set some places to NaN (not particularly smart) so there is always + # some non-Nan. + x = x_no_nan.copy() + x[np.arange(99), np.arange(99), 0] = np.nan + + p = np.array([[20., 50., 30], [70, 33, 80]]) + + # We just use ones as weights, but replace it with 0 or 1e200 at the + # NaN positions below. + weights = np.ones_like(x) + + # For comparison use weighted normal percentile with nan weights at + # 0 (and no NaNs); not sure this is strictly identical but should be + # sufficiently so (if a percentile lies exactly on a 0 value). + weights[np.isnan(x)] = 0 + p_expected = np.percentile( + x_no_nan, p, axis=axis, weights=weights, method="inverted_cdf") + + p_unweighted = np.nanpercentile( + x, p, axis=axis, method="inverted_cdf") + # The normal and unweighted versions should be identical: + assert_equal(p_unweighted, p_expected) + + weights[np.isnan(x)] = 1e200 # huge value, shouldn't matter + p_weighted = np.nanpercentile( + x, p, axis=axis, weights=weights, method="inverted_cdf") + assert_equal(p_weighted, p_expected) + # Also check with out passed: + out = np.empty_like(p_weighted) + res = np.nanpercentile( + x, p, axis=axis, weights=weights, out=out, method="inverted_cdf") + + assert res is out + assert_equal(out, p_expected) + class TestNanFunctions_Quantile: # most of this is already tested by TestPercentile diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index e2f72ac90c92..644912d941e3 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -43,7 +43,7 @@ def _compare_dtypes(dt1, dt2): assert dt_m.metadata is None assert dt_m['l1'].metadata is None assert dt_m['l1']['l2'].metadata is None - + # alignment dt = np.dtype([('x', '::nan = {NPY_NAN, NPY_N * column_strides: the number of bytes between consecutive columns. 
* output_lead_dim: BLAS/LAPACK-side leading dimension, in elements */ -typedef struct linearize_data_struct +struct linearize_data { npy_intp rows; npy_intp columns; npy_intp row_strides; npy_intp column_strides; npy_intp output_lead_dim; -} LINEARIZE_DATA_t; +}; -static inline void -init_linearize_data_ex(LINEARIZE_DATA_t *lin_data, - npy_intp rows, +static inline +linearize_data init_linearize_data_ex(npy_intp rows, npy_intp columns, npy_intp row_strides, npy_intp column_strides, npy_intp output_lead_dim) { - lin_data->rows = rows; - lin_data->columns = columns; - lin_data->row_strides = row_strides; - lin_data->column_strides = column_strides; - lin_data->output_lead_dim = output_lead_dim; + return {rows, columns, row_strides, column_strides, output_lead_dim}; } -static inline void -init_linearize_data(LINEARIZE_DATA_t *lin_data, - npy_intp rows, +static inline +linearize_data init_linearize_data(npy_intp rows, npy_intp columns, npy_intp row_strides, npy_intp column_strides) { - init_linearize_data_ex( - lin_data, rows, columns, row_strides, column_strides, columns); + return init_linearize_data_ex( + rows, columns, row_strides, column_strides, columns); } #if _UMATH_LINALG_DEBUG @@ -603,7 +595,7 @@ dump_ufunc_object(PyUFuncObject* ufunc) } static inline void -dump_linearize_data(const char* name, const LINEARIZE_DATA_t* params) +dump_linearize_data(const char* name, const linearize_data* params) { TRACE_TXT("\n\t%s rows: %zd columns: %zd"\ "\n\t\trow_strides: %td column_strides: %td"\ @@ -845,7 +837,7 @@ template static inline void * linearize_matrix(typ *dst, typ *src, - const LINEARIZE_DATA_t* data) + const linearize_data* data) { using ftyp = fortran_type_t; if (dst) { @@ -890,7 +882,7 @@ template static inline void * delinearize_matrix(typ *dst, typ *src, - const LINEARIZE_DATA_t* data) + const linearize_data* data) { using ftyp = fortran_type_t; @@ -937,7 +929,7 @@ using ftyp = fortran_type_t; template static inline void -nan_matrix(typ *dst, const LINEARIZE_DATA_t* data) +nan_matrix(typ *dst, const linearize_data* data) { int i, j; for (i = 0; i < data->rows; i++) { @@ -953,7 +945,7 @@ nan_matrix(typ *dst, const LINEARIZE_DATA_t* data) template static inline void -zero_matrix(typ *dst, const LINEARIZE_DATA_t* data) +zero_matrix(typ *dst, const linearize_data* data) { int i, j; for (i = 0; i < data->rows; i++) { @@ -1168,9 +1160,8 @@ slogdet(char **args, tmp_buff = (char *)malloc(matrix_size + pivot_size); if (tmp_buff) { - LINEARIZE_DATA_t lin_data; /* swapped steps to get matrix in FORTRAN order */ - init_linearize_data(&lin_data, m, m, steps[1], steps[0]); + linearize_data lin_data = init_linearize_data(m, m, steps[1], steps[0]); BEGIN_OUTER_LOOP_3 linearize_matrix((typ*)tmp_buff, (typ*)args[0], &lin_data); slogdet_single_element(m, @@ -1220,11 +1211,11 @@ det(char **args, tmp_buff = (char *)malloc(matrix_size + pivot_size); if (tmp_buff) { - LINEARIZE_DATA_t lin_data; + /* swapped steps to get matrix in FORTRAN order */ + linearize_data lin_data = init_linearize_data(m, m, steps[1], steps[0]); + typ sign; basetyp logdet; - /* swapped steps to get matrix in FORTRAN order */ - init_linearize_data(&lin_data, m, m, steps[1], steps[0]); BEGIN_OUTER_LOOP_2 linearize_matrix((typ*)tmp_buff, (typ*)args[0], &lin_data); @@ -1524,20 +1515,11 @@ eigh_wrapper(char JOBZ, JOBZ, UPLO, (fortran_int)dimensions[0], dispatch_scalar())) { - LINEARIZE_DATA_t matrix_in_ld; - LINEARIZE_DATA_t eigenvectors_out_ld; - LINEARIZE_DATA_t eigenvalues_out_ld; - - init_linearize_data(&matrix_in_ld, - 
eigh_params.N, eigh_params.N, - steps[1], steps[0]); - init_linearize_data(&eigenvalues_out_ld, - 1, eigh_params.N, - 0, steps[2]); + linearize_data matrix_in_ld = init_linearize_data(eigh_params.N, eigh_params.N, steps[1], steps[0]); + linearize_data eigenvalues_out_ld = init_linearize_data(1, eigh_params.N, 0, steps[2]); + linearize_data eigenvectors_out_ld = {}; /* silence uninitialized warning */ if ('V' == eigh_params.JOBZ) { - init_linearize_data(&eigenvectors_out_ld, - eigh_params.N, eigh_params.N, - steps[4], steps[3]); + eigenvectors_out_ld = init_linearize_data(eigh_params.N, eigh_params.N, steps[4], steps[3]); } for (iter = 0; iter < outer_dim; ++iter) { @@ -1741,11 +1723,9 @@ using ftyp = fortran_type_t; n = (fortran_int)dimensions[0]; nrhs = (fortran_int)dimensions[1]; if (init_gesv(¶ms, n, nrhs)) { - LINEARIZE_DATA_t a_in, b_in, r_out; - - init_linearize_data(&a_in, n, n, steps[1], steps[0]); - init_linearize_data(&b_in, nrhs, n, steps[3], steps[2]); - init_linearize_data(&r_out, nrhs, n, steps[5], steps[4]); + linearize_data a_in = init_linearize_data(n, n, steps[1], steps[0]); + linearize_data b_in = init_linearize_data(nrhs, n, steps[3], steps[2]); + linearize_data r_out = init_linearize_data(nrhs, n, steps[5], steps[4]); BEGIN_OUTER_LOOP_3 int not_ok; @@ -1780,10 +1760,9 @@ using ftyp = fortran_type_t; n = (fortran_int)dimensions[0]; if (init_gesv(¶ms, n, 1)) { - LINEARIZE_DATA_t a_in, b_in, r_out; - init_linearize_data(&a_in, n, n, steps[1], steps[0]); - init_linearize_data(&b_in, 1, n, 1, steps[2]); - init_linearize_data(&r_out, 1, n, 1, steps[3]); + linearize_data a_in = init_linearize_data(n, n, steps[1], steps[0]); + linearize_data b_in = init_linearize_data(1, n, 1, steps[2]); + linearize_data r_out = init_linearize_data(1, n, 1, steps[3]); BEGIN_OUTER_LOOP_3 int not_ok; @@ -1817,9 +1796,8 @@ using ftyp = fortran_type_t; n = (fortran_int)dimensions[0]; if (init_gesv(¶ms, n, n)) { - LINEARIZE_DATA_t a_in, r_out; - init_linearize_data(&a_in, n, n, steps[1], steps[0]); - init_linearize_data(&r_out, n, n, steps[3], steps[2]); + linearize_data a_in = init_linearize_data(n, n, steps[1], steps[0]); + linearize_data r_out = init_linearize_data(n, n, steps[3], steps[2]); BEGIN_OUTER_LOOP_2 int not_ok; @@ -1978,9 +1956,8 @@ cholesky(char uplo, char **args, npy_intp const *dimensions, npy_intp const *ste n = (fortran_int)dimensions[0]; if (init_potrf(¶ms, uplo, n)) { - LINEARIZE_DATA_t a_in, r_out; - init_linearize_data(&a_in, n, n, steps[1], steps[0]); - init_linearize_data(&r_out, n, n, steps[3], steps[2]); + linearize_data a_in = init_linearize_data(n, n, steps[1], steps[0]); + linearize_data r_out = init_linearize_data(n, n, steps[3], steps[2]); BEGIN_OUTER_LOOP_2 int not_ok; linearize_matrix(params.A, (ftyp*)args[0], &a_in); @@ -2465,27 +2442,25 @@ eig_wrapper(char JOBVL, if (init_geev(&geev_params, JOBVL, JOBVR, (fortran_int)dimensions[0], dispatch_scalar())) { - LINEARIZE_DATA_t a_in; - LINEARIZE_DATA_t w_out; - LINEARIZE_DATA_t vl_out; - LINEARIZE_DATA_t vr_out; + linearize_data vl_out = {}; /* silence uninitialized warning */ + linearize_data vr_out = {}; /* silence uninitialized warning */ - init_linearize_data(&a_in, + linearize_data a_in = init_linearize_data( geev_params.N, geev_params.N, steps[1], steps[0]); steps += 2; - init_linearize_data(&w_out, + linearize_data w_out = init_linearize_data( 1, geev_params.N, 0, steps[0]); steps += 1; if ('V' == geev_params.JOBVL) { - init_linearize_data(&vl_out, + vl_out = init_linearize_data( geev_params.N, geev_params.N, 
steps[1], steps[0]); steps += 2; } if ('V' == geev_params.JOBVR) { - init_linearize_data(&vr_out, + vr_out = init_linearize_data( geev_params.N, geev_params.N, steps[1], steps[0]); } @@ -2953,13 +2928,13 @@ using basetyp = basetype_t; (fortran_int)dimensions[0], (fortran_int)dimensions[1], dispatch_scalar())) { - LINEARIZE_DATA_t a_in, u_out = {}, s_out = {}, v_out = {}; + linearize_data u_out = {}, s_out = {}, v_out = {}; fortran_int min_m_n = params.M < params.N ? params.M : params.N; - init_linearize_data(&a_in, params.N, params.M, steps[1], steps[0]); + linearize_data a_in = init_linearize_data(params.N, params.M, steps[1], steps[0]); if ('N' == params.JOBZ) { /* only the singular values are wanted */ - init_linearize_data(&s_out, 1, min_m_n, 0, steps[2]); + s_out = init_linearize_data(1, min_m_n, 0, steps[2]); } else { fortran_int u_columns, v_rows; if ('S' == params.JOBZ) { @@ -2969,13 +2944,13 @@ dispatch_scalar())) { u_columns = params.M; v_rows = params.N; } - init_linearize_data(&u_out, + u_out = init_linearize_data( u_columns, params.M, steps[3], steps[2]); - init_linearize_data(&s_out, + s_out = init_linearize_data( 1, min_m_n, 0, steps[4]); - init_linearize_data(&v_out, + v_out = init_linearize_data( params.N, v_rows, steps[6], steps[5]); } @@ -3296,10 +3271,9 @@ using ftyp = fortran_type_t; n = (fortran_int)dimensions[1]; if (init_geqrf(¶ms, m, n)) { - LINEARIZE_DATA_t a_in, tau_out; - init_linearize_data(&a_in, n, m, steps[1], steps[0]); - init_linearize_data(&tau_out, 1, fortran_int_min(m, n), 1, steps[2]); + linearize_data a_in = init_linearize_data(n, m, steps[1], steps[0]); + linearize_data tau_out = init_linearize_data(1, fortran_int_min(m, n), 1, steps[2]); BEGIN_OUTER_LOOP_2 int not_ok; @@ -3590,11 +3564,9 @@ using ftyp = fortran_type_t; n = (fortran_int)dimensions[1]; if (init_gqr(¶ms, m, n)) { - LINEARIZE_DATA_t a_in, tau_in, q_out; - - init_linearize_data(&a_in, n, m, steps[1], steps[0]); - init_linearize_data(&tau_in, 1, fortran_int_min(m, n), 1, steps[2]); - init_linearize_data(&q_out, fortran_int_min(m, n), m, steps[4], steps[3]); + linearize_data a_in = init_linearize_data(n, m, steps[1], steps[0]); + linearize_data tau_in = init_linearize_data(1, fortran_int_min(m, n), 1, steps[2]); + linearize_data q_out = init_linearize_data(fortran_int_min(m, n), m, steps[4], steps[3]); BEGIN_OUTER_LOOP_3 int not_ok; @@ -3646,11 +3618,9 @@ using ftyp = fortran_type_t; if (init_gqr_complete(¶ms, m, n)) { - LINEARIZE_DATA_t a_in, tau_in, q_out; - - init_linearize_data(&a_in, n, m, steps[1], steps[0]); - init_linearize_data(&tau_in, 1, fortran_int_min(m, n), 1, steps[2]); - init_linearize_data(&q_out, m, m, steps[4], steps[3]); + linearize_data a_in = init_linearize_data(n, m, steps[1], steps[0]); + linearize_data tau_in = init_linearize_data(1, fortran_int_min(m, n), 1, steps[2]); + linearize_data q_out = init_linearize_data(m, m, steps[4], steps[3]); BEGIN_OUTER_LOOP_3 int not_ok; @@ -4053,13 +4023,11 @@ using basetyp = basetype_t; excess = m - n; if (init_gelsd(¶ms, m, n, nrhs, dispatch_scalar{})) { - LINEARIZE_DATA_t a_in, b_in, x_out, s_out, r_out; - - init_linearize_data(&a_in, n, m, steps[1], steps[0]); - init_linearize_data_ex(&b_in, nrhs, m, steps[3], steps[2], fortran_int_max(n, m)); - init_linearize_data_ex(&x_out, nrhs, n, steps[5], steps[4], fortran_int_max(n, m)); - init_linearize_data(&r_out, 1, nrhs, 1, steps[6]); - init_linearize_data(&s_out, 1, fortran_int_min(n, m), 1, steps[7]); + linearize_data a_in = init_linearize_data(n, m, steps[1], steps[0]); + 
linearize_data b_in = init_linearize_data_ex(nrhs, m, steps[3], steps[2], fortran_int_max(n, m)); + linearize_data x_out = init_linearize_data_ex(nrhs, n, steps[5], steps[4], fortran_int_max(n, m)); + linearize_data r_out = init_linearize_data(1, nrhs, 1, steps[6]); + linearize_data s_out = init_linearize_data(1, fortran_int_min(n, m), 1, steps[7]); BEGIN_OUTER_LOOP_7 int not_ok; @@ -4217,14 +4185,14 @@ GUFUNC_FUNC_ARRAY_REAL_COMPLEX__(lstsq); GUFUNC_FUNC_ARRAY_EIG(eig); GUFUNC_FUNC_ARRAY_EIG(eigvals); -static char equal_2_types[] = { +static const char equal_2_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_CDOUBLE, NPY_CDOUBLE }; -static char equal_3_types[] = { +static const char equal_3_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT, @@ -4232,47 +4200,47 @@ static char equal_3_types[] = { }; /* second result is logdet, that will always be a REAL */ -static char slogdet_types[] = { +static const char slogdet_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_CDOUBLE, NPY_CDOUBLE, NPY_DOUBLE }; -static char eigh_types[] = { +static const char eigh_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT, NPY_CDOUBLE, NPY_DOUBLE, NPY_CDOUBLE }; -static char eighvals_types[] = { +static const char eighvals_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_CDOUBLE, NPY_DOUBLE }; -static char eig_types[] = { +static const char eig_types[] = { NPY_FLOAT, NPY_CFLOAT, NPY_CFLOAT, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE }; -static char eigvals_types[] = { +static const char eigvals_types[] = { NPY_FLOAT, NPY_CFLOAT, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE }; -static char svd_1_1_types[] = { +static const char svd_1_1_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_CDOUBLE, NPY_DOUBLE }; -static char svd_1_3_types[] = { +static const char svd_1_3_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT, @@ -4280,25 +4248,25 @@ static char svd_1_3_types[] = { }; /* A, tau */ -static char qr_r_raw_types[] = { +static const char qr_r_raw_types[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, }; /* A, tau, q */ -static char qr_reduced_types[] = { +static const char qr_reduced_types[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, }; /* A, tau, q */ -static char qr_complete_types[] = { +static const char qr_complete_types[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, }; /* A, b, rcond, x, resid, rank, s, */ -static char lstsq_types[] = { +static const char lstsq_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_INT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_INT, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_INT, NPY_FLOAT, @@ -4313,7 +4281,7 @@ typedef struct gufunc_descriptor_struct { int nin; int nout; PyUFuncGenericFunction *funcs; - char *types; + const char *types; } GUFUNC_DESCRIPTOR_t; GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 8316b481e827..c67582265e4d 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -37,6 +37,7 @@ from numpy import array 
as narray, expand_dims, iinfo, finfo from numpy._core.numeric import normalize_axis_tuple from numpy._utils._inspect import getargspec, formatargspec +from numpy._utils import set_module __all__ = [ @@ -2636,7 +2637,7 @@ class MaskedIterator: >>> x = np.ma.array(arange(6).reshape(2, 3)) >>> fl = x.flat >>> type(fl) - + >>> for item in fl: ... print(item) ... @@ -2717,6 +2718,7 @@ def __next__(self): return d +@set_module("numpy.ma") class MaskedArray(ndarray): """ An array class with possibly masked values. @@ -8378,7 +8380,7 @@ def asarray(a, dtype=None, order=None): mask=False, fill_value=1e+20) >>> type(np.ma.asarray(x)) - + """ order = order or 'C' @@ -8425,7 +8427,7 @@ def asanyarray(a, dtype=None): mask=False, fill_value=1e+20) >>> type(np.ma.asanyarray(x)) - + """ # workaround for #8666, to preserve identity. Ideally the bottom line diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 56228b927080..8e458fe165af 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,5 +1,6 @@ from typing import Any -from numpy.lib.index_tricks import AxisConcatenator + +from numpy.lib._index_tricks_impl import AxisConcatenator from numpy.ma.core import ( dot as dot, diff --git a/numpy/meson.build b/numpy/meson.build index 80fa720b82e6..1283dbb44ddd 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -362,7 +362,7 @@ conf_data.set('PYTHON_VERSION', py.language_version()) # `np.show_config()`; needs some special handling for the case BLAS was found # but CBLAS not (and hence BLAS was also disabled) dependency_map = { - 'LAPACK': lapack_dep, + 'LAPACK': lapack, } if have_blas dependency_map += {'BLAS': blas} diff --git a/numpy/random/_bounded_integers.pxd.in b/numpy/random/_bounded_integers.pxd.in index 5ae5a806715c..bdcb32a7e212 100644 --- a/numpy/random/_bounded_integers.pxd.in +++ b/numpy/random/_bounded_integers.pxd.in @@ -6,7 +6,7 @@ ctypedef np.npy_bool bool_t from numpy.random cimport bitgen_t -cdef inline uint64_t _gen_mask(uint64_t max_val) nogil: +cdef inline uint64_t _gen_mask(uint64_t max_val) noexcept nogil: """Mask generator for use in bounded random numbers""" # Smallest bit mask >= max cdef uint64_t mask = max_val diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 5dc2ebf6c1ef..16a0e5e0ff8d 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -68,9 +68,12 @@ class Generator: def __init__(self, bit_generator: BitGenerator) -> None: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... - def __getstate__(self) -> dict[str, Any]: ... - def __setstate__(self, state: dict[str, Any]) -> None: ... - def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]: ... + def __getstate__(self) -> None: ... + def __setstate__(self, state: dict[str, Any] | None) -> None: ... + def __reduce__(self) -> tuple[ + Callable[[BitGenerator], Generator], + tuple[BitGenerator], + None]: ... @property def bit_generator(self) -> BitGenerator: ... def spawn(self, n_children: int) -> list[Generator]: ... @@ -210,6 +213,7 @@ class Generator: self, low: int, high: None | int = ..., + size: None = ..., ) -> int: ... @overload def integers( # type: ignore[misc] @@ -221,6 +225,15 @@ class Generator: endpoint: bool = ..., ) -> bool: ... @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: type[np.bool] = ..., + endpoint: bool = ..., + ) -> np.bool: ... 
+ @overload def integers( # type: ignore[misc] self, low: int, @@ -230,6 +243,96 @@ class Generator: endpoint: bool = ..., ) -> int: ... @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + endpoint: bool = ..., + ) -> uint8: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + endpoint: bool = ..., + ) -> uint16: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + endpoint: bool = ..., + ) -> uint32: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., + endpoint: bool = ..., + ) -> uint: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + endpoint: bool = ..., + ) -> uint64: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + endpoint: bool = ..., + ) -> int8: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + endpoint: bool = ..., + ) -> int16: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + endpoint: bool = ..., + ) -> int32: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., + endpoint: bool = ..., + ) -> int_: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + endpoint: bool = ..., + ) -> int64: ... + @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index ab8a15555ae3..0d134c823588 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -146,7 +146,7 @@ cdef class Generator: Container for the BitGenerators. - ``Generator`` exposes a number of methods for generating random + `Generator` exposes a number of methods for generating random numbers drawn from a variety of probability distributions. In addition to the distribution-specific arguments, each method takes a keyword argument `size` that defaults to ``None``. If `size` is ``None``, then a single @@ -159,7 +159,7 @@ cdef class Generator: **No Compatibility Guarantee** - ``Generator`` does not provide a version compatibility guarantee. In + `Generator` does not provide a version compatibility guarantee. In particular, as better algorithms evolve the bit stream may change. 
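The new `__reduce__` above ships the `BitGenerator` instance itself rather than a `(name, state)` pair, so a pickle round trip now preserves the attached `SeedSequence`, which was lost before 2.0. A quick check of that claim:

    import pickle
    import numpy as np

    rng = np.random.default_rng(12345)
    rng2 = pickle.loads(pickle.dumps(rng))

    # Same stream state after the round trip ...
    assert rng.bit_generator.state == rng2.bit_generator.state
    # ... and the seed sequence now survives as well.
    assert (rng.bit_generator.seed_seq.entropy
            == rng2.bit_generator.seed_seq.entropy)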
Parameters @@ -171,8 +171,8 @@ cdef class Generator: ----- The Python stdlib module `random` contains pseudo-random number generator with a number of methods that are similar to the ones available in - ``Generator``. It uses Mersenne Twister, and this bit generator can - be accessed using ``MT19937``. ``Generator``, besides being + `Generator`. It uses Mersenne Twister, and this bit generator can + be accessed using `MT19937`. `Generator`, besides being NumPy-aware, has the advantage that it provides a much larger number of probability distributions to choose from. @@ -214,17 +214,19 @@ cdef class Generator: # Pickling support: def __getstate__(self): - return self.bit_generator.state + return None - def __setstate__(self, state): - self.bit_generator.state = state + def __setstate__(self, bit_gen): + if isinstance(bit_gen, dict): + # Legacy path + # Prior to 2.0.x only the state of the underlying bit generator + # was preserved and any seed sequence information was lost + self.bit_generator.state = bit_gen def __reduce__(self): - ctor, name_tpl, state = self._bit_generator.__reduce__() - from ._pickle import __generator_ctor - # Requirements of __generator_ctor are (name, ctor) - return __generator_ctor, (name_tpl[0], ctor), state + # Requirements of __generator_ctor are (bit_generator, ) + return __generator_ctor, (self._bit_generator, ), None @property def bit_generator(self): @@ -990,7 +992,7 @@ cdef class Generator: if a.ndim == 0: return idx - if not is_scalar and idx.ndim == 0: + if not is_scalar and idx.ndim == 0 and a.ndim == 1: # If size == () then the user requested a 0-d array as opposed to # a scalar object when size is None. However a[idx] is always a # scalar and not an array. So this makes sure the result is an @@ -5023,11 +5025,11 @@ def default_rng(seed=None): Examples -------- - ``default_rng`` is the recommended constructor for the random number class - ``Generator``. Here are several ways we can construct a random - number generator using ``default_rng`` and the ``Generator`` class. + `default_rng` is the recommended constructor for the random number class + `Generator`. Here are several ways we can construct a random + number generator using `default_rng` and the `Generator` class. - Here we use ``default_rng`` to generate a random float: + Here we use `default_rng` to generate a random float: >>> import numpy as np >>> rng = np.random.default_rng(12345) @@ -5039,7 +5041,7 @@ def default_rng(seed=None): >>> type(rfloat) - Here we use ``default_rng`` to generate 3 random integers between 0 + Here we use `default_rng` to generate 3 random integers between 0 (inclusive) and 10 (exclusive): >>> import numpy as np diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx index 1ebf43faa117..826cb8441ef1 100644 --- a/numpy/random/_mt19937.pyx +++ b/numpy/random/_mt19937.pyx @@ -67,9 +67,9 @@ cdef class MT19937(BitGenerator): Notes ----- - ``MT19937`` provides a capsule containing function pointers that produce + `MT19937` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers [1]_. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. 
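As the cross-references above stress, a bit generator is not used directly: it is wrapped by a `Generator` (or a similar consumer) that turns the raw bit stream into distributions. A small sketch of that composition:

    import numpy as np
    from numpy.random import Generator, MT19937, PCG64

    # The distribution layer is identical; only the bit stream differs.
    g_mt = Generator(MT19937(42))
    g_pcg = Generator(PCG64(42))
    print(g_mt.standard_normal(), g_pcg.standard_normal())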
The Python stdlib module "random" also contains a Mersenne Twister @@ -77,7 +77,7 @@ cdef class MT19937(BitGenerator): **State and Seeding** - The ``MT19937`` state vector consists of a 624-element array of + The `MT19937` state vector consists of a 624-element array of 32-bit unsigned integers plus a single integer value between 0 and 624 that indexes the current position within the main array. @@ -111,7 +111,7 @@ cdef class MT19937(BitGenerator): **Compatibility Guarantee** - ``MT19937`` makes a guarantee that a fixed seed will always produce + `MT19937` makes a guarantee that a fixed seed will always produce the same random integer stream. References diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index 77e2090e72bf..250bf967bba2 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -73,9 +73,9 @@ cdef class PCG64(BitGenerator): The specific member of the PCG family that we use is PCG XSL RR 128/64 as described in the paper ([2]_). - ``PCG64`` provides a capsule containing function pointers that produce + `PCG64` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. Supports the method :meth:`advance` to advance the RNG an arbitrary number of @@ -84,7 +84,7 @@ cdef class PCG64(BitGenerator): **State and Seeding** - The ``PCG64`` state vector consists of 2 unsigned 128-bit values, + The `PCG64` state vector consists of 2 unsigned 128-bit values, which are represented externally as Python ints. One is the state of the PRNG, which is advanced by a linear congruential generator (LCG). The second is a fixed odd increment used in the LCG. @@ -104,7 +104,7 @@ cdef class PCG64(BitGenerator): **Compatibility Guarantee** - ``PCG64`` makes a guarantee that a fixed seed will always produce + `PCG64` makes a guarantee that a fixed seed will always produce the same random integer stream. References @@ -305,13 +305,13 @@ cdef class PCG64DXSM(BitGenerator): generator ([1]_, [2]_). PCG-64 DXSM has a period of :math:`2^{128}` and supports advancing an arbitrary number of steps as well as :math:`2^{127}` streams. The specific member of the PCG family that we use is PCG CM DXSM 128/64. It - differs from ``PCG64`` in that it uses the stronger DXSM output function, + differs from `PCG64` in that it uses the stronger DXSM output function, a 64-bit "cheap multiplier" in the LCG, and outputs from the state before advancing it rather than advance-then-output. - ``PCG64DXSM`` provides a capsule containing function pointers that produce + `PCG64DXSM` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. Supports the method :meth:`advance` to advance the RNG an arbitrary number of @@ -320,7 +320,7 @@ cdef class PCG64DXSM(BitGenerator): **State and Seeding** - The ``PCG64DXSM`` state vector consists of 2 unsigned 128-bit values, + The `PCG64DXSM` state vector consists of 2 unsigned 128-bit values, which are represented externally as Python ints. One is the state of the PRNG, which is advanced by a linear congruential generator (LCG). 
The second is a fixed odd increment used in the LCG. @@ -340,7 +340,7 @@ cdef class PCG64DXSM(BitGenerator): **Compatibility Guarantee** - ``PCG64DXSM`` makes a guarantee that a fixed seed will always produce + `PCG64DXSM` makes a guarantee that a fixed seed will always produce the same random integer stream. References diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index d90da6a9b657..a046d9441fae 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -93,14 +93,14 @@ cdef class Philox(BitGenerator): the sequence in increments of :math:`2^{128}`. These features allow multiple non-overlapping sequences to be generated. - ``Philox`` provides a capsule containing function pointers that produce + `Philox` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. **State and Seeding** - The ``Philox`` state vector consists of a 256-bit value encoded as + The `Philox` state vector consists of a 256-bit value encoded as a 4-element uint64 array and a 128-bit value encoded as a 2-element uint64 array. The former is a counter which is incremented by 1 for every 4 64-bit randoms produced. The second is a key which determined the sequence @@ -122,10 +122,10 @@ cdef class Philox(BitGenerator): >>> sg = SeedSequence(1234) >>> rg = [Generator(Philox(s)) for s in sg.spawn(10)] - ``Philox`` can be used in parallel applications by calling the ``jumped`` - method to advances the state as-if :math:`2^{128}` random numbers have - been generated. Alternatively, ``advance`` can be used to advance the - counter for any positive step in [0, 2**256). When using ``jumped``, all + `Philox` can be used in parallel applications by calling the :meth:`jumped` + method to advance the state as-if :math:`2^{128}` random numbers have + been generated. Alternatively, :meth:`advance` can be used to advance the + counter for any positive step in [0, 2**256). When using :meth:`jumped`, all generators should be chained to ensure that the segments come from the same sequence. @@ -136,7 +136,7 @@ cdef class Philox(BitGenerator): ... rg.append(Generator(bit_generator)) ... bit_generator = bit_generator.jumped() - Alternatively, ``Philox`` can be used in parallel applications by using + Alternatively, `Philox` can be used in parallel applications by using a sequence of distinct keys where each instance uses different key. >>> key = 2**96 + 2**33 + 2**17 + 2**9 @@ -144,7 +144,7 @@ cdef class Philox(BitGenerator): **Compatibility Guarantee** - ``Philox`` makes a guarantee that a fixed ``seed`` will always produce + `Philox` makes a guarantee that a fixed ``seed`` will always produce the same random integer stream. 
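For reference, a runnable consolidation of the two parallel-use patterns the `Philox` docstring describes, chained `jumped()` calls versus distinct keys; the stream counts and key values are chosen arbitrarily here:

    import numpy as np
    from numpy.random import Generator, Philox

    # Pattern 1: chain jumped() so streams start 2**128 draws apart.
    bit_gen = Philox(1234)
    jumped_streams = []
    for _ in range(4):
        jumped_streams.append(Generator(bit_gen))
        bit_gen = bit_gen.jumped()

    # Pattern 2: give every instance its own key.
    keyed_streams = [Generator(Philox(key=2**96 + i)) for i in range(4)]

    print([g.standard_normal() for g in jumped_streams])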
Examples diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py index 073993726eb3..d783a378c24a 100644 --- a/numpy/random/_pickle.py +++ b/numpy/random/_pickle.py @@ -1,3 +1,6 @@ +from __future__ import annotations + +from .bit_generator import BitGenerator from .mtrand import RandomState from ._philox import Philox from ._pcg64 import PCG64, PCG64DXSM @@ -14,27 +17,30 @@ } -def __bit_generator_ctor(bit_generator_name='MT19937'): +def __bit_generator_ctor(bit_generator: str | type[BitGenerator] = 'MT19937'): """ Pickling helper function that returns a bit generator object Parameters ---------- - bit_generator_name : str - String containing the name of the BitGenerator + bit_generator : type[BitGenerator] or str + BitGenerator class or string containing the name of the BitGenerator Returns ------- - bit_generator : BitGenerator + BitGenerator BitGenerator instance """ - if bit_generator_name in BitGenerators: - bit_generator = BitGenerators[bit_generator_name] + if isinstance(bit_generator, type): + bit_gen_class = bit_generator + elif bit_generator in BitGenerators: + bit_gen_class = BitGenerators[bit_generator] else: - raise ValueError(str(bit_generator_name) + ' is not a known ' - 'BitGenerator module.') + raise ValueError( + str(bit_generator) + ' is not a known BitGenerator module.' + ) - return bit_generator() + return bit_gen_class() def __generator_ctor(bit_generator_name="MT19937", @@ -44,8 +50,9 @@ def __generator_ctor(bit_generator_name="MT19937", Parameters ---------- - bit_generator_name : str - String containing the core BitGenerator's name + bit_generator_name : str or BitGenerator + String containing the core BitGenerator's name or a + BitGenerator instance bit_generator_ctor : callable, optional Callable function that takes bit_generator_name as its only argument and returns an instantized bit generator. @@ -55,6 +62,9 @@ def __generator_ctor(bit_generator_name="MT19937", rg : Generator Generator using the named core BitGenerator """ + if isinstance(bit_generator_name, BitGenerator): + return Generator(bit_generator_name) + # Legacy path that uses a bit generator name and ctor return Generator(bit_generator_ctor(bit_generator_name)) @@ -76,5 +86,6 @@ def __randomstate_ctor(bit_generator_name="MT19937", rs : RandomState Legacy RandomState using the named core BitGenerator """ - + if isinstance(bit_generator_name, BitGenerator): + return RandomState(bit_generator_name) return RandomState(bit_generator_ctor(bit_generator_name)) diff --git a/numpy/random/_sfc64.pyx b/numpy/random/_sfc64.pyx index 81a4bc764026..12b48059cef2 100644 --- a/numpy/random/_sfc64.pyx +++ b/numpy/random/_sfc64.pyx @@ -50,30 +50,30 @@ cdef class SFC64(BitGenerator): Notes ----- - ``SFC64`` is a 256-bit implementation of Chris Doty-Humphrey's Small Fast - Chaotic PRNG ([1]_). ``SFC64`` has a few different cycles that one might be + `SFC64` is a 256-bit implementation of Chris Doty-Humphrey's Small Fast + Chaotic PRNG ([1]_). `SFC64` has a few different cycles that one might be on, depending on the seed; the expected period will be about - :math:`2^{255}` ([2]_). ``SFC64`` incorporates a 64-bit counter which means + :math:`2^{255}` ([2]_). `SFC64` incorporates a 64-bit counter which means that the absolute minimum cycle length is :math:`2^{64}` and that distinct seeds will not run into each other for at least :math:`2^{64}` iterations. 
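The widened `__bit_generator_ctor` above is what lets both old pickles (which stored a name string) and new ones (which store the class) unpickle. Its contract, restated against the private helper; the import of a private module is shown only for illustration:

    from numpy.random import PCG64
    from numpy.random._pickle import __bit_generator_ctor as ctor

    # Legacy pickles pass the name; new pickles pass the class itself.
    assert isinstance(ctor("PCG64"), PCG64)
    assert isinstance(ctor(PCG64), PCG64)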
- ``SFC64`` provides a capsule containing function pointers that produce + `SFC64` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. **State and Seeding** - The ``SFC64`` state vector consists of 4 unsigned 64-bit values. The last + The `SFC64` state vector consists of 4 unsigned 64-bit values. The last is a 64-bit counter that increments by 1 each iteration. The input seed is processed by `SeedSequence` to generate the first - 3 values, then the ``SFC64`` algorithm is iterated a small number of times + 3 values, then the `SFC64` algorithm is iterated a small number of times to mix. **Compatibility Guarantee** - ``SFC64`` makes a guarantee that a fixed seed will always produce the same + `SFC64` makes a guarantee that a fixed seed will always produce the same random integer stream. References diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 4556658efff4..d99278e861ea 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -92,11 +92,17 @@ class SeedSequence(ISpawnableSeedSequence): class BitGenerator(abc.ABC): lock: Lock def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... - def __getstate__(self) -> dict[str, Any]: ... - def __setstate__(self, state: dict[str, Any]) -> None: ... + def __getstate__(self) -> tuple[dict[str, Any], ISeedSequence]: ... + def __setstate__( + self, state_seed_seq: dict[str, Any] | tuple[dict[str, Any], ISeedSequence] + ) -> None: ... def __reduce__( self, - ) -> tuple[Callable[[str], BitGenerator], tuple[str], tuple[dict[str, Any]]]: ... + ) -> tuple[ + Callable[[str], BitGenerator], + tuple[str], + tuple[dict[str, Any], ISeedSequence] + ]: ... @abc.abstractmethod @property def state(self) -> Mapping[str, Any]: ... 
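Matching the stub above, `__getstate__` now returns a `(state_dict, seed_sequence)` pair rather than the bare dict. A sketch of what that looks like at runtime, assuming the post-2.0 form:

    import numpy as np

    bg = np.random.PCG64(1234)
    state, seed_seq = bg.__getstate__()   # two-element form

    assert state == bg.state
    assert isinstance(seed_seq, np.random.SeedSequence)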
diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index e49902f5c330..c999e6e32794 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -537,14 +537,27 @@ cdef class BitGenerator(): # Pickling support: def __getstate__(self): - return self.state + return self.state, self._seed_seq - def __setstate__(self, state): - self.state = state + def __setstate__(self, state_seed_seq): + + if isinstance(state_seed_seq, dict): + # Legacy path + # Prior to 2.0.x only the state of the underlying bit generator + # was preserved and any seed sequence information was lost + self.state = state_seed_seq + else: + self._seed_seq = state_seed_seq[1] + self.state = state_seed_seq[0] def __reduce__(self): from ._pickle import __bit_generator_ctor - return __bit_generator_ctor, (self.state['bit_generator'],), self.state + + return ( + __bit_generator_ctor, + (type(self), ), + (self.state, self._seed_seq) + ) @property def state(self): diff --git a/numpy/random/meson.build b/numpy/random/meson.build index 2da23a168b8a..103b07545d65 100644 --- a/numpy/random/meson.build +++ b/numpy/random/meson.build @@ -139,6 +139,9 @@ py.install_sources( 'tests/data/philox-testset-2.csv', 'tests/data/sfc64-testset-1.csv', 'tests/data/sfc64-testset-2.csv', + 'tests/data/sfc64_np126.pkl.gz', + 'tests/data/generator_pcg64_np126.pkl.gz', + 'tests/data/generator_pcg64_np121.pkl.gz', ], subdir: 'numpy/random/tests/data' ) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index dcbc91292647..dbd3cd609495 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -11,12 +11,14 @@ from numpy import ( int16, int32, int64, + int_, long, - ulong, uint8, uint16, uint32, uint64, + uint, + ulong, ) from numpy.random.bit_generator import BitGenerator from numpy._typing import ( @@ -34,6 +36,7 @@ from numpy._typing import ( _Int16Codes, _Int32Codes, _Int64Codes, + _IntCodes, _LongCodes, _ShapeLike, _SingleCodes, @@ -42,6 +45,7 @@ from numpy._typing import ( _UInt16Codes, _UInt32Codes, _UInt64Codes, + _UIntCodes, _ULongCodes, ) @@ -69,7 +73,7 @@ class RandomState: def __str__(self) -> str: ... def __getstate__(self) -> dict[str, Any]: ... def __setstate__(self, state: dict[str, Any]) -> None: ... - def __reduce__(self) -> tuple[Callable[[str], RandomState], tuple[str], dict[str, Any]]: ... + def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ... @overload def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... @@ -114,6 +118,7 @@ class RandomState: self, low: int, high: None | int = ..., + size: None = ..., ) -> int: ... @overload def randint( # type: ignore[misc] @@ -124,6 +129,14 @@ class RandomState: dtype: type[bool] = ..., ) -> bool: ... @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: type[np.bool] = ..., + ) -> np.bool: ... + @overload def randint( # type: ignore[misc] self, low: int, @@ -132,6 +145,102 @@ class RandomState: dtype: type[int] = ..., ) -> int: ... @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + ) -> uint8: ... 
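The `isinstance(state_seed_seq, dict)` branch above keeps pre-2.0 pickles loadable: a bare state dict restores the stream but cannot restore a seed sequence. Illustrated directly, calling `__setstate__` by hand only for demonstration:

    import numpy as np

    bg = np.random.PCG64(0)
    legacy = bg.state            # plain dict, what old pickles carried

    bg2 = np.random.PCG64()
    bg2.__setstate__(legacy)     # legacy path: dict restores state only
    assert bg2.state == legacy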
+ @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + ) -> uint16: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + ) -> uint32: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., + ) -> uint: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., + ) -> ulong: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + ) -> uint64: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + ) -> int8: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + ) -> int16: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + ) -> int32: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., + ) -> int_: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., + ) -> long: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + ) -> int64: ... + @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index d67e4533f663..b42b0a7764b8 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -205,10 +205,13 @@ cdef class RandomState: self.set_state(state) def __reduce__(self): - ctor, name_tpl, _ = self._bit_generator.__reduce__() from ._pickle import __randomstate_ctor - return __randomstate_ctor, (name_tpl[0], ctor), self.get_state(legacy=False) + # The third argument containing the state is required here since + # RandomState contains state information (e.g. the cached Gaussian + # value) in addition to the state contained in the bit generator. + # This argument is passed to __setstate__ after the RandomState is + # created.
+ return __randomstate_ctor, (self._bit_generator, ), self.get_state(legacy=False) cdef _initialize_bit_generator(self, bit_generator): self._bit_generator = bit_generator diff --git a/numpy/random/tests/data/generator_pcg64_np121.pkl.gz b/numpy/random/tests/data/generator_pcg64_np121.pkl.gz new file mode 100644 index 000000000000..b7ad03d8e63b Binary files /dev/null and b/numpy/random/tests/data/generator_pcg64_np121.pkl.gz differ diff --git a/numpy/random/tests/data/generator_pcg64_np126.pkl.gz b/numpy/random/tests/data/generator_pcg64_np126.pkl.gz new file mode 100644 index 000000000000..6c5130b5e745 Binary files /dev/null and b/numpy/random/tests/data/generator_pcg64_np126.pkl.gz differ diff --git a/numpy/random/tests/data/sfc64_np126.pkl.gz b/numpy/random/tests/data/sfc64_np126.pkl.gz new file mode 100644 index 000000000000..94fbceb38f92 Binary files /dev/null and b/numpy/random/tests/data/sfc64_np126.pkl.gz differ diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index fa2ae866beeb..12c2f1d5ab57 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -298,6 +298,24 @@ def test_pickle(self): aa = pickle.loads(pickle.dumps(ss)) assert_equal(ss.state, aa.state) + def test_pickle_preserves_seed_sequence(self): + # GH 26234 + # Add explicit test that bit generators preserve seed sequences + import pickle + + bit_generator = self.bit_generator(*self.data1['seed']) + ss = bit_generator.seed_seq + bg_plk = pickle.loads(pickle.dumps(bit_generator)) + ss_plk = bg_plk.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.pool, ss_plk.pool) + + bit_generator.seed_seq.spawn(10) + bg_plk = pickle.loads(pickle.dumps(bit_generator)) + ss_plk = bg_plk.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.n_children_spawned, ss_plk.n_children_spawned) + def test_invalid_state_type(self): bit_generator = self.bit_generator(*self.data1['seed']) with pytest.raises(TypeError): @@ -349,8 +367,9 @@ def test_getstate(self): bit_generator = self.bit_generator(*self.data1['seed']) state = bit_generator.state alt_state = bit_generator.__getstate__() - assert_state_equal(state, alt_state) - + assert isinstance(alt_state, tuple) + assert_state_equal(state, alt_state[0]) + assert isinstance(alt_state[1], SeedSequence) class TestPhilox(Base): @classmethod @@ -413,6 +432,7 @@ def test_advange_large(self): assert state["state"] == advanced_state + class TestPCG64DXSM(Base): @classmethod def setup_class(cls): @@ -502,6 +522,29 @@ def setup_class(cls): cls.invalid_init_types = [(3.2,), ([None],), (1, None)] cls.invalid_init_values = [(-1,)] + def test_legacy_pickle(self): + # Pickling format was changed in 2.0.x + import gzip + import pickle + + expected_state = np.array( + [ + 9957867060933711493, + 532597980065565856, + 14769588338631205282, + 13 + ], + dtype=np.uint64 + ) + + base_path = os.path.split(os.path.abspath(__file__))[0] + pkl_file = os.path.join(base_path, "data", "sfc64_np126.pkl.gz") + with gzip.open(pkl_file) as gz: + sfc = pickle.load(gz) + + assert isinstance(sfc, SFC64) + assert_equal(sfc.state["state"]["state"], expected_state) + class TestDefaultRNG: def test_seed(self): diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index a0bee225d20b..514f9af2ce8c 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1,3 +1,4 @@ +import os.path import sys import hashlib @@ -932,6 +933,15 @@ def
test_choice_large_sample(self): res = hashlib.sha256(actual.view(np.int8)).hexdigest() assert_(choice_hash == res) + def test_choice_array_size_empty_tuple(self): + random = Generator(MT19937(self.seed)) + assert_array_equal(random.choice([1, 2, 3], size=()), np.array(1), + strict=True) + assert_array_equal(random.choice([[1, 2, 3]], size=()), [1, 2, 3]) + assert_array_equal(random.choice([[1]], size=()), [1], strict=True) + assert_array_equal(random.choice([[1]], size=(), axis=1), [1], + strict=True) + def test_bytes(self): random = Generator(MT19937(self.seed)) actual = random.bytes(10) @@ -2738,10 +2748,48 @@ def test_generator_ctor_old_style_pickle(): rg = np.random.Generator(np.random.PCG64DXSM(0)) rg.standard_normal(1) # Directly call reduce which is used in pickling - ctor, args, state_a = rg.__reduce__() + ctor, (bit_gen, ), _ = rg.__reduce__() # Simulate unpickling an old pickle that only has the name - assert args[:1] == ("PCG64DXSM",) - b = ctor(*args[:1]) - b.bit_generator.state = state_a + assert bit_gen.__class__.__name__ == "PCG64DXSM" + b = ctor(*("PCG64DXSM",)) + b.bit_generator.state = bit_gen.state state_b = b.bit_generator.state - assert state_a == state_b + assert bit_gen.state == state_b + + +def test_pickle_preserves_seed_sequence(): + # GH 26234 + # Add explicit test that bit generators preserve seed sequences + import pickle + + rg = np.random.Generator(np.random.PCG64DXSM(20240411)) + ss = rg.bit_generator.seed_seq + rg_plk = pickle.loads(pickle.dumps(rg)) + ss_plk = rg_plk.bit_generator.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.pool, ss_plk.pool) + + rg.bit_generator.seed_seq.spawn(10) + rg_plk = pickle.loads(pickle.dumps(rg)) + ss_plk = rg_plk.bit_generator.seed_seq + assert_equal(ss.state, ss_plk.state) + + +@pytest.mark.parametrize("version", [121, 126]) +def test_legacy_pickle(version): + # Pickling format was changed in 1.22.x and in 2.0.x + import pickle + import gzip + + base_path = os.path.split(os.path.abspath(__file__))[0] + pkl_file = os.path.join( + base_path, "data", f"generator_pcg64_np{version}.pkl.gz" + ) + with gzip.open(pkl_file) as gz: + rg = pickle.load(gz) + state = rg.bit_generator.state['state'] + + assert isinstance(rg, Generator) + assert isinstance(rg.bit_generator, np.random.PCG64) + assert state['state'] == 35399562948360463058890781895381311971 + assert state['inc'] == 87136372517582989555478159403783844777 diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index aa24936bae2b..5121a684f693 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -2052,8 +2052,8 @@ def test_randomstate_ctor_old_style_pickle(): # Directly call reduce which is used in pickling ctor, args, state_a = rs.__reduce__() # Simulate unpickling an old pickle that only has the name - assert args[:1] == ("MT19937",) - b = ctor(*args[:1]) + assert args[0].__class__.__name__ == "MT19937" + b = ctor(*("MT19937",)) b.set_state(state_a) state_b = b.get_state(legacy=False) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index bae98964f9d4..8e33f319b11f 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1949,8 +1949,15 @@ def assert_warns(warning_class, *args, **kwargs): >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) >>> assert ret == 16 """ - if not args: + if not args and not kwargs: return _assert_warns_context(warning_class) + elif len(args) <
1: + if "match" in kwargs: + raise RuntimeError( + "assert_warns does not use 'match' kwarg, " + "use pytest.warns instead" + ) + raise RuntimeError("assert_warns(...) needs at least one arg") func = args[0] args = args[1:] diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 36f9c1617f44..247bbeaec6f7 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -1035,6 +1035,27 @@ def no_warnings(): assert_equal(before_filters, after_filters, "assert_warns does not preserver warnings state") + def test_args(self): + def f(a=0, b=1): + warnings.warn("yo") + return a + b + + assert assert_warns(UserWarning, f, b=20) == 20 + + with pytest.raises(RuntimeError) as exc: + # assert_warns cannot do regexp matching, use pytest.warns + with assert_warns(UserWarning, match="A"): + warnings.warn("B", UserWarning) + assert "assert_warns" in str(exc) + assert "pytest.warns" in str(exc) + + with pytest.raises(RuntimeError) as exc: + # unknown kwargs also raise, but without the pytest.warns hint + with assert_warns(UserWarning, wrong="A"): + warnings.warn("B", UserWarning) + assert "assert_warns" in str(exc) + assert "pytest.warns" not in str(exc) + def test_warn_wrong_warning(self): def f(): warnings.warn("yo", DeprecationWarning) diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py index 61643426c8d7..d3abcb92c1c3 100644 --- a/numpy/tests/test_numpy_version.py +++ b/numpy/tests/test_numpy_version.py @@ -39,3 +39,16 @@ def test_short_version(): else: assert_(np.__version__.split("+")[0] == np.version.short_version, "short_version mismatch in development version") + + +def test_version_module(): + contents = set([s for s in dir(np.version) if not s.startswith('_')]) + expected = set([ + 'full_version', + 'git_revision', + 'release', + 'short_version', + 'version', + ]) + + assert contents == expected diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 780e1fccb79e..618223705937 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -114,14 +114,14 @@ def test_NPY_NO_EXPORT(): "f2py", "fft", "lib", - "lib.format", # was this meant to be public?
+ "lib.array_utils", + "lib.format", + "lib.introspect", "lib.mixins", - "lib.recfunctions", + "lib.npyio", + "lib.recfunctions", # note: still needs cleaning, was forgotten for 2.0 "lib.scimath", "lib.stride_tricks", - "lib.npyio", - "lib.introspect", - "lib.array_utils", "linalg", "ma", "ma.extras", @@ -134,11 +134,12 @@ def test_NPY_NO_EXPORT(): "polynomial.legendre", "polynomial.polynomial", "random", + "strings", "testing", "testing.overrides", "typing", "typing.mypy_plugin", - "version" # Should be removed for NumPy 2.0 + "version", ]] if sys.version_info < (3, 12): PUBLIC_MODULES += [ @@ -158,7 +159,6 @@ def test_NPY_NO_EXPORT(): "numpy.char", "numpy.emath", "numpy.rec", - "numpy.strings", ] @@ -535,7 +535,7 @@ def test_core_shims_coherence(): # no need to add it to np.core if ( member_name.startswith("_") - or member_name == "tests" + or member_name in ["tests", "strings"] or f"numpy.{member_name}" in PUBLIC_ALIASED_MODULES ): continue diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py new file mode 100644 index 000000000000..6b3b138119bb --- /dev/null +++ b/numpy/typing/tests/data/pass/ma.py @@ -0,0 +1,8 @@ +from typing import Any + +import numpy as np +import numpy.ma + + +m : np.ma.MaskedArray[Any, np.dtype[np.float64]] = np.ma.masked_array([1.5, 2, 3], mask=[True, False, True]) + diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index 40b88ce4dfe4..69afb28c48ec 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -911,9 +911,7 @@ def_gen.__str__() def_gen.__repr__() -def_gen_state: dict[str, Any] -def_gen_state = def_gen.__getstate__() -def_gen.__setstate__(def_gen_state) +def_gen.__setstate__(dict(def_gen.bit_generator.state)) # RandomState random_st: np.random.RandomState = np.random.RandomState() diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index 7b8931f607eb..53caf7ff817d 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -5,6 +5,7 @@ import numpy as np b = np.bool() +b_ = np.bool_() u8 = np.uint64() i8 = np.int64() f8 = np.float64() @@ -121,7 +122,7 @@ def __float__(self) -> float: u8 = np.uint64() f8 = np.float64() c16 = np.complex128() -b_ = np.bool() +b = np.bool() td = np.timedelta64() U = np.str_("1") S = np.bytes_("1") @@ -130,7 +131,7 @@ def __float__(self) -> float: int(i8) int(u8) int(f8) -int(b_) +int(b) int(td) int(U) int(S) diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index 091aa7e5ab06..9f094ba72e3c 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -48,6 +48,7 @@ assert_type(np.ScalarType[0], type[int]) assert_type(np.ScalarType[3], type[bool]) assert_type(np.ScalarType[8], type[np.csingle]) assert_type(np.ScalarType[10], type[np.clongdouble]) +assert_type(np.bool_, type[np.bool]) assert_type(np.typecodes["Character"], Literal["c"]) assert_type(np.typecodes["Complex"], Literal["FDG"]) diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index 42a24936b903..b31b4b56f870 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -530,12 +530,10 @@ assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), 
npt.NDArray[np.bool]) assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -# TODO: Commented out tests are currently incorrectly typed as arrays rather -# than scalars. -#assert_type(def_gen.integers(2, dtype=np.bool), np.bool) -#assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) -#assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) -#assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) +assert_type(def_gen.integers(2, dtype=np.bool), np.bool) +assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) +assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) +assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) @@ -549,10 +547,10 @@ I_u1_low_like: list[int] = [0] I_u1_high_open: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) I_u1_high_closed: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) -# assert_type(def_gen.integers(256, dtype="u1"), np.uint8) -# assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) -# assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) -# assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) +assert_type(def_gen.integers(256, dtype="u1"), np.uint8) +assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) +assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) assert_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) @@ -561,10 +559,10 @@ assert_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True), npt.N assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -# assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) -# assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) -# assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) -# assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) +assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) +assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) +assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) assert_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) @@ -573,10 +571,10 @@ assert_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True), np assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -# 
assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) -# assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) -# assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) -# assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) +assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) +assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) +assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) @@ -590,10 +588,10 @@ I_u2_low_like: list[int] = [0] I_u2_high_open: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) I_u2_high_closed: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) -# assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) -# assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) -# assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) -# assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) +assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) +assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) @@ -602,10 +600,10 @@ assert_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True), npt.N assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -# assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) -# assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) -# assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) -# assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) +assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) +assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) @@ -614,10 +612,10 @@ assert_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True), n assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -# assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) -# assert_type(def_gen.integers(0, 65536, 
dtype=np.uint16), np.uint16) -# assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) -# assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) +assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16) +assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) @@ -631,10 +629,10 @@ I_u4_low_like: list[int] = [0] I_u4_high_open: npt.NDArray[np.uint32] = np.array([4294967295], dtype=np.uint32) I_u4_high_closed: npt.NDArray[np.uint32] = np.array([4294967295], dtype=np.uint32) -# assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) -# assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_) -# assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) -# assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) +assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) +assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_) +assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) +assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) @@ -644,10 +642,10 @@ assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -# assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) -# assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) -# assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) -# assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) +assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) +assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) @@ -656,10 +654,10 @@ assert_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True), npt.N assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -# assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) -# assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) -# assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) -# 
assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) +assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) +assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) @@ -668,10 +666,10 @@ assert_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True), n assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -# assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) -# assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32) -# assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32) -# assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) +assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32) +assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) @@ -680,10 +678,10 @@ assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -# assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) -# assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint) -# assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) -# assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) +assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) +assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint) +assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) +assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) @@ -697,10 +695,10 @@ I_u8_low_like: list[int] = [0] I_u8_high_open: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) I_u8_high_closed: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) -# assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64) -# assert_type(def_gen.integers(0, 
18446744073709551616, dtype="u8"), np.uint64) -# assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64) +assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64) assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) @@ -709,10 +707,10 @@ assert_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True), npt.N assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -# assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) -# assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) +assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) @@ -721,10 +719,10 @@ assert_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True), n assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -# assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64) -# assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) +assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), 
npt.NDArray[np.uint64]) @@ -738,10 +736,10 @@ I_i1_low_like: list[int] = [-128] I_i1_high_open: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) I_i1_high_closed: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) -# assert_type(def_gen.integers(128, dtype="i1"), np.int8) -# assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) -# assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) -# assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8) +assert_type(def_gen.integers(128, dtype="i1"), np.int8) +assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) +assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8) assert_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) @@ -750,10 +748,10 @@ assert_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True), npt.N assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -# assert_type(def_gen.integers(128, dtype="int8"), np.int8) -# assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) -# assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) -# assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) +assert_type(def_gen.integers(128, dtype="int8"), np.int8) +assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) +assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) assert_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) @@ -762,10 +760,10 @@ assert_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True), npt assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -# assert_type(def_gen.integers(128, dtype=np.int8), np.int8) -# assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) -# assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) -# assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8) +assert_type(def_gen.integers(128, dtype=np.int8), np.int8) +assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) +assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8) assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) @@ -779,10 +777,10 @@ I_i2_low_like: list[int] = [-32768] I_i2_high_open: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) I_i2_high_closed: npt.NDArray[np.int16] = np.array([32767], 
dtype=np.int16) -# assert_type(def_gen.integers(32768, dtype="i2"), np.int16) -# assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) -# assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16) -# assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) +assert_type(def_gen.integers(32768, dtype="i2"), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) +assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) @@ -791,10 +789,10 @@ assert_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True), npt.N assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -# assert_type(def_gen.integers(32768, dtype="int16"), np.int16) -# assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) -# assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) -# assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) +assert_type(def_gen.integers(32768, dtype="int16"), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) +assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) @@ -803,10 +801,10 @@ assert_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True), np assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -# assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) -# assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) -# assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), np.int16) -# assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16) +assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) +assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16) assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) @@ -820,10 +818,10 @@ I_i4_low_like: list[int] = [-2147483648] I_i4_high_open: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) I_i4_high_closed: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) -# 
assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) -# assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) +assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) +assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) @@ -832,10 +830,10 @@ assert_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True), npt.N assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -# assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32) -# assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) +assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32) +assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) @@ -844,10 +842,10 @@ assert_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True), np assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -# assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) -# assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) +assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) +assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) @@ -861,10 +859,10 @@ I_i8_low_like: list[int] = [-9223372036854775808] 
I_i8_high_open: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) I_i8_high_closed: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) -# assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) -# assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) +assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) @@ -873,10 +871,10 @@ assert_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True), npt.N assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -# assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) -# assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) +assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) @@ -885,10 +883,10 @@ assert_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True), np assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -# assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) -# assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) +assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) 
+assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) @@ -955,9 +953,7 @@ assert_type(def_gen.shuffle(D_2D, axis=1), None) assert_type(np.random.Generator(pcg64), np.random.Generator) assert_type(def_gen.__str__(), str) assert_type(def_gen.__repr__(), str) -def_gen_state = def_gen.__getstate__() -assert_type(def_gen_state, dict[str, Any]) -assert_type(def_gen.__setstate__(def_gen_state), None) +assert_type(def_gen.__setstate__(dict(def_gen.bit_generator.state)), None) # RandomState random_st: np.random.RandomState = np.random.RandomState() @@ -1324,163 +1320,164 @@ assert_type(random_st.randint(I_bool_high_open, dtype=bool), npt.NDArray[np.bool assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -# TODO: Commented out type incorrectly indicates an array return: -# assert_type(random_st.randint(2, dtype=np.bool), np.bool) -# assert_type(random_st.randint(0, 2, dtype=np.bool), np.bool) +assert_type(random_st.randint(2, dtype=np.bool), np.bool) +assert_type(random_st.randint(0, 2, dtype=np.bool), np.bool) assert_type(random_st.randint(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -# assert_type(random_st.randint(256, dtype="u1"), np.uint16) -# assert_type(random_st.randint(0, 256, dtype="u1"), np.uint16) +assert_type(random_st.randint(256, dtype="u1"), np.uint8) +assert_type(random_st.randint(0, 256, dtype="u1"), np.uint8) assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -# assert_type(random_st.randint(256, dtype="uint8"), np.uint16) -# assert_type(random_st.randint(0, 256, dtype="uint8"), np.uint16) +assert_type(random_st.randint(256, dtype="uint8"), np.uint8) +assert_type(random_st.randint(0, 256, dtype="uint8"), np.uint8) assert_type(random_st.randint(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -# assert_type(random_st.randint(256, dtype=np.uint8), np.uint16) -# assert_type(random_st.randint(0, 256, dtype=np.uint8), np.uint16) +assert_type(random_st.randint(256, dtype=np.uint8), np.uint8) +assert_type(random_st.randint(0, 256, dtype=np.uint8), np.uint8) assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -# assert_type(random_st.randint(65536, dtype="u2"), np.uint16) -# assert_type(random_st.randint(0, 65536, 
dtype="u2"), np.uint16) +assert_type(random_st.randint(65536, dtype="u2"), np.uint16) +assert_type(random_st.randint(0, 65536, dtype="u2"), np.uint16) assert_type(random_st.randint(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -# assert_type(random_st.randint(65536, dtype="uint16"), np.uint16) -# assert_type(random_st.randint(0, 65536, dtype="uint16"), np.uint16) +assert_type(random_st.randint(65536, dtype="uint16"), np.uint16) +assert_type(random_st.randint(0, 65536, dtype="uint16"), np.uint16) assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -# assert_type(random_st.randint(65536, dtype=np.uint16), np.uint16) -# assert_type(random_st.randint(0, 65536, dtype=np.uint16), np.uint16) +assert_type(random_st.randint(65536, dtype=np.uint16), np.uint16) +assert_type(random_st.randint(0, 65536, dtype=np.uint16), np.uint16) assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -# assert_type(random_st.randint(4294967296, dtype="u4"), np.uint32) -# assert_type(random_st.randint(0, 4294967296, dtype="u4"), np.uint32) +assert_type(random_st.randint(4294967296, dtype="u4"), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype="u4"), np.uint32) assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) assert_type(random_st.randint(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -# assert_type(random_st.randint(4294967296, dtype="uint32"), np.uint32) -# assert_type(random_st.randint(0, 4294967296, dtype="uint32"), np.uint32) +assert_type(random_st.randint(4294967296, dtype="uint32"), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype="uint32"), np.uint32) assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -# assert_type(random_st.randint(4294967296, dtype=np.uint32), np.uint32) -# assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), np.uint32) +assert_type(random_st.randint(4294967296, dtype=np.uint32), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), np.uint32) assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -# assert_type(random_st.randint(4294967296, dtype=np.uint), np.uint) -# assert_type(random_st.randint(0, 4294967296, dtype=np.uint), np.uint) +assert_type(random_st.randint(4294967296, dtype=np.uint), np.uint) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint), np.uint) assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), 
npt.NDArray[np.uint]) assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -# assert_type(random_st.randint(18446744073709551616, dtype="u8"), np.uint64) -# assert_type(random_st.randint(0, 18446744073709551616, dtype="u8"), np.uint64) +assert_type(random_st.randint(18446744073709551616, dtype="u8"), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype="u8"), np.uint64) assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -# assert_type(random_st.randint(18446744073709551616, dtype="uint64"), np.uint64) -# assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), np.uint64) +assert_type(random_st.randint(18446744073709551616, dtype="uint64"), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), np.uint64) assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -# assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), np.uint64) -# assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), np.uint64) assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -# assert_type(random_st.randint(128, dtype="i1"), np.int8) -# assert_type(random_st.randint(-128, 128, dtype="i1"), np.int8) +assert_type(random_st.randint(128, dtype="i1"), np.int8) +assert_type(random_st.randint(-128, 128, dtype="i1"), np.int8) assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -# assert_type(random_st.randint(128, dtype="int8"), np.int8) -# assert_type(random_st.randint(-128, 128, dtype="int8"), np.int8) +assert_type(random_st.randint(128, dtype="int8"), np.int8) +assert_type(random_st.randint(-128, 128, dtype="int8"), np.int8) assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -# assert_type(random_st.randint(128, dtype=np.int8), np.int8) -# assert_type(random_st.randint(-128, 128, dtype=np.int8), np.int8) +assert_type(random_st.randint(128, dtype=np.int8), np.int8) +assert_type(random_st.randint(-128, 128, dtype=np.int8), np.int8) assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), 
npt.NDArray[np.int8]) -# assert_type(random_st.randint(32768, dtype="i2"), np.int16) -# assert_type(random_st.randint(-32768, 32768, dtype="i2"), np.int16) +assert_type(random_st.randint(32768, dtype="i2"), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype="i2"), np.int16) assert_type(random_st.randint(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -# assert_type(random_st.randint(32768, dtype="int16"), np.int16) -# assert_type(random_st.randint(-32768, 32768, dtype="int16"), np.int16) + +assert_type(random_st.randint(32768, dtype="int16"), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype="int16"), np.int16) assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -# assert_type(random_st.randint(32768, dtype=np.int16), np.int16) -# assert_type(random_st.randint(-32768, 32768, dtype=np.int16), np.int16) + +assert_type(random_st.randint(32768, dtype=np.int16), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype=np.int16), np.int16) assert_type(random_st.randint(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -# assert_type(random_st.randint(2147483648, dtype="i4"), np.int32) -# assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), np.int32) +assert_type(random_st.randint(2147483648, dtype="i4"), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), np.int32) assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -# assert_type(random_st.randint(2147483648, dtype="int32"), np.int32) -# assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), np.int32) +assert_type(random_st.randint(2147483648, dtype="int32"), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), np.int32) assert_type(random_st.randint(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -# assert_type(random_st.randint(2147483648, dtype=np.int32), np.int32) -# assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), np.int32) +assert_type(random_st.randint(2147483648, dtype=np.int32), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), np.int32) assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -# assert_type(random_st.randint(2147483648, dtype=np.int_), np.int_) -# assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_), np.int_) 
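[Reviewer note] The reveal-style assertions above are never executed; they are
verified statically by the type checker pinned later in this patch. A minimal,
hypothetical sketch of the pattern (not part of the patch; assumes NumPy 2.0
and typing_extensions>=4.2 are installed):

    import numpy as np
    import numpy.typing as npt
    from typing_extensions import assert_type  # no-op at runtime

    random_st = np.random.RandomState()

    # Scalar bounds resolve to the scalar overload of randint() ...
    assert_type(random_st.randint(65536, dtype="u2"), np.uint16)
    # ... while array-like bounds resolve to the NDArray overload.
    high: npt.NDArray[np.uint16] = np.array([10, 20], dtype=np.uint16)
    assert_type(random_st.randint(high, dtype="u2"), npt.NDArray[np.uint16])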
diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi
index 47c08997a0e3..95775e9a8dbe 100644
--- a/numpy/typing/tests/data/reveal/scalars.pyi
+++ b/numpy/typing/tests/data/reveal/scalars.pyi
@@ -50,6 +50,7 @@ assert_type(V[["field1", "field2"]], np.void)
 V[0] = 5
 
 # Aliases
+assert_type(np.bool_(), np.bool)
 assert_type(np.byte(), np.byte)
 assert_type(np.short(), np.short)
 assert_type(np.intc(), np.intc)
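[Reviewer note] The scalars.pyi addition above pins down the NumPy 2.0 naming:
np.bool_ is kept as an alias of the reintroduced np.bool. A quick runtime
illustration (not part of the patch; assumes NumPy 2.0):

    import numpy as np

    assert np.bool_ is np.bool          # one scalar type, two names
    assert type(np.bool_()) is np.bool  # default-constructed scalar
    assert np.bool_() == np.False_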
diff --git a/pavement.py b/pavement.py
index 3a52db2e6555..d385f31eb138 100644
--- a/pavement.py
+++ b/pavement.py
@@ -38,7 +38,7 @@
 #-----------------------------------
 # Path to the release notes
-RELEASE_NOTES = 'doc/source/release/2.0.0-notes.rst'
+RELEASE_NOTES = 'doc/source/release/2.0.3-notes.rst'
 #-------------------------------------------------------
@@ -50,70 +50,6 @@ installersdir=os.path.join("release", "installers")),)
-#------------------------
-# Get the release version
-#------------------------
-
-sys.path.insert(0, os.path.dirname(__file__))
-try:
-    from setup import FULLVERSION
-finally:
-    sys.path.pop(0)
-
-
-#--------------------------
-# Source distribution stuff
-#--------------------------
-def tarball_name(ftype='gztar'):
-    """Generate source distribution name
-
-    Parameters
-    ----------
-    ftype : {'zip', 'gztar'}
-        Type of archive, default is 'gztar'.
-
-    """
-    root = f'numpy-{FULLVERSION}'
-    if ftype == 'gztar':
-        return root + '.tar.gz'
-    elif ftype == 'zip':
-        return root + '.zip'
-    raise ValueError(f"Unknown type {type}")
-
-
-@task
-def sdist(options):
-    """Make source distributions.
-
-    Parameters
-    ----------
-    options :
-        Set by ``task`` decorator.
-
-    """
-    # First clean the repo and update submodules (for up-to-date doc html theme
-    # and Sphinx extensions)
-    sh('git clean -xdf')
-    sh('git submodule init')
-    sh('git submodule update')
-
-    # To be sure to bypass paver when building sdist... paver + numpy.distutils
-    # do not play well together.
-    # Cython is run over all Cython files in setup.py, so generated C files
-    # will be included.
-    sh('python3 setup.py sdist --formats=gztar,zip')
-
-    # Copy the superpack into installers dir
-    idirs = options.installers.installersdir
-    if not os.path.exists(idirs):
-        os.makedirs(idirs)
-
-    for ftype in ['gztar', 'zip']:
-        source = os.path.join('dist', tarball_name(ftype))
-        target = os.path.join(idirs, tarball_name(ftype))
-        shutil.copy(source, target)
-
-
 #-------------
 # README stuff
 #-------------
diff --git a/pyproject.toml b/pyproject.toml
index 036137c36da7..fd3549db27c8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,7 @@ requires = [
 
 [project]
 name = "numpy"
-version = "2.0.0.dev0"
+version = "2.0.3"
 # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88)
 license = {file = "LICENSE.txt"}
@@ -199,8 +199,10 @@ cli = 'vendored-meson/meson/meson.py'
     ".spin/cmds.py:lint",
 ]
 "Environments" = [
-    "spin.cmds.meson.run", ".spin/cmds.py:ipython",
-    ".spin/cmds.py:python", "spin.cmds.meson.gdb",
+    "spin.cmds.meson.run",
+    ".spin/cmds.py:ipython",
+    ".spin/cmds.py:python",
+    "spin.cmds.meson.gdb",
     "spin.cmds.meson.lldb"
 ]
 "Documentation" = [
diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt
index 0484e5084474..1e2d5e804df3 100644
--- a/requirements/ci32_requirements.txt
+++ b/requirements/ci32_requirements.txt
@@ -1,4 +1,3 @@
 spin
 # Keep this in sync with ci_requirements.txt
-scipy-openblas32==0.3.26.0.4
-
+scipy-openblas32==0.3.27.44.4
diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt
index 9ac795a626a6..ebf1a7dbd4dc 100644
--- a/requirements/ci_requirements.txt
+++ b/requirements/ci_requirements.txt
@@ -1,4 +1,4 @@
 spin
 # Keep this in sync with ci32_requirements.txt
-scipy-openblas32==0.3.26.0.4
-scipy-openblas64==0.3.26.0.4
+scipy-openblas32==0.3.27.44.4
+scipy-openblas64==0.3.27.44.4
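[Reviewer note] Both CI requirement files carry "Keep this in sync" comments,
and this patch bumps the scipy-openblas pins in both together. A hypothetical
helper (openblas_pins is not part of the repo) that would catch a missed sync:

    import re
    from pathlib import Path

    def openblas_pins(path: str) -> set[str]:
        # Collect the pinned scipy-openblas versions from a requirements file.
        text = Path(path).read_text()
        return set(re.findall(r"scipy-openblas\d+==([\d.]+)", text))

    assert openblas_pins("requirements/ci_requirements.txt") == \
           openblas_pins("requirements/ci32_requirements.txt")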
diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt
index a642de83b4e3..7dfb228c83f1 100644
--- a/requirements/doc_requirements.txt
+++ b/requirements/doc_requirements.txt
@@ -1,7 +1,7 @@
 # doxygen required, use apt-get or dnf
-sphinx>=4.5.0
+sphinx==7.2.6
 numpydoc==1.4
-pydata-sphinx-theme==0.13.3
+pydata-sphinx-theme>=0.15.2
 sphinx-design
 scipy
 matplotlib
diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt
index 7352f230bb3a..4e53f86d355c 100644
--- a/requirements/test_requirements.txt
+++ b/requirements/test_requirements.txt
@@ -15,7 +15,7 @@ cffi; python_version < '3.10'
 # For testing types. Notes on the restrictions:
 # - Mypy relies on C API features not present in PyPy
 # NOTE: Keep mypy in sync with environment.yml
-mypy==1.7.1; platform_python_implementation != "PyPy"
+mypy==1.10.0; platform_python_implementation != "PyPy"
 typing_extensions>=4.2.0
 # for optional f2py encoding detection
 charset-normalizer
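[Reviewer note] The mypy pin above moves from 1.7.1 to 1.10.0, presumably what
unblocks the previously commented-out randint() assertions. A hypothetical
local spot-check through mypy's Python API, which returns a (stdout, stderr,
exit_status) tuple; the actual numpy typing test runner may invoke mypy
differently:

    from mypy import api  # requires the mypy pinned above

    stdout, stderr, exit_status = api.run(
        ["numpy/typing/tests/data/reveal/random.pyi"]
    )
    print(stdout)
    assert exit_status == 0, "reveal expectations diverge from the stubs"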
diff --git a/tools/ci/array-api-skips.txt b/tools/ci/array-api-skips.txt
index fec7750098c5..44b9ec3b0a90 100644
--- a/tools/ci/array-api-skips.txt
+++ b/tools/ci/array-api-skips.txt
@@ -1,50 +1,22 @@
-# 'unique_inverse' output array is 1-D for 0-D input
-array_api_tests/test_set_functions.py::test_unique_all
-array_api_tests/test_set_functions.py::test_unique_inverse
-
-# https://github.com/numpy/numpy/issues/21213
-array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity]
-array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0]
-# noted diversions from spec
-array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
-array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
-array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
-array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
-array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
-array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
-array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
-array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
-array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
-array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
-array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
-array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
-array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
-array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
-array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
-array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
-array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
-array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
-
-# fft test suite is buggy as of 83f0bcdc
-array_api_tests/test_fft.py
-
 # finfo return type misalignment
 array_api_tests/test_data_type_functions.py::test_finfo[float32]
 
-# a few misalignments
-array_api_tests/test_operators_and_elementwise_functions.py
-array_api_tests/test_signatures.py::test_func_signature[std]
-array_api_tests/test_signatures.py::test_func_signature[var]
-array_api_tests/test_signatures.py::test_func_signature[asarray]
-array_api_tests/test_signatures.py::test_func_signature[reshape]
-array_api_tests/test_signatures.py::test_array_method_signature[__array_namespace__]
+# out.dtype=float32, but should be int16
+# dtype('float16') not found
+array_api_tests/test_operators_and_elementwise_functions.py::test_ceil
+array_api_tests/test_operators_and_elementwise_functions.py::test_floor
+array_api_tests/test_operators_and_elementwise_functions.py::test_trunc
 
-# missing 'copy' keyword argument, 'newshape' should be named 'shape'
+# 'newshape' should be named 'shape'
 array_api_tests/test_signatures.py::test_func_signature[reshape]
 
 # missing 'descending' keyword arguments
 array_api_tests/test_signatures.py::test_func_signature[argsort]
 array_api_tests/test_signatures.py::test_func_signature[sort]
 
-# assertionError: out.dtype=float32, but should be float64 [sum(float32)]
-array_api_tests/test_statistical_functions.py::test_sum
+# nonzero for 0D should error
+array_api_tests/test_searching_functions.py::test_nonzero_zerodim_error
+
+# TODO: check why in CI `inspect.signature(np.vecdot)` returns (*arg, **kwarg)
+# instead of raising ValueError. mtsokol: couldn't reproduce locally
+array_api_tests/test_signatures.py::test_func_signature[vecdot]
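[Reviewer note] The new vecdot skip above documents a signature-introspection
mismatch seen only in CI. A short sketch of the check involved (not part of
the patch; assumes NumPy 2.0, where ufuncs normally expose no introspectable
signature):

    import inspect
    import numpy as np

    try:
        print("signature:", inspect.signature(np.vecdot))
    except ValueError:
        # Expected locally per the TODO above; CI reportedly gets
        # (*args, **kwargs) instead of this error.
        print("np.vecdot has no introspectable signature")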
diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml
index cbf99c9dace6..652d307bbecc 100644
--- a/tools/ci/cirrus_arm.yml
+++ b/tools/ci/cirrus_arm.yml
@@ -67,7 +67,7 @@ freebsd_test_task:
   use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true'
   compute_engine_instance:
     image_project: freebsd-org-cloud-dev
-    image: family/freebsd-13-2
+    image: family/freebsd-14-0
     platform: freebsd
     cpu: 1
     memory: 4G
diff --git a/tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch b/tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch
deleted file mode 100644
index f06ea4eead19..000000000000
--- a/tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From e08ebf0e90f632547c8ff5b396ec0c4ddd65aad4 Mon Sep 17 00:00:00 2001
-From: Gyeongjae Choi
-Date: Sat, 10 Feb 2024 03:28:01 +0900
-Subject: [PATCH] Update numpy to 1.26.4 and don't set MESON env variable
- (#4502)
-
-From meson-python 0.15, $MESON env variable is used to overwrite the meson binary
-path. We don't want that behavior.
----
- pypabuild.py | 22 +++++++++++++++-------
- 1 file changed, 15 insertions(+), 7 deletions(-)
-
-diff --git a/pypabuild.py b/pypabuild.py
-index 9d0107a8..6961b14e 100644
---- a/pypabuild.py
-+++ b/pypabuild.py
-@@ -40,6 +40,19 @@ AVOIDED_REQUIREMENTS = [
-     "patchelf",
- ]
-
-+# corresponding env variables for symlinks
-+SYMLINK_ENV_VARS = {
-+    "cc": "CC",
-+    "c++": "CXX",
-+    "ld": "LD",
-+    "lld": "LLD",
-+    "ar": "AR",
-+    "gcc": "GCC",
-+    "ranlib": "RANLIB",
-+    "strip": "STRIP",
-+    "gfortran": "FC",  # https://mesonbuild.com/Reference-tables.html#compiler-and-linker-selection-variables
-+}
-+
-
- def _gen_runner(
-     cross_build_env: Mapping[str, str],
-@@ -207,13 +220,8 @@ def make_command_wrapper_symlinks(symlink_dir: Path) -> dict[str, str]:
-         symlink_path.unlink()
-
-         symlink_path.symlink_to(pywasmcross_exe)
--        if symlink == "c++":
--            var = "CXX"
--        elif symlink == "gfortran":
--            var = "FC"  # https://mesonbuild.com/Reference-tables.html#compiler-and-linker-selection-variables
--        else:
--            var = symlink.upper()
--        env[var] = str(symlink_path)
-+        if symlink in SYMLINK_ENV_VARS:
-+            env[SYMLINK_ENV_VARS[symlink]] = str(symlink_path)
-
-     return env
-
---
-2.39.3 (Apple Git-145)
-
diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt
index a5b5ae5c22e6..021b4b0289e7 100644
--- a/tools/wheels/LICENSE_linux.txt
+++ b/tools/wheels/LICENSE_linux.txt
@@ -5,7 +5,7 @@ This binary distribution of NumPy also bundles the following software:
 
 Name: OpenBLAS
-Files: numpy.libs/libopenblas*.so
+Files: numpy.libs/libscipy_openblas*.so
 Description: bundled as a dynamically linked library
 Availability: https://github.com/OpenMathLib/OpenBLAS/
 License: BSD-3-Clause
@@ -41,7 +41,7 @@ License: BSD-3-Clause
 
 Name: LAPACK
-Files: numpy.libs/libopenblas*.so
+Files: numpy.libs/libscipy_openblas*.so
 Description: bundled in OpenBLAS
 Availability: https://github.com/OpenMathLib/OpenBLAS/
 License: BSD-3-Clause-Attribution
diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt
index 1ebd5663d02c..81889131cfa7 100644
--- a/tools/wheels/LICENSE_osx.txt
+++ b/tools/wheels/LICENSE_osx.txt
@@ -4,7 +4,7 @@ This binary distribution of NumPy also bundles the following software:
 
 Name: OpenBLAS
-Files: numpy/.dylibs/libopenblas*.so
+Files: numpy/.dylibs/libscipy_openblas*.so
 Description: bundled as a dynamically linked library
 Availability: https://github.com/OpenMathLib/OpenBLAS/
 License: BSD-3-Clause
@@ -40,7 +40,7 @@ License: BSD-3-Clause
 
 Name: LAPACK
-Files: numpy/.dylibs/libopenblas*.so
+Files: numpy/.dylibs/libscipy_openblas*.so
 Description: bundled in OpenBLAS
 Availability: https://github.com/OpenMathLib/OpenBLAS/
 License: BSD-3-Clause-Attribution
diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt
index f8eaaf1cae25..a2ccce66fbe5 100644
--- a/tools/wheels/LICENSE_win32.txt
+++ b/tools/wheels/LICENSE_win32.txt
@@ -5,7 +5,7 @@ This binary distribution of NumPy also bundles the following software:
 
 Name: OpenBLAS
-Files: numpy.libs\libopenblas*.dll
+Files: numpy.libs\libscipy_openblas*.dll
 Description: bundled as a dynamically linked library
 Availability: https://github.com/OpenMathLib/OpenBLAS/
 License: BSD-3-Clause
@@ -41,7 +41,7 @@ License: BSD-3-Clause
 
 Name: LAPACK
-Files: numpy.libs\libopenblas*.dll
+Files: numpy.libs\libscipy_openblas*.dll
 Description: bundled in OpenBLAS
 Availability: https://github.com/OpenMathLib/OpenBLAS/
 License: BSD-3-Clause-Attribution
@@ -96,7 +96,7 @@ License: BSD-3-Clause-Attribution
 
 Name: GCC runtime library
-Files: numpy.libs\libgfortran*.dll
+Files: numpy.libs\libscipy_openblas*.dll
 Description: statically linked to files compiled with gcc
 Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
 License: GPL-3.0-with-GCC-exception
@@ -879,24 +879,3 @@ the library.  If this is what you want to do, use the GNU Lesser General
 Public License instead of this License.  But first, please read
 <https://www.gnu.org/licenses/why-not-lgpl.html>.
 
-Name: libquadmath
-Files: numpy.libs\libopenb*.dll
-Description: statically linked to files compiled with gcc
-Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
-License: LGPL-2.1-or-later
-
-    GCC Quad-Precision Math Library
-    Copyright (C) 2010-2019 Free Software Foundation, Inc.
-    Written by Francois-Xavier Coudert
-
-    This file is part of the libquadmath library.
-    Libquadmath is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2.1 of the License, or (at your option) any later version.
-
-    Libquadmath is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Lesser General Public License for more details.
-    https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
diff --git a/vendored-meson/meson b/vendored-meson/meson
index 4e370ca8ab73..6f88e485f27b 160000
--- a/vendored-meson/meson
+++ b/vendored-meson/meson
@@ -1 +1 @@
-Subproject commit 4e370ca8ab73c07f7b84abe8a4b937caace050a4
+Subproject commit 6f88e485f27bb0a41d31638f0c55055362e0b1ac
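[Reviewer note] With version = "2.0.3" in pyproject.toml and the 2.0.3
release-notes path in pavement.py above, a trivial post-build smoke check for
this branch (not part of the patch):

    import numpy as np

    # A wheel built from this branch should report the release version.
    assert np.__version__ == "2.0.3"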