diff --git a/.circleci/config.yml b/.circleci/config.yml index eb267dffd7fb..8c2b443f1e84 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.8 + - image: cimg/python:3.11.10 working_directory: ~/repo @@ -54,29 +54,22 @@ jobs: command: | python3.11 -m venv venv . venv/bin/activate - pip install --progress-bar=off -r requirements/test_requirements.txt + pip install --progress-bar=off -r requirements/test_requirements.txt \ + -r requirements/build_requirements.txt \ + -r requirements/ci_requirements.txt # get newer, pre-release versions of critical packages - pip install --progress-bar=off --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple -r requirements/doc_requirements.txt + pip install --progress-bar=off --pre -r requirements/doc_requirements.txt # then install numpy HEAD, which will override the version installed above - pip install . --config-settings=setup-args="-Dallow-noblas=true" - - - run: - name: create release notes - command: | - . venv/bin/activate - VERSION=$(pip show numpy | grep Version: | cut -d ' ' -f 2 | cut -c 1-5) - towncrier build --version $VERSION --yes - ./tools/ci/test_all_newsfragments_used.py + spin build --with-scipy-openblas=64 -j 2 - run: name: build devdocs w/ref warnings command: | . venv/bin/activate - cd doc # Don't use -q, show warning summary" - SPHINXOPTS="-W -n" make -e html - if [[ $(find build/html -type f | wc -l) -lt 1000 ]]; then - echo "doc build failed: build/html is empty" + SPHINXOPTS="-W -n" spin docs + if [[ $(find doc/build/html -type f | wc -l) -lt 1000 ]]; then + echo "doc build failed: doc/build/html is empty" exit -1 fi @@ -95,10 +88,17 @@ jobs: # destination: neps - run: - name: run refguide-check + name: check doctests command: | . 
venv/bin/activate - python tools/refguide_check.py -v + spin check-docs -v + spin check-tutorials -v + # Currently, this does two checks not done by check-docs: + # - validates ReST blocks (via validate_rst_syntax) + # - checks that all of a module's `__all__` is reflected in the + # module-level docstring autosummary + echo calling python3 tools/refguide_check.py -v + python3 tools/refguide_check.py -v - persist_to_workspace: root: ~/repo diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index d4d6fe4a4989..4ef74bcfa7f8 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -44,22 +44,25 @@ jobs: # test_unary_spurious_fpexception is currently skipped # FIXME(@seiko2plus): Requires confirmation for the following issue: # The presence of an FP invalid exception caused by sqrt. Unsure if this is a qemu bug or not. - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_unary_spurious_fpexception" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_unary_spurious_fpexception", + "arm" + ] - [ "ppc64le", "powerpc64le-linux-gnu", "ppc64le/ubuntu:22.04", "-Dallow-noblas=true", "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - ] + "ppc64le" + ] - [ "ppc64le - baseline(Power9)", "powerpc64le-linux-gnu", "ppc64le/ubuntu:22.04", "-Dallow-noblas=true -Dcpu-baseline=vsx3", "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - ] + "ppc64le" + ] - [ "s390x", "s390x-linux-gnu", @@ -68,27 +71,31 @@ jobs: # Skipping TestRationalFunctions.test_gcd_overflow test # because of a possible qemu bug that appears to be related to int64 overflow in absolute operation. # TODO(@seiko2plus): Confirm the bug and provide a minimal reproducer, then report it to upstream. 
- "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", + "s390x" + ] - [ "s390x - baseline(Z13)", "s390x-linux-gnu", "s390x/ubuntu:22.04", "-Dallow-noblas=true -Dcpu-baseline=vx", - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", + "s390x" + ] - [ "riscv64", "riscv64-linux-gnu", "riscv64/ubuntu:22.04", "-Dallow-noblas=true", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc" - ] + "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", + "riscv64" + ] env: TOOLCHAIN_NAME: ${{ matrix.BUILD_PROP[1] }} DOCKER_CONTAINER: ${{ matrix.BUILD_PROP[2] }} MESON_OPTIONS: ${{ matrix.BUILD_PROP[3] }} RUNTIME_TEST_FILTER: ${{ matrix.BUILD_PROP[4] }} + ARCH: ${{ matrix.BUILD_PROP[5] }} TERM: xterm-256color name: "${{ matrix.BUILD_PROP[0] }}" @@ -108,7 +115,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@v4.0.2 + uses: actions/cache@v4.1.2 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -117,7 +124,8 @@ jobs: - name: Creates new container if: steps.container-cache.outputs.cache-hit != 'true' run: | - docker run --name the_container --interactive -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " + docker run --platform=linux/${ARCH} --name the_container --interactive \ + -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " apt update && apt install -y cmake git python3 python-is-python3 python3-dev python3-pip && mkdir -p /lib64 && ln -s /host/lib64/ld-* /lib64/ && @@ -133,7 +141,9 @@ jobs: rm -f /usr/bin/ld.bfd && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld.bfd 
/usr/bin/ld.bfd && rm -f /usr/bin/ninja && ln -s /host/usr/bin/ninja /usr/bin/ninja && git config --global --add safe.directory /numpy && - python -m pip install -r /numpy/requirements/build_requirements.txt && + # No need to build ninja from source, the host ninja is used for the build + grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && + python -m pip install -r /tmp/build_requirements.txt && python -m pip install pytest pytest-xdist hypothesis typing_extensions && rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " @@ -147,10 +157,11 @@ jobs: - name: Meson Build run: | - docker run --rm -e "TERM=xterm-256color" -v $(pwd):/numpy -v /:/host the_container \ - /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build --clean -- ${MESON_OPTIONS} - '" + docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ + -v $(pwd):/numpy -v /:/host the_container \ + /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' + cd /numpy && spin build --clean -- ${MESON_OPTIONS} + '" - name: Meson Log if: always() @@ -158,9 +169,11 @@ jobs: - name: Run Tests run: | - docker run --rm -e "TERM=xterm-256color" -v $(pwd):/numpy -v /:/host the_container \ - /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - export F90=/usr/bin/gfortran - cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" - '" + docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ + -v $(pwd):/numpy -v /:/host the_container \ + /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' + export F90=/usr/bin/gfortran + cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" + '" + diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 164a4c6710c2..62fd24a4e337 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -1,4 +1,4 @@ -name: macOS tests (meson) +name: macOS tests on: 
pull_request: @@ -25,7 +25,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.11"] + python-version: ["3.12"] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -44,7 +44,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -52,15 +52,15 @@ jobs: restore-keys: | ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - - name: Setup Mambaforge - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4 + - name: Setup Miniforge + uses: conda-incubator/setup-miniconda@d2e6a045a86077fb6cad6f5adf368e9076ddaa8d # v3.1.0 with: python-version: ${{ matrix.python-version }} channels: conda-forge channel-priority: true activate-environment: numpy-dev use-only-tar-bz2: false - miniforge-variant: Mambaforge + miniforge-variant: Miniforge3 miniforge-version: latest use-mamba: true @@ -68,7 +68,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. 
- name: Cache conda environment - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 @@ -103,7 +103,8 @@ jobs: accelerate: - name: Accelerate (LP64, ILP64) - ${{ matrix.build_runner[1] }} + name: Accelerate - ${{ matrix.build_runner[1] }} - ${{ matrix.version }} + # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' runs-on: ${{ matrix.build_runner[0] }} strategy: @@ -112,6 +113,7 @@ jobs: build_runner: - [ macos-13, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] + version: ["3.10", "3.13t"] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -119,15 +121,21 @@ jobs: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: quansight-labs/setup-python@b9ab292c751a42bcd2bb465b7fa202ea2c3f5796 # v5.3.1 with: - python-version: '3.10' + python-version: ${{ matrix.version }} - uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 if: ${{ matrix.build_runner[0] == 'macos-13' }} with: xcode-version: '14.3' + # TODO: remove cython nightly install when cython does a release + - name: Install nightly Cython + if: matrix.version == '3.13t' + run: | + pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython + - name: Install dependencies run: | pip install -r requirements/build_requirements.txt diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 726e6b839051..f93587076493 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -47,14 +47,14 @@ jobs: matrix: os_python: - [ubuntu-latest, '3.12'] - - [windows-2019, '3.11'] - - [macos-12, '3.10'] + - [windows-latest, '3.11'] + - [macos-latest, '3.10'] steps: - uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: ${{ matrix.os_python[1] }} - name: Install dependencies diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index ce034d24d2ea..e763b8d86dd4 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -76,8 +76,8 @@ jobs: # Github Actions doesn't support pairing matrix values together, let's improvise # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 buildplat: - - [ubuntu-20.04, manylinux_x86_64, ""] - - [ubuntu-20.04, musllinux_x86_64, ""] + - [ubuntu-22.04, manylinux_x86_64, ""] + - [ubuntu-22.04, musllinux_x86_64, ""] - [macos-13, macosx_x86_64, openblas] # targeting macos >= 14. Could probably build on macos-14, but it would be a cross-compile @@ -90,14 +90,10 @@ jobs: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32, ""] python: "pp310" - - buildplat: [ ubuntu-20.04, musllinux_x86_64, "" ] + - buildplat: [ ubuntu-22.04, musllinux_x86_64, "" ] python: "pp310" - buildplat: [ macos-14, macosx_arm64, accelerate ] python: "pp310" - - buildplat: [ windows-2019, win_amd64, "" ] - python: "cp313t" - - buildplat: [ windows-2019, win32, "" ] - python: "cp313t" - buildplat: [ macos13, macosx_x86_64, openblas ] python: "cp313t" @@ -130,7 +126,7 @@ jobs: if: runner.os == 'windows' # Used to push the built wheels - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: "3.x" @@ -162,22 +158,23 @@ jobs: - name: Set up free-threaded build if: matrix.python == 'cp313t' + shell: bash -el {0} run: | echo "CIBW_BUILD_FRONTEND=pip; args: --no-build-isolation" >> "$GITHUB_ENV" 
- name: Build wheels - uses: pypa/cibuildwheel@bd033a44476646b606efccdd5eed92d5ea1d77ad # v2.20.0 + uses: pypa/cibuildwheel@7940a4c0e76eb2030e473a5f864f291f63ee879b # v2.21.3 env: CIBW_PRERELEASE_PYTHONS: True CIBW_FREE_THREADED_SUPPORT: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - uses: mamba-org/setup-micromamba@f8b8a1e23a26f60a44c853292711bacfd3eac822 + - uses: mamba-org/setup-micromamba@617811f69075e3fd3ae68ca64220ad065877f246 with: # for installation of anaconda-client, required for upload to # anaconda.org @@ -231,7 +228,7 @@ jobs: with: submodules: true # Used to push the built wheels - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: # Build sdist on lowest supported Python python-version: "3.10" @@ -253,7 +250,7 @@ jobs: python -mpip install twine twine check dist/* - - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: sdist path: ./dist/* diff --git a/.mailmap b/.mailmap index 143ad1c4a9b2..23a556dd9fc4 100644 --- a/.mailmap +++ b/.mailmap @@ -7,53 +7,55 @@ # # This file is up-to-date if the command git log --format="%aN <%aE>" | sort -u # gives no duplicates. 
-@8bitmp3 <19637339+8bitmp3@users.noreply.github.com> -@Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> -@DWesl <22566757+DWesl@users.noreply.github.com> -@Endolith -@GalaxySnail -@Illviljan <14371165+Illviljan@users.noreply.github.com> -@LSchroefl <65246829+LSchroefl@users.noreply.github.com> -@Lbogula -@Lisa <34400837+lyzlisa@users.noreply.github.com> -@Patrick <39380924+xamm@users.noreply.github.com> -@Scian <65375075+hoony6134@users.noreply.github.com> -@Searchingdays -@amagicmuffin <2014wcheng@gmail.com> -@code-review-doctor -@cook-1229 <70235336+cook-1229@users.noreply.github.com> -@dg3192 <113710955+dg3192@users.noreply.github.com> -@ellaella12 -@ellaella12 <120079323+ellaella12@users.noreply.github.com> -@h-vetinari -@h6197627 <44726212+h6197627@users.noreply.github.com> -@jbCodeHub -@juztamau5 -@legoffant <58195095+legoffant@users.noreply.github.com> -@liang3zy22 <35164941+liang3zy22@users.noreply.github.com> -@luzpaz -@luzpaz -@matoro -@mcp292 -@mgunyho <20118130+mgunyho@users.noreply.github.com> -@msavinash <73682349+msavinash@users.noreply.github.com> -@mykykh <49101849+mykykh@users.noreply.github.com> -@partev -@pkubaj -@pmvz -@pojaghi <36278217+pojaghi@users.noreply.github.com> -@pratiklp00 -@sfolje0 -@spacescientist -@stefan6419846 -@stefan6419846 <96178532+stefan6419846@users.noreply.github.com> -@tajbinjohn -@tautaus -@undermyumbrella1 -@xoviat <49173759+xoviat@users.noreply.github.com> -@xoviat <49173759+xoviat@users.noreply.github.com> -@yan-wyb -@yetanothercheer +!8bitmp3 <19637339+8bitmp3@users.noreply.github.com> +!Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> +!DWesl <22566757+DWesl@users.noreply.github.com> +!Endolith +!GalaxySnail +!Illviljan <14371165+Illviljan@users.noreply.github.com> +!LSchroefl <65246829+LSchroefl@users.noreply.github.com> +!Lbogula +!Lisa <34400837+lyzlisa@users.noreply.github.com> +!Patrick <39380924+xamm@users.noreply.github.com> +!Scian 
<65375075+hoony6134@users.noreply.github.com> +!Searchingdays +!amagicmuffin <2014wcheng@gmail.com> +!code-review-doctor +!cook-1229 <70235336+cook-1229@users.noreply.github.com> +!dg3192 <113710955+dg3192@users.noreply.github.com> +!ellaella12 +!ellaella12 <120079323+ellaella12@users.noreply.github.com> +!h-vetinari +!h6197627 <44726212+h6197627@users.noreply.github.com> +!jbCodeHub +!juztamau5 +!legoffant <58195095+legoffant@users.noreply.github.com> +!liang3zy22 <35164941+liang3zy22@users.noreply.github.com> +!luzpaz +!luzpaz +!matoro +!mcp292 +!mgunyho <20118130+mgunyho@users.noreply.github.com> +!msavinash <73682349+msavinash@users.noreply.github.com> +!mykykh <49101849+mykykh@users.noreply.github.com> +!ogidig5 <82846833+ogidig5@users.noreply.github.com> +!partev +!pkubaj +!pmvz +!pojaghi <36278217+pojaghi@users.noreply.github.com> +!pratiklp00 +!sfolje0 +!spacescientist +!stefan6419846 +!stefan6419846 <96178532+stefan6419846@users.noreply.github.com> +!tajbinjohn +!tautaus +!undermyumbrella1 +!vahidmech +!xoviat <49173759+xoviat@users.noreply.github.com> +!xoviat <49173759+xoviat@users.noreply.github.com> +!yan-wyb +!yetanothercheer Aaron Baecker Adrin Jalali Arun Kota @@ -64,6 +66,7 @@ Aditi Saluja <136131452+salujaditi14@users.noreply.github.com> Andrei Batomunkuev Ajay DS Ajay DS +Ajay Kumar Janapareddi Alan Fontenot Alan Fontenot <36168460+logeaux@users.noreply.github.com> Abdul Muneer @@ -117,6 +120,7 @@ Andrea Sangalli <53617841+and-sang@users.noreply.github.c Andreas Klöckner Andreas Schwab Andrei Kucharavy +Andrej Zhilenkov Andrew Lawson Anirudh Subramanian Anne Archibald @@ -127,9 +131,11 @@ Antoine Pitrou Anton Prosekin Anže Starič Arfy Slowy +Arnaud Ma Aron Ahmadia Arun Kota Arun Kota +Arun Pa Arun Palaniappen Arun Persaud Ashutosh Singh @@ -171,6 +177,8 @@ Bui Duc Minh <41239569+Mibu287@users.noreply.github.co Caio Agiani Carl Kleffner Carl Leake +Carlos Henrique Hermanny Moreira da Silva +Carlos Henrique Hermanny Moreira da Silva 
<121122527+carlosilva10260@users.noreply.github.com> Cédric Hannotier Charles Stern <62192187+cisaacstern@users.noreply.github.com> Chiara Marmo @@ -296,6 +304,7 @@ Imen Rajhi Inessa Pawson Irina Maria Mocan <28827042+IrinaMaria@users.noreply.github.com> Irvin Probst +Ishan Koradia <39583356+Ishankoradia@users.noreply.github.com> Ivan Meleshko Isabela Presedo-Floyd Ganesh Kathiresan @@ -304,6 +313,7 @@ Giannis Zapantis Guillaume Peillex Jack J. Woehr Jacob M. Casey +Jakob Stevens Haas <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Jaime Fernandez Jaime Fernandez Jaime Fernandez @@ -314,6 +324,8 @@ Jake VanderPlas Jakob Jakobson Jakob Jakobson <43045863+jakobjakobson13@users.noreply.github.com> James Bourbeau +James Joseph Thomas +James Joseph Thomas quotuva James Oliver <46758370+jamesoliverh@users.noreply.github.com> James Webber Jamie Macey @@ -356,8 +368,11 @@ Joseph Fox-Rabinovitz Joshua Himmens Joyce Brum +Joren Hammudoglu Jory Klaverstijn Jory Klaverstijn <63673224+JoryKlaverstijn@users.noreply.github.com> +Julia Poo +Julia Poo <57632293+JuliaPoo@users.noreply.github.com> Julian Taylor Julian Taylor Julian Taylor @@ -371,6 +386,8 @@ Kai Striega Kasia Leszek Kasia Leszek <39829548+katleszek@users.noreply.github.com> Karan Dhir +Karthik Gangula <56480632+gangula-karthik@users.noreply.github.com> +Karthik Kaiplody Keller Meier Kenny Huynh Kevin Granados @@ -395,6 +412,7 @@ Lars Grüter Leona Taric Leona Taric <92495067+LeonaTaric@users.noreply.github.com> Leonardus Chen +Liangyu Zhang Licht Takeuchi Lorenzo Mammana Lillian Zha @@ -472,6 +490,8 @@ Michel Fruchart Miki Watanabe (渡邉 美希) Miles Cranmer +Milica Dančuk +Milica Dančuk love-bees <33499899+love-bees@users.noreply.github.com> Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> @@ -483,6 +503,8 @@ Mukulika Pahari <60316606+Mukulikaa@users.noreply.git Munira Alduraibi Namami Shanker Namami Shanker NamamiShanker +Nathan Goldbaum +Nathan Goldbaum Nathaniel 
J. Smith Naveen Arunachalam naveenarun Neil Girdhar @@ -514,6 +536,8 @@ Pat Miller patmiller Paul Ivanov Paul Ivanov Paul Jacobson +Paul Juma Otieno +Paul Juma Otieno <103896399+otieno-juma@users.noreply.github.com> Paul Reece Paul YS Lee Paul Pey Lian Lim @@ -597,6 +621,7 @@ Simon Gasse Simon Gasse Sista Seetaram Sista Seetaram <65669128+sistaseetaram@users.noreply.github.com> +Slava Gorloff <31761951+gorloffslava@users.noreply.github.com> Søren Rasmussen <47032123+sorenrasmussenai@users.noreply.github.com> Spencer Hill Srimukh Sripada @@ -644,6 +669,8 @@ Vrinda Narayan Vrinda Narayan Vrinda Narayan <48102157+vrindaaa@users.noreply.github.com> Wansoo Kim +Warrick Ball +Warrick Ball Warren Weckesser Warren Weckesser Weitang Li diff --git a/.spin/cmds.py b/.spin/cmds.py index 0773578de913..ee9fa38346a7 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -1,13 +1,11 @@ import os import shutil import pathlib -import shutil -import pathlib import importlib import subprocess import click -from spin import util +import spin from spin.cmds import meson @@ -38,8 +36,7 @@ def _get_numpy_tools(filename): "revision-range", required=True ) -@click.pass_context -def changelog(ctx, token, revision_range): +def changelog(token, revision_range): """👩 Get change log for provided revision range \b @@ -74,71 +71,20 @@ def changelog(ctx, token, revision_range): ) -@click.command() -@click.option( - "-j", "--jobs", - help="Number of parallel tasks to launch", - type=int -) -@click.option( - "--clean", is_flag=True, - help="Clean build directory before build" -) -@click.option( - "-v", "--verbose", is_flag=True, - help="Print all build output, even installation" -) @click.option( "--with-scipy-openblas", type=click.Choice(["32", "64"]), default=None, help="Build with pre-installed scipy-openblas32 or scipy-openblas64 wheel" ) -@click.argument("meson_args", nargs=-1) -@click.pass_context -def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose=False, quiet=False, 
*args, **kwargs): - """🔧 Build package with Meson/ninja and install - - MESON_ARGS are passed through e.g.: - - spin build -- -Dpkg_config_path=/lib64/pkgconfig - - The package is installed to build-install - - By default builds for release, to be able to use a debugger set CFLAGS - appropriately. For example, for linux use - - CFLAGS="-O0 -g" spin build - """ - # XXX keep in sync with upstream build +@spin.util.extend_command(spin.cmds.meson.build) +def build(*, parent_callback, with_scipy_openblas, **kwargs): if with_scipy_openblas: _config_openblas(with_scipy_openblas) - ctx.params.pop("with_scipy_openblas", None) - ctx.forward(meson.build) + parent_callback(**kwargs) -@click.command() -@click.argument("sphinx_target", default="html") -@click.option( - "--clean", is_flag=True, - default=False, - help="Clean previously built docs before building" -) -@click.option( - "--build/--no-build", - "first_build", - default=True, - help="Build numpy before generating docs", -) -@click.option( - '--jobs', '-j', - metavar='N_JOBS', - # Avoids pydata_sphinx_theme extension warning from default="auto". - default="1", - help=("Number of parallel build jobs." - "Can be set to `auto` to use all cores.") -) -@click.pass_context -def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): +@spin.util.extend_command(spin.cmds.meson.docs) +def docs(*, parent_callback, **kwargs): """📖 Build Sphinx documentation By default, SPHINXOPTS="-W", raising errors on warnings. 
@@ -159,22 +105,12 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): spin docs dist """ - meson.docs.ignore_unknown_options = True - - # See https://github.com/scientific-python/spin/pull/199 - # Can be changed when spin updates to 0.11, and moved to pyproject.toml - if clean: - clean_dirs = [ - './doc/build/', - './doc/source/reference/generated', - './doc/source/reference/random/bit_generators/generated', - './doc/source/reference/random/generated', - ] - - for target_dir in clean_dirs: - if os.path.isdir(target_dir): - print(f"Removing {target_dir!r}") - shutil.rmtree(target_dir) + kwargs['clean_dirs'] = [ + './doc/build/', + './doc/source/reference/generated', + './doc/source/reference/random/bit_generators/generated', + './doc/source/reference/random/generated', + ] # Run towncrier without staging anything for commit. This is the way to get # release notes snippets included in a local doc build. @@ -184,11 +120,14 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): with open(outfile, 'w') as f: f.write(p.stdout) - ctx.forward(meson.docs) + parent_callback(**kwargs) + + +# Override default jobs to 1 +jobs_param = next(p for p in docs.params if p.name == 'jobs') +jobs_param.default = 1 -@click.command() -@click.argument("pytest_args", nargs=-1) @click.option( "-m", "markexpr", @@ -196,101 +135,25 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): default="not slow", help="Run tests with the given markers" ) -@click.option( - "-j", - "n_jobs", - metavar='N_JOBS', - default="1", - help=("Number of parallel jobs for testing. " - "Can be set to `auto` to use all cores.") -) -@click.option( - "--tests", "-t", - metavar='TESTS', - help=(""" -Which tests to run. 
Can be a module, function, class, or method: - - \b - numpy.random - numpy.random.tests.test_generator_mt19937 - numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric - numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric::test_edge_cases - \b -""") -) -@click.option( - '--verbose', '-v', is_flag=True, default=False -) -@click.pass_context -def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): - """🔧 Run tests - - PYTEST_ARGS are passed through directly to pytest, e.g.: - - spin test -- --pdb - - To run tests on a directory or file: - - \b - spin test numpy/linalg - spin test numpy/linalg/tests/test_linalg.py - - To report the durations of the N slowest tests: - - spin test -- --durations=N - - To run tests that match a given pattern: - - \b - spin test -- -k "geometric" - spin test -- -k "geometric and not rgeometric" - +@spin.util.extend_command(spin.cmds.meson.test) +def test(*, parent_callback, pytest_args, tests, markexpr, **kwargs): + """ By default, spin will run `-m 'not slow'`. To run the full test suite, use `spin test -m full` - - For more, see `pytest --help`. 
""" # noqa: E501 if (not pytest_args) and (not tests): - pytest_args = ('numpy',) + pytest_args = ('--pyargs', 'numpy') if '-m' not in pytest_args: - if len(pytest_args) == 1 and not tests: - tests = pytest_args[0] - pytest_args = () if markexpr != "full": pytest_args = ('-m', markexpr) + pytest_args - if (n_jobs != "1") and ('-n' not in pytest_args): - pytest_args = ('-n', str(n_jobs)) + pytest_args - - if tests and not ('--pyargs' in pytest_args): - pytest_args = ('--pyargs', tests) + pytest_args - - if verbose: - pytest_args = ('-v',) + pytest_args - - ctx.params['pytest_args'] = pytest_args - - for extra_param in ('markexpr', 'n_jobs', 'tests', 'verbose'): - del ctx.params[extra_param] - ctx.forward(meson.test) + kwargs['pytest_args'] = pytest_args + parent_callback(**{'pytest_args': pytest_args, 'tests': tests, **kwargs}) -@click.command() -@click.argument("pytest_args", nargs=-1) -@click.option( - "-j", - "n_jobs", - metavar='N_JOBS', - default="1", - help=("Number of parallel jobs for testing. " - "Can be set to `auto` to use all cores.") -) -@click.option( - '--verbose', '-v', is_flag=True, default=False -) -@click.pass_context -def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): +@spin.util.extend_command(test, doc='') +def check_docs(*, parent_callback, pytest_args, **kwargs): """🔧 Run doctests of objects in the public API. 
PYTEST_ARGS are passed through directly to pytest, e.g.: @@ -327,14 +190,9 @@ def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): import scipy_doctest except ModuleNotFoundError as e: raise ModuleNotFoundError("scipy-doctest not installed") from e - if (not pytest_args): - pytest_args = ('numpy',) - - if (n_jobs != "1") and ('-n' not in pytest_args): - pytest_args = ('-n', str(n_jobs)) + pytest_args - if verbose: - pytest_args = ('-v',) + pytest_args + if (not pytest_args): + pytest_args = ('--pyargs', 'numpy') # turn doctesting on: doctest_args = ( @@ -344,39 +202,21 @@ def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): pytest_args = pytest_args + doctest_args - ctx.params['pytest_args'] = pytest_args - - for extra_param in ('n_jobs', 'verbose'): - del ctx.params[extra_param] + parent_callback(**{'pytest_args': pytest_args, **kwargs}) - ctx.forward(meson.test) - -@click.command() -@click.argument("pytest_args", nargs=-1) -@click.option( - "-j", - "n_jobs", - metavar='N_JOBS', - default="1", - help=("Number of parallel jobs for testing. " - "Can be set to `auto` to use all cores.") -) -@click.option( - '--verbose', '-v', is_flag=True, default=False -) -@click.pass_context -def check_tutorials(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): +@spin.util.extend_command(test, doc='') +def check_tutorials(*, parent_callback, pytest_args, **kwargs): """🔧 Run doctests of user-facing rst tutorials. 
- To test all tutorials in the numpy/doc/source/user/ directory, use + To test all tutorials in the numpy doc/source/user/ directory, use spin check-tutorials To run tests on a specific RST file: \b - spin check-tutorials numpy/doc/source/user/absolute-beginners.rst + spin check-tutorials doc/source/user/absolute-beginners.rst \b Note: @@ -393,20 +233,14 @@ def check_tutorials(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): # - `spin check-tutorials path/to/rst`, and # - `spin check-tutorials path/to/rst -- --durations=3` if (not pytest_args) or all(arg.startswith('-') for arg in pytest_args): - pytest_args = ('numpy/doc/source/user',) + pytest_args + pytest_args = ('doc/source/user',) + pytest_args # make all paths relative to the numpy source folder pytest_args = tuple( - str(curdir / '..' / '..' / arg) if not arg.startswith('-') else arg + str(curdir / '..' / arg) if not arg.startswith('-') else arg for arg in pytest_args ) - if (n_jobs != "1") and ('-n' not in pytest_args): - pytest_args = ('-n', str(n_jobs)) + pytest_args - - if verbose: - pytest_args = ('-v',) + pytest_args - # turn doctesting on: doctest_args = ( '--doctest-glob=*rst', @@ -414,12 +248,7 @@ def check_tutorials(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): pytest_args = pytest_args + doctest_args - ctx.params['pytest_args'] = pytest_args - - for extra_param in ('n_jobs', 'verbose'): - del ctx.params[extra_param] - - ctx.forward(meson.test) + parent_callback(**{'pytest_args': pytest_args, **kwargs}) # From scipy: benchmarks/benchmarks/common.py @@ -446,7 +275,7 @@ def _set_mem_rlimit(max_mem=None): def _commit_to_sha(commit): - p = util.run(['git', 'rev-parse', commit], output=False, echo=False) + p = spin.util.run(['git', 'rev-parse', commit], output=False, echo=False) if p.returncode != 0: raise( click.ClickException( @@ -459,10 +288,10 @@ def _commit_to_sha(commit): def _dirty_git_working_dir(): # Changes to the working directory - p0 = util.run(['git', 'diff-files', '--quiet']) 
+ p0 = spin.util.run(['git', 'diff-files', '--quiet']) # Staged changes - p1 = util.run(['git', 'diff-index', '--quiet', '--cached', 'HEAD']) + p1 = spin.util.run(['git', 'diff-index', '--quiet', '--cached', 'HEAD']) return (p0.returncode != 0 or p1.returncode != 0) @@ -487,7 +316,7 @@ def _run_asv(cmd): except (ImportError, RuntimeError): pass - util.run(cmd, cwd='benchmarks', env=env) + spin.util.run(cmd, cwd='benchmarks', env=env) @click.command() @click.option( @@ -510,7 +339,7 @@ def lint(ctx, branch, uncommitted): Examples: \b - For lint checks of your development brach with `main` or a custom branch: + For lint checks of your development branch with `main` or a custom branch: \b $ spin lint # defaults to main @@ -558,8 +387,9 @@ def lint(ctx, branch, uncommitted): required=False, nargs=-1 ) +@meson.build_dir_option @click.pass_context -def bench(ctx, tests, compare, verbose, quick, commits): +def bench(ctx, tests, compare, verbose, quick, commits, build_dir): """🏋 Run benchmarks. 
\b @@ -611,9 +441,9 @@ def bench(ctx, tests, compare, verbose, quick, commits): ) ctx.invoke(build) - meson._set_pythonpath() + meson._set_pythonpath(build_dir) - p = util.run( + p = spin.util.run( ['python', '-c', 'import numpy as np; print(np.__version__)'], cwd='benchmarks', echo=False, @@ -647,29 +477,20 @@ def bench(ctx, tests, compare, verbose, quick, commits): _run_asv(cmd_compare) -@click.command(context_settings={ - 'ignore_unknown_options': True -}) -@click.argument("python_args", metavar='', nargs=-1) -@click.pass_context -def python(ctx, python_args, *args, **kwargs): - """🐍 Launch Python shell with PYTHONPATH set - - OPTIONS are passed through directly to Python, e.g.: - - spin python -c 'import sys; print(sys.path)' - """ +@spin.util.extend_command(meson.python) +def python(*, parent_callback, **kwargs): env = os.environ env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') - ctx.forward(meson.python) + + parent_callback(**kwargs) @click.command(context_settings={ 'ignore_unknown_options': True }) @click.argument("ipython_args", metavar='', nargs=-1) -@click.pass_context -def ipython(ctx, ipython_args): +@meson.build_dir_option +def ipython(*, ipython_args, build_dir): """💻 Launch IPython shell with PYTHONPATH set OPTIONS are passed through directly to IPython, e.g.: @@ -679,16 +500,19 @@ def ipython(ctx, ipython_args): env = os.environ env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') + ctx = click.get_current_context() ctx.invoke(build) - ppath = meson._set_pythonpath() + ppath = meson._set_pythonpath(build_dir) print(f'💻 Launching IPython with PYTHONPATH="{ppath}"') + + # In spin >= 0.13.1, can replace with extended command, setting `pre_import` preimport = (r"import numpy as np; " r"print(f'\nPreimported NumPy {np.__version__} as np')") - util.run(["ipython", "--ignore-cwd", - f"--TerminalIPythonApp.exec_lines={preimport}"] + - list(ipython_args)) + spin.util.run(["ipython", "--ignore-cwd", + 
f"--TerminalIPythonApp.exec_lines={preimport}"] + + list(ipython_args)) @click.command(context_settings={"ignore_unknown_options": True}) @@ -702,6 +526,7 @@ def mypy(ctx): ctx.params['markexpr'] = 'full' ctx.forward(test) + @click.command(context_settings={ 'ignore_unknown_options': True }) @@ -747,8 +572,7 @@ def _config_openblas(blas_variant): help="NumPy version of release", required=False ) -@click.pass_context -def notes(ctx, version_override): +def notes(version_override): """🎉 Generate release notes and validate \b @@ -763,7 +587,7 @@ def notes(ctx, version_override): \b $ spin notes """ - project_config = util.get_config() + project_config = spin.util.get_config() version = version_override or project_config['project.version'] click.secho( @@ -774,7 +598,7 @@ def notes(ctx, version_override): # Check if `towncrier` is installed if not shutil.which("towncrier"): raise click.ClickException( - f"please install `towncrier` to use this command" + "please install `towncrier` to use this command" ) click.secho( @@ -783,7 +607,7 @@ def notes(ctx, version_override): ) # towncrier build --version 2.1 --yes cmd = ["towncrier", "build", "--version", version, "--yes"] - p = util.run(cmd=cmd, sys_exit=False, output=True, encoding="utf-8") + p = spin.util.run(cmd=cmd, sys_exit=False, output=True, encoding="utf-8") if p.returncode != 0: raise click.ClickException( f"`towncrier` failed returned {p.returncode} with error `{p.stderr}`" diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt index 815c9a1dba33..b3d8aa8bed06 100644 --- a/LICENSES_bundled.txt +++ b/LICENSES_bundled.txt @@ -29,3 +29,8 @@ Name: spin Files: .spin/cmds.py License: BSD-3 For license text, see .spin/LICENSE + +Name: tempita +Files: numpy/_build_utils/tempita/* +License: MIT + For details, see numpy/_build_utils/tempita/LICENCE.txt diff --git a/doc/changelog/2.0.2-changelog.rst b/doc/changelog/2.0.2-changelog.rst new file mode 100644 index 000000000000..6622407dd8f6 --- /dev/null +++ 
b/doc/changelog/2.0.2-changelog.rst @@ -0,0 +1,45 @@ + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Bruno Oliveira + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christopher Sidebottom +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* Yair Chuchem + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#27000 `__: REL: Prepare for the NumPy 2.0.1 release [wheel build] +* `#27001 `__: MAINT: prepare 2.0.x for further development +* `#27021 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27022 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27061 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27073 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27074 `__: BUG: Bump Highway to latest master +* `#27077 `__: BUG: Off by one in memory overlap check +* `#27122 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... 
+* `#27126 `__: BUG: Bump Highway to latest +* `#27128 `__: BUG: add missing error handling in public_dtype_api.c +* `#27129 `__: BUG: fix another cast setup in array_assign_subscript +* `#27130 `__: BUG: Fix building NumPy in FIPS mode +* `#27131 `__: BLD: update vendored Meson for cross-compilation patches +* `#27146 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27151 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27195 `__: REV: Revert undef I and document it +* `#27213 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27279 `__: BUG: Fix array_equal for numeric and non-numeric scalar types diff --git a/doc/changelog/2.1.0-changelog.rst b/doc/changelog/2.1.0-changelog.rst new file mode 100644 index 000000000000..af7f5a3b07c7 --- /dev/null +++ b/doc/changelog/2.1.0-changelog.rst @@ -0,0 +1,592 @@ + +Contributors +============ + +A total of 110 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* !ogidig5 + +* !partev +* !vahidmech + +* !h-vetinari +* Aaron Meurer +* Adrin Jalali + +* Agriya Khetarpal +* Ajay Kumar Janapareddi + +* Alex Herbert + +* Andras Deak +* Andrej Zhilenkov + +* Andrew Nelson +* Anne Gunn + +* Antony Lee +* Arnaud Ma + +* Arun Kannawadi + +* Arun Pa + +* Bas van Beek +* Ben Woodruff + +* Bruno Oliveira + +* Carlos Henrique Hermanny Moreira da Silva + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christian Lorentzen +* Christopher Sidebottom +* Christopher Titchen + +* Clément Robert +* Cobalt Yang + +* Devyani Chavan + +* Dimitri Papadopoulos Orfanos +* Ebigide Jude + +* Eric Xie + +* Evgeni Burovski +* Fabian Vogt + +* Francisco Sousa + +* GUAN MING + +* Gabriel Fougeron + +* Gagandeep Singh +* Giovanni Del Monte + +* Gonzalo Tornaría + +* Gonçalo Bárias + +* Hugo van Kemenade +* Jakob Stevens Haas + +* Jakob Unfried + +* James Joseph Thomas + +* Jean Lecordier + +* Joren Hammudoglu + +* Joris Van den Bossche +* Julia Poo + +* Justus Magin +* Jyn Spring 琴春 +* KIU Shueng Chuan +* Karthik Gangula + +* Karthik Kaiplody + +* Kevin Sheppard +* Kristoffer Pedersen + +* Leo Singer +* Liang Yan +* Liangyu Zhang + +* Lucas Colley +* Luiz Eduardo Amaral + +* Lysandros Nikolaou +* Marcel Loose + +* Marten van Kerkwijk +* Mateusz Sokół +* Matt Haberland +* Matt Thompson + +* Matthew Roeschke + +* Matthew Thompson + +* Matthias Bussonnier +* Matti Picus +* Melissa Weber Mendonça +* Milica Dančuk + +* Moritz Schreiber + +* Nathan Goldbaum +* Olivier Grisel +* Patrick J. 
Roddy + +* Paul Juma Otieno + +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Raquel Braunschweig + +* Robert Kern +* Rohit Goswami +* Romain Geissler + +* Ross Barnowski +* Rostan Tabet + +* Sam Morley + +* Sayed Adel +* Sean Cheah +* Sebastian Berg +* Serge Guelton +* Slobodan + +* Stefan van der Walt +* Thomas A Caswell +* Thomas Li +* Timo Röhling + +* Tsvika Shapira + +* Tuhin Sharma + +* Tyler Reddy +* Victor Eijkhout + +* Warren Weckesser +* Warrick Ball +* Xiangyi Wang + +* Yair Chuchem + +* Yang Liu + +* Yannik Wicke + +* Yevhen Amelin + +* Yuki K + +Pull requests merged +==================== + +A total of 469 pull requests were merged for this release. + +* `#12150 `__: ENH: When histogramming data with integer dtype, force bin width... +* `#24448 `__: TST: add some tests of np.log for complex input. +* `#25704 `__: DOC: quantile: correct/simplify documentation +* `#25705 `__: DOC: Add documentation explaining our promotion rules +* `#25781 `__: ENH: Convert fp32 sin/cos from C universal intrinsics to C++... 
+* `#25908 `__: ENH: Add center/ljust/rjust/zfill ufuncs for unicode and bytes +* `#25913 `__: NEP: NEP 55 updates and add @mhvk as an author +* `#25963 `__: BUG: Fix bug in numpy.pad() +* `#25964 `__: CI: fix last docbuild warnings +* `#25970 `__: MAINT: Prepare main for NumPy 2.1.0 development +* `#25971 `__: DOC: Fix a note section markup in ``dtype.rst`` +* `#25972 `__: DOC: Fix module setting of ``MaskedArray`` +* `#25974 `__: BUG: Raise error for negative-sized fixed-width dtype +* `#25975 `__: BUG: Fixes np.put receiving empty array causes endless loop +* `#25981 `__: BLD: push a tag builds a wheel +* `#25985 `__: BLD: omit pp39-macosx_arm64 from matrix +* `#25988 `__: DOC: Remove unused parameter description +* `#25990 `__: CI: clean up some unused `choco install` invocations +* `#25995 `__: CI: don't use ``fetch-tags`` in wheel build jobs +* `#25999 `__: BUG: fix kwarg handling in assert_warn [skip cirrus][skip azp] +* `#26000 `__: BUG: Filter out broken Highway platform +* `#26003 `__: MAINT: Bump pypa/cibuildwheel from 2.16.5 to 2.17.0 +* `#26005 `__: DOC: indicate stringdtype support in docstrings for string operations +* `#26006 `__: TST: remove usage of ProcessPoolExecutor in stringdtype tests +* `#26007 `__: MAINT: Remove sdist task from pavement.py +* `#26011 `__: DOC: mention the ``exceptions`` namespace in the 2.0.0 release... +* `#26012 `__: ENH: install StringDType promoter for add +* `#26014 `__: MAINT: remove the now-unused ``NPY_NO_SIGNAL`` +* `#26015 `__: MAINT: remove now-unused ``NPY_USE_C99_FORMAT`` +* `#26016 `__: MAINT: handle ``NPY_ALLOW_THREADS`` and related build option... +* `#26017 `__: MAINT: avoid use of flexible array member in public header +* `#26024 `__: BUG: raise error trying to coerce object arrays containing timedelta64('NaT')... 
+* `#26025 `__: BUG: fix reference count leak in __array__ internals +* `#26027 `__: BUG: add missing error handling in string to int cast internals +* `#26033 `__: MAINT: Remove partition and split-like functions from numpy.strings +* `#26045 `__: ENH: Optimize np.power for integer type +* `#26055 `__: ENH: Optimize np.power(x, 2) for double and float type +* `#26063 `__: MAINT,API: Const qualify some new API (mostly new DType API) +* `#26064 `__: MAINT: Make PyArrayMultiIterObject struct "smaller" +* `#26066 `__: BUG: Allow the new string dtype summation to work +* `#26067 `__: DOC: note stringdtype output support in np.strings docstrings +* `#26070 `__: DOC clarifications on debugging numpy +* `#26071 `__: BUG: fix logic error in stringdtype maximum/minimum ufunc +* `#26080 `__: BUG: adapt cython files to new complex declarations +* `#26081 `__: TYP: Make array _ShapeType bound and covariant +* `#26082 `__: ENH: Add partition/rpartition ufunc for string dtypes +* `#26083 `__: MAINT: Bump actions/cache from 4.0.1 to 4.0.2 +* `#26089 `__: TYP: Adjust typing for ``np.random.integers`` and ``np.random.randint`` +* `#26090 `__: API: Require reduce promoters to start with None to match +* `#26095 `__: MAINT: Bump actions/dependency-review-action from 4.1.3 to 4.2.3 +* `#26097 `__: DOC: Mention ``copy=True`` for ``__array__`` method in the migration... +* `#26099 `__: DOC: fix typo in doc/source/user/absolute_beginners.rst +* `#26103 `__: API: Default to hidden visibility for API tables +* `#26105 `__: MAINT: install all-string promoter for multiply +* `#26108 `__: MAINT: Remove unnecessarily defensive code from dlpack deleter +* `#26112 `__: TST: fix incorrect dtype in test +* `#26113 `__: BLD: Do not use -O3 flag when building in debug mode +* `#26116 `__: ENH: inherit numerical dtypes from abstract ones. 
+* `#26119 `__: BUG: fix reference counting error in stringdtype setup +* `#26123 `__: BUG: update pocketfft to unconditionaly disable use of aligned_alloc +* `#26125 `__: DOC: Bump pydata-sphinx-theme version +* `#26128 `__: DOC: Update absolute_beginners.rst +* `#26129 `__: MAINT: add missing noexcept clauses +* `#26130 `__: ENH: Optimize performance of np.atleast_1d +* `#26133 `__: MAINT: Bump actions/dependency-review-action from 4.2.3 to 4.2.4 +* `#26134 `__: CI, BLD: Push NumPy's Emscripten/Pyodide wheels nightly to Anaconda.org... +* `#26135 `__: BUG: masked array division should ignore all FPEs in mask calculation +* `#26136 `__: BUG: fixed datetime64[ns] conversion issue in numpy.vectorize,... +* `#26138 `__: MAINT: Bump actions/setup-python from 5.0.0 to 5.1.0 +* `#26139 `__: MAINT: Bump actions/dependency-review-action from 4.2.4 to 4.2.5 +* `#26142 `__: BUG,MAINT: Fix __array__ bugs and simplify code +* `#26147 `__: BUG: introduce PyArray_SafeCast to fix issues around stringdtype... 
+* `#26149 `__: MAINT: Escalate import warning to an import error +* `#26151 `__: BUG: Fix test_impossible_feature_enable failing without BASELINE_FEAT +* `#26155 `__: NEP: add NEP 56 mailing list resolution +* `#26160 `__: ENH: Improve performance of np.broadcast_arrays and np.broadcast_shapes +* `#26162 `__: BUG: Infinite Loop in numpy.base_repr +* `#26168 `__: DOC: mention np.lib.NumPyVersion in the 2.0 migration guide +* `#26172 `__: DOC, TST: make ``numpy.version`` officially public +* `#26174 `__: MAINT: Fix failure in routines.version.rst +* `#26182 `__: DOC: Update absolute_beginners.rst +* `#26185 `__: MAINT: Update Pyodide to 0.25.1 +* `#26187 `__: TST: Use platform.machine() for improved portability on riscv64 +* `#26189 `__: MNT: use pythoncapi_compat.h in npy_compat.h +* `#26190 `__: BUG: fix reference counting error in wrapping_method_resolve_descriptors +* `#26207 `__: TST: account for immortal objects in test_iter_refcount +* `#26210 `__: API: Readd ``np.bool_`` typing stub +* `#26212 `__: BENCH: Add benchmarks for np.power(x,2) and np.power(x,0.5) +* `#26213 `__: MNT: try updating pythoncapi-compat +* `#26215 `__: API: Enforce one copy for ``__array__`` when ``copy=True`` +* `#26219 `__: ENH: Enable RVV CPU feature detection +* `#26222 `__: MAINT: Drop Python 3.9 +* `#26227 `__: MAINT: utilize ufunc API const correctness internally +* `#26229 `__: TST: skip limited API test on nogil python build +* `#26232 `__: MAINT: fix typo in _add_newdoc_ufunc docstring +* `#26235 `__: Update numpy.any documentation example +* `#26237 `__: MAINT: Update ``array-api-tests`` job +* `#26239 `__: DOC: add versionadded for copy keyword in np.asarray docstring +* `#26241 `__: DOC: Fixup intp/uintp documentation for ssize_t/size_t changes +* `#26245 `__: DOC: Update ``__array__`` ``copy`` keyword docs +* `#26246 `__: MNT: migrate PyList_GetItem usages to PyList_GetItemRef +* `#26248 `__: MAINT,BUG: Robust string meson template substitution +* `#26251 `__: MNT: disable 
the allocator cache for nogil builds +* `#26258 `__: BLD: update to OpenBLAS 0.3.27 +* `#26260 `__: BUG: Ensure seed sequences are restored through pickling +* `#26261 `__: ENH: introduce a notion of "compatible" stringdtype instances +* `#26263 `__: MAINT: fix typo +* `#26264 `__: MAINT: fix typo in #include example +* `#26267 `__: MAINT: Update URL in nep 0014 - domain change +* `#26268 `__: API: Disallow 0D input arrays in ``nonzero`` +* `#26270 `__: BUG: ensure np.vectorize doesn't truncate fixed-width strings +* `#26273 `__: ENH: Bump Highway to HEAD and remove platform filter +* `#26274 `__: BLD: use install-tags to optionally install tests +* `#26280 `__: ENH: Speedup clip for floating point +* `#26281 `__: BUG: Workaround for Intel Compiler mask conversion bug +* `#26282 `__: MNT: replace _PyDict_GetItemStringWithError with PyDict_GetItemStringRef +* `#26284 `__: TST: run the smoke tests on more python versions +* `#26285 `__: ENH: Decrease wall time of ``ma.cov`` and ``ma.corrcoef`` +* `#26286 `__: BLD: ensure libnpymath and highway static libs use hidden visibility +* `#26292 `__: API: Add ``shape`` and ``copy`` arguments to ``numpy.reshape`` +* `#26294 `__: MNT: disable the coercion cache for the nogil build +* `#26295 `__: CI: add llvm/clang sanitizer tests +* `#26299 `__: MAINT: Pin sphinx to version 7.2.6 +* `#26302 `__: BLD: use newer openblas wheels [wheel build] +* `#26303 `__: DOC: add explanation of dtype to parameter values for np.append +* `#26304 `__: MAINT: address improper error handling and cleanup for ``spin`` +* `#26309 `__: MAINT: Bump actions/upload-artifact from 4.3.1 to 4.3.2 +* `#26311 `__: DOC: Follow-up fixes for new theme +* `#26313 `__: MAINT: Cleanup ``vecdot``'s signature, typing, and importing +* `#26317 `__: BUG: use PyArray_SafeCast in array_astype +* `#26319 `__: BUG: fix spin bench not running on Windows +* `#26320 `__: DOC: Add replacement NEP links in superseded, replaced-by fields +* `#26322 `__: DOC: Documentation and 
examples for conversion of np.timedelta64... +* `#26324 `__: BUG: Fix invalid constructor in string_fastsearch.h with C++... +* `#26325 `__: TST: Skip Cython test for editable install +* `#26329 `__: MAINT: Bump actions/upload-artifact from 4.3.2 to 4.3.3 +* `#26338 `__: MAINT: update x86-simd-sort to latest +* `#26340 `__: DOC: Added small clarification note, based on discussion in issue... +* `#26347 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.3 to 3.0.4 +* `#26348 `__: NOGIL: Make loop data cache and dispatch cache thread-safe in... +* `#26353 `__: BUG: ensure text padding ufuncs handle stringdtype nan-like nulls +* `#26354 `__: BUG: Fix rfft for even input length. +* `#26355 `__: ENH: add support for nan-like null strings in string replace +* `#26359 `__: MAINT: Simplify bugfix for even rfft +* `#26362 `__: MAINT: Bump actions/dependency-review-action from 4.2.5 to 4.3.1 +* `#26363 `__: MAINT: Bump actions/dependency-review-action from 4.3.1 to 4.3.2 +* `#26364 `__: TST: static types are now immortal in the default build too +* `#26368 `__: [NOGIL] thread local promotion state +* `#26369 `__: DOC: fix np.unique release notes [skip cirrus] +* `#26372 `__: BUG: Make sure that NumPy scalars are supported by can_cast +* `#26377 `__: TYP: Fix incorrect type hint for creating a recarray from fromrecords +* `#26378 `__: DOC: Update internal links for generator.rst and related +* `#26384 `__: BUG: Fix incorrect return type of item with length 0 from chararray.__getitem__ +* `#26385 `__: DOC: Updated remaining links in random folder +* `#26386 `__: DOC: Improve example on array broadcasting +* `#26388 `__: BUG: Use Python pickle protocol version 4 for np.save +* `#26391 `__: DOC: Add missing methods to numpy.strings docs +* `#26392 `__: BUG: support nan-like null strings in [l,r]strip +* `#26396 `__: MNT: more gracefully handle spin adding arguments to functions... 
+* `#26399 `__: DOC: Update INSTALL.rst +* `#26413 `__: DOC: Fix some typos and incorrect markups +* `#26415 `__: MAINT: updated instructions to get MachAr byte pattern +* `#26416 `__: MAINT: Bump ossf/scorecard-action from 2.3.1 to 2.3.3 +* `#26418 `__: DOC: add reference docs for NpyString C API +* `#26419 `__: MNT: clean up references to array_owned==2 case in StringDType +* `#26426 `__: TYP,TST: Bump mypy to 1.10.0 +* `#26428 `__: MAINT: Bump pypa/cibuildwheel from 2.17.0 to 2.18.0 +* `#26429 `__: TYP: npyio: loadtxt: usecols: add None type +* `#26431 `__: TST: skip test_frompyfunc_leaks in the free-threaded build +* `#26432 `__: MAINT: Add some PR prefixes to the labeler. +* `#26436 `__: BUG: fixes for three related stringdtype issues +* `#26441 `__: BUG: int32 and intc should both appear in sctypes +* `#26442 `__: DOC: Adding links to polynomial table. +* `#26443 `__: TST: temporarily pin spin to work around issue in 0.9 release +* `#26444 `__: DOC: Remove outdated authentication instructions +* `#26445 `__: TST: fix xfailed tests on pypy 7.3.16 +* `#26447 `__: TST: attempt to fix intel SDE SIMD CI +* `#26449 `__: MAINT: fix typo +* `#26452 `__: DEP: Deprecate 'fix_imports' flag in numpy.save +* `#26456 `__: ENH: improve the error raised by ``numpy.isdtype`` +* `#26463 `__: TST: add basic free-threaded CI testing +* `#26464 `__: BLD: update vendored-meson to current Meson master (1.4.99) +* `#26469 `__: MAINT: Bump github/codeql-action from 2.13.4 to 3.25.5 +* `#26471 `__: BLD: cp313 [wheel build] +* `#26474 `__: BLD: Make NumPy build reproducibly +* `#26476 `__: DOC: Skip API documentation for numpy.distutils with Python 3.12... 
+* `#26478 `__: DOC: Set default as ``-j 1`` for spin docs and move ``-W`` to SPHINXOPTS +* `#26480 `__: TYP: fix type annotation for ``newbyteorder`` +* `#26481 `__: Improve documentation of numpy.ma.filled +* `#26486 `__: MAINT: Bump github/codeql-action from 3.25.5 to 3.25.6 +* `#26487 `__: MAINT: Bump pypa/cibuildwheel from 2.18.0 to 2.18.1 +* `#26488 `__: DOC: add examples to get_printoptions +* `#26489 `__: DOC: add example to get_include +* `#26492 `__: DOC: fix rng.random example in numpy-for-matlab-users +* `#26501 `__: ENH: Implement DLPack version 1 +* `#26503 `__: TST: work around flaky test on free-threaded build +* `#26504 `__: DOC: Copy-edit numpy 2.0 migration guide. +* `#26505 `__: DOC: update the NumPy Roadmap +* `#26507 `__: MAINT: mark temp elision address cache as thread local +* `#26511 `__: MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 +* `#26512 `__: CI: enable free-threaded wheel builds [wheel build] +* `#26514 `__: MAINT: Avoid gcc compiler warning +* `#26515 `__: MAINT: Fix GCC -Wmaybe-uninitialized warning +* `#26517 `__: DOC: Add missing functions to the migration guide +* `#26519 `__: MAINT: Avoid by-pointer parameter passing for LINEARIZE_DATA_t... +* `#26520 `__: BUG: Fix handling of size=() in Generator.choice when a.ndim... +* `#26524 `__: BUG: fix incorrect error handling for dtype('a') deprecation +* `#26526 `__: BUG: fix assert in PyArry_ConcatenateArrays with StringDType +* `#26529 `__: BUG: ``PyDataMem_SetHandler`` check capsule name +* `#26531 `__: BUG: Fix entry-point of Texinfo docs +* `#26534 `__: BUG: cast missing in PyPy-specific f2py code, pin spin in CI +* `#26537 `__: BUG: Fix F77 ! 
comment handling +* `#26538 `__: DOC: Update ``gradient`` docstrings +* `#26546 `__: MAINT: Remove redundant print from bug report issue template +* `#26548 `__: BUG: Fix typo in array-wrap code that lead to memory leak +* `#26550 `__: BUG: Make Polynomial evaluation adhere to nep 50 +* `#26552 `__: BUG: Fix in1d fast-path range +* `#26558 `__: BUG: fancy indexing copy +* `#26559 `__: BUG: fix setxor1d when input arrays aren't 1D +* `#26562 `__: MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 +* `#26563 `__: BUG: Fix memory leaks found with valgrind +* `#26564 `__: CI, BLD: Upgrade to Pyodide 0.26.0 for Emscripten/Pyodide CI... +* `#26566 `__: DOC: update ufunc tutorials to use setuptools +* `#26567 `__: BUG: fix memory leaks found with valgrind (next) +* `#26568 `__: MAINT: Unpin pydata-sphinx-theme +* `#26571 `__: DOC: Added web docs for missing ma and strings routines +* `#26572 `__: ENH: Add array API inspection functions +* `#26579 `__: ENH: Add unstack() +* `#26580 `__: ENH: Add copy and device keyword to np.asanyarray to match np.asarray +* `#26582 `__: BUG: weighted nanpercentile, nanquantile and multi-dim q +* `#26585 `__: MAINT: Bump github/codeql-action from 3.25.6 to 3.25.7 +* `#26586 `__: BUG: Fix memory leaks found by valgrind +* `#26589 `__: BUG: catch invalid fixed-width dtype sizes +* `#26594 `__: DOC: Update constants.rst: fix URL redirect +* `#26597 `__: ENH: Better error message for axis=None in ``np.put_along_axis``... +* `#26599 `__: ENH: use size-zero dtype for broadcast-shapes +* `#26602 `__: TST: Re-enable int8/uint8 einsum tests +* `#26603 `__: BUG: Disallow string inputs for ``copy`` keyword in ``np.array``... +* `#26604 `__: refguide-check with pytest as a runner +* `#26605 `__: DOC: fix typos in numpy v2.0 documentation +* `#26606 `__: DOC: Update randn() to use rng.standard_normal() +* `#26607 `__: MNT: Reorganize non-constant global statics into structs +* `#26609 `__: DOC: Updated notes and examples for np.insert. 
+* `#26610 `__: BUG: np.take handle 64-bit indices on 32-bit platforms +* `#26611 `__: MNT: Remove ``set_string_function`` +* `#26614 `__: MAINT: Bump github/codeql-action from 3.25.7 to 3.25.8 +* `#26619 `__: TST: Re-enable ``test_shift_all_bits`` on clang-cl +* `#26626 `__: DOC: add ``getbufsize`` example +* `#26627 `__: DOC: add ``setbufsize`` example +* `#26628 `__: DOC: add ``matrix_transpose`` example +* `#26629 `__: DOC: add ``unique_all`` example +* `#26630 `__: DOC: add ``unique_counts`` example +* `#26631 `__: DOC: add ``unique_inverse`` example +* `#26632 `__: DOC: add ``unique_values`` example +* `#26633 `__: DOC: fix ``matrix_transpose`` doctest +* `#26634 `__: BUG: Replace dots with underscores in f2py meson backend for... +* `#26636 `__: MAINT: Bump actions/dependency-review-action from 4.3.2 to 4.3.3 +* `#26637 `__: BUG: fix incorrect randomized parameterization in bench_linalg +* `#26638 `__: MNT: use reproducible RNG sequences in benchmarks +* `#26639 `__: MNT: more benchmark cleanup +* `#26641 `__: DOC: Update 2.0 migration guide +* `#26644 `__: DOC: Added clean_dirs to spin docs to remove generated folders +* `#26645 `__: DOC: Enable web docs for numpy.trapezoid and add back links +* `#26646 `__: DOC: Update docstring for invert function +* `#26655 `__: CI: modified CI job to test editable install +* `#26658 `__: MAINT: Bump pypa/cibuildwheel from 2.18.1 to 2.19.0 +* `#26662 `__: DOC: add CI and NEP commit acronyms +* `#26664 `__: CI: build and upload free-threaded nightly wheels for macOS +* `#26667 `__: BUG: Adds asanyarray to start of linalg.cross +* `#26670 `__: MAINT: Bump github/codeql-action from 3.25.8 to 3.25.9 +* `#26672 `__: CI: upgrade FreeBSD Cirrus job from FreeBSD 13.2 to 14.0 +* `#26675 `__: CI: Use default llvm on Windows. 
+* `#26676 `__: MAINT: mark evil_global_disable_warn_O4O8_flag as thread-local +* `#26679 `__: DOC: add ``np.linalg`` examples +* `#26680 `__: remove doctesting from refguide-check, add ``spin check-tutorials`` +* `#26684 `__: MAINT: Bump pypa/cibuildwheel from 2.19.0 to 2.19.1 +* `#26685 `__: MAINT: Bump github/codeql-action from 3.25.9 to 3.25.10 +* `#26686 `__: MAINT: Add comment lost in previous PR. +* `#26691 `__: BUILD: check for scipy-doctest, remove it from requirements +* `#26692 `__: DOC: document workaround for deprecation of dim-2 inputs to ``cross`` +* `#26693 `__: BUG: allow replacement in the dispatch cache +* `#26702 `__: DOC: Added missing See Also sections in Polynomial module +* `#26703 `__: BUG: Handle ``--f77flags`` and ``--f90flags`` for ``meson`` +* `#26706 `__: TST: Skip an f2py module test on Windows +* `#26714 `__: MAINT: Update main after 2.0.0 release. +* `#26716 `__: DOC: Add clarifications np.argpartition +* `#26717 `__: DOC: Mention more error paths and try to consolidate import errors +* `#26721 `__: DOC, MAINT: Turn on version warning banner provided by PyData... +* `#26722 `__: DOC: Update roadmap a bit more +* `#26724 `__: ENH: Add Array API 2023.12 version support +* `#26737 `__: DOC: Extend release notes for #26611 +* `#26739 `__: DOC: Update NEPs statuses +* `#26741 `__: DOC: Remove mention of NaN and NAN aliases from constants +* `#26742 `__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions`` +* `#26744 `__: BUG: Fix new DTypes and new string promotion when signature is... +* `#26750 `__: ENH: Add locking to umath_linalg if no lapack is detected at... 
+* `#26760 `__: TYP: fix incorrect import in ``ma/extras.pyi`` stub +* `#26762 `__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` +* `#26766 `__: ENH: Support integer dtype inputs in rounding functions +* `#26769 `__: BUG: Quantile closest_observation to round to nearest even order +* `#26770 `__: DOC, NEP: Update NEP44 +* `#26771 `__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes +* `#26776 `__: BUG: remove numpy.f2py from excludedimports +* `#26780 `__: MAINT: use an atomic load/store and a mutex to initialize the... +* `#26788 `__: TYP: fix missing ``sys`` import in numeric.pyi +* `#26789 `__: BUG: avoid side-effect of 'include complex.h' +* `#26790 `__: DOC: Update link to Python stdlib random. +* `#26795 `__: BUG: add order to out array of ``numpy.fft`` +* `#26797 `__: BLD: Fix x86-simd-sort build failure on openBSD +* `#26799 `__: MNT: Update dlpack docs and typing stubs +* `#26802 `__: Missing meson pass-through argument +* `#26805 `__: DOC: Update 2.0 migration guide and release note +* `#26808 `__: DOC: Change selected hardlinks to NEPs to intersphinx mappings +* `#26811 `__: DOC: update notes on sign for complex numbers +* `#26812 `__: CI,TST: Fix meson tests needing gfortran [wheel build] +* `#26813 `__: TST: fix 'spin test single_test' for future versions of spin +* `#26814 `__: DOC: Add ``>>> import numpy as np`` stubs everywhere +* `#26815 `__: MAINT: Bump github/codeql-action from 3.25.10 to 3.25.11 +* `#26826 `__: DOC: remove hack to override _add_newdocs_scalars +* `#26827 `__: DOC: AI-Gen examples ctypeslib.as_ctypes_types +* `#26828 `__: DOC: AI generated examples for ma.left_shift. +* `#26829 `__: DOC: AI-Gen examples for ma.put +* `#26830 `__: DOC: AI generated examples for ma.reshape +* `#26831 `__: DOC: AI generated examples for ma.correlate. 
+* `#26833 `__: MAINT: Bump pypa/cibuildwheel from 2.19.1 to 2.19.2 +* `#26841 `__: BENCH: Missing ufunc in benchmarks +* `#26842 `__: BUILD: clean out py2 stuff from npy_3kcompat.h +* `#26846 `__: MAINT: back printoptions with a true context variable +* `#26847 `__: TYP: fix ``ufunc`` method type annotations +* `#26848 `__: TYP: include the ``|`` prefix for ``dtype`` char codes +* `#26849 `__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar`` +* `#26858 `__: TYP: Annotate type aliases as ``typing.TypeAlias`` +* `#26866 `__: MAINT: Bump actions/upload-artifact from 4.3.3 to 4.3.4 +* `#26867 `__: TYP,BUG: fix ``numpy.__dir__`` annotations +* `#26871 `__: TYP: adopt ``typing.LiteralString`` and use more of ``typing.Literal`` +* `#26872 `__: TYP: use ``types.CapsuleType`` on python>=3.13 +* `#26873 `__: TYP: improved ``numpy._array_api_info`` typing +* `#26875 `__: TYP,BUG: Replace ``numpy._typing._UnknownType`` with ``typing.Never`` +* `#26877 `__: BUG: start applying ruff/flake8-implicit-str-concat rules (ISC) +* `#26879 `__: MAINT: start applying ruff/flake8-simplify rules (SIM) +* `#26880 `__: DOC: Fix small incorrect markup +* `#26881 `__: DOC, MAINT: fix typos found by codespell +* `#26882 `__: MAINT: start applying ruff/pyupgrade rules (UP) +* `#26883 `__: BUG: Make issctype always return bool. +* `#26884 `__: MAINT: Remove a redundant import from the generated __ufunc_api.h. +* `#26889 `__: API: Add ``device`` and ``to_device`` to scalars +* `#26891 `__: DOC: Add a note that one should free the proto struct +* `#26892 `__: ENH: Allow use of clip with Python integers to always succeed +* `#26894 `__: MAINT: Bump actions/setup-node from 4.0.2 to 4.0.3 +* `#26895 `__: DOC: Change documentation copyright strings to use a dynamic... +* `#26896 `__: DOC: Change NEP hardlinks to intersphinx mappings. +* `#26897 `__: TYP: type hint ``numpy.polynomial`` +* `#26901 `__: BUG: ``np.loadtxt`` return F_CONTIGUOUS ndarray if row size is... 
+* `#26902 `__: Apply some ruff/flake8-bugbear rules (B004 and B005) +* `#26903 `__: BUG: Fix off-by-one error in amount of characters in strip +* `#26904 `__: BUG,ENH: Fix generic scalar infinite recursion issues +* `#26905 `__: API: Do not consider subclasses for NEP 50 weak promotion +* `#26906 `__: MAINT: Bump actions/setup-python from 5.1.0 to 5.1.1 +* `#26908 `__: ENH: Provide a hook for gufuncs to process core dimensions. +* `#26913 `__: MAINT: declare that NumPy's C extensions support running without... +* `#26914 `__: API: Partially revert unique with return_inverse +* `#26919 `__: BUG,MAINT: Fix utf-8 character stripping memory access +* `#26923 `__: MAINT: Bump actions/dependency-review-action from 4.3.3 to 4.3.4 +* `#26924 `__: MAINT: Bump github/codeql-action from 3.25.11 to 3.25.12 +* `#26927 `__: TYP: Transparent ``__array__`` shape-type +* `#26928 `__: TYP: Covariant ``numpy.flatiter`` type parameter +* `#26929 `__: TYP: Positional-only dunder binop method parameters +* `#26930 `__: BUG: Fix out-of-bound minimum offset for in1d table method +* `#26931 `__: DOC, BUG: Fix running full test command in docstring +* `#26934 `__: MAINT: add PyArray_ZeroContiguousBuffer helper and use it in... +* `#26935 `__: BUG: fix ``f2py`` tests to work with v2 API +* `#26937 `__: TYP,BUG: Remove ``numpy.cast`` and ``numpy.disp`` from the typing... +* `#26938 `__: TYP,BUG: Fix ``dtype`` type alias specialization issue in ``__init__.pyi`` +* `#26942 `__: TYP: Improved ``numpy.generic`` rich comparison operator type... +* `#26943 `__: TYP,BUG: Remove non-existant ``numpy.__git_version__`` in the... +* `#26946 `__: TYP: Add missing typecodes in ``numpy._core.numerictypes.typecodes`` +* `#26950 `__: MAINT: add freethreading_compatible directive to cython build +* `#26953 `__: TYP: Replace ``typing.Union`` with ``|`` in ``numpy._typing`` +* `#26954 `__: TYP: Replace ``typing.Optional[T]`` with ``T | None`` in the... 
+* `#26964 `__: DOC: Issue template for static typing +* `#26968 `__: MAINT: add a 'tests' install tag to the `numpy._core._simd` extension... +* `#26969 `__: BUG: Fix unicode strip +* `#26972 `__: BUG: Off by one in memory overlap check +* `#26975 `__: TYP: Use ``Final`` and ``LiteralString`` for the constants in... +* `#26980 `__: DOC: add sphinx-copybutton +* `#26981 `__: ENH: add support in f2py to declare gil-disabled support +* `#26983 `__: TYP,BUG: Type annotations for ``numpy.trapezoid`` +* `#26984 `__: TYP,BUG: Fix potentially unresolved typevar in ``median`` and... +* `#26985 `__: BUG: Add object cast to avoid warning with limited API +* `#26989 `__: DOC: fix ctypes example +* `#26991 `__: MAINT: mark scipy-openblas nightly tests as allowed to fail +* `#26992 `__: TYP: Covariant ``numpy.ndenumerate`` type parameter +* `#26993 `__: TYP,BUG: FIx ``numpy.ndenumerate`` annotations for ``object_``... +* `#26996 `__: ENH: Add ``__slots__`` to private (sub-)classes in ``numpy.lib._index_tricks_impl`` +* `#27002 `__: MAINT: Update main after 2.0.1 release. +* `#27008 `__: TYP,BUG: Complete type stubs for ``numpy.dtypes`` +* `#27009 `__: TST, MAINT: Loosen required test precision +* `#27010 `__: DOC: update tutorials link +* `#27011 `__: MAINT: replace PyThread_type_lock with PyMutex on Python >= 3.13.0b3 +* `#27013 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27014 `__: BUG: fix gcd inf +* `#27015 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27017 `__: DOC: Release note for feature added in gh-26908. +* `#27019 `__: TYP: improved ``numpy.array`` type hints for array-like input +* `#27025 `__: DOC: Replace np.matrix in .view() docstring example. +* `#27026 `__: DOC: fix tiny typo +* `#27027 `__: BUG: Fix simd loadable stride logic +* `#27031 `__: DOC: document 'floatmode' and 'legacy' keys from np.get_printoptions'... +* `#27034 `__: BUG: random: Fix edge case of Johnk's algorithm for the beta... 
+* `#27041 `__: MAINT: Bump github/codeql-action from 3.25.12 to 3.25.14 +* `#27043 `__: CI: unify free-threaded wheel builds with other builds +* `#27046 `__: BUG: random: prevent zipf from hanging when parameter is large. +* `#27047 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27048 `__: BUG: random: Fix long delays/hangs with zipf(a) when a near 1. +* `#27050 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27051 `__: TST: Refactor to consistently use CompilerChecker +* `#27052 `__: TST: fix issues with tests that use numpy.testing.extbuild +* `#27055 `__: MAINT: Bump ossf/scorecard-action from 2.3.3 to 2.4.0 +* `#27056 `__: MAINT: Bump github/codeql-action from 3.25.14 to 3.25.15 +* `#27057 `__: BUG: fix another cast setup in array_assign_subscript +* `#27058 `__: DOC: Add some missing examples for ``np.strings`` methods +* `#27059 `__: ENH: Disable name suggestions on some AttributeErrors +* `#27060 `__: MAINT: linalg: Simplify some linalg gufuncs. +* `#27070 `__: BUG: Bump Highway to latest master +* `#27076 `__: DEP: lib: Deprecate acceptance of float (and more) in bincount. +* `#27079 `__: MAINT: 3.9/10 cleanups +* `#27081 `__: CI: Upgrade ``array-api-tests`` +* `#27085 `__: ENH: fixes for warnings on free-threaded wheel builds +* `#27087 `__: ENH: mark the dragon4 scratch space as thread-local +* `#27090 `__: DOC: update np.shares_memory() docs +* `#27091 `__: API,BUG: Fix copyto (and ufunc) handling of scalar cast safety +* `#27094 `__: DOC: Add release note about deprecation introduced in gh-27076. +* `#27095 `__: DOC: Fix indentation of a few release notes. 
+* `#27096 `__: BUG: Complex printing tests fail on Windows ARM64 +* `#27097 `__: MAINT: Bump actions/upload-artifact from 4.3.4 to 4.3.5 +* `#27098 `__: BUG: add missing error handling in public_dtype_api.c +* `#27102 `__: DOC: Fixup promotion doc +* `#27104 `__: BUG: Fix building NumPy in FIPS mode +* `#27108 `__: DOC: remove incorrect docstring comment +* `#27110 `__: BLD: cp313 cp313t linux_aarch64 [wheel build] +* `#27112 `__: BUG: Fix repr for integer scalar subclasses +* `#27113 `__: DEV: make linter.py runnable from outside the root of the repo +* `#27114 `__: MAINT: Bump pypa/cibuildwheel from 2.19.2 to 2.20.0 +* `#27115 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27117 `__: BUG: Ensure that scalar binops prioritize __array_ufunc__ +* `#27118 `__: BLD: update vendored Meson for cross-compilation patches +* `#27123 `__: BUG: Bump Highway to latest +* `#27124 `__: MAINT: Bump github/codeql-action from 3.25.15 to 3.26.0 +* `#27125 `__: MAINT: Bump actions/upload-artifact from 4.3.5 to 4.3.6 +* `#27127 `__: BUG: Fix missing error return in copyto +* `#27144 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27149 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27162 `__: BLD: use smaller scipy-openblas builds +* `#27166 `__: ENH: fix thread-unsafe C API usages +* `#27173 `__: MAINT: Bump pythoncapi-compat version. +* `#27176 `__: REL: Prepare for the NumPy 2.1.0rc1 release [wheel build] +* `#27180 `__: DOC: Add release notes for #26897 +* `#27181 `__: DOC: Add release notes for #27008 +* `#27190 `__: BUILD: use a shrunken version of scipy-openblas wheels [wheel... 
+* `#27193 `__: REV: Revert undef I and document it +* `#27196 `__: BUILD: improve download script +* `#27197 `__: MAINT: update default NPY_FEATURE_VERSION after dropping py39 +* `#27200 `__: DOC: add free-threading release notes +* `#27209 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27216 `__: TYP: Fixed & improved type hints for ``numpy.histogram2d`` +* `#27217 `__: TYP: Fix incompatible overrides in the ``numpy._typing._ufunc``... +* `#27229 `__: BUG: Fix ``PyArray_ZeroContiguousBuffer`` (resize) with struct... +* `#27233 `__: DOC: add docs on thread safety in NumPy +* `#27234 `__: BUG: Allow fitting of degree zero polynomials with Polynomial.fit diff --git a/doc/changelog/2.1.1-changelog.rst b/doc/changelog/2.1.1-changelog.rst new file mode 100644 index 000000000000..d18636771e1a --- /dev/null +++ b/doc/changelog/2.1.1-changelog.rst @@ -0,0 +1,30 @@ + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Andrew Nelson +* Charles Harris +* Mateusz Sokół +* Maximilian Weigand + +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg + +Pull requests merged +==================== + +A total of 10 pull requests were merged for this release. 
+ +* `#27236 `__: REL: Prepare for the NumPy 2.1.0 release [wheel build] +* `#27252 `__: MAINT: prepare 2.1.x for further development +* `#27259 `__: BUG: revert unintended change in the return value of set_printoptions +* `#27266 `__: BUG: fix reference counting bug in __array_interface__ implementation… +* `#27267 `__: TST: Add regression test for missing descr in array-interface +* `#27276 `__: BUG: Fix #27256 and #27257 +* `#27278 `__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27287 `__: MAINT: Update maintenance/2.1.x after the 2.0.2 release +* `#27303 `__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27304 `__: BUG: f2py: better handle filtering of public/private subroutines diff --git a/doc/changelog/2.1.2-changelog.rst b/doc/changelog/2.1.2-changelog.rst new file mode 100644 index 000000000000..bd0f7bd2422c --- /dev/null +++ b/doc/changelog/2.1.2-changelog.rst @@ -0,0 +1,38 @@ + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Chris Sidebottom +* Ishan Koradia + +* João Eiras + +* Katie Rust + +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Slava Gorloff + + +Pull requests merged +==================== + +A total of 14 pull requests were merged for this release. + +* `#27333 `__: MAINT: prepare 2.1.x for further development +* `#27400 `__: BUG: apply critical sections around populating the dispatch cache +* `#27406 `__: BUG: Stub out get_build_msvc_version if distutils.msvccompiler... +* `#27416 `__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... +* `#27433 `__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27437 `__: BUG: Allow unsigned shift argument for np.roll +* `#27439 `__: BUG: Disable SVE VQSort +* `#27471 `__: BUG: rfftn axis bug +* `#27479 `__: BUG: Fix extra decref of PyArray_UInt8DType. 
+* `#27480 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... +* `#27481 `__: MAINT: Check for SVE support on demand +* `#27484 `__: BUG: initialize the promotion state to be weak +* `#27501 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.2 +* `#27506 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ diff --git a/doc/changelog/2.1.3-changelog.rst b/doc/changelog/2.1.3-changelog.rst new file mode 100644 index 000000000000..073bd002e7ca --- /dev/null +++ b/doc/changelog/2.1.3-changelog.rst @@ -0,0 +1,49 @@ + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar + +* Austin + +* Benjamin A. Beasley + +* Charles Harris +* Christian Lorentzen +* Marcel Telka + +* Matti Picus +* Michael Davidsaver + +* Nathan Goldbaum +* Peter Hawkins +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* dependabot[bot] +* kp2pml30 + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. + +* `#27512 `__: MAINT: prepare 2.1.x for further development +* `#27537 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.1 +* `#27538 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27539 `__: MAINT: MSVC does not support #warning directive +* `#27543 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27561 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27562 `__: BLD: update vendored Meson to 1.5.2 +* `#27563 `__: BUG: weighted quantile for some zero weights (#27549) +* `#27565 `__: MAINT: Use miniforge for macos conda test. 
+* `#27566 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27569 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27570 `__: BLD: start building Windows free-threaded wheels [wheel build] +* `#27571 `__: BUILD: vendor tempita from Cython +* `#27574 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27592 `__: MAINT: Update Highway to latest +* `#27593 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27616 `__: BUG: Fix Linux QEMU CI workflow +* `#27668 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27669 `__: ENH: fix wasm32 runtime type error in numpy._core +* `#27672 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. +* `#27673 `__: BUG: fixes for StringDType/unicode promoters diff --git a/doc/release/upcoming_changes/12150.improvement.rst b/doc/release/upcoming_changes/12150.improvement.rst deleted file mode 100644 index f73a6d2aaa28..000000000000 --- a/doc/release/upcoming_changes/12150.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -``histogram`` auto-binning now returns bin sizes >=1 for integer input data ---------------------------------------------------------------------------- -For integer input data, bin sizes smaller than 1 result in spurious empty -bins. This is now avoided when the number of bins is computed using one of the -algorithms provided by `histogram_bin_edges`. diff --git a/doc/release/upcoming_changes/26081.improvement.rst b/doc/release/upcoming_changes/26081.improvement.rst deleted file mode 100644 index bac5c197caa0..000000000000 --- a/doc/release/upcoming_changes/26081.improvement.rst +++ /dev/null @@ -1,11 +0,0 @@ -``ndarray`` shape-type parameter is now covariant and bound to ``tuple[int, ...]`` ----------------------------------------------------------------------------------- -Static typing for ``ndarray`` is a long-term effort that continues -with this change. It is a generic type with type parameters for -the shape and the data type. 
Previously, the shape type parameter could be -any value. This change restricts it to a tuple of ints, as one would expect -from using ``ndarray.shape``. Further, the shape-type parameter has been -changed from invariant to covariant. This change also applies to the subtypes -of ``ndarray``, e.g. ``numpy.ma.MaskedArray``. See the -`typing docs `_ -for more information. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26103.c_api.rst b/doc/release/upcoming_changes/26103.c_api.rst deleted file mode 100644 index 9d0d998e2dfc..000000000000 --- a/doc/release/upcoming_changes/26103.c_api.rst +++ /dev/null @@ -1,15 +0,0 @@ -API symbols now hidden but customizable ---------------------------------------- -NumPy now defaults to hide the API symbols it adds to allow all NumPy API -usage. -This means that by default you cannot dynamically fetch the NumPy API from -another library (this was never possible on windows). - -If you are experiencing linking errors related to ``PyArray_API`` or -``PyArray_RUNTIME_VERSION``, you can define the -:c:macro:`NPY_API_SYMBOL_ATTRIBUTE` to opt-out of this change. - -If you are experiencing problems due to an upstream header including NumPy, -the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before -their header and import NumPy yourself based on :ref:`including-the-c-api`. - diff --git a/doc/release/upcoming_changes/26268.expired.rst b/doc/release/upcoming_changes/26268.expired.rst deleted file mode 100644 index 932fdbfae6d7..000000000000 --- a/doc/release/upcoming_changes/26268.expired.rst +++ /dev/null @@ -1 +0,0 @@ -* Scalars and 0D arrays are disallowed for `numpy.nonzero` and `numpy.ndarray.nonzero`. 
diff --git a/doc/release/upcoming_changes/26285.change.rst b/doc/release/upcoming_changes/26285.change.rst deleted file mode 100644 index d652c58dc799..000000000000 --- a/doc/release/upcoming_changes/26285.change.rst +++ /dev/null @@ -1,13 +0,0 @@ -``ma.corrcoef`` may return a slightly different result ------------------------------------------------------- -A pairwise observation approach is currently used in `ma.corrcoef` to -calculate the standard deviations for each pair of variables. This has been -changed as it is being used to normalise the covariance, estimated using -`ma.cov`, which does not consider the observations for each variable in a -pairwise manner, rendering it unnecessary. The normalisation has been -replaced by the more appropriate standard deviation for each variable, -which significantly reduces the wall time, but will return slightly different -estimates of the correlation coefficients in cases where the observations -between a pair of variables are not aligned. However, it will return the same -estimates in all other cases, including returning the same correlation matrix -as `corrcoef` when using a masked array with no masked values. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26285.performance.rst b/doc/release/upcoming_changes/26285.performance.rst deleted file mode 100644 index 79009f662a0f..000000000000 --- a/doc/release/upcoming_changes/26285.performance.rst +++ /dev/null @@ -1,5 +0,0 @@ -``ma.cov`` and ``ma.corrcoef`` are now significantly faster ------------------------------------------------------------ -The private function has been refactored along with `ma.cov` and -`ma.corrcoef`. They are now significantly faster, particularly on large, -masked arrays. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/26292.new_feature.rst b/doc/release/upcoming_changes/26292.new_feature.rst deleted file mode 100644 index fc2c33571d77..000000000000 --- a/doc/release/upcoming_changes/26292.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* `numpy.reshape` and `numpy.ndarray.reshape` now support ``shape`` and ``copy`` arguments. diff --git a/doc/release/upcoming_changes/26313.change.rst b/doc/release/upcoming_changes/26313.change.rst deleted file mode 100644 index 99c8b1d879f9..000000000000 --- a/doc/release/upcoming_changes/26313.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* As `numpy.vecdot` is now a ufunc it has a less precise signature. - This is due to the limitations of ufunc's typing stub. diff --git a/doc/release/upcoming_changes/26388.performance.rst b/doc/release/upcoming_changes/26388.performance.rst deleted file mode 100644 index 2e99f9452c1e..000000000000 --- a/doc/release/upcoming_changes/26388.performance.rst +++ /dev/null @@ -1,3 +0,0 @@ -* `numpy.save` now uses pickle protocol version 4 for saving arrays with - object dtype, which allows for pickle objects larger than 4GB and improves - saving speed by about 5% for large arrays. diff --git a/doc/release/upcoming_changes/26452.deprecation.rst b/doc/release/upcoming_changes/26452.deprecation.rst deleted file mode 100644 index cc4a10bfafee..000000000000 --- a/doc/release/upcoming_changes/26452.deprecation.rst +++ /dev/null @@ -1,4 +0,0 @@ -* The `fix_imports` keyword argument in `numpy.save` is deprecated. Since - NumPy 1.17, `numpy.save` uses a pickle protocol that no longer supports - Python 2, and ignored `fix_imports` keyword. This keyword is kept only - for backward compatibility. It is now deprecated. 
diff --git a/doc/release/upcoming_changes/26501.new_feature.rst b/doc/release/upcoming_changes/26501.new_feature.rst deleted file mode 100644 index c7465925295c..000000000000 --- a/doc/release/upcoming_changes/26501.new_feature.rst +++ /dev/null @@ -1,2 +0,0 @@ -* NumPy now supports DLPack v1, support for older versions will - be deprecated in the future. diff --git a/doc/release/upcoming_changes/26579.new_function.rst b/doc/release/upcoming_changes/26579.new_function.rst deleted file mode 100644 index 168d12189323..000000000000 --- a/doc/release/upcoming_changes/26579.new_function.rst +++ /dev/null @@ -1,6 +0,0 @@ -New function `numpy.unstack` ----------------------------- - -A new function ``np.unstack(array, axis=...)`` was added, which splits -an array into a tuple of arrays along an axis. It serves as the inverse -of `numpy.stack`. diff --git a/doc/release/upcoming_changes/26580.new_feature.rst b/doc/release/upcoming_changes/26580.new_feature.rst deleted file mode 100644 index c625e9b9d8a2..000000000000 --- a/doc/release/upcoming_changes/26580.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* `numpy.asanyarray` now supports ``copy`` and ``device`` arguments, matching `numpy.asarray`. diff --git a/doc/release/upcoming_changes/26611.expired.rst b/doc/release/upcoming_changes/26611.expired.rst deleted file mode 100644 index 1df220d2b2a7..000000000000 --- a/doc/release/upcoming_changes/26611.expired.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``set_string_function`` internal function was removed and ``PyArray_SetStringFunction`` - was stubbed out. diff --git a/doc/release/upcoming_changes/26611.new_feature.rst b/doc/release/upcoming_changes/26611.new_feature.rst deleted file mode 100644 index 6178049cf4ed..000000000000 --- a/doc/release/upcoming_changes/26611.new_feature.rst +++ /dev/null @@ -1,2 +0,0 @@ -* `numpy.printoptions`, `numpy.get_printoptions`, and `numpy.set_printoptions` now support - a new option, ``override_repr``, for defining custom ``repr(array)`` behavior. 
diff --git a/doc/release/upcoming_changes/26656.improvement.rst b/doc/release/upcoming_changes/26656.improvement.rst deleted file mode 100644 index 66d7508d2738..000000000000 --- a/doc/release/upcoming_changes/26656.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -`np.quantile` with method ``closest_observation`` chooses nearest even order statistic --------------------------------------------------------------------------------------- -This changes the definition of nearest for border cases from the nearest odd -order statistic to nearest even order statistic. The numpy implementation now -matches other reference implementations. diff --git a/doc/release/upcoming_changes/26724.new_feature.rst b/doc/release/upcoming_changes/26724.new_feature.rst deleted file mode 100644 index 3c6a830728a4..000000000000 --- a/doc/release/upcoming_changes/26724.new_feature.rst +++ /dev/null @@ -1,7 +0,0 @@ -* `numpy.cumulative_sum` and `numpy.cumulative_prod` were added as Array API - compatible alternatives for `numpy.cumsum` and `numpy.cumprod`. The new functions - can include a fixed initial (zeros for ``sum`` and ones for ``prod``) in the result. -* `numpy.clip` now supports ``max`` and ``min`` keyword arguments which are meant - to replace ``a_min`` and ``a_max``. Also, for ``np.clip(a)`` or ``np.clip(a, None, None)`` - a copy of the input array will be returned instead of raising an error. -* `numpy.astype` now supports ``device`` argument. diff --git a/doc/release/upcoming_changes/26750.improvement.rst b/doc/release/upcoming_changes/26750.improvement.rst deleted file mode 100644 index 858061dbe48a..000000000000 --- a/doc/release/upcoming_changes/26750.improvement.rst +++ /dev/null @@ -1,12 +0,0 @@ -`lapack_lite` is now thread safe --------------------------------- - -NumPy provides a minimal low-performance version of LAPACK named ``lapack_lite`` -that can be used if no BLAS/LAPACK system is detected at build time. - -Until now, ``lapack_lite`` was not thread safe. 
Single-threaded use cases did -not hit any issues, but running linear algebra operations in multiple threads -could lead to errors, incorrect results, or seg faults due to data races. - -We have added a global lock, serializing access to ``lapack_lite`` in multiple -threads. diff --git a/doc/release/upcoming_changes/26766.change.rst b/doc/release/upcoming_changes/26766.change.rst deleted file mode 100644 index 923dbe816dd1..000000000000 --- a/doc/release/upcoming_changes/26766.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* `numpy.floor`, `numpy.ceil`, and `numpy.trunc` now won't perform casting - to a floating dtype for integer and boolean dtype input arrays. diff --git a/doc/release/upcoming_changes/26842.c_api.rst b/doc/release/upcoming_changes/26842.c_api.rst deleted file mode 100644 index 7e50dd385006..000000000000 --- a/doc/release/upcoming_changes/26842.c_api.rst +++ /dev/null @@ -1,5 +0,0 @@ -Many shims removed from npy_3kcompat.h --------------------------------------- -Many of the old shims and helper functions were removed from -``npy_3kcompat.h``. If you find yourself in need of these, vendor the previous -version of the file into your codebase. diff --git a/doc/release/upcoming_changes/26846.improvement.rst b/doc/release/upcoming_changes/26846.improvement.rst deleted file mode 100644 index ae9b72d195bf..000000000000 --- a/doc/release/upcoming_changes/26846.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -The `numpy.printoptions` context manager is now thread and async-safe ---------------------------------------------------------------------- - -In prior versions of NumPy, the printoptions were defined using a combination -of Python and C global variables. We have refactored so the state is stored in -a python ``ContextVar``, making the context manager thread and async-safe. 
diff --git a/doc/release/upcoming_changes/26908.c_api.rst b/doc/release/upcoming_changes/26908.c_api.rst deleted file mode 100644 index d6e43591819d..000000000000 --- a/doc/release/upcoming_changes/26908.c_api.rst +++ /dev/null @@ -1,8 +0,0 @@ -New ``PyUFuncObject`` field ``process_core_dims_func`` ------------------------------------------------------- -The field ``process_core_dims_func`` was added to the structure -``PyUFuncObject``. For generalized ufuncs, this field can be set to a -function of type ``PyUFunc_ProcessCoreDimsFunc`` that will be called when the -ufunc is called. It allows the ufunc author to check that core dimensions -satisfy additional constraints, and to set output core dimension sizes if they -have not been provided. diff --git a/doc/release/upcoming_changes/26981.new_feature.rst b/doc/release/upcoming_changes/26981.new_feature.rst deleted file mode 100644 index f466faeb7590..000000000000 --- a/doc/release/upcoming_changes/26981.new_feature.rst +++ /dev/null @@ -1,9 +0,0 @@ -``f2py`` can generate freethreading-compatible C extensions ------------------------------------------------------------ - -Pass ``--freethreading-compatible`` to the f2py CLI tool to produce a C -extension marked as compatible with the free threading CPython -interpreter. Doing so prevents the interpreter from re-enabling the GIL at -runtime when it imports the C extension. Note that ``f2py`` does not analyze -fortran code for thread safety, so you must verify that the wrapped fortran -code is thread safe before marking the extension as compatible. 
diff --git a/doc/release/upcoming_changes/27076.deprecation.rst b/doc/release/upcoming_changes/27076.deprecation.rst deleted file mode 100644 index f692b814c17d..000000000000 --- a/doc/release/upcoming_changes/27076.deprecation.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Passing non-integer inputs as the first argument of `bincount` is now - deprecated, because such inputs are silently cast to integers with no - warning about loss of precision. diff --git a/doc/release/upcoming_changes/27091.change.rst b/doc/release/upcoming_changes/27091.change.rst deleted file mode 100644 index 5b71692efabd..000000000000 --- a/doc/release/upcoming_changes/27091.change.rst +++ /dev/null @@ -1,24 +0,0 @@ -Cast-safety fixes in ``copyto`` and ``full`` --------------------------------------------- -``copyto`` now uses NEP 50 correctly and applies this to its cast safety. -Python integer to NumPy integer casts and Python float to NumPy float casts -are now considered "safe" even if assignment may fail or precision may be lost. -This means the following examples change slightly: - -* ``np.copyto(int8_arr, 1000)`` previously performed an unsafe/same-kind cast - of the Python integer. It will now always raise, to achieve an unsafe cast - you must pass an array or NumPy scalar. -* ``np.copyto(uint8_arr, 1000, casting="safe")`` will raise an OverflowError - rather than a TypeError due to same-kind casting. -* ``np.copyto(float32_arr, 1e300, casting="safe")`` will overflow to ``inf`` - (float32 cannot hold ``1e300``) rather raising a TypeError. - -Further, only the dtype is used when assigning NumPy scalars (or 0-d arrays), -meaning that the following behaves differently: - -* ``np.copyto(float32_arr, np.float64(3.0), casting="safe")`` raises. -* ``np.coptyo(int8_arr, np.int64(100), casting="safe")`` raises. - Previously, NumPy checked whether the 100 fits the ``int8_arr``. - -This aligns ``copyto``, ``full``, and ``full_like`` with the correct NumPy 2 -behavior. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/27695.improvement.rst b/doc/release/upcoming_changes/27695.improvement.rst new file mode 100644 index 000000000000..95584b6e90ce --- /dev/null +++ b/doc/release/upcoming_changes/27695.improvement.rst @@ -0,0 +1,5 @@ +``f2py`` handles multiple modules and exposes variables again +------------------------------------------------------------- +A regression has been fixed which allows F2PY users to expose variables to +Python in modules with only assignments, and also fixes situations where +multiple modules are present within a single source file. diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 2ff49b162fe4..55d4696a114d 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -220,6 +220,19 @@ using the NumPy types. You can still write cython code using the ``c.real`` and ``c.imag`` attributes (using the native typedefs), but you can no longer use in-place operators ``c.imag += 1`` in Cython's c++ mode. +Because NumPy 2 now includes ``complex.h``, code that uses a variable named +``I`` may see an error such as + +.. code-block:: C + error: expected ‘)’ before ‘__extension__’ + double I, + +to use the name ``I`` requires an ``#undef I`` now. + +.. note:: + NumPy 2.0.1 briefly included the ``#undef I`` to help users not already + including ``complex.h``. + Changes to namespaces ===================== diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 68fbb6ef3d66..e6f26f92cdf5 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1264,6 +1264,13 @@ User-defined data types registered (checked only by the address of the pointer), then return the previously-assigned type-number. + The number of user DTypes known to numpy is stored in + ``NPY_NUMUSERTYPES``, a static global variable that is public in the + C API.
Accessing this symbol is inherently *not* thread-safe. If + for some reason you need to use this API in a multithreaded context, + you will need to add your own locking; NumPy does not ensure new + data types can be added in a thread-safe manner. + .. c:function:: int PyArray_RegisterCastFunc( \ PyArray_Descr* descr, int totype, PyArray_VectorUnaryFunc* castfunc) @@ -4092,8 +4099,8 @@ Memory management .. c:function:: char* PyDataMem_RENEW(void * ptr, size_t newbytes) - Macros to allocate, free, and reallocate memory. These macros are used - internally to create arrays. + Functions to allocate, free, and reallocate memory. These are used + internally to manage array data memory unless overridden. .. c:function:: npy_intp* PyDimMem_NEW(int nd) diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 8d57153d8803..4565e602193f 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -1611,3 +1611,29 @@ for completeness and assistance in understanding the code. ``arrayobject.h`` header. This type is not exposed to Python and could be replaced with a C-structure. As a Python type it takes advantage of reference- counted memory management. + + +NumPy C-API and C complex +========================= +When you use the NumPy C-API, you will have access to complex real declarations +``npy_cdouble`` and ``npy_cfloat``, which are declared in terms of the C +standard types from ``complex.h``. Unfortunately, ``complex.h`` contains +``#define I ...`` (where the actual definition depends on the compiler), which +means that any downstream user that does ``#include <complex.h>`` +could get ``I`` defined, and using something like declaring ``double I;`` in +their code will result in an obscure compiler error like + +.. code-block:: C + error: expected ‘)’ before ‘__extension__’ + double I, + +This error can be avoided by adding:: + + #undef I + +to your code.
+ +.. versionchanged:: 2.0 + The inclusion of ``complex.h`` was new in NumPy 2, so that code defining + a different ``I`` may not have required the ``#undef I`` on older versions. + NumPy 2.0.1 briefly included the ``#undef I``. \ No newline at end of file diff --git a/doc/source/reference/global_state.rst b/doc/source/reference/global_state.rst index e0ab1bb2a7ba..e66c86faf1b3 100644 --- a/doc/source/reference/global_state.rst +++ b/doc/source/reference/global_state.rst @@ -1,14 +1,13 @@ .. _global_state: -************ -Global state -************ - -NumPy has a few import-time, compile-time, or runtime options -which change the global behaviour. -Most of these are related to performance or for debugging -purposes and will not be interesting to the vast majority -of users. +**************************** +Global Configuration Options +**************************** + +NumPy has a few import-time, compile-time, or runtime configuration +options which change the global behaviour. Most of these are related to +performance or for debugging purposes and will not be interesting to the +vast majority of users. Performance-related options diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index ed9641409014..02e3248953fb 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -58,6 +58,7 @@ Other topics array_api simd/index + thread_safety global_state security distutils_status_migration diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst new file mode 100644 index 000000000000..84590bfac39c --- /dev/null +++ b/doc/source/reference/thread_safety.rst @@ -0,0 +1,51 @@ +.. _thread_safety: + +************* +Thread Safety +************* + +NumPy supports use in a multithreaded context via the `threading` module in the +standard library.
Many NumPy operations release the GIL, so unlike many +situations in Python, it is possible to improve parallel performance by +exploiting multithreaded parallelism in Python. + +The easiest performance gains happen when each worker thread owns its own array +or set of array objects, with no data directly shared between threads. Because +NumPy releases the GIL for many low-level operations, threads that spend most of +the time in low-level code will run in parallel. + +It is possible to share NumPy arrays between threads, but extreme care must be +taken to avoid creating thread safety issues when mutating arrays that are +shared between multiple threads. If two threads simultaneously read from and +write to the same array, they will at best produce inconsistent, racey results that +are not reproducible, let alone correct. It is also possible to crash the Python +interpreter by, for example, resizing an array while another thread is reading +from it to compute a ufunc operation. + +In the future, we may add locking to ndarray to make writing multithreaded +algorithms using NumPy arrays safer, but for now we suggest focusing on +read-only access of arrays that are shared between threads, or adding your own +locking if you need to mutation and multithreading. + +Note that operations that *do not* release the GIL will see no performance gains +from use of the `threading` module, and instead might be better served with +`multiprocessing`. In particular, operations on arrays with ``dtype=object`` do +not release the GIL. + +Free-threaded Python +-------------------- + +.. versionadded:: 2.1 + +Starting with NumPy 2.1 and CPython 3.13, NumPy also has experimental support +for python runtimes with the GIL disabled. See +https://py-free-threading.github.io for more information about installing and +using free-threaded Python, as well as information about supporting it in +libraries that depend on NumPy. 
+ +Because free-threaded Python does not have a global interpreter lock to +serialize access to Python objects, there are more opportunities for threads to +mutate shared state and create thread safety issues. In addition to the +limitations about locking of the ndarray object noted above, this also means +that arrays with ``dtype=object`` are not protected by the GIL, creating data +races for python objects that are not possible outside free-threaded python. diff --git a/doc/source/release.rst b/doc/source/release.rst index cad71725fe94..5f3e184b9478 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,7 +5,12 @@ Release notes .. toctree:: :maxdepth: 2 + 2.1.4 + 2.1.3 + 2.1.2 + 2.1.1 2.1.0 + 2.0.2 2.0.1 2.0.0 1.26.4 diff --git a/doc/source/release/2.0.2-notes.rst b/doc/source/release/2.0.2-notes.rst new file mode 100644 index 000000000000..ae5c26250ba7 --- /dev/null +++ b/doc/source/release/2.0.2-notes.rst @@ -0,0 +1,58 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.0.2 Release Notes +========================== + +NumPy 2.0.2 is a maintenance release that fixes bugs and regressions +discovered after the 2.0.1 release. + +The Python versions supported by this release are 3.9-3.12. + + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Bruno Oliveira + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christopher Sidebottom +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* Yair Chuchem + + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. 
+ +* `#27000 `__: REL: Prepare for the NumPy 2.0.1 release [wheel build] +* `#27001 `__: MAINT: prepare 2.0.x for further development +* `#27021 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27022 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27061 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27073 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27074 `__: BUG: Bump Highway to latest master +* `#27077 `__: BUG: Off by one in memory overlap check +* `#27122 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27126 `__: BUG: Bump Highway to latest +* `#27128 `__: BUG: add missing error handling in public_dtype_api.c +* `#27129 `__: BUG: fix another cast setup in array_assign_subscript +* `#27130 `__: BUG: Fix building NumPy in FIPS mode +* `#27131 `__: BLD: update vendored Meson for cross-compilation patches +* `#27146 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27151 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27195 `__: REV: Revert undef I and document it +* `#27213 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27279 `__: BUG: Fix array_equal for numeric and non-numeric scalar types + diff --git a/doc/source/release/2.1.0-notes.rst b/doc/source/release/2.1.0-notes.rst index d0b0b6f1b785..bb9c71079062 100644 --- a/doc/source/release/2.1.0-notes.rst +++ b/doc/source/release/2.1.0-notes.rst @@ -1,19 +1,362 @@ .. currentmodule:: numpy -========================== +========================= NumPy 2.1.0 Release Notes -========================== +========================= +NumPy 2.1.0 provides support for the upcoming Python 3.13 release and drops +support for Python 3.9. In addition to the usual bug fixes and updated Python +support, it helps get us back into our usual release cycle after the extended +development of 2.0. 
The highlights for this release are: -Highlights -========== +- Support for the array-api 2023.12 standard. +- Support for Python 3.13. +- Preliminary support for free threaded Python 3.13. -*We'll choose highlights for this release near the end of the release cycle.* +Python versions 3.10-3.13 are supported in this release. -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +New functions +============= -.. **Content from release note snippets in doc/release/upcoming_changes:** +New function ``numpy.unstack`` +------------------------------ + +A new function ``np.unstack(array, axis=...)`` was added, which splits +an array into a tuple of arrays along an axis. It serves as the inverse +of `numpy.stack`. + +(`gh-26579 `__) + + +Deprecations +============ + +* The ``fix_imports`` keyword argument in ``numpy.save`` is deprecated. Since + NumPy 1.17, ``numpy.save`` uses a pickle protocol that no longer supports + Python 2, and ignored ``fix_imports`` keyword. This keyword is kept only + for backward compatibility. It is now deprecated. + + (`gh-26452 `__) + +* Passing non-integer inputs as the first argument of `bincount` is now + deprecated, because such inputs are silently cast to integers with no + warning about loss of precision. + + (`gh-27076 `__) + + +Expired deprecations +==================== + +* Scalars and 0D arrays are disallowed for ``numpy.nonzero`` and ``numpy.ndarray.nonzero``. + + (`gh-26268 `__) + +* ``set_string_function`` internal function was removed and ``PyArray_SetStringFunction`` + was stubbed out. + + (`gh-26611 `__) + + +C API changes +============= + +API symbols now hidden but customizable +--------------------------------------- +NumPy now defaults to hide the API symbols it adds to allow all NumPy API +usage. This means that by default you cannot dynamically fetch the NumPy API +from another library (this was never possible on windows). 
+ +If you are experiencing linking errors related to ``PyArray_API`` or +``PyArray_RUNTIME_VERSION``, you can define the +``NPY_API_SYMBOL_ATTRIBUTE`` to opt-out of this change. + +If you are experiencing problems due to an upstream header including NumPy, +the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before +their header and import NumPy yourself based on ``including-the-c-api``. + +(`gh-26103 `__) + +Many shims removed from npy_3kcompat.h +-------------------------------------- +Many of the old shims and helper functions were removed from +``npy_3kcompat.h``. If you find yourself in need of these, vendor the previous +version of the file into your codebase. + +(`gh-26842 `__) + +New ``PyUFuncObject`` field ``process_core_dims_func`` +------------------------------------------------------ +The field ``process_core_dims_func`` was added to the structure +``PyUFuncObject``. For generalized ufuncs, this field can be set to a function +of type ``PyUFunc_ProcessCoreDimsFunc`` that will be called when the ufunc is +called. It allows the ufunc author to check that core dimensions satisfy +additional constraints, and to set output core dimension sizes if they have not +been provided. + +(`gh-26908 `__) + + +New Features +============ + +Preliminary Support for Free-Threaded CPython 3.13 +-------------------------------------------------- + +CPython 3.13 will be available as an experimental free-threaded build. See +https://py-free-threading.github.io, `PEP 703 +`_ and the `CPython 3.13 release notes +`_ for +more detail about free-threaded Python. + +NumPy 2.1 has preliminary support for the free-threaded build of CPython +3.13. This support was enabled by fixing a number of C thread-safety issues in +NumPy. Before NumPy 2.1, NumPy used a large number of C global static variables +to store runtime caches and other state. 
We have either refactored to avoid the +need for global state, converted the global state to thread-local state, or +added locking. + +Support for free-threaded Python does not mean that NumPy is thread +safe. Read-only shared access to ndarray should be safe. NumPy exposes shared +mutable state and we have not added any locking to the array object itself to +serialize access to shared state. Care must be taken in user code to avoid +races if you would like to mutate the same array in multiple threads. It is +certainly possible to crash NumPy by mutating an array simultaneously in +multiple threads, for example by calling a ufunc and the ``resize`` method +simultaneously. For now our guidance is: "don't do that". In the future we would +like to provide stronger guarantees. + +Object arrays in particular need special care, since the GIL +previously provided locking for object array access and no longer does. See +`Issue #27199 `_ for more +information about object arrays in the free-threaded build. + +If you are interested in free-threaded Python, for example because you have a +multiprocessing-based workflow that you are interested in running with Python +threads, we encourage testing and experimentation. + +If you run into problems that you suspect are because of NumPy, please `open an +issue `_, checking first if +the bug also occurs in the "regular" non-free-threaded CPython 3.13 build. Many +threading bugs can also occur in code that releases the GIL; disabling the GIL +only makes it easier to hit threading bugs. + +(`gh-26157 `__) + +* ``numpy.reshape`` and ``numpy.ndarray.reshape`` now support ``shape`` and + ``copy`` arguments. + + (`gh-26292 `__) + +* NumPy now supports DLPack v1, support for older versions will + be deprecated in the future. + + (`gh-26501 `__) + +* ``numpy.asanyarray`` now supports ``copy`` and ``device`` arguments, matching + ``numpy.asarray``. 
+ + (`gh-26580 `__) + +* ``numpy.printoptions``, ``numpy.get_printoptions``, and + ``numpy.set_printoptions`` now support a new option, ``override_repr``, for + defining custom ``repr(array)`` behavior. + + (`gh-26611 `__) + +* ``numpy.cumulative_sum`` and ``numpy.cumulative_prod`` were added as Array + API compatible alternatives for ``numpy.cumsum`` and ``numpy.cumprod``. The + new functions can include a fixed initial (zeros for ``sum`` and ones for + ``prod``) in the result. + + (`gh-26724 `__) + +* ``numpy.clip`` now supports ``max`` and ``min`` keyword arguments which are + meant to replace ``a_min`` and ``a_max``. Also, for ``np.clip(a)`` or + ``np.clip(a, None, None)`` a copy of the input array will be returned instead + of raising an error. + + (`gh-26724 `__) + +* ``numpy.astype`` now supports ``device`` argument. + + (`gh-26724 `__) + +``f2py`` can generate freethreading-compatible C extensions +----------------------------------------------------------- +Pass ``--freethreading-compatible`` to the f2py CLI tool to produce a C +extension marked as compatible with the free threading CPython +interpreter. Doing so prevents the interpreter from re-enabling the GIL at +runtime when it imports the C extension. Note that ``f2py`` does not analyze +fortran code for thread safety, so you must verify that the wrapped fortran +code is thread safe before marking the extension as compatible. + +(`gh-26981 `__) + + +Improvements +============ + +``histogram`` auto-binning now returns bin sizes >=1 for integer input data +--------------------------------------------------------------------------- +For integer input data, bin sizes smaller than 1 result in spurious empty +bins. This is now avoided when the number of bins is computed using one of the +algorithms provided by ``histogram_bin_edges``. 
+ +(`gh-12150 `__) + +``ndarray`` shape-type parameter is now covariant and bound to ``tuple[int, ...]`` +---------------------------------------------------------------------------------- +Static typing for ``ndarray`` is a long-term effort that continues +with this change. It is a generic type with type parameters for +the shape and the data type. Previously, the shape type parameter could be +any value. This change restricts it to a tuple of ints, as one would expect +from using ``ndarray.shape``. Further, the shape-type parameter has been +changed from invariant to covariant. This change also applies to the subtypes +of ``ndarray``, e.g. ``numpy.ma.MaskedArray``. See the +`typing docs `_ +for more information. + +(`gh-26081 `__) + +``np.quantile`` with method ``closest_observation`` chooses nearest even order statistic +---------------------------------------------------------------------------------------- +This changes the definition of nearest for border cases from the nearest odd +order statistic to nearest even order statistic. The numpy implementation now +matches other reference implementations. + +(`gh-26656 `__) + +``lapack_lite`` is now thread safe +---------------------------------- +NumPy provides a minimal low-performance version of LAPACK named ``lapack_lite`` +that can be used if no BLAS/LAPACK system is detected at build time. + +Until now, ``lapack_lite`` was not thread safe. Single-threaded use cases did +not hit any issues, but running linear algebra operations in multiple threads +could lead to errors, incorrect results, or segfaults due to data races. + +We have added a global lock, serializing access to ``lapack_lite`` in multiple +threads. + +(`gh-26750 `__) + +The ``numpy.printoptions`` context manager is now thread and async-safe +----------------------------------------------------------------------- +In prior versions of NumPy, the printoptions were defined using a combination +of Python and C global variables. 
We have refactored so the state is stored in +a python ``ContextVar``, making the context manager thread and async-safe. + +(`gh-26846 `__) + +Type hinting ``numpy.polynomial`` +--------------------------------- +Starting from the 2.1 release, PEP 484 type annotations have been included for +the functions and convenience classes in ``numpy.polynomial`` and its +sub-packages. + +(`gh-26897 `__) + +Improved ``numpy.dtypes`` type hints +------------------------------------ +The type annotations for ``numpy.dtypes`` are now a better reflection of the +runtime: The ``numpy.dtype`` type-aliases have been replaced with specialized +``dtype`` *subtypes*, and the previously missing annotations for +``numpy.dtypes.StringDType`` have been added. + +(`gh-27008 `__) + + +Performance improvements and changes +==================================== + +* ``numpy.save`` now uses pickle protocol version 4 for saving arrays with + object dtype, which allows for pickle objects larger than 4GB and improves + saving speed by about 5% for large arrays. + + (`gh-26388 `__) + +* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on + benchmarking, there are 5 clusters of performance around these kernels: + ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. + + (`gh-27147 `__) + +* OpenBLAS on windows is linked without quadmath, simplifying licensing + + (`gh-27147 `__) + +* Due to a regression in OpenBLAS on windows, the performance improvements when + using multiple threads for OpenBLAS 0.3.26 were reverted. + + (`gh-27147 `__) + +``ma.cov`` and ``ma.corrcoef`` are now significantly faster +----------------------------------------------------------- +The private function has been refactored along with ``ma.cov`` and +``ma.corrcoef``. They are now significantly faster, particularly on large, +masked arrays. + +(`gh-26285 `__) + + +Changes +======= + +* As ``numpy.vecdot`` is now a ufunc it has a less precise signature. + This is due to the limitations of ufunc's typing stub. 
+ + (`gh-26313 `__) + +* ``numpy.floor``, ``numpy.ceil``, and ``numpy.trunc`` now won't perform + casting to a floating dtype for integer and boolean dtype input arrays. + + (`gh-26766 `__) + +``ma.corrcoef`` may return a slightly different result +------------------------------------------------------ +A pairwise observation approach is currently used in ``ma.corrcoef`` to +calculate the standard deviations for each pair of variables. This has been +changed as it is being used to normalise the covariance, estimated using +``ma.cov``, which does not consider the observations for each variable in a +pairwise manner, rendering it unnecessary. The normalisation has been replaced +by the more appropriate standard deviation for each variable, which +significantly reduces the wall time, but will return slightly different +estimates of the correlation coefficients in cases where the observations +between a pair of variables are not aligned. However, it will return the same +estimates in all other cases, including returning the same correlation matrix +as ``corrcoef`` when using a masked array with no masked values. + +(`gh-26285 `__) + +Cast-safety fixes in ``copyto`` and ``full`` +-------------------------------------------- +``copyto`` now uses NEP 50 correctly and applies this to its cast safety. +Python integer to NumPy integer casts and Python float to NumPy float casts +are now considered "safe" even if assignment may fail or precision may be lost. +This means the following examples change slightly: + +* ``np.copyto(int8_arr, 1000)`` previously performed an unsafe/same-kind cast + of the Python integer. It will now always raise, to achieve an unsafe cast + you must pass an array or NumPy scalar. + +* ``np.copyto(uint8_arr, 1000, casting="safe")`` will raise an OverflowError + rather than a TypeError due to same-kind casting. + +* ``np.copyto(float32_arr, 1e300, casting="safe")`` will overflow to ``inf`` + (float32 cannot hold ``1e300``) rather raising a TypeError. 
+ +Further, only the dtype is used when assigning NumPy scalars (or 0-d arrays), +meaning that the following behaves differently: + +* ``np.copyto(float32_arr, np.float64(3.0), casting="safe")`` raises. + +* ``np.coptyo(int8_arr, np.int64(100), casting="safe")`` raises. + Previously, NumPy checked whether the 100 fits the ``int8_arr``. + +This aligns ``copyto``, ``full``, and ``full_like`` with the correct NumPy 2 +behavior. + +(`gh-27091 `__) -.. include:: notes-towncrier.rst diff --git a/doc/source/release/2.1.1-notes.rst b/doc/source/release/2.1.1-notes.rst new file mode 100644 index 000000000000..79c63514695c --- /dev/null +++ b/doc/source/release/2.1.1-notes.rst @@ -0,0 +1,41 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.1.1 Release Notes +========================== + +NumPy 2.1.1 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.0 release. + +The Python versions supported by this release are 3.10-3.13. + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Andrew Nelson +* Charles Harris +* Mateusz Sokół +* Maximilian Weigand + +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg + +Pull requests merged +==================== + +A total of 10 pull requests were merged for this release. 
+ +* `#27236 `__: REL: Prepare for the NumPy 2.1.0 release [wheel build] +* `#27252 `__: MAINT: prepare 2.1.x for further development +* `#27259 `__: BUG: revert unintended change in the return value of set_printoptions +* `#27266 `__: BUG: fix reference counting bug in __array_interface__ implementation… +* `#27267 `__: TST: Add regression test for missing descr in array-interface +* `#27276 `__: BUG: Fix #27256 and #27257 +* `#27278 `__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27287 `__: MAINT: Update maintenance/2.1.x after the 2.0.2 release +* `#27303 `__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27304 `__: BUG: f2py: better handle filtering of public/private subroutines + diff --git a/doc/source/release/2.1.2-notes.rst b/doc/source/release/2.1.2-notes.rst new file mode 100644 index 000000000000..1a187dbd3365 --- /dev/null +++ b/doc/source/release/2.1.2-notes.rst @@ -0,0 +1,48 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.1.2 Release Notes +========================== + +NumPy 2.1.2 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.1 release. + +The Python versions supported by this release are 3.10-3.13. + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Chris Sidebottom +* Ishan Koradia + +* João Eiras + +* Katie Rust + +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Slava Gorloff + + +Pull requests merged +==================== + +A total of 14 pull requests were merged for this release. + +* `#27333 `__: MAINT: prepare 2.1.x for further development +* `#27400 `__: BUG: apply critical sections around populating the dispatch cache +* `#27406 `__: BUG: Stub out get_build_msvc_version if distutils.msvccompiler... +* `#27416 `__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... 
+* `#27433 `__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27437 `__: BUG: Allow unsigned shift argument for np.roll +* `#27439 `__: BUG: Disable SVE VQSort +* `#27471 `__: BUG: rfftn axis bug +* `#27479 `__: BUG: Fix extra decref of PyArray_UInt8DType. +* `#27480 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... +* `#27481 `__: MAINT: Check for SVE support on demand +* `#27484 `__: BUG: initialize the promotion state to be weak +* `#27501 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.2 +* `#27506 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ diff --git a/doc/source/release/2.1.3-notes.rst b/doc/source/release/2.1.3-notes.rst new file mode 100644 index 000000000000..cd797e0062a0 --- /dev/null +++ b/doc/source/release/2.1.3-notes.rst @@ -0,0 +1,81 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.1.3 Release Notes +========================== + +NumPy 2.1.3 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.2 release. + +The Python versions supported by this release are 3.10-3.13. + + +Improvements +============ + +* Fixed a number of issues around promotion for string ufuncs with StringDType + arguments. Mixing StringDType and the fixed-width DTypes using the string + ufuncs should now generate much more uniform results. + + (`gh-27636 `__) + + +Changes +======= + +* `numpy.fix` now won't perform casting to a floating data-type for integer + and boolean data-type input arrays. + + (`gh-26766 `__) + + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar + +* Austin + +* Benjamin A. 
Beasley + +* Charles Harris +* Christian Lorentzen +* Marcel Telka + +* Matti Picus +* Michael Davidsaver + +* Nathan Goldbaum +* Peter Hawkins +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* dependabot[bot] +* kp2pml30 + + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. + +* `#27512 `__: MAINT: prepare 2.1.x for further development +* `#27537 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.1 +* `#27538 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27539 `__: MAINT: MSVC does not support #warning directive +* `#27543 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27561 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27562 `__: BLD: update vendored Meson to 1.5.2 +* `#27563 `__: BUG: weighted quantile for some zero weights (#27549) +* `#27565 `__: MAINT: Use miniforge for macos conda test. +* `#27566 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27569 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27570 `__: BLD: start building Windows free-threaded wheels [wheel build] +* `#27571 `__: BUILD: vendor tempita from Cython +* `#27574 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27592 `__: MAINT: Update Highway to latest +* `#27593 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27616 `__: BUG: Fix Linux QEMU CI workflow +* `#27668 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27669 `__: ENH: fix wasm32 runtime type error in numpy._core +* `#27672 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. +* `#27673 `__: BUG: fixes for StringDType/unicode promoters + diff --git a/doc/source/release/2.1.4-notes.rst b/doc/source/release/2.1.4-notes.rst new file mode 100644 index 000000000000..4e9d9f1a03aa --- /dev/null +++ b/doc/source/release/2.1.4-notes.rst @@ -0,0 +1,23 @@ +.. 
currentmodule:: numpy + +========================== +NumPy 2.1.4 Release Notes +========================== + +NumPy 2.1.4 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.3 release. + +The Python versions supported by this release are 3.10-3.13. + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. include:: notes-towncrier.rst diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst index 697c0c045e4f..7bf793ae2e47 100644 --- a/doc/source/user/c-info.beyond-basics.rst +++ b/doc/source/user/c-info.beyond-basics.rst @@ -268,6 +268,9 @@ specifies your data-type. This type number should be stored and made available by your module so that other modules can use it to recognize your data-type. +Note that this API is inherently thread-unsafe. See `thread_safety` for more +details about thread safety in NumPy. 
+ Registering a casting function ------------------------------ diff --git a/environment.yml b/environment.yml index 86ee1058f440..82f0856668c3 100644 --- a/environment.yml +++ b/environment.yml @@ -7,17 +7,17 @@ name: numpy-dev channels: - conda-forge dependencies: - - python=3.11 #need to pin to avoid issues with builds + - python=3.12 # need to pin to avoid issues with builds - cython>=3.0 - compilers - openblas - nomkl - - setuptools + - setuptools==65.5.1 - ninja - pkg-config - meson-python + - spin==0.13 - pip - - spin=0.8 # Unpin when spin 0.9.1 is released - ccache # For testing - pytest diff --git a/numpy/_build_utils/tempita.py b/numpy/_build_utils/tempita.py index 0743b892436b..32e400f9c907 100644 --- a/numpy/_build_utils/tempita.py +++ b/numpy/_build_utils/tempita.py @@ -3,9 +3,7 @@ import os import argparse -from Cython import Tempita as tempita - -# XXX: If this import ever fails (does it really?), vendor cython.tempita +import tempita def process_tempita(fromfile, outfile=None): diff --git a/numpy/_build_utils/tempita/LICENSE.txt b/numpy/_build_utils/tempita/LICENSE.txt new file mode 100644 index 000000000000..0ba6f23c440f --- /dev/null +++ b/numpy/_build_utils/tempita/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2008 Ian Bicking and Contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/numpy/_build_utils/tempita/__init__.py b/numpy/_build_utils/tempita/__init__.py new file mode 100644 index 000000000000..41a0ce3d0efa --- /dev/null +++ b/numpy/_build_utils/tempita/__init__.py @@ -0,0 +1,4 @@ +# The original Tempita implements all of its templating code here. +# Moved it to _tempita.py to make the compilation portable. + +from ._tempita import * diff --git a/numpy/_build_utils/tempita/_looper.py b/numpy/_build_utils/tempita/_looper.py new file mode 100644 index 000000000000..4864f2949605 --- /dev/null +++ b/numpy/_build_utils/tempita/_looper.py @@ -0,0 +1,156 @@ +""" +Helper for looping over sequences, particular in templates. + +Often in a loop in a template it's handy to know what's next up, +previously up, if this is the first or last item in the sequence, etc. +These can be awkward to manage in a normal Python loop, but using the +looper you can get a better sense of the context. Use like:: + + >>> for loop, item in looper(['a', 'b', 'c']): + ... print loop.number, item + ... if not loop.last: + ... print '---' + 1 a + --- + 2 b + --- + 3 c + +""" + +basestring_ = (bytes, str) + +__all__ = ['looper'] + + +class looper: + """ + Helper for looping (particularly in templates) + + Use this like:: + + for loop, item in looper(seq): + if loop.first: + ... 
+ """ + + def __init__(self, seq): + self.seq = seq + + def __iter__(self): + return looper_iter(self.seq) + + def __repr__(self): + return '<%s for %r>' % ( + self.__class__.__name__, self.seq) + + +class looper_iter: + + def __init__(self, seq): + self.seq = list(seq) + self.pos = 0 + + def __iter__(self): + return self + + def __next__(self): + if self.pos >= len(self.seq): + raise StopIteration + result = loop_pos(self.seq, self.pos), self.seq[self.pos] + self.pos += 1 + return result + + +class loop_pos: + + def __init__(self, seq, pos): + self.seq = seq + self.pos = pos + + def __repr__(self): + return '' % ( + self.seq[self.pos], self.pos) + + def index(self): + return self.pos + index = property(index) + + def number(self): + return self.pos + 1 + number = property(number) + + def item(self): + return self.seq[self.pos] + item = property(item) + + def __next__(self): + try: + return self.seq[self.pos + 1] + except IndexError: + return None + __next__ = property(__next__) + + def previous(self): + if self.pos == 0: + return None + return self.seq[self.pos - 1] + previous = property(previous) + + def odd(self): + return not self.pos % 2 + odd = property(odd) + + def even(self): + return self.pos % 2 + even = property(even) + + def first(self): + return self.pos == 0 + first = property(first) + + def last(self): + return self.pos == len(self.seq) - 1 + last = property(last) + + def length(self): + return len(self.seq) + length = property(length) + + def first_group(self, getter=None): + """ + Returns true if this item is the start of a new group, + where groups mean that some attribute has changed. The getter + can be None (the item itself changes), an attribute name like + ``'.attr'``, a function, or a dict key or list index. 
+ """ + if self.first: + return True + return self._compare_group(self.item, self.previous, getter) + + def last_group(self, getter=None): + """ + Returns true if this item is the end of a new group, + where groups mean that some attribute has changed. The getter + can be None (the item itself changes), an attribute name like + ``'.attr'``, a function, or a dict key or list index. + """ + if self.last: + return True + return self._compare_group(self.item, self.__next__, getter) + + def _compare_group(self, item, other, getter): + if getter is None: + return item != other + elif (isinstance(getter, basestring_) + and getter.startswith('.')): + getter = getter[1:] + if getter.endswith('()'): + getter = getter[:-2] + return getattr(item, getter)() != getattr(other, getter)() + else: + return getattr(item, getter) != getattr(other, getter) + elif hasattr(getter, '__call__'): + return getter(item) != getter(other) + else: + return item[getter] != other[getter] diff --git a/numpy/_build_utils/tempita/_tempita.py b/numpy/_build_utils/tempita/_tempita.py new file mode 100644 index 000000000000..c30b6547ade6 --- /dev/null +++ b/numpy/_build_utils/tempita/_tempita.py @@ -0,0 +1,1132 @@ +""" +A small templating language + +This implements a small templating language. This language implements +if/elif/else, for/continue/break, expressions, and blocks of Python +code. The syntax is:: + + {{any expression (function calls etc)}} + {{any expression | filter}} + {{for x in y}}...{{endfor}} + {{if x}}x{{elif y}}y{{else}}z{{endif}} + {{py:x=1}} + {{py: + def foo(bar): + return 'baz' + }} + {{default var = default_value}} + {{# comment}} + +You use this with the ``Template`` class or the ``sub`` shortcut. +The ``Template`` class takes the template string and the name of +the template (for errors) and a default namespace. Then (like +``string.Template``) you can call the ``tmpl.substitute(**kw)`` +method to make a substitution (or ``tmpl.substitute(a_dict)``). 
+ +``sub(content, **kw)`` substitutes the template immediately. You +can use ``__name='tmpl.html'`` to set the name of the template. + +If there are syntax errors ``TemplateError`` will be raised. +""" + +import re +import sys +import os +import tokenize +from io import StringIO + +from ._looper import looper + +__all__ = ["TemplateError", "Template", "sub", "bunch"] + +in_re = re.compile(r"\s+in\s+") +var_re = re.compile(r"^[a-z_][a-z0-9_]*$", re.I) +basestring_ = (bytes, str) + + +def coerce_text(v): + if not isinstance(v, basestring_): + if hasattr(v, "__str__"): + return str(v) + else: + return bytes(v) + return v + + +class TemplateError(Exception): + """Exception raised while parsing a template""" + + def __init__(self, message, position, name=None): + Exception.__init__(self, message) + self.position = position + self.name = name + + def __str__(self): + msg = " ".join(self.args) + if self.position: + msg = "%s at line %s column %s" % (msg, self.position[0], self.position[1]) + if self.name: + msg += " in %s" % self.name + return msg + + +class _TemplateContinue(Exception): + pass + + +class _TemplateBreak(Exception): + pass + + +def get_file_template(name, from_template): + path = os.path.join(os.path.dirname(from_template.name), name) + return from_template.__class__.from_filename( + path, namespace=from_template.namespace, get_template=from_template.get_template + ) + + +class Template: + default_namespace = { + "start_braces": "{{", + "end_braces": "}}", + "looper": looper, + } + + default_encoding = "utf8" + default_inherit = None + + def __init__( + self, + content, + name=None, + namespace=None, + stacklevel=None, + get_template=None, + default_inherit=None, + line_offset=0, + delimiters=None, + ): + self.content = content + + # set delimiters + if delimiters is None: + delimiters = ( + self.default_namespace["start_braces"], + self.default_namespace["end_braces"], + ) + else: + # assert len(delimiters) == 2 and all([isinstance(delimiter, basestring) 
+            # for delimiter in delimiters])
+            self.default_namespace = self.__class__.default_namespace.copy()
+            self.default_namespace["start_braces"] = delimiters[0]
+            self.default_namespace["end_braces"] = delimiters[1]
+        self.delimiters = delimiters
+
+        self._unicode = isinstance(content, str)
+        if name is None and stacklevel is not None:
+            try:
+                caller = sys._getframe(stacklevel)
+            except ValueError:
+                pass
+            else:
+                globals = caller.f_globals
+                lineno = caller.f_lineno
+                if "__file__" in globals:
+                    name = globals["__file__"]
+                    if name.endswith(".pyc") or name.endswith(".pyo"):
+                        name = name[:-1]
+                elif "__name__" in globals:
+                    name = globals["__name__"]
+                else:
+                    name = "<string>"
+                if lineno:
+                    name += ":%s" % lineno
+        self.name = name
+        self._parsed = parse(
+            content, name=name, line_offset=line_offset, delimiters=self.delimiters
+        )
+        if namespace is None:
+            namespace = {}
+        self.namespace = namespace
+        self.get_template = get_template
+        if default_inherit is not None:
+            self.default_inherit = default_inherit
+
+    def from_filename(
+        cls,
+        filename,
+        namespace=None,
+        encoding=None,
+        default_inherit=None,
+        get_template=get_file_template,
+    ):
+        with open(filename, "rb") as f:
+            c = f.read()
+        if encoding:
+            c = c.decode(encoding)
+        return cls(
+            content=c,
+            name=filename,
+            namespace=namespace,
+            default_inherit=default_inherit,
+            get_template=get_template,
+        )
+
+    from_filename = classmethod(from_filename)
+
+    def __repr__(self):
+        return "<%s %s name=%r>" % (
+            self.__class__.__name__,
+            hex(id(self))[2:],
+            self.name,
+        )
+
+    def substitute(self, *args, **kw):
+        if args:
+            if kw:
+                raise TypeError("You can only give positional *or* keyword arguments")
+            if len(args) > 1:
+                raise TypeError("You can only give one positional argument")
+            if not hasattr(args[0], "items"):
+                raise TypeError(
+                    "If you pass in a single argument, you must pass in a "
+                    "dictionary-like object (with a .items() method); you gave %r"
+                    % (args[0],)
+                )
+            kw = args[0]
+        ns = kw
+
ns["__template_name__"] = self.name + if self.namespace: + ns.update(self.namespace) + result, defs, inherit = self._interpret(ns) + if not inherit: + inherit = self.default_inherit + if inherit: + result = self._interpret_inherit(result, defs, inherit, ns) + return result + + def _interpret(self, ns): + __traceback_hide__ = True + parts = [] + defs = {} + self._interpret_codes(self._parsed, ns, out=parts, defs=defs) + if "__inherit__" in defs: + inherit = defs.pop("__inherit__") + else: + inherit = None + return "".join(parts), defs, inherit + + def _interpret_inherit(self, body, defs, inherit_template, ns): + __traceback_hide__ = True + if not self.get_template: + raise TemplateError( + "You cannot use inheritance without passing in get_template", + position=None, + name=self.name, + ) + templ = self.get_template(inherit_template, self) + self_ = TemplateObject(self.name) + for name, value in defs.items(): + setattr(self_, name, value) + self_.body = body + ns = ns.copy() + ns["self"] = self_ + return templ.substitute(ns) + + def _interpret_codes(self, codes, ns, out, defs): + __traceback_hide__ = True + for item in codes: + if isinstance(item, basestring_): + out.append(item) + else: + self._interpret_code(item, ns, out, defs) + + def _interpret_code(self, code, ns, out, defs): + __traceback_hide__ = True + name, pos = code[0], code[1] + if name == "py": + self._exec(code[2], ns, pos) + elif name == "continue": + raise _TemplateContinue() + elif name == "break": + raise _TemplateBreak() + elif name == "for": + vars, expr, content = code[2], code[3], code[4] + expr = self._eval(expr, ns, pos) + self._interpret_for(vars, expr, content, ns, out, defs) + elif name == "cond": + parts = code[2:] + self._interpret_if(parts, ns, out, defs) + elif name == "expr": + parts = code[2].split("|") + base = self._eval(parts[0], ns, pos) + for part in parts[1:]: + func = self._eval(part, ns, pos) + base = func(base) + out.append(self._repr(base, pos)) + elif name == "default": + 
var, expr = code[2], code[3] + if var not in ns: + result = self._eval(expr, ns, pos) + ns[var] = result + elif name == "inherit": + expr = code[2] + value = self._eval(expr, ns, pos) + defs["__inherit__"] = value + elif name == "def": + name = code[2] + signature = code[3] + parts = code[4] + ns[name] = defs[name] = TemplateDef( + self, name, signature, body=parts, ns=ns, pos=pos + ) + elif name == "comment": + return + else: + assert 0, "Unknown code: %r" % name + + def _interpret_for(self, vars, expr, content, ns, out, defs): + __traceback_hide__ = True + for item in expr: + if len(vars) == 1: + ns[vars[0]] = item + else: + if len(vars) != len(item): + raise ValueError( + "Need %i items to unpack (got %i items)" + % (len(vars), len(item)) + ) + for name, value in zip(vars, item): + ns[name] = value + try: + self._interpret_codes(content, ns, out, defs) + except _TemplateContinue: + continue + except _TemplateBreak: + break + + def _interpret_if(self, parts, ns, out, defs): + __traceback_hide__ = True + # @@: if/else/else gets through + for part in parts: + assert not isinstance(part, basestring_) + name, pos = part[0], part[1] + if name == "else": + result = True + else: + result = self._eval(part[2], ns, pos) + if result: + self._interpret_codes(part[3], ns, out, defs) + break + + def _eval(self, code, ns, pos): + __traceback_hide__ = True + try: + try: + value = eval(code, self.default_namespace, ns) + except SyntaxError as e: + raise SyntaxError("invalid syntax in expression: %s" % code) + return value + except Exception as e: + if getattr(e, "args", None): + arg0 = e.args[0] + else: + arg0 = coerce_text(e) + e.args = (self._add_line_info(arg0, pos),) + raise + + def _exec(self, code, ns, pos): + __traceback_hide__ = True + try: + exec(code, self.default_namespace, ns) + except Exception as e: + if e.args: + e.args = (self._add_line_info(e.args[0], pos),) + else: + e.args = (self._add_line_info(None, pos),) + raise + + def _repr(self, value, pos): + 
__traceback_hide__ = True + try: + if value is None: + return "" + if self._unicode: + try: + value = str(value) + except UnicodeDecodeError: + value = bytes(value) + else: + if not isinstance(value, basestring_): + value = coerce_text(value) + if isinstance(value, str) and self.default_encoding: + value = value.encode(self.default_encoding) + except Exception as e: + e.args = (self._add_line_info(e.args[0], pos),) + raise + else: + if self._unicode and isinstance(value, bytes): + if not self.default_encoding: + raise UnicodeDecodeError( + "Cannot decode bytes value %r into unicode " + "(no default_encoding provided)" % value + ) + try: + value = value.decode(self.default_encoding) + except UnicodeDecodeError as e: + raise UnicodeDecodeError( + e.encoding, + e.object, + e.start, + e.end, + e.reason + " in string %r" % value, + ) + elif not self._unicode and isinstance(value, str): + if not self.default_encoding: + raise UnicodeEncodeError( + "Cannot encode unicode value %r into bytes " + "(no default_encoding provided)" % value + ) + value = value.encode(self.default_encoding) + return value + + def _add_line_info(self, msg, pos): + msg = "%s at line %s column %s" % (msg, pos[0], pos[1]) + if self.name: + msg += " in file %s" % self.name + return msg + + +def sub(content, delimiters=None, **kw): + name = kw.get("__name") + tmpl = Template(content, name=name, delimiters=delimiters) + return tmpl.substitute(kw) + + +def paste_script_template_renderer(content, vars, filename=None): + tmpl = Template(content, name=filename) + return tmpl.substitute(vars) + + +class bunch(dict): + def __init__(self, **kw): + for name, value in kw.items(): + setattr(self, name, value) + + def __setattr__(self, name, value): + self[name] = value + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __getitem__(self, key): + if "default" in self: + try: + return dict.__getitem__(self, key) + except KeyError: + return 
dict.__getitem__(self, "default")
+        else:
+            return dict.__getitem__(self, key)
+
+    def __repr__(self):
+        return "<%s %s>" % (
+            self.__class__.__name__,
+            " ".join(["%s=%r" % (k, v) for k, v in sorted(self.items())]),
+        )
+
+
+class TemplateDef:
+    def __init__(
+        self, template, func_name, func_signature, body, ns, pos, bound_self=None
+    ):
+        self._template = template
+        self._func_name = func_name
+        self._func_signature = func_signature
+        self._body = body
+        self._ns = ns
+        self._pos = pos
+        self._bound_self = bound_self
+
+    def __repr__(self):
+        return "<tempita function %s(%s) at %s:%s>" % (
+            self._func_name,
+            self._func_signature,
+            self._template.name,
+            self._pos,
+        )
+
+    def __str__(self):
+        return self()
+
+    def __call__(self, *args, **kw):
+        values = self._parse_signature(args, kw)
+        ns = self._ns.copy()
+        ns.update(values)
+        if self._bound_self is not None:
+            ns["self"] = self._bound_self
+        out = []
+        subdefs = {}
+        self._template._interpret_codes(self._body, ns, out, subdefs)
+        return "".join(out)
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        return self.__class__(
+            self._template,
+            self._func_name,
+            self._func_signature,
+            self._body,
+            self._ns,
+            self._pos,
+            bound_self=obj,
+        )
+
+    def _parse_signature(self, args, kw):
+        values = {}
+        sig_args, var_args, var_kw, defaults = self._func_signature
+        extra_kw = {}
+        for name, value in kw.items():
+            if not var_kw and name not in sig_args:
+                raise TypeError("Unexpected argument %s" % name)
+            if name in sig_args:
+                values[sig_args] = value
+            else:
+                extra_kw[name] = value
+        args = list(args)
+        sig_args = list(sig_args)
+        while args:
+            while sig_args and sig_args[0] in values:
+                sig_args.pop(0)
+            if sig_args:
+                name = sig_args.pop(0)
+                values[name] = args.pop(0)
+            elif var_args:
+                values[var_args] = tuple(args)
+                break
+            else:
+                raise TypeError(
+                    "Extra position arguments: %s" % ", ".join([repr(v) for v in args])
+                )
+        for name, value_expr in defaults.items():
+            if name not in values:
+                values[name] = 
self._template._eval(value_expr, self._ns, self._pos) + for name in sig_args: + if name not in values: + raise TypeError("Missing argument: %s" % name) + if var_kw: + values[var_kw] = extra_kw + return values + + +class TemplateObject: + def __init__(self, name): + self.__name = name + self.get = TemplateObjectGetter(self) + + def __repr__(self): + return "<%s %s>" % (self.__class__.__name__, self.__name) + + +class TemplateObjectGetter: + def __init__(self, template_obj): + self.__template_obj = template_obj + + def __getattr__(self, attr): + return getattr(self.__template_obj, attr, Empty) + + def __repr__(self): + return "<%s around %r>" % (self.__class__.__name__, self.__template_obj) + + +class _Empty: + def __call__(self, *args, **kw): + return self + + def __str__(self): + return "" + + def __repr__(self): + return "Empty" + + def __unicode__(self): + return "" + + def __iter__(self): + return iter(()) + + def __bool__(self): + return False + + +Empty = _Empty() +del _Empty + +############################################################ +## Lexing and Parsing +############################################################ + + +def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): + """ + Lex a string into chunks: + + >>> lex('hey') + ['hey'] + >>> lex('hey {{you}}') + ['hey ', ('you', (1, 7))] + >>> lex('hey {{') + Traceback (most recent call last): + ... + TemplateError: No }} to finish last expression at line 1 column 7 + >>> lex('hey }}') + Traceback (most recent call last): + ... + TemplateError: }} outside expression at line 1 column 7 + >>> lex('hey {{ {{') + Traceback (most recent call last): + ... 
+ TemplateError: {{ inside expression at line 1 column 10 + + """ + if delimiters is None: + delimiters = ( + Template.default_namespace["start_braces"], + Template.default_namespace["end_braces"], + ) + in_expr = False + chunks = [] + last = 0 + last_pos = (line_offset + 1, 1) + + token_re = re.compile( + r"%s|%s" % (re.escape(delimiters[0]), re.escape(delimiters[1])) + ) + for match in token_re.finditer(s): + expr = match.group(0) + pos = find_position(s, match.end(), last, last_pos) + if expr == delimiters[0] and in_expr: + raise TemplateError( + "%s inside expression" % delimiters[0], position=pos, name=name + ) + elif expr == delimiters[1] and not in_expr: + raise TemplateError( + "%s outside expression" % delimiters[1], position=pos, name=name + ) + if expr == delimiters[0]: + part = s[last:match.start()] + if part: + chunks.append(part) + in_expr = True + else: + chunks.append((s[last: match.start()], last_pos)) + in_expr = False + last = match.end() + last_pos = pos + if in_expr: + raise TemplateError( + "No %s to finish last expression" % delimiters[1], + name=name, + position=last_pos, + ) + part = s[last:] + if part: + chunks.append(part) + if trim_whitespace: + chunks = trim_lex(chunks) + return chunks + + +statement_re = re.compile(r"^(?:if |elif |for |def |inherit |default |py:)") +single_statements = ["else", "endif", "endfor", "enddef", "continue", "break"] +trail_whitespace_re = re.compile(r"\n\r?[\t ]*$") +lead_whitespace_re = re.compile(r"^[\t ]*\n") + + +def trim_lex(tokens): + r""" + Takes a lexed set of tokens, and removes whitespace when there is + a directive on a line by itself: + + >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) + >>> tokens + [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny'] + >>> trim_lex(tokens) + [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y'] + """ + last_trim = None + for i, current in enumerate(tokens): + if isinstance(current, basestring_): + # we don't trim this + continue + item = 
current[0] + if not statement_re.search(item) and item not in single_statements: + continue + if not i: + prev = "" + else: + prev = tokens[i - 1] + if i + 1 >= len(tokens): + next_chunk = "" + else: + next_chunk = tokens[i + 1] + if not isinstance(next_chunk, basestring_) or not isinstance(prev, basestring_): + continue + prev_ok = not prev or trail_whitespace_re.search(prev) + if i == 1 and not prev.strip(): + prev_ok = True + if last_trim is not None and last_trim + 2 == i and not prev.strip(): + prev_ok = "last" + if prev_ok and ( + not next_chunk + or lead_whitespace_re.search(next_chunk) + or (i == len(tokens) - 2 and not next_chunk.strip()) + ): + if prev: + if (i == 1 and not prev.strip()) or prev_ok == "last": + tokens[i - 1] = "" + else: + m = trail_whitespace_re.search(prev) + # +1 to leave the leading \n on: + prev = prev[: m.start() + 1] + tokens[i - 1] = prev + if next_chunk: + last_trim = i + if i == len(tokens) - 2 and not next_chunk.strip(): + tokens[i + 1] = "" + else: + m = lead_whitespace_re.search(next_chunk) + next_chunk = next_chunk[m.end():] + tokens[i + 1] = next_chunk + return tokens + + +def find_position(string, index, last_index, last_pos): + """Given a string and index, return (line, column)""" + lines = string.count("\n", last_index, index) + if lines > 0: + column = index - string.rfind("\n", last_index, index) + else: + column = last_pos[1] + (index - last_index) + return (last_pos[0] + lines, column) + + +def parse(s, name=None, line_offset=0, delimiters=None): + r""" + Parses a string into a kind of AST + + >>> parse('{{x}}') + [('expr', (1, 3), 'x')] + >>> parse('foo') + ['foo'] + >>> parse('{{if x}}test{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))] + >>> parse('series->{{for x in y}}x={{x}}{{endfor}}') + ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])] + >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}') + [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])] + >>> 
parse('{{py:x=1}}') + [('py', (1, 3), 'x=1')] + >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] # noqa: E501 + + Some exceptions:: + + >>> parse('{{continue}}') + Traceback (most recent call last): + ... + TemplateError: continue outside of for loop at line 1 column 3 + >>> parse('{{if x}}foo') + Traceback (most recent call last): + ... + TemplateError: No {{endif}} at line 1 column 3 + >>> parse('{{else}}') + Traceback (most recent call last): + ... + TemplateError: else outside of an if block at line 1 column 3 + >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Unexpected endif at line 1 column 25 + >>> parse('{{if}}{{endif}}') + Traceback (most recent call last): + ... + TemplateError: if with no expression at line 1 column 3 + >>> parse('{{for x y}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Bad for (no "in") in 'x y' at line 1 column 3 + >>> parse('{{py:x=1\ny=2}}') + Traceback (most recent call last): + ... 
+ TemplateError: Multi-line py blocks must start with a newline at line 1 column 3 + """ + if delimiters is None: + delimiters = ( + Template.default_namespace["start_braces"], + Template.default_namespace["end_braces"], + ) + tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters) + result = [] + while tokens: + next_chunk, tokens = parse_expr(tokens, name) + result.append(next_chunk) + return result + + +def parse_expr(tokens, name, context=()): + if isinstance(tokens[0], basestring_): + return tokens[0], tokens[1:] + expr, pos = tokens[0] + expr = expr.strip() + if expr.startswith("py:"): + expr = expr[3:].lstrip(" \t") + if expr.startswith("\n") or expr.startswith("\r"): + expr = expr.lstrip("\r\n") + if "\r" in expr: + expr = expr.replace("\r\n", "\n") + expr = expr.replace("\r", "") + expr += "\n" + else: + if "\n" in expr: + raise TemplateError( + "Multi-line py blocks must start with a newline", + position=pos, + name=name, + ) + return ("py", pos, expr), tokens[1:] + elif expr in ("continue", "break"): + if "for" not in context: + raise TemplateError("continue outside of for loop", position=pos, name=name) + return (expr, pos), tokens[1:] + elif expr.startswith("if "): + return parse_cond(tokens, name, context) + elif expr.startswith("elif ") or expr == "else": + raise TemplateError( + "%s outside of an if block" % expr.split()[0], position=pos, name=name + ) + elif expr in ("if", "elif", "for"): + raise TemplateError("%s with no expression" % expr, position=pos, name=name) + elif expr in ("endif", "endfor", "enddef"): + raise TemplateError("Unexpected %s" % expr, position=pos, name=name) + elif expr.startswith("for "): + return parse_for(tokens, name, context) + elif expr.startswith("default "): + return parse_default(tokens, name, context) + elif expr.startswith("inherit "): + return parse_inherit(tokens, name, context) + elif expr.startswith("def "): + return parse_def(tokens, name, context) + elif expr.startswith("#"): + return 
("comment", pos, tokens[0][0]), tokens[1:] + return ("expr", pos, tokens[0][0]), tokens[1:] + + +def parse_cond(tokens, name, context): + start = tokens[0][1] + pieces = [] + context = context + ("if",) + while 1: + if not tokens: + raise TemplateError("Missing {{endif}}", position=start, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "endif": + return ("cond", start) + tuple(pieces), tokens[1:] + next_chunk, tokens = parse_one_cond(tokens, name, context) + pieces.append(next_chunk) + + +def parse_one_cond(tokens, name, context): + (first, pos), tokens = tokens[0], tokens[1:] + content = [] + if first.endswith(":"): + first = first[:-1] + if first.startswith("if "): + part = ("if", pos, first[3:].lstrip(), content) + elif first.startswith("elif "): + part = ("elif", pos, first[5:].lstrip(), content) + elif first == "else": + part = ("else", pos, None, content) + else: + assert 0, "Unexpected token %r at %s" % (first, pos) + while 1: + if not tokens: + raise TemplateError("No {{endif}}", position=pos, name=name) + if isinstance(tokens[0], tuple) and ( + tokens[0][0] == "endif" + or tokens[0][0].startswith("elif ") + or tokens[0][0] == "else" + ): + return part, tokens + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_for(tokens, name, context): + first, pos = tokens[0] + tokens = tokens[1:] + context = ("for",) + context + content = [] + assert first.startswith("for "), first + if first.endswith(":"): + first = first[:-1] + first = first[3:].strip() + match = in_re.search(first) + if not match: + raise TemplateError('Bad for (no "in") in %r' % first, position=pos, name=name) + vars = first[: match.start()] + if "(" in vars: + raise TemplateError( + "You cannot have () in the variable section of a for loop (%r)" % vars, + position=pos, + name=name, + ) + vars = tuple([v.strip() for v in first[: match.start()].split(",") if v.strip()]) + expr = first[match.end():] + while 1: + if not tokens: + raise 
TemplateError("No {{endfor}}", position=pos, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "endfor": + return ("for", pos, vars, expr, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_default(tokens, name, context): + first, pos = tokens[0] + assert first.startswith("default ") + first = first.split(None, 1)[1] + parts = first.split("=", 1) + if len(parts) == 1: + raise TemplateError( + "Expression must be {{default var=value}}; no = found in %r" % first, + position=pos, + name=name, + ) + var = parts[0].strip() + if "," in var: + raise TemplateError( + "{{default x, y = ...}} is not supported", position=pos, name=name + ) + if not var_re.search(var): + raise TemplateError( + "Not a valid variable name for {{default}}: %r" % var, + position=pos, + name=name, + ) + expr = parts[1].strip() + return ("default", pos, var, expr), tokens[1:] + + +def parse_inherit(tokens, name, context): + first, pos = tokens[0] + assert first.startswith("inherit ") + expr = first.split(None, 1)[1] + return ("inherit", pos, expr), tokens[1:] + + +def parse_def(tokens, name, context): + first, start = tokens[0] + tokens = tokens[1:] + assert first.startswith("def ") + first = first.split(None, 1)[1] + if first.endswith(":"): + first = first[:-1] + if "(" not in first: + func_name = first + sig = ((), None, None, {}) + elif not first.endswith(")"): + raise TemplateError( + "Function definition doesn't end with ): %s" % first, + position=start, + name=name, + ) + else: + first = first[:-1] + func_name, sig_text = first.split("(", 1) + sig = parse_signature(sig_text, name, start) + context = context + ("def",) + content = [] + while 1: + if not tokens: + raise TemplateError("Missing {{enddef}}", position=start, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "enddef": + return ("def", start, func_name, sig, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) 
+ content.append(next_chunk) + + +def parse_signature(sig_text, name, pos): + tokens = tokenize.generate_tokens(StringIO(sig_text).readline) + sig_args = [] + var_arg = None + var_kw = None + defaults = {} + + def get_token(pos=False): + try: + tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens) + except StopIteration: + return tokenize.ENDMARKER, "" + if pos: + return tok_type, tok_string, (srow, scol), (erow, ecol) + else: + return tok_type, tok_string + + while 1: + var_arg_type = None + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER: + break + if tok_type == tokenize.OP and (tok_string == "*" or tok_string == "**"): + var_arg_type = tok_string + tok_type, tok_string = get_token() + if tok_type != tokenize.NAME: + raise TemplateError( + "Invalid signature: (%s)" % sig_text, position=pos, name=name + ) + var_name = tok_string + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER or ( + tok_type == tokenize.OP and tok_string == "," + ): + if var_arg_type == "*": + var_arg = var_name + elif var_arg_type == "**": + var_kw = var_name + else: + sig_args.append(var_name) + if tok_type == tokenize.ENDMARKER: + break + continue + if var_arg_type is not None: + raise TemplateError( + "Invalid signature: (%s)" % sig_text, position=pos, name=name + ) + if tok_type == tokenize.OP and tok_string == "=": + nest_type = None + unnest_type = None + nest_count = 0 + start_pos = end_pos = None + parts = [] + while 1: + tok_type, tok_string, s, e = get_token(True) + if start_pos is None: + start_pos = s + end_pos = e + if tok_type == tokenize.ENDMARKER and nest_count: + raise TemplateError( + "Invalid signature: (%s)" % sig_text, position=pos, name=name + ) + if not nest_count and ( + tok_type == tokenize.ENDMARKER + or (tok_type == tokenize.OP and tok_string == ",") + ): + default_expr = isolate_expression(sig_text, start_pos, end_pos) + defaults[var_name] = default_expr + sig_args.append(var_name) + break + 
parts.append((tok_type, tok_string)) + if nest_count and tok_type == tokenize.OP and tok_string == nest_type: + nest_count += 1 + elif ( + nest_count and tok_type == tokenize.OP and tok_string == unnest_type + ): + nest_count -= 1 + if not nest_count: + nest_type = unnest_type = None + elif ( + not nest_count + and tok_type == tokenize.OP + and tok_string in ("(", "[", "{") + ): + nest_type = tok_string + nest_count = 1 + unnest_type = {"(": ")", "[": "]", "{": "}"}[nest_type] + return sig_args, var_arg, var_kw, defaults + + +def isolate_expression(string, start_pos, end_pos): + srow, scol = start_pos + srow -= 1 + erow, ecol = end_pos + erow -= 1 + lines = string.splitlines(True) + if srow == erow: + return lines[srow][scol:ecol] + parts = [lines[srow][scol:]] + parts.extend(lines[srow + 1:erow]) + if erow < len(lines): + # It'll sometimes give (end_row_past_finish, 0) + parts.append(lines[erow][:ecol]) + return "".join(parts) + + +_fill_command_usage = """\ +%prog [OPTIONS] TEMPLATE arg=value + +Use py:arg=value to set a Python value; otherwise all values are +strings. 
+""" + + +def fill_command(args=None): + import sys + import optparse + import pkg_resources + import os + + if args is None: + args = sys.argv[1:] + dist = pkg_resources.get_distribution("Paste") + parser = optparse.OptionParser(version=coerce_text(dist), usage=_fill_command_usage) + parser.add_option( + "-o", + "--output", + dest="output", + metavar="FILENAME", + help="File to write output to (default stdout)", + ) + parser.add_option( + "--env", + dest="use_env", + action="store_true", + help="Put the environment in as top-level variables", + ) + options, args = parser.parse_args(args) + if len(args) < 1: + print("You must give a template filename") + sys.exit(2) + template_name = args[0] + args = args[1:] + vars = {} + if options.use_env: + vars.update(os.environ) + for value in args: + if "=" not in value: + print("Bad argument: %r" % value) + sys.exit(2) + name, value = value.split("=", 1) + if name.startswith("py:"): + name = name[:3] + value = eval(value) + vars[name] = value + if template_name == "-": + template_content = sys.stdin.read() + template_name = "" + else: + with open(template_name, "rb") as f: + template_content = f.read() + template = Template(template_content, name=template_name) + result = template.substitute(vars) + if options.output: + with open(options.output, "wb") as f: + f.write(result) + else: + sys.stdout.write(result) + + +if __name__ == "__main__": + fill_command() diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 4297e109ce8a..fde0d7d4a162 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -280,6 +280,15 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. 
]) """ + _set_printoptions(precision, threshold, edgeitems, linewidth, suppress, + nanstr, infstr, formatter, sign, floatmode, + legacy=legacy, override_repr=override_repr) + + +def _set_printoptions(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, + infstr=None, formatter=None, sign=None, floatmode=None, + *, legacy=None, override_repr=None): new_opt = _make_options_dict(precision, threshold, edgeitems, linewidth, suppress, nanstr, infstr, sign, formatter, floatmode, legacy) @@ -293,8 +302,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, if updated_opt['legacy'] == 113: updated_opt['sign'] = '-' - token = format_options.set(updated_opt) - return token + return format_options.set(updated_opt) @set_module('numpy') @@ -378,8 +386,9 @@ def printoptions(*args, **kwargs): -------- set_printoptions, get_printoptions - """ - token = set_printoptions(*args, **kwargs) + """ + token = _set_printoptions(*args, **kwargs) + try: yield get_printoptions() finally: diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 2e4d694065fb..cf000506e096 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -795,7 +795,7 @@ def add_newdoc(place, name, doc): Returns ------- y : ndarray or scalar - The ceiling of each element in `x`, with `float` dtype. + The ceiling of each element in `x`. 
$OUT_SCALAR_1 See Also diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 45614511ecf0..a3d8712764e0 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -207,13 +207,13 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, /, shape=None, *, newshape=None, order=None, +def _reshape_dispatcher(a, /, shape=None, order=None, *, newshape=None, copy=None): return (a,) @array_function_dispatch(_reshape_dispatcher) -def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): +def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): """ Gives a new shape to an array without changing its data. @@ -226,10 +226,6 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): an integer, then the result will be a 1-D array of that length. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. - newshape : int or tuple of ints - .. deprecated:: 2.1 - Replaced by ``shape`` argument. Retained for backward - compatibility. order : {'C', 'F', 'A'}, optional Read the elements of ``a`` using this index order, and place the elements into the reshaped array using this index order. 'C' @@ -243,6 +239,10 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): 'A' means to read / write the elements in Fortran-like index order if ``a`` is Fortran *contiguous* in memory, C-like order otherwise. + newshape : int or tuple of ints + .. deprecated:: 2.1 + Replaced by ``shape`` argument. Retained for backward + compatibility. copy : bool, optional If ``True``, then the array data is copied. If ``None``, a copy will only be made if it's required by ``order``. 
For ``False`` it raises diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 0d4e30ce8101..08e791789c82 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -92,15 +92,21 @@ def take( @overload def reshape( a: _ArrayLike[_SCT], - newshape: _ShapeLike, + /, + shape: _ShapeLike = ..., order: _OrderACF = ..., + *, + newshape: _ShapeLike = ..., copy: None | bool = ..., ) -> NDArray[_SCT]: ... @overload def reshape( a: ArrayLike, - newshape: _ShapeLike, + /, + shape: _ShapeLike = ..., order: _OrderACF = ..., + *, + newshape: _ShapeLike = ..., copy: None | bool = ..., ) -> NDArray[Any]: ... diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h index 80bb4088c812..e39e65aedea7 100644 --- a/numpy/_core/include/numpy/npy_2_compat.h +++ b/numpy/_core/include/numpy/npy_2_compat.h @@ -125,7 +125,7 @@ PyArray_ImportNumPyAPI(void) #define NPY_DEFAULT_INT \ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_INTP : NPY_LONG) #define NPY_RAVEL_AXIS \ - (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? -1 : 32) + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32) #define NPY_MAXARGS \ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 
64 : 32) #endif diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index 3132b602a7c8..79ad8ad78cb2 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -379,11 +379,6 @@ typedef struct #include -// Downstream libraries like sympy would like to use I -// see https://github.com/numpy/numpy/issues/26787 -#ifdef I -#undef I -#endif #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) typedef _Dcomplex npy_cdouble; diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index b49d215614ac..46ecade41ada 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -121,8 +121,8 @@ /* user provided a target version, use it */ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION #else - /* Use the default (increase when dropping Python 3.9 support) */ - #define NPY_FEATURE_VERSION NPY_1_19_API_VERSION + /* Use the default (increase when dropping Python 3.10 support) */ + #define NPY_FEATURE_VERSION NPY_1_21_API_VERSION #endif /* Sanity check the (requested) feature version */ @@ -130,7 +130,14 @@ #error "NPY_TARGET_VERSION higher than NumPy headers!" #elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION /* No support for irrelevant old targets, no need for error, but warn. */ - #warning "Requested NumPy target lower than supported NumPy 1.15." + #ifndef _MSC_VER + #warning "Requested NumPy target lower than supported NumPy 1.15." 
+ #else + #define _WARN___STR2__(x) #x + #define _WARN___STR1__(x) _WARN___STR2__(x) + #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + #pragma message(_WARN___LOC__"Requested NumPy target lower than supported NumPy 1.15.") + #endif #endif /* diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index dbf1a144ed93..544af3665be7 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -681,7 +681,6 @@ c_args_common = [ # Same as NPY_CXX_FLAGS (TODO: extend for what ccompiler_opt adds) cpp_args_common = c_args_common + [ - '-D__STDC_VERSION__=0', # for compatibility with C headers ] if cc.get_argument_syntax() != 'msvc' cpp_args_common += [ @@ -829,7 +828,7 @@ foreach gen_mtargets : [ 'highway_qsort.dispatch.h', 'src/npysort/highway_qsort.dispatch.cpp', use_highway ? [ - SVE, ASIMD, VSX2, # FIXME: disable VXE due to runtime segfault + ASIMD, VSX2, # FIXME: disable VXE due to runtime segfault ] : [] ], [ diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 39b3de44fabe..61518d5ab56f 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -1272,7 +1272,7 @@ def roll(a, shift, axis=None): "'shift' and 'axis' should be scalars or 1D sequences") shifts = {ax: 0 for ax in range(a.ndim)} for sh, ax in broadcasted: - shifts[ax] += sh + shifts[ax] += int(sh) rolls = [((slice(None), slice(None)),)] * a.ndim for ax, offset in shifts.items(): @@ -2554,17 +2554,17 @@ def array_equal(a1, a2, equal_nan=False): if a1.shape != a2.shape: return False if not equal_nan: - return builtins.bool((a1 == a2).all()) - cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) - and _dtype_cannot_hold_nan(a2.dtype)) - if cannot_have_nan: - if a1 is a2: - return True - return builtins.bool((a1 == a2).all()) + return builtins.bool((asanyarray(a1 == a2)).all()) if a1 is a2: # nan will compare equal so an array will compare equal to itself. 
return True + + cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) + and _dtype_cannot_hold_nan(a2.dtype)) + if cannot_have_nan: + return builtins.bool(asarray(a1 == a2).all()) + # Handling NaN values if equal_nan is True a1nan, a2nan = isnan(a1), isnan(a2) # NaN's occur at different locations @@ -2624,7 +2624,7 @@ def array_equiv(a1, a2): except Exception: return False - return builtins.bool((a1 == a2).all()) + return builtins.bool(asanyarray(a1 == a2).all()) def _astype_dispatcher(x, dtype, /, *, copy=None, device=None): diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h index b92d58d583c0..910028dcde7c 100644 --- a/numpy/_core/src/common/npy_atomic.h +++ b/numpy/_core/src/common/npy_atomic.h @@ -9,7 +9,8 @@ #include "numpy/npy_common.h" -#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \ + && !defined(__STDC_NO_ATOMICS__) // TODO: support C++ atomics as well if this header is ever needed in C++ #include #include @@ -53,15 +54,15 @@ npy_atomic_load_ptr(const void *obj) { #elif defined(MSC_ATOMICS) #if SIZEOF_VOID_P == 8 #if defined(_M_X64) || defined(_M_IX86) - return *(volatile uint64_t *)obj; + return (void *)*(volatile uint64_t *)obj; #elif defined(_M_ARM64) - return (uint64_t)__ldar64((unsigned __int64 volatile *)obj); + return (void *)__ldar64((unsigned __int64 volatile *)obj); #endif #else #if defined(_M_X64) || defined(_M_IX86) - return *(volatile uint32_t *)obj; + return (void *)*(volatile uint32_t *)obj; #elif defined(_M_ARM64) - return (uint32_t)__ldar32((unsigned __int32 volatile *)obj); + return (void *)__ldar32((unsigned __int32 volatile *)obj); #endif #endif #elif defined(GCC_ATOMICS) diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.c index 5c745ba388cd..596e62cf8354 100644 --- a/numpy/_core/src/common/npy_hashtable.c +++ b/numpy/_core/src/common/npy_hashtable.c @@ -29,18 +29,6 @@ #define 
_NpyHASH_XXROTATE(x) ((x << 13) | (x >> 19)) /* Rotate left 13 bits */ #endif -#ifdef Py_GIL_DISABLED -#define LOCK_TABLE(tb) PyMutex_Lock(&tb->mutex) -#define UNLOCK_TABLE(tb) PyMutex_Unlock(&tb->mutex) -#define INITIALIZE_LOCK(tb) memset(&tb->mutex, 0, sizeof(PyMutex)) -#else -// the GIL serializes access to the table so no need -// for locking if it is enabled -#define LOCK_TABLE(tb) -#define UNLOCK_TABLE(tb) -#define INITIALIZE_LOCK(tb) -#endif - /* * This hashing function is basically the Python tuple hash with the type * identity hash inlined. The tuple hash itself is a reduced version of xxHash. @@ -112,8 +100,6 @@ PyArrayIdentityHash_New(int key_len) res->size = 4; /* Start with a size of 4 */ res->nelem = 0; - INITIALIZE_LOCK(res); - res->buckets = PyMem_Calloc(4 * (key_len + 1), sizeof(PyObject *)); if (res->buckets == NULL) { PyErr_NoMemory(); @@ -206,17 +192,14 @@ NPY_NO_EXPORT int PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, PyObject *const *key, PyObject *value, int replace) { - LOCK_TABLE(tb); if (value != NULL && _resize_if_necessary(tb) < 0) { /* Shrink, only if a new value is added. 
*/ - UNLOCK_TABLE(tb); return -1; } PyObject **tb_item = find_item(tb, key); if (value != NULL) { if (tb_item[0] != NULL && tb_item[0] != value && !replace) { - UNLOCK_TABLE(tb); PyErr_SetString(PyExc_RuntimeError, "Identity cache already includes an item with this key."); return -1; @@ -230,7 +213,6 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, memset(tb_item, 0, (tb->key_len + 1) * sizeof(PyObject *)); } - UNLOCK_TABLE(tb); return 0; } @@ -238,8 +220,6 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, NPY_NO_EXPORT PyObject * PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) { - LOCK_TABLE(tb); PyObject *res = find_item(tb, key)[0]; - UNLOCK_TABLE(tb); return res; } diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index 583f3d9861a6..a4252da87aff 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -13,13 +13,6 @@ typedef struct { PyObject **buckets; npy_intp size; /* current size */ npy_intp nelem; /* number of elements */ -#ifdef Py_GIL_DISABLED -#if PY_VERSION_HEX < 0x30d00b3 -#error "GIL-disabled builds require Python 3.13.0b3 or newer" -#else - PyMutex mutex; -#endif -#endif } PyArrayIdentityHash; diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 01341acbbef0..0f1d42a10a3f 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 01341acbbef0ca85cf2fa31b63307ddf4d9a87fb +Subproject commit 0f1d42a10a3f594ad48894912396df31b2c2d55d diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 5975f5ef76c3..68b0fdebffb1 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 5975f5ef76c3e4364844d869454046f0f8420ef8 +Subproject commit 68b0fdebffb14f3b8473fed1c33ce368efc431e7 diff --git a/numpy/_core/src/multiarray/abstractdtypes.c 
b/numpy/_core/src/multiarray/abstractdtypes.c index 214833737792..ae7a8ec1506c 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -177,7 +177,6 @@ int_common_dtype(PyArray_DTypeMeta *NPY_UNUSED(cls), PyArray_DTypeMeta *other) /* This is a back-compat fallback to usually do the right thing... */ PyArray_DTypeMeta *uint8_dt = &PyArray_UInt8DType; PyArray_DTypeMeta *res = NPY_DT_CALL_common_dtype(other, uint8_dt); - Py_DECREF(uint8_dt); if (res == NULL) { PyErr_Clear(); } @@ -477,7 +476,6 @@ npy_find_descr_for_scalar( /* If the DType doesn't know the scalar type, guess at default. */ !NPY_DT_CALL_is_known_scalar_type(common, Py_TYPE(scalar))) { if (common->singleton != NULL) { - Py_INCREF(common->singleton); res = common->singleton; Py_INCREF(res); } diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index b7e7c9948ce1..396a7adb3148 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -238,7 +238,11 @@ PyDataMem_NEW(size_t size) assert(size != 0); result = malloc(size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + free(result); + return NULL; + } return result; } @@ -251,7 +255,11 @@ PyDataMem_NEW_ZEROED(size_t nmemb, size_t size) void *result; result = calloc(nmemb, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + if (ret == -1) { + free(result); + return NULL; + } return result; } @@ -274,11 +282,13 @@ PyDataMem_RENEW(void *ptr, size_t size) void *result; assert(size != 0); + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); result = realloc(ptr, size); - if (result != ptr) { - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, 
(npy_uintp)result, size); + if (ret == -1) { + free(result); + return NULL; } - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); return result; } @@ -362,7 +372,11 @@ PyDataMem_UserNEW(size_t size, PyObject *mem_handler) } assert(size != 0); result = handler->allocator.malloc(handler->allocator.ctx, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; + } return result; } @@ -376,7 +390,11 @@ PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler) return NULL; } result = handler->allocator.calloc(handler->allocator.ctx, nmemb, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; + } return result; } @@ -406,11 +424,13 @@ PyDataMem_UserRENEW(void *ptr, size_t size, PyObject *mem_handler) } assert(size != 0); + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); result = handler->allocator.realloc(handler->allocator.ctx, ptr, size); - if (result != ptr) { - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; } - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); return result; } diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index 69da09875bfb..0cffcc6bab22 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -6,6 +6,7 @@ #include #include "numpy/npy_3kcompat.h" +#include "npy_pycompat.h" #include "lowlevel_strided_loops.h" 
#include "numpy/arrayobject.h" @@ -224,24 +225,23 @@ npy_discover_dtype_from_pytype(PyTypeObject *pytype) PyObject *DType; if (pytype == &PyArray_Type) { - DType = Py_None; + DType = Py_NewRef(Py_None); } else if (pytype == &PyFloat_Type) { - DType = (PyObject *)&PyArray_PyFloatDType; + DType = Py_NewRef((PyObject *)&PyArray_PyFloatDType); } else if (pytype == &PyLong_Type) { - DType = (PyObject *)&PyArray_PyLongDType; + DType = Py_NewRef((PyObject *)&PyArray_PyLongDType); } else { - DType = PyDict_GetItem(_global_pytype_to_type_dict, - (PyObject *)pytype); + int res = PyDict_GetItemRef(_global_pytype_to_type_dict, + (PyObject *)pytype, (PyObject **)&DType); - if (DType == NULL) { - /* the python type is not known */ + if (res <= 0) { + /* the python type is not known or an error was set */ return NULL; } } - Py_INCREF(DType); assert(DType == Py_None || PyObject_TypeCheck(DType, (PyTypeObject *)&PyArrayDTypeMeta_Type)); return (PyArray_DTypeMeta *)DType; } diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 655122ff7f09..9a7351e313ae 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -62,7 +62,7 @@ NPY_NO_EXPORT PyArray_Descr * PyArray_DTypeFromObjectStringDiscovery( PyObject *obj, PyArray_Descr *last_dtype, int string_type) { - int itemsize; + npy_intp itemsize; if (string_type == NPY_STRING) { PyObject *temp = PyObject_Str(obj); @@ -75,6 +75,12 @@ PyArray_DTypeFromObjectStringDiscovery( if (itemsize < 0) { return NULL; } + if (itemsize > NPY_MAX_INT) { + /* We can allow this, but should audit code paths before we do. 
*/ + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", itemsize); + return NULL; + } } else if (string_type == NPY_UNICODE) { PyObject *temp = PyObject_Str(obj); @@ -86,6 +92,11 @@ PyArray_DTypeFromObjectStringDiscovery( if (itemsize < 0) { return NULL; } + if (itemsize > NPY_MAX_INT / 4) { + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", itemsize); + return NULL; + } itemsize *= 4; /* convert UCS4 codepoints to bytes */ } else { diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 550d3e253868..e83ef6076c39 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -49,7 +49,7 @@ */ NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[] = {0, 3, 5, 10, 10, 20, 20, 20, 20}; -static NPY_TLS int npy_promotion_state = NPY_USE_LEGACY_PROMOTION; +static NPY_TLS int npy_promotion_state = NPY_USE_WEAK_PROMOTION; NPY_NO_EXPORT int get_npy_promotion_state() { @@ -897,18 +897,29 @@ can_cast_pyscalar_scalar_to( } /* - * For all other cases we use the default dtype. + * For all other cases we need to make a bit of a dance to find the cast + * safety. We do so by finding the descriptor for the "scalar" (without + * a value; for parametric user dtypes a value may be needed eventually). 
*/ - PyArray_Descr *from; + PyArray_DTypeMeta *from_DType; + PyArray_Descr *default_dtype; if (flags & NPY_ARRAY_WAS_PYTHON_INT) { - from = PyArray_DescrFromType(NPY_LONG); + default_dtype = PyArray_DescrNewFromType(NPY_INTP); + from_DType = &PyArray_PyLongDType; } else if (flags & NPY_ARRAY_WAS_PYTHON_FLOAT) { - from = PyArray_DescrFromType(NPY_DOUBLE); + default_dtype = PyArray_DescrNewFromType(NPY_FLOAT64); + from_DType = &PyArray_PyFloatDType; } else { - from = PyArray_DescrFromType(NPY_CDOUBLE); + default_dtype = PyArray_DescrNewFromType(NPY_COMPLEX128); + from_DType = &PyArray_PyComplexDType; } + + PyArray_Descr *from = npy_find_descr_for_scalar( + NULL, default_dtype, from_DType, NPY_DTYPE(to)); + Py_DECREF(default_dtype); + int res = PyArray_CanCastTypeTo(from, to, casting); Py_DECREF(from); return res; @@ -2810,6 +2821,11 @@ cast_to_string_resolve_descriptors( return -1; } if (dtypes[1]->type_num == NPY_UNICODE) { + if (size > NPY_MAX_INT / 4) { + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", size); + return -1; + } size *= 4; } diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 5c1a78daf0c5..c659dfa356cd 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2234,8 +2234,8 @@ PyArray_FromInterface(PyObject *origin) Py_SETREF(dtype, new_dtype); } } + Py_DECREF(descr); } - Py_DECREF(descr); } Py_CLEAR(attr); diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index a47a71d39196..1e8e6e337d0c 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -273,8 +273,16 @@ _convert_from_tuple(PyObject *obj, int align) if (PyDataType_ISUNSIZED(type)) { /* interpret next item as a typesize */ int itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); - - if (error_converting(itemsize)) { + if (type->type_num == NPY_UNICODE) { + if (itemsize > NPY_MAX_INT / 
4) { + itemsize = -1; + } + else { + itemsize *= 4; + } + } + if (itemsize < 0) { + /* Error may or may not be set by PyIntAsInt. */ PyErr_SetString(PyExc_ValueError, "invalid itemsize in generic type tuple"); Py_DECREF(type); @@ -284,12 +292,8 @@ _convert_from_tuple(PyObject *obj, int align) if (type == NULL) { return NULL; } - if (type->type_num == NPY_UNICODE) { - type->elsize = itemsize << 2; - } - else { - type->elsize = itemsize; - } + + type->elsize = itemsize; return type; } else if (type->metadata && (PyDict_Check(val) || PyDictProxy_Check(val))) { @@ -1855,7 +1859,10 @@ _convert_from_str(PyObject *obj, int align) */ case NPY_UNICODELTR: check_num = NPY_UNICODE; - elsize <<= 2; + if (elsize > (NPY_MAX_INT / 4)) { + goto fail; + } + elsize *= 4; break; case NPY_VOIDLTR: diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 244b47250786..6adb00d16925 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -494,12 +494,14 @@ string_discover_descr_from_pyobject( itemsize = PyUnicode_GetLength(obj); } if (itemsize != -1) { - if (cls->type_num == NPY_UNICODE) { - itemsize *= 4; - } - if (itemsize > NPY_MAX_INT) { + if (itemsize > NPY_MAX_INT || ( + cls->type_num == NPY_UNICODE && itemsize > NPY_MAX_INT / 4)) { PyErr_SetString(PyExc_TypeError, "string too large to store inside array."); + return NULL; + } + if (cls->type_num == NPY_UNICODE) { + itemsize *= 4; } PyArray_Descr *res = PyArray_DescrNewFromType(cls->type_num); if (res == NULL) { @@ -1256,22 +1258,22 @@ dtypemeta_wrap_legacy_descriptor( static PyObject * -dtypemeta_get_abstract(PyArray_DTypeMeta *self) { +dtypemeta_get_abstract(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_abstract(self)); } static PyObject * -dtypemeta_get_legacy(PyArray_DTypeMeta *self) { +dtypemeta_get_legacy(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return 
PyBool_FromLong(NPY_DT_is_legacy(self)); } static PyObject * -dtypemeta_get_parametric(PyArray_DTypeMeta *self) { +dtypemeta_get_parametric(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_parametric(self)); } static PyObject * -dtypemeta_get_is_numeric(PyArray_DTypeMeta *self) { +dtypemeta_get_is_numeric(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_numeric(self)); } diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 4d98ce0c350c..f3ce35f3092f 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -785,21 +785,21 @@ static NPY_GCC_OPT_3 inline int npy_fastrepeat_impl( npy_intp n_outer, npy_intp n, npy_intp nel, npy_intp chunk, npy_bool broadcast, npy_intp* counts, char* new_data, char* old_data, - npy_intp elsize, NPY_cast_info cast_info, int needs_refcounting) + npy_intp elsize, NPY_cast_info *cast_info, int needs_custom_copy) { npy_intp i, j, k; for (i = 0; i < n_outer; i++) { for (j = 0; j < n; j++) { npy_intp tmp = broadcast ? counts[0] : counts[j]; for (k = 0; k < tmp; k++) { - if (!needs_refcounting) { + if (!needs_custom_copy) { memcpy(new_data, old_data, chunk); } else { char *data[2] = {old_data, new_data}; npy_intp strides[2] = {elsize, elsize}; - if (cast_info.func(&cast_info.context, data, &nel, - strides, cast_info.auxdata) < 0) { + if (cast_info->func(&cast_info->context, data, &nel, + strides, cast_info->auxdata) < 0) { return -1; } } @@ -811,48 +811,53 @@ npy_fastrepeat_impl( return 0; } + +/* + * Helper to allow the compiler to specialize for all direct element copy + * cases (e.g. all numerical dtypes). 
+ */ static NPY_GCC_OPT_3 int npy_fastrepeat( npy_intp n_outer, npy_intp n, npy_intp nel, npy_intp chunk, npy_bool broadcast, npy_intp* counts, char* new_data, char* old_data, - npy_intp elsize, NPY_cast_info cast_info, int needs_refcounting) + npy_intp elsize, NPY_cast_info *cast_info, int needs_custom_copy) { - if (!needs_refcounting) { + if (!needs_custom_copy) { if (chunk == 1) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 2) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 4) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 8) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 16) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 32) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } } return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, elsize, - cast_info, needs_refcounting); + cast_info, needs_custom_copy); } @@ -872,7 +877,6 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) char *new_data, *old_data; NPY_cast_info cast_info; NPY_ARRAYMETHOD_FLAGS flags; - int needs_refcounting; repeats = (PyArrayObject *)PyArray_ContiguousFromAny(op, NPY_INTP, 0, 1); if (repeats == NULL) { @@ -897,7 +901,6 @@ 
PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) aop = (PyArrayObject *)ap; n = PyArray_DIM(aop, axis); NPY_cast_info_init(&cast_info); - needs_refcounting = PyDataType_REFCHK(PyArray_DESCR(aop)); if (!broadcast && PyArray_SIZE(repeats) != n) { PyErr_Format(PyExc_ValueError, @@ -947,16 +950,18 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) n_outer *= PyArray_DIMS(aop)[i]; } - if (needs_refcounting) { + int needs_custom_copy = 0; + if (PyDataType_REFCHK(PyArray_DESCR(ret))) { + needs_custom_copy = 1; if (PyArray_GetDTypeTransferFunction( - 1, elsize, elsize, PyArray_DESCR(aop), PyArray_DESCR(aop), 0, + 1, elsize, elsize, PyArray_DESCR(aop), PyArray_DESCR(ret), 0, &cast_info, &flags) < 0) { goto fail; } } if (npy_fastrepeat(n_outer, n, nel, chunk, broadcast, counts, new_data, - old_data, elsize, cast_info, needs_refcounting) < 0) { + old_data, elsize, &cast_info, needs_custom_copy) < 0) { goto fail; } diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 4a6c1f093769..62204ce762f8 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1667,7 +1667,7 @@ array_subscript(PyArrayObject *self, PyObject *op) if (PyArray_GetDTypeTransferFunction(1, itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + PyArray_DESCR(self), PyArray_DESCR(mit->extra_op), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto finish; } @@ -2034,7 +2034,6 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) goto fail; } - int allocated_array = 0; if (tmp_arr == NULL) { /* Fill extra op, need to swap first */ tmp_arr = mit->extra_op; @@ -2048,7 +2047,11 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) if (PyArray_CopyObject(tmp_arr, op) < 0) { goto fail; } - allocated_array = 1; + /* + * In this branch we copy directly from a newly allocated array which + * may have a new descr: + */ + descr = PyArray_DESCR(tmp_arr); } if 
(PyArray_MapIterCheckIndices(mit) < 0) { @@ -2096,8 +2099,7 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) // for non-REFCHK user DTypes. See gh-27057 for the prior discussion about this. if (PyArray_GetDTypeTransferFunction( 1, itemsize, itemsize, - allocated_array ? PyArray_DESCR(mit->extra_op) : PyArray_DESCR(self), - PyArray_DESCR(self), + descr, PyArray_DESCR(self), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 4a8e1ea4579e..2a950d6ca5d1 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1120,7 +1120,14 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw &func, &types, &args, &kwargs)) { return NULL; } - + if (!PyTuple_CheckExact(args)) { + PyErr_SetString(PyExc_TypeError, "args must be a tuple."); + return NULL; + } + if (!PyDict_CheckExact(kwargs)) { + PyErr_SetString(PyExc_TypeError, "kwargs must be a dict."); + return NULL; + } types = PySequence_Fast( types, "types argument to ndarray.__array_function__ must be iterable"); diff --git a/numpy/_core/src/multiarray/nditer_constr.c b/numpy/_core/src/multiarray/nditer_constr.c index 427dd3d876bc..ab1a540cb283 100644 --- a/numpy/_core/src/multiarray/nditer_constr.c +++ b/numpy/_core/src/multiarray/nditer_constr.c @@ -1315,8 +1315,10 @@ npyiter_check_casting(int nop, PyArrayObject **op, printf("\n"); #endif /* If the types aren't equivalent, a cast is necessary */ - if (op[iop] != NULL && !PyArray_EquivTypes(PyArray_DESCR(op[iop]), - op_dtype[iop])) { + npy_intp view_offset = NPY_MIN_INTP; + if (op[iop] != NULL && !(PyArray_SafeCast( + PyArray_DESCR(op[iop]), op_dtype[iop], &view_offset, + NPY_NO_CASTING, 1) && view_offset == 0)) { /* Check read (op -> temp) casting */ if ((op_itflags[iop] & NPY_OP_ITFLAG_READ) && !PyArray_CanCastArrayTo(op[iop], diff --git a/numpy/_core/src/multiarray/refcount.c 
b/numpy/_core/src/multiarray/refcount.c index 0da40cbdc60e..571b50372684 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -83,14 +83,16 @@ PyArray_ZeroContiguousBuffer( if (get_fill_zero_loop( NULL, descr, aligned, descr->elsize, &(zero_info.func), &(zero_info.auxdata), &flags_unused) < 0) { - goto fail; + return -1; } } else { + assert(zero_info.func == NULL); + } + if (zero_info.func == NULL) { /* the multiply here should never overflow, since we already checked if the new array size doesn't overflow */ memset(data, 0, size*stride); - NPY_traverse_info_xfree(&zero_info); return 0; } @@ -98,10 +100,6 @@ PyArray_ZeroContiguousBuffer( NULL, descr, data, size, stride, zero_info.auxdata); NPY_traverse_info_xfree(&zero_info); return res; - - fail: - NPY_traverse_info_xfree(&zero_info); - return -1; } diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index 8fe13d0d3532..214c5c499ad8 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -6,6 +6,7 @@ #define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/npy_3kcompat.h" +#include "npy_pycompat.h" #include "alloc.h" #include @@ -58,13 +59,16 @@ create_conv_funcs( PyObject *key, *value; Py_ssize_t pos = 0; + int error = 0; + Py_BEGIN_CRITICAL_SECTION(converters); while (PyDict_Next(converters, &pos, &key, &value)) { Py_ssize_t column = PyNumber_AsSsize_t(key, PyExc_IndexError); if (column == -1 && PyErr_Occurred()) { PyErr_Format(PyExc_TypeError, "keys of the converters dictionary must be integers; " "got %.100R", key); - goto error; + error = 1; + break; } if (usecols != NULL) { /* @@ -92,7 +96,8 @@ create_conv_funcs( PyErr_Format(PyExc_ValueError, "converter specified for column %zd, which is invalid " "for the number of fields %zd.", column, num_fields); - goto error; + error = 1; + break; } if (column < 0) { column += num_fields; @@ 
-102,11 +107,18 @@ create_conv_funcs( PyErr_Format(PyExc_TypeError, "values of the converters dictionary must be callable, " "but the value associated with key %R is not", key); - goto error; + error = 1; + break; } Py_INCREF(value); conv_funcs[column] = value; } + Py_END_CRITICAL_SECTION(); + + if (error) { + goto error; + } + return conv_funcs; error: diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index 110e2f40ab32..e76509ad7db2 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -976,6 +976,10 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } } + int error_res = 0; + PyObject *all_dtypes; + PyArrayMethodObject *method; + Py_BEGIN_CRITICAL_SECTION((PyObject *)ufunc); int current_promotion_state = get_npy_promotion_state(); if (force_legacy_promotion && legacy_promotion_is_possible @@ -989,42 +993,51 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, int cacheable = 1; /* unused, as we modify the original `op_dtypes` */ if (legacy_promote_using_legacy_type_resolver(ufunc, ops, signature, op_dtypes, &cacheable, NPY_FALSE) < 0) { - goto handle_error; + error_res = -1; } } - /* Pause warnings and always use "new" path */ - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); - PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, - ops, signature, op_dtypes, legacy_promotion_is_possible); - set_npy_promotion_state(current_promotion_state); + PyObject *info = NULL; + if (error_res == 0) { + /* Pause warnings and always use "new" path */ + set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); + info = promote_and_get_info_and_ufuncimpl(ufunc, + ops, signature, op_dtypes, legacy_promotion_is_possible); + set_npy_promotion_state(current_promotion_state); - if (info == NULL) { - goto handle_error; + if (info == NULL) { + error_res = -1; + } } - PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); - PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); + if (error_res 
== 0) { + method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); + all_dtypes = PyTuple_GET_ITEM(info, 0); - /* If necessary, check if the old result would have been different */ - if (NPY_UNLIKELY(current_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) - && (force_legacy_promotion || promoting_pyscalars) - && npy_give_promotion_warnings()) { - PyArray_DTypeMeta *check_dtypes[NPY_MAXARGS]; - for (int i = 0; i < nargs; i++) { - check_dtypes[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM( - all_dtypes, i); - } - /* Before calling to the legacy promotion, pretend that is the state: */ - set_npy_promotion_state(NPY_USE_LEGACY_PROMOTION); - int res = legacy_promote_using_legacy_type_resolver(ufunc, - ops, signature, check_dtypes, NULL, NPY_TRUE); - /* Reset the promotion state: */ - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION_AND_WARN); - if (res < 0) { - goto handle_error; + /* If necessary, check if the old result would have been different */ + if (NPY_UNLIKELY(current_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) + && (force_legacy_promotion || promoting_pyscalars) + && npy_give_promotion_warnings()) { + PyArray_DTypeMeta *check_dtypes[NPY_MAXARGS]; + for (int i = 0; i < nargs; i++) { + check_dtypes[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM( + all_dtypes, i); + } + /* Before calling to the legacy promotion, pretend that is the state: */ + set_npy_promotion_state(NPY_USE_LEGACY_PROMOTION); + int res = legacy_promote_using_legacy_type_resolver(ufunc, + ops, signature, check_dtypes, NULL, NPY_TRUE); + /* Reset the promotion state: */ + set_npy_promotion_state(NPY_USE_WEAK_PROMOTION_AND_WARN); + if (res < 0) { + error_res = 0; + } } } + Py_END_CRITICAL_SECTION(); + if (error_res < 0) { + goto handle_error; + } /* * In certain cases (only the logical ufuncs really), the loop we found may diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index cd28e4405b6d..3b7b65e97fab 100644 --- 
a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -1369,7 +1369,7 @@ static PyObject * */ PyObject *ret; npy_float64 arg1, arg2, other_val; - @type@ other_val_conv; + @type@ other_val_conv = 0; int is_forward; if (Py_TYPE(a) == &Py@Name@ArrType_Type) { diff --git a/numpy/_core/src/umath/string_fastsearch.h b/numpy/_core/src/umath/string_fastsearch.h index 61abdcb5ad19..96c1e2d30140 100644 --- a/numpy/_core/src/umath/string_fastsearch.h +++ b/numpy/_core/src/umath/string_fastsearch.h @@ -9,6 +9,7 @@ #include #include +#include #include diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 2bc4ce20acd6..0e28240ee5f0 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -643,6 +643,20 @@ string_addition_resolve_descriptors( PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { + npy_intp result_itemsize = given_descrs[0]->elsize + given_descrs[1]->elsize; + + /* NOTE: elsize can fit more than MAX_INT, but some code may still use ints */ + if (result_itemsize > NPY_MAX_INT || result_itemsize < 0) { + npy_intp length = result_itemsize; + if (given_descrs[0]->type == NPY_UNICODE) { + length /= 4; + } + PyErr_Format(PyExc_TypeError, + "addition result string of length %zd is too large to store inside array.", + length); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); if (loop_descrs[0] == NULL) { return _NPY_ERROR_OCCURRED_IN_CAST; @@ -650,11 +664,14 @@ string_addition_resolve_descriptors( loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]); if (loop_descrs[1] == NULL) { + Py_DECREF(loop_descrs[0]); return _NPY_ERROR_OCCURRED_IN_CAST; } loop_descrs[2] = PyArray_DescrNew(loop_descrs[0]); if (loop_descrs[2] == NULL) { + Py_DECREF(loop_descrs[0]); + Py_DECREF(loop_descrs[1]); return _NPY_ERROR_OCCURRED_IN_CAST; } loop_descrs[2]->elsize += 
loop_descrs[1]->elsize; diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index ed9f62077589..8e25b3968cfe 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1598,6 +1598,20 @@ string_expandtabs_strided_loop(PyArrayMethod_Context *context, return -1; } +static int +string_center_ljust_rjust_promoter( + PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_StringDType); + new_op_dtypes[3] = NPY_DT_NewRef(&PyArray_StringDType); + return 0; +} + static NPY_CASTING center_ljust_rjust_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), @@ -2595,10 +2609,17 @@ init_stringdtype_ufuncs(PyObject *umath) "find", "rfind", "index", "rindex", "count", }; - PyArray_DTypeMeta *findlike_promoter_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, - &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, - &PyArray_DefaultIntDType, + PyArray_DTypeMeta *findlike_promoter_dtypes[2][5] = { + { + &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_IntAbstractDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_IntAbstractDType, + }, }; find_like_function *findlike_functions[] = { @@ -2618,11 +2639,12 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - - if (add_promoter(umath, findlike_names[i], - findlike_promoter_dtypes, - 5, string_findlike_promoter) < 0) { - return -1; + for (int j=0; j<2; j++) { + if (add_promoter(umath, findlike_names[i], + findlike_promoter_dtypes[j], + 5, string_findlike_promoter) < 0) { + return -1; + } } } @@ -2636,10 +2658,17 
@@ init_stringdtype_ufuncs(PyObject *umath) "startswith", "endswith", }; - PyArray_DTypeMeta *startswith_endswith_promoter_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, - &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, - &PyArray_BoolDType, + PyArray_DTypeMeta *startswith_endswith_promoter_dtypes[2][5] = { + { + &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_BoolDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_BoolDType, + }, }; static STARTPOSITION startswith_endswith_startposition[] = { @@ -2656,11 +2685,12 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - - if (add_promoter(umath, startswith_endswith_names[i], - startswith_endswith_promoter_dtypes, - 5, string_startswith_endswith_promoter) < 0) { - return -1; + for (int j=0; j<2; j++) { + if (add_promoter(umath, startswith_endswith_names[i], + startswith_endswith_promoter_dtypes[j], + 5, string_startswith_endswith_promoter) < 0) { + return -1; + } } } @@ -2732,24 +2762,38 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - PyArray_DTypeMeta *replace_promoter_pyint_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, - &PyArray_IntAbstractDType, &PyArray_StringDType, - }; - - if (add_promoter(umath, "_replace", replace_promoter_pyint_dtypes, 5, - string_replace_promoter) < 0) { - return -1; - } - - PyArray_DTypeMeta *replace_promoter_int64_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, - &PyArray_Int64DType, &PyArray_StringDType, + PyArray_DTypeMeta *replace_promoter_unicode_dtypes[6][5] = { + { + &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + 
&PyArray_UnicodeDType, &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_StringDType, &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, }; - if (add_promoter(umath, "_replace", replace_promoter_int64_dtypes, 5, - string_replace_promoter) < 0) { - return -1; + for (int j=0; j<6; j++) { + if (add_promoter(umath, "_replace", replace_promoter_unicode_dtypes[j], 5, + string_replace_promoter) < 0) { + return -1; + } } PyArray_DTypeMeta *expandtabs_dtypes[] = { @@ -2767,9 +2811,9 @@ init_stringdtype_ufuncs(PyObject *umath) } PyArray_DTypeMeta *expandtabs_promoter_dtypes[] = { - &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, - &PyArray_StringDType + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_StringDType }; if (add_promoter(umath, "_expandtabs", expandtabs_promoter_dtypes, @@ -2801,30 +2845,33 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - PyArray_DTypeMeta *int_promoter_dtypes[] = { - &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, - &PyArray_StringDType, - &PyArray_StringDType, - }; - - if (add_promoter(umath, center_ljust_rjust_names[i], - int_promoter_dtypes, 4, - string_multiply_promoter) < 0) { - return -1; - } - - PyArray_DTypeMeta *unicode_promoter_dtypes[] = { - &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, - &PyArray_UnicodeDType, - &PyArray_StringDType, + PyArray_DTypeMeta *promoter_dtypes[3][4] = { + { + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_StringDType, + &PyArray_StringDType, + }, + { + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_UnicodeDType, + &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, 
+ &PyArray_IntAbstractDType, + &PyArray_StringDType, + &PyArray_StringDType, + }, }; - if (add_promoter(umath, center_ljust_rjust_names[i], - unicode_promoter_dtypes, 4, - string_multiply_promoter) < 0) { - return -1; + for (int j=0; j<3; j++) { + if (add_promoter(umath, center_ljust_rjust_names[i], + promoter_dtypes[j], 4, + string_center_ljust_rjust_promoter) < 0) { + return -1; + } } } @@ -2840,13 +2887,13 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - PyArray_DTypeMeta *int_promoter_dtypes[] = { + PyArray_DTypeMeta *zfill_promoter_dtypes[] = { &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, + &PyArray_IntAbstractDType, &PyArray_StringDType, }; - if (add_promoter(umath, "_zfill", int_promoter_dtypes, 3, + if (add_promoter(umath, "_zfill", zfill_promoter_dtypes, 3, string_multiply_promoter) < 0) { return -1; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 6bd02b0fec87..fa76455243dd 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -2916,8 +2916,6 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count); - needs_api = PyDataType_REFCHK(descrs[0]); - if (!needs_api) { NPY_BEGIN_THREADS_THRESHOLDED(count); } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 0820411840ea..4f732fdcfdbc 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -669,20 +669,27 @@ def center(a, width, fillchar=' '): array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype=' 0: + size = size // np.dtype(f"{str_dt}1").itemsize + 1 + + with pytest.raises(ValueError): + np.dtype((str_dt, size)) + with pytest.raises(TypeError): + np.dtype(f"{str_dt}{size}") + + +@pytest.mark.parametrize("str_dt", "US") +def test_string_size_dtype_large_repr(str_dt): + size = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + size_str = str(size) + + dtype = np.dtype((str_dt, size)) + assert 
size_str in dtype.str + assert size_str in str(dtype) + assert size_str in repr(dtype) + + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +def test_large_string_coercion_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + try: + large_string = "A" * (very_large + 1) + except Exception: + # We may not be able to create this Python string on 32bit. + pytest.skip("python failed to create huge string") + + class MyStr: + def __str__(self): + return large_string + + try: + # TypeError from NumPy, or OverflowError from 32bit Python. + with pytest.raises((TypeError, OverflowError)): + np.array([large_string], dtype=str_dt) + + # Same as above, but input has to be converted to a string. + with pytest.raises((TypeError, OverflowError)): + np.array([MyStr()], dtype=str_dt) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +def test_large_string_addition_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + + a = np.array(["A" * very_large], dtype=str_dt) + b = np.array("B", dtype=str_dt) + try: + with pytest.raises(TypeError): + np.add(a, b) + with pytest.raises(TypeError): + np.add(a, a) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + + +def test_large_string_cast(): + very_large = np.iinfo(np.intc).max // 4 + # Could be nice to test very large path, but it makes too many huge + # allocations right now (need non-legacy cast loops for this). 
+ # a = np.array([], dtype=np.dtype(("S", very_large))) + # assert a.astype("U").dtype.itemsize == very_large * 4 + + a = np.array([], dtype=np.dtype(("S", very_large + 1))) + # It is not perfect but OK if this raises a MemoryError during setup + # (this happens due clunky code and/or buffer setup.) + with pytest.raises((TypeError, MemoryError)): + a.astype("U") + + @pytest.mark.parametrize("dt", ["S", "U", "T"]) class TestMethods: diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 5e52039864b7..9495321e2c20 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -17,6 +17,7 @@ from typing import ( Protocol, NoReturn, ) +from typing_extensions import LiteralString from numpy import ufunc, _CastingKind, _OrderKACF from numpy.typing import NDArray @@ -32,9 +33,9 @@ _3Tuple = tuple[_T, _T, _T] _4Tuple = tuple[_T, _T, _T, _T] _NTypes = TypeVar("_NTypes", bound=int, covariant=True) -_IDType = TypeVar("_IDType", bound=Any, covariant=True) -_NameType = TypeVar("_NameType", bound=str, covariant=True) -_Signature = TypeVar("_Signature", bound=str, covariant=True) +_IDType = TypeVar("_IDType", covariant=True) +_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) +_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) class _SupportsArrayUFunc(Protocol): diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index 4763f41ad326..ac0c206f96cf 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ b/numpy/distutils/mingw32ccompiler.py @@ -24,7 +24,13 @@ import distutils.cygwinccompiler from distutils.unixccompiler import UnixCCompiler -from distutils.msvccompiler import get_build_version as get_build_msvc_version + +try: + from distutils.msvccompiler import get_build_version as get_build_msvc_version +except ImportError: + def get_build_msvc_version(): + return None + from distutils.errors import UnknownFileError from numpy.distutils.misc_util import (msvc_runtime_library, 
msvc_runtime_version, diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 68b56c5a640c..77cf6ee2b167 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -36,16 +36,15 @@ 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', 'islogicalfunction', 'islong_complex', 'islong_double', 'islong_doublefunction', 'islong_long', 'islong_longfunction', - 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired', - 'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring', - 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', - 'issubroutine', 'get_f2py_modulename', - 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char', - 'isunsigned_chararray', 'isunsigned_long_long', - 'isunsigned_long_longarray', 'isunsigned_short', - 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', - 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', - 'getuseblocks', 'process_f2cmap_dict' + 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isvariable', + 'isrequired', 'isroutine', 'isscalar', 'issigned_long_longarray', + 'isstring', 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', + 'issubroutine', 'get_f2py_modulename', 'issubroutine_wrap', 'isthreadsafe', + 'isunsigned', 'isunsigned_char', 'isunsigned_chararray', + 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', + 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', + 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', + 'process_f2cmap_dict', 'containscommon' ] @@ -518,6 +517,15 @@ def isprivate(var): return 'attrspec' in var and 'private' in var['attrspec'] +def isvariable(var): + # heuristic to find public/private declarations of filtered subroutines + if len(var) == 1 and 'attrspec' in var and \ + var['attrspec'][0] in ('public', 'private'): + is_var = False + else: + is_var = True + return is_var + def hasinitvalue(var): return '=' in var diff 
--git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index db53beaf616b..b1cd15320657 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -97,9 +97,6 @@ def dadd(line, s=doc): usenames = getuseblocks(pymod) for m in findf90modules(pymod): - contains_functions_or_subroutines = any( - item for item in m["body"] if item["block"] in ["function", "subroutine"] - ) sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ m['name']], [] sargsp = [] @@ -110,13 +107,19 @@ def dadd(line, s=doc): notvars.append(b['name']) for n in m['vars'].keys(): var = m['vars'][n] - if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): + + if (n not in notvars and isvariable(var)) and (not l_or(isintent_hide, isprivate)(var)): onlyvars.append(n) mfargs.append(n) outmess('\t\tConstructing F90 module support for "%s"...\n' % (m['name'])) - if m['name'] in usenames and not contains_functions_or_subroutines: - outmess(f"\t\t\tSkipping {m['name']} since it is in 'use'...\n") + if len(onlyvars) == 0 and len(notvars) == 1 and m['name'] in notvars: + outmess(f"\t\t\tSkipping {m['name']} since there are no public vars/func in this module...\n") + continue + + # gh-25186 + if m['name'] in usenames and containscommon(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") continue if onlyvars: outmess('\t\t Variables: %s\n' % (' '.join(onlyvars))) diff --git a/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 new file mode 100644 index 000000000000..07adce591f35 --- /dev/null +++ b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + private mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + private :: mod1_func1 + contains + + 
subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 new file mode 100644 index 000000000000..b7fb95b010a6 --- /dev/null +++ b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + PUBLIC :: mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + PUBLIC :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/numpy/f2py/tests/src/regression/datonly.f90 b/numpy/f2py/tests/src/regression/datonly.f90 new file mode 100644 index 000000000000..67fc4aca82e3 --- /dev/null +++ b/numpy/f2py/tests/src/regression/datonly.f90 @@ -0,0 +1,17 @@ +module datonly + implicit none + integer, parameter :: max_value = 100 + real, dimension(:), allocatable :: data_array +end module datonly + +module dat + implicit none + integer, parameter :: max_= 1009 +end module dat + +subroutine simple_subroutine(ain, aout) + use dat, only: max_ + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + max_ +end subroutine simple_subroutine diff --git a/numpy/f2py/tests/test_modules.py b/numpy/f2py/tests/test_modules.py index 009ae3365cd5..436e0c700017 100644 --- a/numpy/f2py/tests/test_modules.py +++ b/numpy/f2py/tests/test_modules.py @@ -5,6 +5,37 @@ from numpy.testing import IS_PYPY +@pytest.mark.slow +class TestModuleFilterPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_one_public_routine.f90" + ) + ] + # we filter the only public function mod2 + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + 
+@pytest.mark.slow +class TestModuleWithoutPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_no_public_entities.f90" + ) + ] + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + @pytest.mark.slow class TestModuleDocString(util.F2PyTest): sources = [util.getpath("tests", "src", "modules", "module_data_docstring.f90")] diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index e11ed1a0efa3..cbc81508ae42 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -24,6 +24,18 @@ def test_inout(self): assert np.allclose(x, [3, 1, 2]) +class TestDataOnlyMultiModule(util.F2PyTest): + # Check that modules without subroutines work + sources = [util.getpath("tests", "src", "regression", "datonly.f90")] + + @pytest.mark.slow + def test_mdat(self): + assert self.module.datonly.max_value == 100 + assert self.module.dat.max_ == 1009 + int_in = 5 + assert self.module.simple_subroutine(5) == 1014 + + class TestNegativeBounds(util.F2PyTest): # Check that negative bounds work correctly sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")] diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 9cad71a9cf5c..61e20c13ffc8 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -59,7 +59,6 @@ def check_language(lang, code_snippet=None): return runmeson.returncode == 0 finally: shutil.rmtree(tmpdir) - return False fortran77_code = ''' diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 2199797ad900..4edeecc075ad 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -1401,7 +1401,7 @@ def rfftn(a, s=None, axes=None, norm=None, out=None): a = asarray(a) s, axes = _cook_nd_args(a, s, axes) a = rfft(a, s[-1], axes[-1], norm, out=out) - for ii in range(len(axes)-1): + for ii in range(len(axes)-2, -1, -1): 
a = fft(a, s[ii], axes[ii], norm, out=out) return a diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index d1e4da2eb831..fc6592e4f4f6 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -307,6 +307,14 @@ def test_rfftn(self): np.fft.rfftn(x, norm="ortho"), atol=1e-6) assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.), np.fft.rfftn(x, norm="forward"), atol=1e-6) + # Regression test for gh-27159 + x = np.ones((2, 3)) + result = np.fft.rfftn(x, axes=(0, 0, 1), s=(10, 20, 40)) + assert result.shape == (10, 21) + expected = np.fft.fft(np.fft.fft(np.fft.rfft(x, axis=1, n=40), + axis=0, n=20), axis=0, n=10) + assert expected.shape == (10, 21) + assert_allclose(result, expected, atol=1e-6) def test_irfftn(self): x = random((30, 20, 10)) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index d90070e19e8c..840b501bacae 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4870,6 +4870,13 @@ def _quantile( # returns 2 instead of 1 because 0.4 is not binary representable. if quantiles.dtype.kind == "f": cdf = cdf.astype(quantiles.dtype) + # Weights must be non-negative, so we might have zero weights at the + # beginning leading to some leading zeros in cdf. The call to + # np.searchsorted for quantiles=0 will then pick the first element, + # but should pick the first one larger than zero. We + # therefore simply set 0 values in cdf to -1. + if np.any(cdf[0, ...] 
== 0): + cdf[cdf == 0] = -1 def find_cdf_1d(arr, cdf): indices = np.searchsorted(cdf, quantiles, side="left") diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 4096976871d7..c4690a4304bd 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -2,6 +2,7 @@ import builtins from collections.abc import Callable, Sequence from typing import ( Any, + TypeAlias, overload, TypeVar, Literal as L, @@ -16,6 +17,7 @@ from numpy import ( int_, intp, float64, + complex128, signedinteger, floating, complexfloating, @@ -29,6 +31,7 @@ from numpy._typing import ( ArrayLike, _ArrayLike, NDArray, + _SupportsArray, _SupportsArrayFunc, _ArrayLikeInt_co, _ArrayLikeFloat_co, @@ -164,44 +167,220 @@ def vander( increasing: bool = ..., ) -> NDArray[object_]: ... + +_Int_co: TypeAlias = np.integer[Any] | np.bool +_Float_co: TypeAlias = np.floating[Any] | _Int_co +_Number_co: TypeAlias = np.number[Any] | np.bool + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_SCT]] | Sequence[_SCT] +_ArrayLike2D: TypeAlias = ( + _SupportsArray[np.dtype[_SCT]] + | Sequence[_ArrayLike1D[_SCT]] +) + +_ArrayLike1DInt_co = ( + _SupportsArray[np.dtype[_Int_co]] + | Sequence[int | _Int_co] +) +_ArrayLike1DFloat_co = ( + _SupportsArray[np.dtype[_Float_co]] + | Sequence[float | int | _Float_co] +) +_ArrayLike2DFloat_co = ( + _SupportsArray[np.dtype[_Float_co]] + | Sequence[_ArrayLike1DFloat_co] +) +_ArrayLike1DNumber_co = ( + _SupportsArray[np.dtype[_Number_co]] + | Sequence[int | float | complex | _Number_co] +) + +_SCT_complex = TypeVar("_SCT_complex", bound=np.complexfloating[Any, Any]) +_SCT_inexact = TypeVar("_SCT_inexact", bound=np.inexact[Any]) +_SCT_number_co = TypeVar("_SCT_number_co", bound=_Number_co) + @overload -def histogram2d( # type: ignore[misc] - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, +def histogram2d( + x: _ArrayLike1D[_SCT_complex], + y: _ArrayLike1D[_SCT_complex | _Float_co], bins: int | Sequence[int] = ..., - range: 
None | _ArrayLikeFloat_co = ..., + range: None | _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[floating[Any]], - NDArray[floating[Any]], + NDArray[_SCT_complex], + NDArray[_SCT_complex], ]: ... @overload def histogram2d( - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, + x: _ArrayLike1D[_SCT_complex | _Float_co], + y: _ArrayLike1D[_SCT_complex], bins: int | Sequence[int] = ..., - range: None | _ArrayLikeFloat_co = ..., + range: None | _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[complexfloating[Any, Any]], - NDArray[complexfloating[Any, Any]], + NDArray[_SCT_complex], + NDArray[_SCT_complex], ]: ... -@overload # TODO: Sort out `bins` +@overload def histogram2d( - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - bins: Sequence[_ArrayLikeInt_co], - range: None | _ArrayLikeFloat_co = ..., + x: _ArrayLike1D[_SCT_inexact], + y: _ArrayLike1D[_SCT_inexact | _Int_co], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_inexact], + NDArray[_SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_SCT_inexact | _Int_co], + y: _ArrayLike1D[_SCT_inexact], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_inexact], + NDArray[_SCT_inexact], +]: ... 
+@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float | int], + y: _ArrayLike1DInt_co | Sequence[float | int], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[float64], + NDArray[float64], +]: ... +@overload +def histogram2d( + x: Sequence[complex | float | int], + y: Sequence[complex | float | int], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[complex128 | float64], + NDArray[complex128 | float64], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: _ArrayLike1D[_SCT_number_co] | Sequence[_ArrayLike1D[_SCT_number_co]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co], + NDArray[_SCT_number_co], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_SCT_inexact], + y: _ArrayLike1D[_SCT_inexact], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | _SCT_inexact], + NDArray[_SCT_number_co | _SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float | int], + y: _ArrayLike1DInt_co | Sequence[float | int], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | float64], + NDArray[_SCT_number_co | float64], +]: ... 
+@overload
+def histogram2d(
+    x: Sequence[complex | float | int],
+    y: Sequence[complex | float | int],
+    bins: Sequence[_ArrayLike1D[_SCT_number_co] | int],
+    range: None | _ArrayLike2DFloat_co = ...,
+    density: None | bool = ...,
+    weights: None | _ArrayLike1DFloat_co = ...,
+) -> tuple[
+    NDArray[float64],
+    NDArray[_SCT_number_co | complex128 | float64],
+    NDArray[_SCT_number_co | complex128 | float64],
+]: ...
+
+@overload
+def histogram2d(
+    x: _ArrayLike1DNumber_co,
+    y: _ArrayLike1DNumber_co,
+    bins: Sequence[Sequence[bool]],
+    range: None | _ArrayLike2DFloat_co = ...,
+    density: None | bool = ...,
+    weights: None | _ArrayLike1DFloat_co = ...,
+) -> tuple[
+    NDArray[float64],
+    NDArray[np.bool],
+    NDArray[np.bool],
+]: ...
+@overload
+def histogram2d(
+    x: _ArrayLike1DNumber_co,
+    y: _ArrayLike1DNumber_co,
+    bins: Sequence[Sequence[int | bool]],
+    range: None | _ArrayLike2DFloat_co = ...,
+    density: None | bool = ...,
+    weights: None | _ArrayLike1DFloat_co = ...,
+) -> tuple[
+    NDArray[float64],
+    NDArray[np.int_ | np.bool],
+    NDArray[np.int_ | np.bool],
+]: ...
+@overload
+def histogram2d(
+    x: _ArrayLike1DNumber_co,
+    y: _ArrayLike1DNumber_co,
+    bins: Sequence[Sequence[float | int | bool]],
+    range: None | _ArrayLike2DFloat_co = ...,
+    density: None | bool = ...,
+    weights: None | _ArrayLike1DFloat_co = ...,
+) -> tuple[
+    NDArray[float64],
+    NDArray[np.float64 | np.int_ | np.bool],
+    NDArray[np.float64 | np.int_ | np.bool],
+]: ...
+@overload
+def histogram2d(
+    x: _ArrayLike1DNumber_co,
+    y: _ArrayLike1DNumber_co,
+    bins: Sequence[Sequence[complex | float | int | bool]],
+    range: None | _ArrayLike2DFloat_co = ...,
     density: None | bool = ...,
-    weights: None | _ArrayLikeFloat_co = ...,
+    weights: None | _ArrayLike1DFloat_co = ...,
 ) -> tuple[
     NDArray[float64],
-    NDArray[Any],
-    NDArray[Any],
+    NDArray[np.complex128 | np.float64 | np.int_ | np.bool],
+    NDArray[np.complex128 | np.float64 | np.int_ | np.bool],
 ]: ...
# NOTE: we're assuming/demanding here the `mask_func` returns diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 3fc5a32d33a6..3f026a2ce79c 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -21,12 +21,12 @@ def fix(x, out=None): Round to nearest integer towards zero. Round an array of floats element-wise to nearest integer towards zero. - The rounded values are returned as floats. + The rounded values have the same data-type as the input. Parameters ---------- x : array_like - An array of floats to be rounded + An array to be rounded out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the input broadcasts to. If not provided or None, a @@ -35,12 +35,12 @@ def fix(x, out=None): Returns ------- out : ndarray of floats - A float array with the same dimensions as the input. - If second argument is not supplied then a float array is returned + An array with the same dimensions and data-type as the input. + If second argument is not supplied then a new array is returned with the rounded values. If a second argument is supplied the result is stored there. - The return value `out` is then a reference to that array. + The return value ``out`` is then a reference to that array. See Also -------- @@ -53,7 +53,7 @@ def fix(x, out=None): >>> np.fix(3.14) 3.0 >>> np.fix(3) - 3.0 + 3 >>> np.fix([2.1, 2.9, -2.1, -2.9]) array([ 2., 2., -2., -2.]) diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 8e14dfe4bcab..a90403459848 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -271,6 +271,8 @@ def dtype_to_descr(dtype): warnings.warn("metadata on a dtype is not saved to an npy/npz. " "Use another format (such as pickle) to store it.", UserWarning, stacklevel=2) + dtype = new_dtype + if dtype.names is not None: # This is a record array. The .descr is fine. 
XXX: parts of the # record array with an empty name, like padding bytes, still get diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 31352864b7e2..bb262e048cba 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -998,32 +998,30 @@ def test_header_growth_axis(): assert len(fp.getvalue()) == expected_header_length -@pytest.mark.parametrize('dt, fail', [ - (np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3', - metadata={'some': 'stuff'})]}), True), - (np.dtype(int, metadata={'some': 'stuff'}), False), - (np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False), +@pytest.mark.parametrize('dt', [ + np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3', + metadata={'some': 'stuff'})]}), + np.dtype(int, metadata={'some': 'stuff'}), + np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), # recursive: metadata on the field of a dtype - (np.dtype({'names': ['a', 'b'], 'formats': [ + np.dtype({'names': ['a', 'b'], 'formats': [ float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]}) - ]}), False) + ]}), ]) @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), reason="PyPy bug in error formatting") -def test_metadata_dtype(dt, fail): +def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) buf = BytesIO() with assert_warns(UserWarning): np.save(buf, arr) buf.seek(0) - if fail: - with assert_raises(ValueError): - np.load(buf) - else: - arr2 = np.load(buf) - # BUG: assert_array_equal does not check metadata - from numpy.lib._utils_impl import drop_metadata - assert_array_equal(arr, arr2) - assert drop_metadata(arr.dtype) is not arr.dtype - assert drop_metadata(arr2.dtype) is arr2.dtype + + # Loading should work (metadata was stripped): + arr2 = np.load(buf) + # BUG: assert_array_equal does not check metadata + from numpy.lib._utils_impl import drop_metadata + assert_array_equal(arr, arr2) + assert 
drop_metadata(arr.dtype) is not arr.dtype + assert drop_metadata(arr2.dtype) is arr2.dtype diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index bc3ce6409f1c..b51564619051 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -4010,6 +4010,17 @@ def test_quantile_with_weights_and_axis(self, method): ) assert_allclose(q, q_res) + @pytest.mark.parametrize("method", methods_supporting_weights) + def test_quantile_weights_min_max(self, method): + # Test weighted quantile at 0 and 1 with leading and trailing zero + # weights. + w = [0, 0, 1, 2, 3, 0] + y = np.arange(6) + y_min = np.quantile(y, 0, weights=w, method="inverted_cdf") + y_max = np.quantile(y, 1, weights=w, method="inverted_cdf") + assert y_min == y[2] # == 2 + assert y_max == y[4] # == 4 + def test_quantile_weights_raises_negative_weights(self): y = [1, 2] w = [-0.5, 1] diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index e2f72ac90c92..644912d941e3 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -43,7 +43,7 @@ def _compare_dtypes(dt1, dt2): assert dt_m.metadata is None assert dt_m['l1'].metadata is None assert dt_m['l1']['l2'].metadata is None - + # alignment dt = np.dtype([('x', '=0.13.1 Cython>=3.0.6 ninja -spin==0.8 +spin==0.13 build diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index e134b0dae82e..7b67bcb846a7 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ -spin +spin==0.13 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.27.44.3 +scipy-openblas32==0.3.27.44.6 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index f688bfb6eb3a..3f6c2c1b77ae 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ -spin +spin==0.13 # Keep this in sync with ci32_requirements.txt 
-scipy-openblas32==0.3.27.44.3 -scipy-openblas64==0.3.27.44.3 +scipy-openblas32==0.3.27.44.6 +scipy-openblas64==0.3.27.44.6 diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index 79de7a9f0802..74ef448182af 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -16,3 +16,6 @@ pickleshare # needed to build release notes towncrier toml + +# for doctests, also needs pytz which is in test_requirements +scipy-doctest diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index ec7827b7e50e..5c19c3a914ec 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,8 +1,7 @@ Cython wheel==0.38.1 -#setuptools==65.5.1 ; python_version < '3.12' -#setuptools ; python_version >= '3.12' -setuptools +setuptools==65.5.1 ; python_version < '3.12' +setuptools ; python_version >= '3.12' hypothesis==6.104.1 pytest==7.4.0 pytz==2023.3.post1 diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index f63274e5af3f..1516725593df 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -65,7 +65,7 @@ macosx_arm64_task: matrix: - env: - CIBW_BUILD: cp310-* cp311 + CIBW_BUILD: cp310-* cp311-* - env: CIBW_BUILD: cp312-* cp313-* - env: @@ -78,7 +78,7 @@ macosx_arm64_task: build_script: | brew install micromamba gfortran - micromamba shell init -s bash -p ~/micromamba + micromamba shell init -s bash --root-prefix ~/micromamba source ~/.bash_profile micromamba create -n numpydev diff --git a/tools/download-wheels.py b/tools/download-wheels.py index e5753eb2148c..54dbdf1200a8 100644 --- a/tools/download-wheels.py +++ b/tools/download-wheels.py @@ -56,15 +56,20 @@ def get_wheel_names(version): The release version. For instance, "1.18.3". 
""" + ret = [] http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED") tmpl = re.compile(rf"^.*{PREFIX}-{version}{SUFFIX}") - index_url = f"{STAGING_URL}/files" - index_html = http.request("GET", index_url) - soup = BeautifulSoup(index_html.data, "html.parser") - return soup.find_all(string=tmpl) + # TODO: generalize this by searching for `showing 1 of N` and + # looping over N pages, starting from 1 + for i in range(1, 3): + index_url = f"{STAGING_URL}/files?page={i}" + index_html = http.request("GET", index_url) + soup = BeautifulSoup(index_html.data, "html.parser") + ret += soup.find_all(string=tmpl) + return ret -def download_wheels(version, wheelhouse): +def download_wheels(version, wheelhouse, test=False): """Download release wheels. The release wheels for the given NumPy version are downloaded @@ -86,8 +91,15 @@ def download_wheels(version, wheelhouse): wheel_path = os.path.join(wheelhouse, wheel_name) with open(wheel_path, "wb") as f: with http.request("GET", wheel_url, preload_content=False,) as r: - print(f"{i + 1:<4}{wheel_name}") - shutil.copyfileobj(r, f) + info = r.info() + length = int(info.get('Content-Length', '0')) + if length == 0: + length = 'unknown size' + else: + length = f"{(length / 1024 / 1024):.2f}MB" + print(f"{i + 1:<4}{wheel_name} {length}") + if not test: + shutil.copyfileobj(r, f) print(f"\nTotal files downloaded: {len(wheel_names)}") @@ -101,6 +113,10 @@ def download_wheels(version, wheelhouse): default=os.path.join(os.getcwd(), "release", "installers"), help="Directory in which to store downloaded wheels\n" "[defaults to /release/installers]") + parser.add_argument( + "-t", "--test", + action = 'store_true', + help="only list available wheels, do not download") args = parser.parse_args() @@ -110,4 +126,4 @@ def download_wheels(version, wheelhouse): f"{wheelhouse} wheelhouse directory is not present." 
" Perhaps you need to use the '-w' flag to specify one.") - download_wheels(args.version, wheelhouse) + download_wheels(args.version, wheelhouse, test=args.test) diff --git a/tools/lint_diff.ini b/tools/lint_diff.ini index dbebe483b4ab..810e265d4dec 100644 --- a/tools/lint_diff.ini +++ b/tools/lint_diff.ini @@ -1,5 +1,5 @@ [pycodestyle] -max_line_length = 79 +max_line_length = 88 statistics = True ignore = E121,E122,E123,E125,E126,E127,E128,E226,E241,E251,E265,E266,E302,E402,E704,E712,E721,E731,E741,W291,W293,W391,W503,W504 exclude = numpy/__config__.py,numpy/typing/tests/data,.spin/cmds.py diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i index c8c26cbcd3d6..747446648c8b 100644 --- a/tools/swig/numpy.i +++ b/tools/swig/numpy.i @@ -1989,7 +1989,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE ARGOUT_ARRAY1[ANY]) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) @@ -2018,7 +2018,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) @@ -2047,7 +2047,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) @@ -2065,7 +2065,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) @@ -2083,7 +2083,7 @@ void 
free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY]) @@ -2101,7 +2101,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY]) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /*****************************/ @@ -2126,7 +2126,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) @@ -2147,7 +2147,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) @@ -2169,7 +2169,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) @@ -2191,7 +2191,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) @@ -2213,7 +2213,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = 
SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) @@ -2235,7 +2235,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2259,7 +2259,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, @@ -2283,7 +2283,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2307,7 +2307,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, @@ -2331,7 +2331,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2356,7 +2356,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, @@ -2381,7 
+2381,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2406,7 +2406,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, @@ -2431,7 +2431,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /*************************************/ @@ -2465,7 +2465,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEWM_ARRAY1) @@ -2495,7 +2495,7 @@ PyObject* cap = PyCapsule_New((void*)(*$2), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) @@ -2526,7 +2526,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_ARRAY2) @@ -2557,7 +2557,7 @@ PyObject* cap = PyCapsule_New((void*)(*$3), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif 
- $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) @@ -2588,7 +2588,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_FARRAY2) @@ -2619,7 +2619,7 @@ PyObject* cap = PyCapsule_New((void*)(*$3), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2652,7 +2652,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, @@ -2685,7 +2685,7 @@ PyObject* cap = PyCapsule_New((void*)(*$4), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2718,7 +2718,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, @@ -2751,7 +2751,7 @@ PyObject* cap = PyCapsule_New((void*)(*$4), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = 
SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2785,7 +2785,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, @@ -2819,7 +2819,7 @@ PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2853,7 +2853,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, @@ -2887,7 +2887,7 @@ PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /**************************************/ diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt index a5b5ae5c22e6..021b4b0289e7 100644 --- a/tools/wheels/LICENSE_linux.txt +++ b/tools/wheels/LICENSE_linux.txt @@ -5,7 +5,7 @@ This binary distribution of NumPy also bundles the following software: Name: OpenBLAS -Files: numpy.libs/libopenblas*.so +Files: numpy.libs/libscipy_openblas*.so Description: bundled as a dynamically linked library Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause @@ -41,7 +41,7 @@ License: BSD-3-Clause Name: LAPACK -Files: numpy.libs/libopenblas*.so +Files: numpy.libs/libscipy_openblas*.so 
Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause-Attribution diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt index 1ebd5663d02c..81889131cfa7 100644 --- a/tools/wheels/LICENSE_osx.txt +++ b/tools/wheels/LICENSE_osx.txt @@ -4,7 +4,7 @@ This binary distribution of NumPy also bundles the following software: Name: OpenBLAS -Files: numpy/.dylibs/libopenblas*.so +Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled as a dynamically linked library Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause @@ -40,7 +40,7 @@ License: BSD-3-Clause Name: LAPACK -Files: numpy/.dylibs/libopenblas*.so +Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause-Attribution diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt index f8eaaf1cae25..a2ccce66fbe5 100644 --- a/tools/wheels/LICENSE_win32.txt +++ b/tools/wheels/LICENSE_win32.txt @@ -5,7 +5,7 @@ This binary distribution of NumPy also bundles the following software: Name: OpenBLAS -Files: numpy.libs\libopenblas*.dll +Files: numpy.libs\libscipy_openblas*.dll Description: bundled as a dynamically linked library Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause @@ -41,7 +41,7 @@ License: BSD-3-Clause Name: LAPACK -Files: numpy.libs\libopenblas*.dll +Files: numpy.libs\libscipy_openblas*.dll Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause-Attribution @@ -96,7 +96,7 @@ License: BSD-3-Clause-Attribution Name: GCC runtime library -Files: numpy.libs\libgfortran*.dll +Files: numpy.libs\libscipy_openblas*.dll Description: statically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran License: GPL-3.0-with-GCC-exception @@ -879,24 +879,3 @@ the library. 
If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . -Name: libquadmath -Files: numpy.libs\libopenb*.dll -Description: statically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath -License: LGPL-2.1-or-later - - GCC Quad-Precision Math Library - Copyright (C) 2010-2019 Free Software Foundation, Inc. - Written by Francois-Xavier Coudert - - This file is part of the libquadmath library. - Libquadmath is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - Libquadmath is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html diff --git a/vendored-meson/meson b/vendored-meson/meson index 6f88e485f27b..0d93515fb826 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit 6f88e485f27bb0a41d31638f0c55055362e0b1ac +Subproject commit 0d93515fb826440d19707eee47fd92655fe2f166