diff --git a/.flake8 b/.flake8 index 80124c9e8..0dede3f1d 100644 --- a/.flake8 +++ b/.flake8 @@ -12,6 +12,5 @@ extend-ignore = per-file-ignores = scripts/create_pickle.py:F403,F405, graphblas/tests/*.py:T201, - graphblas/core/agg.py:F401,F403, graphblas/core/ss/matrix.py:SIM113, graphblas/**/__init__.py:F401, diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b18fd2935..5ace4600a 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,6 +1,6 @@ version: 2 updates: - - package-ecosystem: 'github-actions' - directory: '/' + - package-ecosystem: "github-actions" + directory: "/" schedule: - interval: 'weekly' + interval: "weekly" diff --git a/.github/workflows/debug.yml b/.github/workflows/debug.yml index c9dc231fe..6c2b202b1 100644 --- a/.github/workflows/debug.yml +++ b/.github/workflows/debug.yml @@ -5,7 +5,7 @@ on: workflow_dispatch: inputs: debug_enabled: - description: 'Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)' + description: "Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)" required: false default: false @@ -15,7 +15,7 @@ jobs: strategy: fail-fast: false matrix: - pyver: [3.9] + pyver: ["3.10"] testopts: - "--blocking" # - "--non-blocking --record --runslow" @@ -29,6 +29,7 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false - name: Setup conda env run: | source "$CONDA/etc/profile.d/conda.sh" diff --git a/.github/workflows/imports.yml b/.github/workflows/imports.yml index 0116f615d..e24d0d4db 100644 --- a/.github/workflows/imports.yml +++ b/.github/workflows/imports.yml @@ -14,7 +14,7 @@ jobs: pyver: ${{ steps.pyver.outputs.selected }} steps: - name: RNG for os - uses: ddradar/choose-random-action@v2.0.2 + uses: ddradar/choose-random-action@v3.0.0 id: os with: contents: | @@ -26,14 +26,14 @@ jobs: 1 1 - name: RNG for Python version - uses: ddradar/choose-random-action@v2.0.2 + uses: ddradar/choose-random-action@v3.0.0 id: pyver with: contents: | - 3.9 3.10 3.11 3.12 + 3.13 weights: | 1 1 @@ -45,10 +45,13 @@ jobs: # runs-on: ${{ matrix.os }} # strategy: # matrix: - # python-version: ["3.9", "3.10", "3.11", "3.12"] + # python-version: ["3.10", "3.11", "3.12", "3.13"] # os: ["ubuntu-latest", "macos-latest", "windows-latest"] steps: - uses: actions/checkout@v4 + with: + fetch-depth: 0 + persist-credentials: false - uses: actions/setup-python@v5 with: python-version: ${{ needs.rngs.outputs.pyver }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 97bb856f6..655a576e5 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -17,7 +17,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + with: + fetch-depth: 0 + persist-credentials: false - uses: actions/setup-python@v5 with: python-version: "3.10" - - uses: pre-commit/action@v3.0.0 + - uses: pre-commit/action@v3.0.1 diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index 406f7c269..32926c5c8 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -3,7 +3,7 @@ name: Publish to PyPI on: push: tags: - - '20*' + - "20*" jobs: build_and_deploy: @@ -17,17 +17,18 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.9" + python-version: "3.10" - name: Install build dependencies run: | python -m pip install --upgrade pip python -m
pip install build twine - name: Build wheel and sdist run: python -m build --sdist --wheel - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: releases path: dist @@ -35,7 +36,7 @@ jobs: - name: Check with twine run: python -m twine check --strict dist/* - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@v1.8.11 + uses: pypa/gh-action-pypi-publish@v1.12.4 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 4b9035cc3..af7525928 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -50,7 +50,7 @@ jobs: backend: ${{ steps.backend.outputs.selected }} steps: - name: RNG for mapnumpy - uses: ddradar/choose-random-action@v2.0.2 + uses: ddradar/choose-random-action@v3.0.0 id: mapnumpy with: contents: | @@ -64,7 +64,7 @@ jobs: 1 1 - name: RNG for backend - uses: ddradar/choose-random-action@v2.0.2 + uses: ddradar/choose-random-action@v3.0.0 id: backend with: contents: | @@ -84,14 +84,15 @@ jobs: run: shell: bash -l {0} strategy: - # To "stress test" in CI, set `fail-fast` to `false` and perhaps add more items to `matrix.slowtask` - fail-fast: true + # To "stress test" in CI, set `fail-fast` to `false` and use `repeat` in matrix below + fail-fast: false # The build matrix is [os]x[slowtask] and then randomly chooses [pyver] and [sourcetype]. # This should ensure we'll have full code coverage (i.e., no chance of getting unlucky), # since we need to run all slow tests on Windows and non-Windows OSes. matrix: os: ["ubuntu-latest", "macos-latest", "windows-latest"] slowtask: ["pytest_normal", "pytest_bizarro", "notebooks"] + # repeat: [1, 2, 3] # For stress testing env: # Wheels on OS X come with an OpenMP that conflicts with OpenMP from conda-forge. # Setting this is a workaround.
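The hunks below rework how this job picks dependency versions. The recurring pattern is a bash variable captured from a Python one-liner that draws a random conda version pin, where the empty string means "no pin". A minimal standalone sketch of the idiom (the package names and pin lists here are illustrative, not the workflow's exact lists):

```python
# Sketch of the version-randomization idiom used throughout this workflow.
# Each CI run picks one pin per package at random; "" lets the solver
# choose the newest compatible release.
import random

npver = random.choice(["=1.26", "=2.0", "=2.1", "=2.2", ""])  # numpy pin
spver = random.choice(["=1.13", "=1.14", "=1.15", ""])        # scipy pin
print(f"conda install numpy{npver} scipy{spver}")
```

Over many runs this samples the compatibility space far more cheaply than enumerating every combination as explicit matrix entries.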
@@ -101,25 +102,26 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false - name: RNG for Python version - uses: ddradar/choose-random-action@v2.0.2 + uses: ddradar/choose-random-action@v3.0.0 id: pyver with: - # We should support major Python versions for at least 36-42 months + # We should support major Python versions for at least 36 months as per SPEC 0 # We may be able to support pypy if anybody asks for it # 3.9.16 0_73_pypy contents: | - 3.9 3.10 3.11 3.12 + 3.13 weights: | 1 1 1 1 - name: RNG for source of python-suitesparse-graphblas - uses: ddradar/choose-random-action@v2.0.2 + uses: ddradar/choose-random-action@v3.0.0 id: sourcetype with: # Weights must be natural numbers, so set weights to very large to skip one @@ -134,28 +136,14 @@ jobs: 1 1 1 - - name: Setup mamba - uses: conda-incubator/setup-miniconda@v3 - id: setup_mamba - continue-on-error: true - with: - miniforge-variant: Mambaforge - miniforge-version: latest - use-mamba: true - python-version: ${{ steps.pyver.outputs.selected }} - channels: conda-forge,${{ contains(steps.pyver.outputs.selected, 'pypy') && 'defaults' || 'nodefaults' }} - channel-priority: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'flexible' || 'strict' }} - activate-environment: graphblas - auto-activate-base: false - name: Setup conda uses: conda-incubator/setup-miniconda@v3 id: setup_conda - if: steps.setup_mamba.outcome == 'failure' - continue-on-error: false with: auto-update-conda: true python-version: ${{ steps.pyver.outputs.selected }} - channels: conda-forge,${{ contains(steps.pyver.outputs.selected, 'pypy') && 'defaults' || 'nodefaults' }} + channels: conda-forge${{ contains(steps.pyver.outputs.selected, 'pypy') && ',defaults' || '' }} + conda-remove-defaults: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'false' || 'true' }} channel-priority: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'flexible' || 'strict' }} activate-environment: graphblas auto-activate-base: false @@ -166,91 +154,159 @@ jobs: # # First let's randomly get versions of dependencies to install. # Consider removing old versions when they become problematic or very old (>=2 years). - nxver=$(python -c 'import random ; print(random.choice(["=2.7", "=2.8", "=3.0", "=3.1", "=3.2", ""]))') - yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') - sparsever=$(python -c 'import random ; print(random.choice(["=0.13", "=0.14", ""]))') - fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') + # Randomly choosing versions of dependencies based on Python version works surprisingly well... 
- if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') - elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then + nxver=$(python -c 'import random ; print(random.choice(["=2.8", "=3.0", "=3.1", "=3.2", "=3.3", "=3.4", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.24", "=1.25", "=1.26", "=2.0", "=2.1", "=2.2", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", "=1.13", "=1.14", "=1.15", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", "=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", "=2.6", "=2.7", ""]))') + fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') + yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') + sparsever=$(python -c 'import random ; print(random.choice(["=0.14", "=0.15", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", "=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') - else # Python 3.12 - npver=$(python -c 'import random ; print(random.choice(["=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.11", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=2.1", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=2.4", "=2.5", ""]))') + nxver=$(python -c 'import random ; print(random.choice(["=2.8", "=3.0", "=3.1", "=3.2", "=3.3", "=3.4", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.24", "=1.25", "=1.26", "=2.0", "=2.1", "=2.2", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", "=1.13", "=1.14", "=1.15", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", "=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", "=2.6", "=2.7", ""]))') + fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') + 
yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') + sparsever=$(python -c 'import random ; print(random.choice(["=0.14", "=0.15", ""]))') + elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]]; then + nxver=$(python -c 'import random ; print(random.choice(["=3.2", "=3.3", "=3.4", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.26", "=2.0", "=2.1", "=2.2", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.11", "=1.12", "=1.13", "=1.14", "=1.15", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=2.1", "=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=2.4", "=2.5", "=2.6", "=2.7", ""]))') + fmmver=$(python -c 'import random ; print(random.choice(["=1.7", ""]))') + yamlver=$(python -c 'import random ; print(random.choice(["=6.0", ""]))') + sparsever=$(python -c 'import random ; print(random.choice(["=0.14", "=0.15", ""]))') + else # Python 3.13 + nxver=$(python -c 'import random ; print(random.choice(["=3.4", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=2.1", "=2.2", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.14", "=1.15", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=2.7", ""]))') + fmmver=NA # Not yet supported + yamlver=$(python -c 'import random ; print(random.choice(["=6.0", ""]))') + sparsever=NA # Not yet supported fi + # But there may be edge cases of incompatibility we need to handle (more handled below) - if [[ ${pdver} == "=2.1" && ${npver} == "=1.21" ]]; then - pdver="=2.0" - fi - if [[ ${{ steps.sourcetype.outputs.selected }} == "source" || ${{ steps.sourcetype.outputs.selected }} == "upstream" ]]; then + if [[ ${{ steps.sourcetype.outputs.selected }} == "source" ]]; then # TODO: there are currently issues with some numpy versions when - # installing python-suitesparse-grphblas from source or upstream. + # installing python-suitesparse-graphblas from source. npver="" spver="" pdver="" fi + # We can have a tight coupling with python-suitesparse-graphblas. # That is, we don't need to support versions of it that are two years old. # But, it's still useful for us to test with different versions!
psg="" if [[ ${{ steps.sourcetype.outputs.selected}} == "upstream" ]] ; then + # Upstream needs to build with numpy 2 psgver="" + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]]; then + npver=$(python -c 'import random ; print(random.choice(["=2.1", "=2.2", ""]))') + else + npver=$(python -c 'import random ; print(random.choice(["=2.0", "=2.1", "=2.2", ""]))') + fi + elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]] ; then + if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", "=9.4.5.0", ""]))') + psg=python-suitesparse-graphblas${psgver} + else + psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", "==9.4.5.0", ""]))') + fi elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]] ; then if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=8.2.0.1", "=8.2.1.0", ""]))') + if [[ $npver == =1.* ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["=8.2.0.1", "=8.2.1.0"]))') + else + psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", "=9.4.5.0", ""]))') + fi psg=python-suitesparse-graphblas${psgver} else - psgver=$(python -c 'import random ; print(random.choice(["==8.2.0.1", "==8.2.1.0", ""]))') + if [[ $npver == =1.* ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["==8.2.0.1", "==8.2.1.0"]))') + else + psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", "==9.4.5.0", ""]))') + fi fi + # python-suitsparse-graphblas support is the same for Python 3.10 and 3.11 elif [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", "=8.2.0.1", "=8.2.1.0", ""]))') + if [[ $npver == =1.* ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", "=8.2.0.1", "=8.2.1.0"]))') + else + psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", "=9.4.5.0", ""]))') + fi psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0", ""]))') + if [[ $npver == =1.* ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0"]))') + else + psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", "==9.4.5.0", ""]))') + fi elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions - psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0", ""]))') + if [[ $npver == =1.* ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0"]))') + else + psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", "==9.4.5.0", ""]))') + fi fi + + # Numba is tightly coupled to numpy versions if [[ ${npver} == "=1.26" ]] ; then - numbaver="" - if [[ ${spver} == "=1.8" || ${spver} == "=1.9" ]] ; then + numbaver=$(python -c 'import random ; 
print(random.choice(["=0.58", "=0.59", "=0.60", "=0.61", ""]))') + if [[ ${spver} == "=1.9" ]] ; then spver=$(python -c 'import random ; print(random.choice(["=1.10", "=1.11", ""]))') fi elif [[ ${npver} == "=1.25" ]] ; then - numbaver="" - if [[ ${spver} == "=1.8" ]] ; then - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') - fi + numbaver=$(python -c 'import random ; print(random.choice(["=0.58", "=0.59", "=0.60", "=0.61", ""]))') elif [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then - numbaver=$(python -c 'import random ; print(random.choice(["=0.57", "=0.58", ""]))') - elif [[ ${npver} == "=1.21" ]] ; then - numbaver=$(python -c 'import random ; print(random.choice(["=0.55", "=0.56", "=0.57", ""]))') + numbaver=$(python -c 'import random ; print(random.choice(["=0.57", "=0.58", "=0.59", "=0.60", "=0.61", ""]))') else - numbaver=$(python -c 'import random ; print(random.choice(["=0.56", "=0.57", "=0.58", ""]))') + numbaver="" + fi + # Only numba >=0.59 support Python 3.12 + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]] ; then + numbaver=$(python -c 'import random ; print(random.choice(["=0.59", "=0.60", "=0.61", ""]))') + fi + + # Handle NumPy 2 + if [[ $npver != =1.* ]] ; then + # Only pandas >=2.2.2 supports NumPy 2 + pdver=$(python -c 'import random ; print(random.choice(["=2.2", ""]))') + + # Only awkward >=2.6.3 supports NumPy 2 + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]] ; then + akver=$(python -c 'import random ; print(random.choice(["=2.7", ""]))') + else + akver=$(python -c 'import random ; print(random.choice(["=2.6", "=2.7", ""]))') + fi + + # Only scipy >=1.13 supports NumPy 2 + if [[ $spver == "=1.9" || $spver == "=1.10" || $spver == "=1.11" || $spver == "=1.12" ]] ; then + spver="=1.13" + fi fi + fmm=fast_matrix_market${fmmver} awkward=awkward${akver} + + # Don't install numba and sparse for some versions if [[ ${{ contains(steps.pyver.outputs.selected, 'pypy') || - startsWith(steps.pyver.outputs.selected, '3.12') }} == true || + startsWith(steps.pyver.outputs.selected, '3.14') }} == true || ( ${{ matrix.slowtask != 'notebooks'}} == true && ( ( ${{ matrix.os == 'windows-latest' }} == true && $(python -c 'import random ; print(random.random() < .2)') == True ) || ( ${{ matrix.os == 'windows-latest' }} == false && $(python -c 'import random ; print(random.random() < .4)') == True ))) ]] then - # Some packages aren't available for pypy or Python 3.12; randomly otherwise (if not running notebooks) + # Some packages aren't available for pypy or Python 3.13; randomly otherwise (if not running notebooks) echo "skipping numba" numba="" numbaver=NA @@ -267,7 +323,7 @@ jobs: pdver="" yamlver="" fi - elif [[ ${npver} == "=1.25" || ${npver} == "=1.26" ]] ; then + elif [[ ${npver} == =2.* ]] ; then # Don't install numba for unsupported versions of numpy numba="" numbaver=NA @@ -277,18 +333,34 @@ jobs: numba=numba${numbaver} sparse=sparse${sparsever} fi + + # sparse does not yet support Python 3.13 + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]] ; then + sparse="" + sparsever=NA + fi + # fast_matrix_market does not yet support Python 3.13 or osx-arm64 + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true || + ${{ matrix.os == 'macos-latest' }} == true ]] + then + fmm="" + fmmver=NA + fi + echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} 
sparse${sparsever} psg${psgver}" set -x # echo on - $(command -v mamba || command -v conda) install packaging pytest coverage pytest-randomly cffi donfig tomli c-compiler make \ + $(command -v mamba || command -v conda) install -c nodefaults \ + packaging pytest coverage pytest-randomly cffi donfig tomli c-compiler make \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ ${{ matrix.slowtask == 'notebooks' && 'matplotlib nbconvert jupyter "ipython>=7" drawsvg' || '' }} \ ${{ steps.sourcetype.outputs.selected == 'upstream' && 'cython' || '' }} \ - ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4"' || '' }} \ + ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4,<9.5"' || '' }} \ ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} \ - ${{ matrix.os == 'windows-latest' && 'cmake' || 'm4' }} + ${{ matrix.os == 'windows-latest' && 'cmake' || 'm4' }} \ + # ${{ matrix.os != 'windows-latest' && 'pytest-forked' || '' }} # to investigate crashes - name: Build extension module run: | if [[ ${{ steps.sourcetype.outputs.selected }} == "wheel" ]]; then @@ -314,7 +386,11 @@ jobs: # Don't use our conftest.py ; allow `test_print_jit_config` to fail if it doesn't exist (cd .. pytest --pyargs suitesparse_graphblas -s -k test_print_jit_config || true - pytest -v --pyargs suitesparse_graphblas) + pytest -v --pyargs suitesparse_graphblas || true) + - name: Print platform and sysconfig variables + run: | + python -c "import platform ; print(platform.uname())" + python -c "import pprint, sysconfig ; pprint.pprint(sysconfig.get_config_vars())" - name: Unit tests run: | A=${{ needs.rngs.outputs.mapnumpy == 'A' || '' }} ; B=${{ needs.rngs.outputs.mapnumpy == 'B' || '' }} @@ -343,6 +419,8 @@ jobs: if [[ $H && $bizarro ]] ; then if [[ $macos ]] ; then echo " $suitesparse" ; elif [[ $windows ]] ; then echo " $vanilla" ; fi ; fi) echo ${args} set -x # echo on + # pytest ${{ matrix.os != 'windows-latest' && '--forked' || '' }} \ # to investigate crashes + # --color=yes --randomly -v -s ${args} \ coverage run -m pytest --color=yes --randomly -v ${args} \ ${{ matrix.slowtask == 'pytest_normal' && '--runslow' || '' }} - name: Unit tests (bizarro scalars) @@ -379,6 +457,8 @@ jobs: if [[ $H && $bizarro ]] ; then if [[ $macos ]] ; then echo " $vanilla" ; elif [[ $windows ]] ; then echo " $suitesparse" ; fi ; fi) echo ${args} set -x # echo on + # pytest ${{ matrix.os != 'windows-latest' && '--forked' || '' }} \ # to investigate crashes + # --color=yes --randomly -v -s ${args} \ coverage run -a -m pytest --color=yes --randomly -v ${args} \ ${{ matrix.slowtask == 'pytest_bizarro' && '--runslow' || '' }} git checkout . # Undo changes to scalar default @@ -392,7 +472,7 @@ jobs: echo "from graphblas.agg import count" > script.py coverage run -a script.py echo "from graphblas import agg" > script.py # Does this still cover? 
- echo "from graphblas.core import agg" >> script.py + echo "from graphblas.core.operator import agg" >> script.py coverage run -a script.py # Tests lazy loading of lib, ffi, and NULL in gb.core echo "from graphblas.core import base" > script.py @@ -424,7 +504,7 @@ jobs: coverage xml coverage report --show-missing - name: codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v5 - name: Notebooks Execution check if: matrix.slowtask == 'notebooks' run: | diff --git a/.github/zizmor.yml b/.github/zizmor.yml new file mode 100644 index 000000000..61f32c2e0 --- /dev/null +++ b/.github/zizmor.yml @@ -0,0 +1,16 @@ +rules: + use-trusted-publishing: + # TODO: we should update to use trusted publishing + ignore: + - publish_pypi.yml + excessive-permissions: + # It is probably good practice to use narrow permissions + ignore: + - debug.yml + - imports.yml + - publish_pypi.yml + - test_and_build.yml + template-injection: + # We use templates pretty heavily + ignore: + - test_and_build.yml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b1d264509..43e28b8fe 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,12 +11,12 @@ ci: autoupdate_commit_msg: "chore: update pre-commit hooks" autofix_commit_msg: "style: pre-commit fixes" skip: [pylint, no-commit-to-branch] -fail_fast: true +fail_fast: false default_language_version: - python: python3 + python: python3 repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v5.0.0 hooks: - id: check-added-large-files - id: check-case-conflict @@ -25,6 +25,10 @@ repos: - id: check-ast - id: check-toml - id: check-yaml + - id: check-executables-have-shebangs + - id: check-vcs-permalinks + - id: destroyed-symlinks + - id: detect-private-key - id: debug-statements - id: end-of-file-fixer exclude_types: [svg] @@ -33,72 +37,68 @@ repos: - id: name-tests-test args: ["--pytest-test-first"] - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.15 + rev: v0.23 hooks: - id: validate-pyproject name: Validate pyproject.toml # I don't yet trust ruff to do what autoflake does - repo: https://github.com/PyCQA/autoflake - rev: v2.2.1 + rev: v2.3.1 hooks: - id: autoflake args: [--in-place] # We can probably remove `isort` if we come to trust `ruff --fix`, # but we'll need to figure out the configuration to do this in `ruff` - repo: https://github.com/pycqa/isort - rev: 5.13.1 + rev: 6.0.0 hooks: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.15.0 + rev: v3.19.1 hooks: - id: pyupgrade - args: [--py39-plus] + args: [--py310-plus] - repo: https://github.com/MarcoGorelli/auto-walrus - rev: v0.2.2 + rev: 0.3.4 hooks: - id: auto-walrus args: [--line-length, "100"] - repo: https://github.com/psf/black - rev: 23.12.0 + rev: 25.1.0 hooks: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.7 + rev: v0.9.6 hooks: - id: ruff args: [--fix-only, --show-fixes] # Let's keep `flake8` even though `ruff` does much of the same. # `flake8-bugbear` and `flake8-simplify` have caught things missed by `ruff`. 
- repo: https://github.com/PyCQA/flake8 - rev: 6.1.0 + rev: 7.1.2 hooks: - id: flake8 - additional_dependencies: &flake8_dependencies - # These versions need updated manually - - flake8==6.1.0 - - flake8-bugbear==23.12.2 - - flake8-simplify==0.21.0 - - repo: https://github.com/asottile/yesqa - rev: v1.5.0 - hooks: - - id: yesqa - additional_dependencies: *flake8_dependencies + args: ["--config=.flake8"] + additional_dependencies: + &flake8_dependencies # These versions need updated manually + - flake8==7.1.2 + - flake8-bugbear==24.12.12 + - flake8-simplify==0.21.0 - repo: https://github.com/codespell-project/codespell - rev: v2.2.6 + rev: v2.4.1 hooks: - id: codespell types_or: [python, rst, markdown] additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.7 + rev: v0.9.6 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint - rev: v0.9.1 + rev: v1.0.0 hooks: - id: sphinx-lint args: [--enable, all, "--disable=line-too-long,leaked-markup"] @@ -110,9 +110,39 @@ repos: - id: pyroma args: [-n, "10", .] - repo: https://github.com/shellcheck-py/shellcheck-py - rev: "v0.9.0.6" + rev: "v0.10.0.1" hooks: - - id: shellcheck + - id: shellcheck + - repo: https://github.com/rbubley/mirrors-prettier + rev: v3.5.1 + hooks: + - id: prettier + - repo: https://github.com/ComPWA/taplo-pre-commit + rev: v0.9.3 + hooks: + - id: taplo-format + - repo: https://github.com/rhysd/actionlint + rev: v1.7.7 + hooks: + - id: actionlint + - repo: https://github.com/python-jsonschema/check-jsonschema + rev: 0.31.1 + hooks: + - id: check-dependabot + - id: check-github-workflows + - id: check-readthedocs + - repo: https://github.com/adrienverge/yamllint + rev: v1.35.1 + hooks: + - id: yamllint + - repo: https://github.com/woodruffw/zizmor-pre-commit + rev: v1.3.1 + hooks: + - id: zizmor + - repo: meta + hooks: + - id: check-hooks-apply + - id: check-useless-excludes - repo: local hooks: # Add `--hook-stage manual` to pre-commit command to run (very slow) @@ -126,9 +156,9 @@ repos: args: [graphblas/] pass_filenames: false - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v5.0.0 hooks: - - id: no-commit-to-branch # no commit directly to main + - id: no-commit-to-branch # no commit directly to main # # Maybe: # @@ -145,8 +175,10 @@ repos: # additional_dependencies: [tomli] # # - repo: https://github.com/PyCQA/bandit -# rev: 1.7.4 +# rev: 1.8.2 # hooks: # - id: bandit +# args: ["-c", "pyproject.toml"] +# additional_dependencies: ["bandit[toml]"] # -# blacken-docs, blackdoc prettier, mypy, pydocstringformatter, velin, flynt, yamllint +# blacken-docs, blackdoc, mypy, pydocstringformatter, velin, flynt diff --git a/.yamllint.yaml b/.yamllint.yaml new file mode 100644 index 000000000..54e656293 --- /dev/null +++ b/.yamllint.yaml @@ -0,0 +1,6 @@ +--- +extends: default +rules: + document-start: disable + line-length: disable + truthy: disable diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 814c8052a..eebd2c372 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -13,13 +13,13 @@ educational level, family status, culture, or political belief. 
Examples of unacceptable behavior by participants include: -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, such as physical or electronic +- The use of sexualized language or imagery +- Personal attacks +- Trolling or insulting/derogatory comments +- Public or private harassment +- Publishing others' private information, such as physical or electronic addresses, without explicit permission -* Other unethical or unprofessional conduct +- Other unethical or unprofessional conduct Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions @@ -52,7 +52,7 @@ that is deemed necessary and appropriate to the circumstances. Maintainers are obligated to maintain confidentiality with regard to the reporter of an incident. -This Code of Conduct is adapted from the [Numba Code of Conduct][numba], which is based on the [Contributor Covenant][homepage], +This Code of Conduct is adapted from the [Numba Code of Conduct][numba], which is based on the [Contributor Covenant][homepage], version 1.3.0, available at [https://contributor-covenant.org/version/1/3/0/][version], and the [Swift Code of Conduct][swift]. diff --git a/README.md b/README.md index de942f88e..1080314c7 100644 --- a/README.md +++ b/README.md @@ -35,14 +35,19 @@ For algorithms, see

## Install + Install the latest version of Python-graphblas via conda: + ``` $ conda install -c conda-forge python-graphblas ``` + or pip: + ``` -$ pip install python-graphblas[default] +$ pip install 'python-graphblas[default]' ``` + This will also install the [SuiteSparse:GraphBLAS](https://github.com/DrTimothyAldenDavis/GraphBLAS) compiled C library. We currently support the [GraphBLAS C API 2.0 specification](https://graphblas.org/docs/GraphBLAS_API_C_v2.0.0.pdf). @@ -57,6 +62,7 @@ The following are not required by python-graphblas, but may be needed for certai - `fast-matrix-market` - for faster read/write of Matrix Market files with `gb.io.mmread` and `gb.io.mmwrite`. ## Description + Currently works with [SuiteSparse:GraphBLAS](https://github.com/DrTimothyAldenDavis/GraphBLAS), but the goal is to make it work with all implementations of the GraphBLAS spec. The approach taken with this library is to follow the C-API 2.0 specification as closely as possible while making improvements @@ -70,10 +76,12 @@ with how Python handles assignment, so instead we (ab)use the left-shift `<<` no assignment. This opens up all kinds of nice possibilities. This is an example of how the mapping works: + ```C // C call GrB_Matrix_mxm(M, mask, GrB_PLUS_INT64, GrB_MIN_PLUS_INT64, A, B, NULL) ``` + ```python # Python call M(mask.V, accum=binary.plus) << A.mxm(B, semiring.min_plus) @@ -91,10 +99,12 @@ is a much better approach, even if it doesn't feel very Pythonic. Descriptor flags are set on the appropriate elements to keep logic close to what it affects. Here is the same call with descriptor bits set. `ttcsr` indicates transpose the first and second matrices, complement the structure of the mask, and do a replacement on the output. + ```C // C call GrB_Matrix_mxm(M, mask, GrB_PLUS_INT64, GrB_MIN_PLUS_INT64, A, B, desc.ttcsr) ``` + ```python # Python call M(~mask.S, accum=binary.plus, replace=True) << A.T.mxm(B.T, semiring.min_plus) @@ -104,16 +114,20 @@ The objects receiving the flag operations (A.T, ~mask, etc) are also delayed obj do no computation, allowing the correct descriptor bits to be set in a single GraphBLAS call. **If no mask or accumulator is used, the call looks like this**: + ```python M << A.mxm(B, semiring.min_plus) ``` + The use of `<<` to indicate updating is actually just syntactic sugar for a real `.update()` method. 
The above expression could be written as: + ```python M.update(A.mxm(B, semiring.min_plus)) ``` ## Operations + ```python M(mask, accum) << A.mxm(B, semiring) # mxm w(mask, accum) << A.mxv(v, semiring) # mxv @@ -123,14 +137,18 @@ M(mask, accum) << A.ewise_mult(B, binaryop) # eWiseMult M(mask, accum) << A.kronecker(B, binaryop) # kronecker M(mask, accum) << A.T # transpose ``` + ## Extract + ```python M(mask, accum) << A[rows, cols] # rows and cols are a list or a slice w(mask, accum) << A[rows, col_index] # extract column w(mask, accum) << A[row_index, cols] # extract row s = A[row_index, col_index].value # extract single element ``` + ## Assign + ```python M(mask, accum)[rows, cols] << A # rows and cols are a list or a slice M(mask, accum)[rows, col_index] << v # assign column @@ -140,31 +158,42 @@ M[row_index, col_index] << s # assign scalar to single element # (mask and accum not allowed) del M[row_index, col_index] # remove single element ``` + ## Apply + ```python M(mask, accum) << A.apply(unaryop) M(mask, accum) << A.apply(binaryop, left=s) # bind-first M(mask, accum) << A.apply(binaryop, right=s) # bind-second ``` + ## Reduce + ```python v(mask, accum) << A.reduce_rowwise(op) # reduce row-wise v(mask, accum) << A.reduce_columnwise(op) # reduce column-wise s(accum) << A.reduce_scalar(op) s(accum) << v.reduce(op) ``` + ## Creating new Vectors / Matrices + ```python A = Matrix.new(dtype, num_rows, num_cols) # new_type B = A.dup() # dup A = Matrix.from_coo([row_indices], [col_indices], [values]) # build ``` + ## New from delayed + Delayed objects can be used to create a new object using `.new()` method + ```python C = A.mxm(B, semiring).new() ``` + ## Properties + ```python size = v.size # size nrows = M.nrows # nrows @@ -172,10 +201,13 @@ ncols = M.ncols # ncols nvals = M.nvals # nvals rindices, cindices, vals = M.to_coo() # extractTuples ``` + ## Initialization + There is a mechanism to initialize `graphblas` with a context prior to use. This allows for setting the backend to use as well as the blocking/non-blocking mode. If the context is not initialized, a default initialization will be performed automatically. + ```python import graphblas as gb @@ -186,10 +218,13 @@ gb.init("suitesparse", blocking=True) from graphblas import binary, semiring from graphblas import Matrix, Vector, Scalar ``` + ## Performant User Defined Functions + Python-graphblas requires `numba` which enables compiling user-defined Python functions to native C for use in GraphBLAS. Example customized UnaryOp: + ```python from graphblas import unary @@ -204,9 +239,11 @@ v = Vector.from_coo([0, 1, 3], [1, 2, 3]) w = v.apply(unary.force_odd).new() w # indexes=[0, 1, 3], values=[1, 3, 3] ``` + Similar methods exist for BinaryOp, Monoid, and Semiring. ## Relation to other network analysis libraries + Python-graphblas aims to provide an efficient and consistent expression of graph operations using linear algebra. This allows the development of high-performance implementations of existing and new graph algorithms @@ -223,7 +260,9 @@ other libraries, `graphblas.io` contains multiple connectors, see the following section. 
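As a concrete taste of the connectors listed in the next section, here is a sketch of a round-trip through networkx using the `from_networkx`/`to_networkx` converters documented in `graphblas.io` (the small path graph is illustrative, not an example from the README):

```python
# Round-trip a networkx graph through a GraphBLAS adjacency Matrix.
import graphblas as gb
import networkx as nx

G = nx.path_graph(4)        # 0-1-2-3
A = gb.io.from_networkx(G)  # graphblas Matrix holding the adjacency structure
H = gb.io.to_networkx(A)    # back to a networkx graph
```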
## Import/Export connectors to the Python ecosystem + `graphblas.io` contains functions for converting to and from: + ```python import graphblas as gb diff --git a/binder/environment.yml b/binder/environment.yml index 11cd98e0c..9548f2126 100644 --- a/binder/environment.yml +++ b/binder/environment.yml @@ -1,12 +1,12 @@ name: graphblas channels: - - conda-forge + - conda-forge dependencies: - - python=3.11 - - python-graphblas - - matplotlib - - networkx - - pandas - - scipy - - drawsvg - - cairosvg + - python=3.11 + - python-graphblas + - matplotlib + - networkx + - pandas + - scipy + - drawsvg + - cairosvg diff --git a/docs/_static/custom.css b/docs/_static/custom.css index 1b14402cd..f7dd59b74 100644 --- a/docs/_static/custom.css +++ b/docs/_static/custom.css @@ -1,78 +1,78 @@ - /* Main Page Stylings */ .intro-card { - background-color: var(--pst-color-background); - margin-bottom: 30px; + background-color: var(--pst-color-background); + margin-bottom: 30px; } .intro-card:hover { - box-shadow: 0.2rem 0.5rem 1rem var(--pst-color-link) !important; + box-shadow: 0.2rem 0.5rem 1rem var(--pst-color-link) !important; } .intro-card .card-header { - background-color: inherit; + background-color: inherit; } .intro-card .card-header .card-text { - font-weight: bold; + font-weight: bold; } .intro-card .card-body { - margin-top: 0; + margin-top: 0; } .intro-card .card-body .card-text:first-child { - margin-bottom: 0; + margin-bottom: 0; } .shadow { - box-shadow: 0.2rem 0.5rem 1rem var(--pst-color-text-muted) !important; + box-shadow: 0.2rem 0.5rem 1rem var(--pst-color-text-muted) !important; } .table { - font-size: smaller; - width: inherit; + font-size: smaller; + width: inherit; } -.table td, .table th { - padding: 0 .75rem; +.table td, +.table th { + padding: 0 0.75rem; } .table.inline { - display: inline-table; - margin-right: 30px; + display: inline-table; + margin-right: 30px; } p.rubric { - border-bottom: none; + border-bottom: none; } button.navbar-btn.rounded-circle { - padding: 0.25rem; + padding: 0.25rem; } button.navbar-btn.search-button { - color: var(--pst-color-text-muted); - padding: 0; + color: var(--pst-color-text-muted); + padding: 0; } -button.navbar-btn:hover -{ - color: var(--pst-color-primary); +button.navbar-btn:hover { + color: var(--pst-color-primary); } button.theme-switch-button { - font-size: calc(var(--pst-font-size-icon) - .1rem); - border: none; + font-size: calc(var(--pst-font-size-icon) - 0.1rem); + border: none; } button span.theme-switch:hover { - color: var(--pst-color-primary); + color: var(--pst-color-primary); } /* Styling for Jupyter Notebook ReST Exports */ -.dataframe tbody th, .dataframe tbody td { - padding: 10px; +.dataframe tbody th, +.dataframe tbody td { + padding: 10px; } diff --git a/docs/_static/matrix.css b/docs/_static/matrix.css index 5700ea3fc..1937178e5 100644 --- a/docs/_static/matrix.css +++ b/docs/_static/matrix.css @@ -1,104 +1,104 @@ /* Based on the stylesheet used by matrepr (https://github.com/alugowski/matrepr) and modified for sphinx */ -table.matrix { - border-collapse: collapse; - border: 0px; +table.matrix { + border-collapse: collapse; + border: 0px; } /* Disable a horizontal line from the default stylesheet */ .table.matrix > :not(caption) > * > * { - border-bottom-width: 0px; + border-bottom-width: 0px; } /* row indices */ table.matrix > tbody tr th { - font-size: smaller; - font-weight: bolder; - vertical-align: middle; - text-align: right; + font-size: smaller; + font-weight: bolder; + vertical-align: middle; + text-align:
right; } /* row indices are often made bold in the source data; here make them match the boldness of the th column label style*/ table.matrix strong { - font-weight: bold; + font-weight: bold; } /* column indices */ table.matrix > thead tr th { - font-size: smaller; - font-weight: bolder; - vertical-align: middle; - text-align: center; + font-size: smaller; + font-weight: bolder; + vertical-align: middle; + text-align: center; } /* cells */ table.matrix > tbody tr td { - vertical-align: middle; - text-align: center; - position: relative; + vertical-align: middle; + text-align: center; + position: relative; } /* left border */ table.matrix > tbody tr td:first-of-type { - border-left: solid 2px var(--pst-color-text-base); + border-left: solid 2px var(--pst-color-text-base); } /* right border */ table.matrix > tbody tr td:last-of-type { - border-right: solid 2px var(--pst-color-text-base); + border-right: solid 2px var(--pst-color-text-base); } /* prevents empty cells from collapsing, especially empty rows */ table.matrix > tbody tr td:empty::before { - /* basicaly fills empty cells with &nbsp; */ - content: "\00a0\00a0\00a0"; - visibility: hidden; + /* basically fills empty cells with &nbsp; */ + content: "\00a0\00a0\00a0"; + visibility: hidden; } table.matrix > tbody tr td:empty::after { - content: "\00a0\00a0\00a0"; - visibility: hidden; + content: "\00a0\00a0\00a0"; + visibility: hidden; } /* matrix bracket ticks */ table.matrix > tbody > tr:first-child > td:first-of-type::before { - content: ""; - width: 4px; - position: absolute; - top: 0; - bottom: 0; - visibility: visible; - left: 0; - right: auto; - border-top: solid 2px var(--pst-color-text-base); + content: ""; + width: 4px; + position: absolute; + top: 0; + bottom: 0; + visibility: visible; + left: 0; + right: auto; + border-top: solid 2px var(--pst-color-text-base); } table.matrix > tbody > tr:last-child > td:first-of-type::before { - content: ""; - width: 4px; - position: absolute; - top: 0; - bottom: 0; - visibility: visible; - left: 0; - right: auto; - border-bottom: solid 2px var(--pst-color-text-base); + content: ""; + width: 4px; + position: absolute; + top: 0; + bottom: 0; + visibility: visible; + left: 0; + right: auto; + border-bottom: solid 2px var(--pst-color-text-base); } table.matrix > tbody > tr:first-child > td:last-of-type::after { - content: ""; - width: 4px; - position: absolute; - top: 0; - bottom: 0; - visibility: visible; - left: auto; - right: 0; - border-top: solid 2px var(--pst-color-text-base); + content: ""; + width: 4px; + position: absolute; + top: 0; + bottom: 0; + visibility: visible; + left: auto; + right: 0; + border-top: solid 2px var(--pst-color-text-base); } table.matrix > tbody > tr:last-child > td:last-of-type::after { - content: ""; - width: 4px; - position: absolute; - top: 0; - bottom: 0; - visibility: visible; - left: auto; - right: 0; - border-bottom: solid 2px var(--pst-color-text-base); + content: ""; + width: 4px; + position: absolute; + top: 0; + bottom: 0; + visibility: visible; + left: auto; + right: 0; + border-bottom: solid 2px var(--pst-color-text-base); } diff --git a/docs/api_reference/io.rst b/docs/api_reference/io.rst index cd6057a31..1cfc98516 100644 --- a/docs/api_reference/io.rst +++ b/docs/api_reference/io.rst @@ -10,15 +10,18 @@ These methods require `networkx <https://networkx.org/>`_ to be installed. .. autofunction:: graphblas.io.to_networkx -Numpy +NumPy ~~~~~ -These methods require `scipy <https://scipy.org/>`_ to be installed, as some -of the scipy.sparse machinery is used during the conversion process.
+These methods convert to and from dense arrays. For more, see :ref:`IO in the user guide <from-to-values>`. -.. autofunction:: graphblas.io.from_numpy +.. automethod:: graphblas.core.matrix.Matrix.from_dense -.. autofunction:: graphblas.io.to_numpy +.. automethod:: graphblas.core.matrix.Matrix.to_dense + +.. automethod:: graphblas.core.vector.Vector.from_dense + +.. automethod:: graphblas.core.vector.Vector.to_dense Scipy Sparse ~~~~~~~~~~~~ diff --git a/docs/env.yml b/docs/env.yml index c0c4c8999..78a50afbe 100644 --- a/docs/env.yml +++ b/docs/env.yml @@ -1,23 +1,23 @@ name: python-graphblas-docs channels: - - conda-forge - - nodefaults + - conda-forge + - nodefaults dependencies: - - python=3.10 - - pip - # python-graphblas dependencies - - donfig - - numba - - python-suitesparse-graphblas>=7.4.0.0 - - pyyaml - # extra dependencies - - matplotlib - - networkx - - pandas - - scipy>=1.7.0 - # docs dependencies - - commonmark # For RTD - - nbsphinx - - numpydoc - - pydata-sphinx-theme=0.13.1 - - sphinx-panels=0.6.0 + - python=3.10 + - pip + # python-graphblas dependencies + - donfig + - numba + - python-suitesparse-graphblas>=7.4.0.0 + - pyyaml + # extra dependencies + - matplotlib + - networkx + - pandas + - scipy>=1.7.0 + # docs dependencies + - commonmark # For RTD + - nbsphinx + - numpydoc + - pydata-sphinx-theme=0.13.1 + - sphinx-panels=0.6.0 diff --git a/docs/getting_started/faq.rst b/docs/getting_started/faq.rst index 1e60a1bd4..2609e7929 100644 --- a/docs/getting_started/faq.rst +++ b/docs/getting_started/faq.rst @@ -101,11 +101,10 @@ Bugs are not considered deprecations and may be fixed immediately. What is the version support policy? +++++++++++++++++++++++++++++++++++ -Each major Python version will be supported for at least 36 to 42 months. +Each major Python version will be supported for at least 36 months. Major dependencies such as NumPy should be supported for at least 24 months. -This is motivated by these guidelines: +We aim to follow SPEC 0: -- https://numpy.org/neps/nep-0029-deprecation_policy.html - https://scientific-python.org/specs/spec-0000/ ``python-graphblas`` itself follows a "single trunk" versioning strategy. diff --git a/docs/user_guide/collections.rst b/docs/user_guide/collections.rst index 2ce759bf4..de7469c6d 100644 --- a/docs/user_guide/collections.rst +++ b/docs/user_guide/collections.rst @@ -145,7 +145,7 @@ The shape and dtype remain unchanged, but the collection will be fully sparse (i to_coo ~~~~~~ -To go from a collection back to the index and values, ``.to_coo()`` can be called. Numpy arrays +To go from a collection back to the index and values, ``.to_coo()`` can be called. NumPy arrays will be returned in a tuple. .. code-block:: python diff --git a/docs/user_guide/io.rst b/docs/user_guide/io.rst index ecb4c0862..f27b40bd3 100644 --- a/docs/user_guide/io.rst +++ b/docs/user_guide/io.rst @@ -4,6 +4,8 @@ Input/Output There are several ways to get data into and out of python-graphblas. +.. _from-to-values: + From/To Values -------------- diff --git a/docs/user_guide/operations.rst b/docs/user_guide/operations.rst index 3f710dc23..18d0352d7 100644 --- a/docs/user_guide/operations.rst +++ b/docs/user_guide/operations.rst @@ -8,7 +8,7 @@ Matrix Multiply The GraphBLAS spec contains three methods for matrix multiplication, depending on whether the inputs are Matrix or Vector.
- - **mxm** -- Matrix-Matrix multplications + - **mxm** -- Matrix-Matrix multiplication - **mxv** -- Matrix-Vector multiplication - **vxm** -- Vector-Matrix multiplication diff --git a/environment.yml b/environment.yml index 1863d4006..2bae0b76e 100644 --- a/environment.yml +++ b/environment.yml @@ -11,103 +11,100 @@ # It is okay to comment out sections below that you don't need such as viz or building docs. name: graphblas-dev channels: - - conda-forge - - nodefaults # Only install packages from conda-forge for faster solving + - conda-forge + - nodefaults # Only install packages from conda-forge for faster solving dependencies: - - python - - donfig - - numba - - python-suitesparse-graphblas - - pyyaml - # For repr - - pandas - # For I/O - - awkward - - fast_matrix_market - - networkx - - scipy - - sparse - # For viz - - datashader - - hvplot - - matplotlib - # For linting - - pre-commit - # For testing - - packaging - - pytest-cov - - tomli - # For debugging - - icecream - - ipykernel - - ipython - # For type annotations - - mypy - # For building docs - - nbsphinx - - numpydoc - - pydata-sphinx-theme - - sphinx-panels - # For building logo - - drawsvg - - cairosvg - # EXTRA (optional; uncomment as desired) - # - autoflake - # - black - # - black-jupyter - # - build - # - codespell - # - commonmark - # - cython - # - cytoolz - # - distributed - # - flake8 - # - flake8-bugbear - # - flake8-comprehensions - # - flake8-print - # - flake8-quotes - # - flake8-simplify - # - gcc - # - gh - # - git - # - graph-tool - # - xorg-libxcursor # for graph-tool - # - grayskull - # - h5py - # - hiveplot - # - igraph - # - ipycytoscape - # - isort - # - jupyter - # - jupyterlab - # - line_profiler - # - lxml - # - make - # - memory_profiler - # - nbqa - # - netcdf4 - # - networkit - # - nxviz - # - pycodestyle - # - pydot - # - pygraphviz - # - pylint - # - pytest-runner - # - pytest-xdist - # - python-graphviz - # - python-igraph - # - python-louvain - # - pyupgrade - # - rich - # - ruff - # - scalene - # - scikit-network - # - setuptools-git-versioning - # - snakeviz - # - sphinx-lint - # - sympy - # - tuna - # - twine - # - vim - # - yesqa - # - zarr + - python + - donfig + - numba + - python-suitesparse-graphblas + - pyyaml + # For repr + - pandas + # For I/O + - awkward + - networkx + - scipy + - sparse + # For viz + - datashader + - hvplot + - matplotlib + # For linting + - pre-commit + # For testing + - packaging + - pytest-cov + - tomli + # For debugging + - icecream + - ipykernel + - ipython + # For type annotations + - mypy + # For building docs + - nbsphinx + - numpydoc + - pydata-sphinx-theme + - sphinx-panels + # For building logo + - drawsvg + - cairosvg + # EXTRA (optional; uncomment as desired) + # - autoflake + # - black + # - black-jupyter + # - codespell + # - commonmark + # - cython + # - cytoolz + # - distributed + # - flake8 + # - flake8-bugbear + # - flake8-comprehensions + # - flake8-print + # - flake8-quotes + # - flake8-simplify + # - gcc + # - gh + # - git + # - graph-tool + # - xorg-libxcursor # for graph-tool + # - grayskull + # - h5py + # - hiveplot + # - igraph + # - ipycytoscape + # - isort + # - jupyter + # - jupyterlab + # - line_profiler + # - lxml + # - make + # - memory_profiler + # - nbqa + # - netcdf4 + # - networkit + # - nxviz + # - pycodestyle + # - pydot + # - pygraphviz + # - pylint + # - pytest-runner + # - pytest-xdist + # - python-graphviz + # - python-igraph + # - python-louvain + # - pyupgrade + # - rich + # - ruff + # - scalene + # - scikit-network + # - 
setuptools-git-versioning + # - snakeviz + # - sphinx-lint + # - sympy + # - tuna + # - twine + # - vim + # - zarr diff --git a/graphblas/agg/__init__.py b/graphblas/agg/__init__.py index 9f6ead0b5..da7c13591 100644 --- a/graphblas/agg/__init__.py +++ b/graphblas/agg/__init__.py @@ -73,7 +73,8 @@ # - bxnor monoid: even bits # - bnor monoid: odd bits """ -# All items are dynamically added by classes in core/agg.py + +# All items are dynamically added by classes in core/operator/agg.py # This module acts as a container of Aggregator instances _deprecated = {} diff --git a/graphblas/binary/numpy.py b/graphblas/binary/numpy.py index 7c03977e4..bb22d0b07 100644 --- a/graphblas/binary/numpy.py +++ b/graphblas/binary/numpy.py @@ -5,6 +5,7 @@ https://numba.readthedocs.io/en/stable/reference/numpysupported.html#math-operations """ + import numpy as _np from .. import _STANDARD_OPERATOR_NAMES diff --git a/graphblas/core/agg.py b/graphblas/core/agg.py deleted file mode 100644 index 23848d3b9..000000000 --- a/graphblas/core/agg.py +++ /dev/null @@ -1,17 +0,0 @@ -"""graphblas.core.agg namespace is deprecated; please use graphblas.core.operator.agg instead. - -.. deprecated:: 2023.3.0 -``graphblas.core.agg`` will be removed in a future release. -Use ``graphblas.core.operator.agg`` instead. -Will be removed in version 2023.11.0 or later. - -""" -import warnings - -from .operator.agg import * # pylint: disable=wildcard-import,unused-wildcard-import - -warnings.warn( - "graphblas.core.agg namespace is deprecated; please use graphblas.core.operator.agg instead.", - DeprecationWarning, - stacklevel=1, -) diff --git a/graphblas/core/automethods.py b/graphblas/core/automethods.py index 0a2aa208a..600a6e139 100644 --- a/graphblas/core/automethods.py +++ b/graphblas/core/automethods.py @@ -7,6 +7,7 @@ $ python -m graphblas.core.automethods """ + from .. import config @@ -281,10 +282,6 @@ def to_edgelist(self): return self._get_value("to_edgelist") -def to_values(self): - return self._get_value("to_values") - - def value(self): return self._get_value("value") @@ -398,7 +395,6 @@ def _main(): "ss", "to_coo", "to_dense", - "to_values", } vector = { "_as_matrix", diff --git a/graphblas/core/base.py b/graphblas/core/base.py index 5658e99c1..24a49ba1a 100644 --- a/graphblas/core/base.py +++ b/graphblas/core/base.py @@ -513,7 +513,7 @@ def _name_html(self): _expect_op = _expect_op # Don't let non-scalars be coerced to numpy arrays - def __array__(self, dtype=None): + def __array__(self, dtype=None, *, copy=None): raise TypeError( f"{type(self).__name__} can't be directly converted to a numpy array; " f"perhaps use `{self.name}.to_coo()` method instead." 
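The `__array__` change just above tracks the NumPy 2 conversion protocol, which may pass a `copy` keyword when coercing objects; a guard method must accept that keyword or NumPy raises a signature `TypeError` before the intended message can surface. A minimal sketch of the behavior, using a hypothetical `NoArray` class:

```python
# NumPy 2 may call obj.__array__(dtype, copy=...); accepting the keyword
# lets this guard raise its own, more helpful, error instead.
import numpy as np

class NoArray:
    def __array__(self, dtype=None, *, copy=None):
        raise TypeError("not convertible to a numpy array; use .to_coo() instead")

try:
    np.asarray(NoArray())
except TypeError as e:
    print(e)  # not convertible to a numpy array; use .to_coo() instead
```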
diff --git a/graphblas/core/dtypes.py b/graphblas/core/dtypes.py index d7a83c99b..2d4178b14 100644 --- a/graphblas/core/dtypes.py +++ b/graphblas/core/dtypes.py @@ -1,4 +1,5 @@ import warnings +from ast import literal_eval import numpy as np from numpy import promote_types, result_type @@ -97,7 +98,7 @@ def register_anonymous(dtype, name=None): # Allow dtypes such as `"INT64[3, 4]"` for convenience base_dtype, shape = dtype.split("[", 1) base_dtype = lookup_dtype(base_dtype) - shape = np.lib.format.safe_eval(f"[{shape}") + shape = literal_eval(f"[{shape}") dtype = np.dtype((base_dtype.np_type, shape)) else: raise @@ -115,7 +116,17 @@ def register_anonymous(dtype, name=None): from ..exceptions import check_status_carg gb_obj = ffi.new("GrB_Type*") - if backend == "suitesparse": + + if hasattr(lib, "GrB_Type_set_String"): + # We name this so that we can serialize and deserialize UDTs + # We don't yet have C definitions + np_repr = _dtype_to_string(dtype) + status = lib.GrB_Type_new(gb_obj, dtype.itemsize) + check_status_carg(status, "Type", gb_obj[0]) + val_obj = ffi.new("char[]", np_repr.encode()) + status = lib.GrB_Type_set_String(gb_obj[0], val_obj, lib.GrB_NAME) + elif backend == "suitesparse": + # For SuiteSparse < 9 # We name this so that we can serialize and deserialize UDTs # We don't yet have C definitions np_repr = _dtype_to_string(dtype).encode() @@ -375,8 +386,7 @@ def lookup_dtype(key, value=None): def unify(type1, type2, *, is_left_scalar=False, is_right_scalar=False): - """ - Returns a type that can hold both type1 and type2. + """Returns a type that can hold both type1 and type2. For example: unify(INT32, INT64) -> INT64 @@ -430,7 +440,7 @@ def _dtype_to_string(dtype): np_type = dtype.np_type s = str(np_type) try: - if np.dtype(np.lib.format.safe_eval(s)) == np_type: # pragma: no branch (safety) + if np.dtype(literal_eval(s)) == np_type: # pragma: no branch (safety) return s except Exception: pass @@ -449,5 +459,5 @@ def _string_to_dtype(s): return lookup_dtype(s) except Exception: pass - np_type = np.dtype(np.lib.format.safe_eval(s)) + np_type = np.dtype(literal_eval(s)) return lookup_dtype(np_type) diff --git a/graphblas/core/expr.py b/graphblas/core/expr.py index d803939a5..efec2db5f 100644 --- a/graphblas/core/expr.py +++ b/graphblas/core/expr.py @@ -147,13 +147,13 @@ def py_indices(self): return self.indices[0]._py_index() def parse_indices(self, indices, shape): - """ - Returns + """Returns ------- [(rows, rowsize), (cols, colsize)] for Matrix [(idx, idx_size)] for Vector Within each tuple, if the index is of type int, the size will be None + """ if len(shape) == 1: if type(indices) is tuple: @@ -312,8 +312,8 @@ def update(self, expr, **opts): Updater(self.parent, opts=opts)._setitem(self.resolved_indexes, expr, is_submask=False) def new(self, dtype=None, *, mask=None, input_mask=None, name=None, **opts): - """ - Force extraction of the indexes into a new object + """Force extraction of the indexes into a new object. + dtype and mask are the only controllable parameters. 
""" if input_mask is not None: diff --git a/graphblas/core/formatting.py b/graphblas/core/formatting.py index aefb87f94..0b6252101 100644 --- a/graphblas/core/formatting.py +++ b/graphblas/core/formatting.py @@ -630,7 +630,7 @@ def create_header(type_name, keys, vals, *, lower_border=False, name="", quote=T name = f'"{name}"' key_text = [] val_text = [] - for key, val in zip(keys, vals): + for key, val in zip(keys, vals, strict=True): width = max(len(key), len(val)) + 2 key_text.append(key.rjust(width)) val_text.append(val.rjust(width)) diff --git a/graphblas/core/infix.py b/graphblas/core/infix.py index 51714633c..24c109639 100644 --- a/graphblas/core/infix.py +++ b/graphblas/core/infix.py @@ -236,7 +236,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): to_coo = wrapdoc(Vector.to_coo)(property(automethods.to_coo)) to_dense = wrapdoc(Vector.to_dense)(property(automethods.to_dense)) to_dict = wrapdoc(Vector.to_dict)(property(automethods.to_dict)) - to_values = wrapdoc(Vector.to_values)(property(automethods.to_values)) vxm = wrapdoc(Vector.vxm)(property(automethods.vxm)) wait = wrapdoc(Vector.wait)(property(automethods.wait)) # These raise exceptions @@ -317,6 +316,7 @@ class MatrixInfixExpr(InfixExprBase): ndim = 2 output_type = MatrixExpression _is_transposed = False + __networkx_backend__ = "graphblas" __networkx_plugin__ = "graphblas" def __init__(self, left, right): @@ -396,7 +396,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): to_dense = wrapdoc(Matrix.to_dense)(property(automethods.to_dense)) to_dicts = wrapdoc(Matrix.to_dicts)(property(automethods.to_dicts)) to_edgelist = wrapdoc(Matrix.to_edgelist)(property(automethods.to_edgelist)) - to_values = wrapdoc(Matrix.to_values)(property(automethods.to_values)) wait = wrapdoc(Matrix.wait)(property(automethods.wait)) # These raise exceptions __array__ = Matrix.__array__ diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 1ea24f479..bf20cc953 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -1,5 +1,4 @@ import itertools -import warnings from collections.abc import Sequence import numpy as np @@ -178,12 +177,14 @@ class Matrix(BaseType): Number of columns. name : str, optional Name to give the Matrix. This will be displayed in the ``__repr__``. + """ __slots__ = "_nrows", "_ncols", "_parent", "ss" ndim = 2 _is_transposed = False _name_counter = itertools.count() + __networkx_backend__ = "graphblas" __networkx_plugin__ = "graphblas" def __new__(cls, dtype=FP64, nrows=0, ncols=0, *, name=None): @@ -297,6 +298,7 @@ def __delitem__(self, keys, **opts): Examples -------- >>> del M[1, 5] + """ del Updater(self, opts=opts)[keys] @@ -311,6 +313,7 @@ def __getitem__(self, keys): .. code-block:: python subM = M[[1, 3, 5], :].new() + """ resolved_indexes = IndexerResolver(self, keys) shape = resolved_indexes.shape @@ -332,6 +335,7 @@ def __setitem__(self, keys, expr, **opts): .. code-block:: python M[0, 0:3] = 17 + """ Updater(self, opts=opts)[keys] = expr @@ -343,6 +347,7 @@ def __contains__(self, index): .. 
code-block:: python (10, 15) in M + """ extractor = self[index] if not extractor._is_scalar: @@ -356,7 +361,7 @@ def __contains__(self, index): def __iter__(self): """Iterate over (row, col) indices which are present in the matrix.""" rows, columns, _ = self.to_coo(values=False) - return zip(rows.flat, columns.flat) + return zip(rows.flat, columns.flat, strict=True) def __sizeof__(self): if backend == "suitesparse": @@ -382,6 +387,7 @@ def isequal(self, other, *, check_dtype=False, **opts): See Also -------- :meth:`isclose` : For equality check of floating point dtypes + """ other = self._expect_type( other, (Matrix, TransposedMatrix), within="isequal", argname="other" @@ -428,6 +434,7 @@ def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False, **opts ------- bool Whether all values of the Matrix are close to the values in ``other``. + """ other = self._expect_type( other, (Matrix, TransposedMatrix), within="isclose", argname="other" @@ -515,42 +522,6 @@ def resize(self, nrows, ncols): self._nrows = nrows.value self._ncols = ncols.value - def to_values(self, dtype=None, *, rows=True, columns=True, values=True, sort=True): - """Extract the indices and values as a 3-tuple of numpy arrays - corresponding to the COO format of the Matrix. - - .. deprecated:: 2022.11.0 - ``Matrix.to_values`` will be removed in a future release. - Use ``Matrix.to_coo`` instead. Will be removed in version 2023.9.0 or later - - Parameters - ---------- - dtype : - Requested dtype for the output values array. - rows : bool, default=True - Whether to return rows; will return ``None`` for rows if ``False`` - columns : bool, default=True - Whether to return columns; will return ``None`` for columns if ``False`` - values : bool, default=True - Whether to return values; will return ``None`` for values if ``False`` - sort : bool, default=True - Whether to require sorted indices. - If internally stored rowwise, the sorting will be first by rows, then by column. - If internally stored columnwise, the sorting will be first by column, then by row. - - Returns - ------- - np.ndarray[dtype=uint64] : Rows - np.ndarray[dtype=uint64] : Columns - np.ndarray : Values - """ - warnings.warn( - "`Matrix.to_values(...)` is deprecated; please use `Matrix.to_coo(...)` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self.to_coo(dtype, rows=rows, columns=columns, values=values, sort=sort) - def to_coo(self, dtype=None, *, rows=True, columns=True, values=True, sort=True): """Extract the indices and values as a 3-tuple of numpy arrays corresponding to the COO format of the Matrix. 
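The ``to_values`` removals in these hunks complete a deprecation cycle; ``to_coo`` is the documented replacement and returns the same 3-tuple of numpy arrays. A short migration sketch (example data is illustrative):

import graphblas as gb

A = gb.Matrix.from_coo([0, 1], [1, 0], [10, 20], nrows=2, ncols=2)
# Before (removed in this diff): rows, cols, vals = A.to_values()
rows, cols, vals = A.to_coo()  # rows/cols are uint64 arrays; vals match A.dtype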
@@ -581,6 +552,7 @@ def to_coo(self, dtype=None, *, rows=True, columns=True, values=True, sort=True) np.ndarray[dtype=uint64] : Rows np.ndarray[dtype=uint64] : Columns np.ndarray : Values + """ if sort and backend == "suitesparse": self.wait() # sort in SS @@ -647,6 +619,7 @@ def to_edgelist(self, dtype=None, *, values=True, sort=True): ------- np.ndarray[dtype=uint64] : Edgelist np.ndarray : Values + """ rows, columns, values = self.to_coo(dtype, values=values, sort=sort) return (np.column_stack([rows, columns]), values) @@ -727,6 +700,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): Returns ------- Matrix + """ if dtype is not None or mask is not None or clear: if dtype is None: @@ -758,6 +732,7 @@ def diag(self, k=0, dtype=None, *, name=None, **opts): Returns ------- :class:`~graphblas.Vector` + """ if backend == "suitesparse": from ..ss._core import diag @@ -801,6 +776,7 @@ def wait(self, how="materialize"): Use wait to force completion of the Matrix. Has no effect in `blocking mode <../user_guide/init.html#graphblas-modes>`__. + """ how = how.lower() if how == "materialize": @@ -827,6 +803,7 @@ def get(self, row, col, default=None): Returns ------- Python scalar + """ expr = self[row, col] if expr._is_scalar: @@ -837,61 +814,6 @@ def get(self, row, col, default=None): "Indices should get a single element, which will be extracted as a Python scalar." ) - @classmethod - def from_values( - cls, - rows, - columns, - values, - dtype=None, - *, - nrows=None, - ncols=None, - dup_op=None, - name=None, - ): - """Create a new Matrix from row and column indices and values. - - .. deprecated:: 2022.11.0 - ``Matrix.from_values`` will be removed in a future release. - Use ``Matrix.from_coo`` instead. Will be removed in version 2023.9.0 or later - - Parameters - ---------- - rows : list or np.ndarray - Row indices. - columns : list or np.ndarray - Column indices. - values : list or np.ndarray or scalar - List of values. If a scalar is provided, all values will be set to this single value. - dtype : - Data type of the Matrix. If not provided, the values will be inspected - to choose an appropriate dtype. - nrows : int, optional - Number of rows in the Matrix. If not provided, ``nrows`` is computed - from the maximum row index found in ``rows``. - ncols : int, optional - Number of columns in the Matrix. If not provided, ``ncols`` is computed - from the maximum column index found in ``columns``. - dup_op : :class:`~graphblas.core.operator.BinaryOp`, optional - Function used to combine values if duplicate indices are found. - Leaving ``dup_op=None`` will raise an error if duplicates are found. - name : str, optional - Name to give the Matrix. 
- - Returns - ------- - Matrix - """ - warnings.warn( - "`Matrix.from_values(...)` is deprecated; please use `Matrix.from_coo(...)` instead.", - DeprecationWarning, - stacklevel=2, - ) - return cls.from_coo( - rows, columns, values, dtype, nrows=nrows, ncols=ncols, dup_op=dup_op, name=name - ) - @classmethod def from_coo( cls, @@ -939,6 +861,7 @@ def from_coo( Returns ------- Matrix + """ rows = ints_to_numpy_buffer(rows, np.uint64, name="row indices") columns = ints_to_numpy_buffer(columns, np.uint64, name="column indices") @@ -1018,6 +941,7 @@ def from_edgelist( Returns ------- Matrix + """ edgelist_values = None if isinstance(edgelist, np.ndarray): @@ -1038,7 +962,7 @@ def from_edgelist( rows = edgelist[:, 0] cols = edgelist[:, 1] else: - unzipped = list(zip(*edgelist)) + unzipped = list(zip(*edgelist, strict=True)) if len(unzipped) == 2: rows, cols = unzipped elif len(unzipped) == 3: @@ -1187,6 +1111,7 @@ def from_csr( to_csr Matrix.ss.import_csr io.from_scipy_sparse + """ return cls._from_csx(_CSR_FORMAT, indptr, col_indices, values, dtype, ncols, nrows, name) @@ -1234,6 +1159,7 @@ def from_csc( to_csc Matrix.ss.import_csc io.from_scipy_sparse + """ return cls._from_csx(_CSC_FORMAT, indptr, row_indices, values, dtype, nrows, ncols, name) @@ -1294,6 +1220,7 @@ def from_dcsr( to_dcsr Matrix.ss.import_hypercsr io.from_scipy_sparse + """ if backend == "suitesparse": return cls.ss.import_hypercsr( @@ -1378,6 +1305,7 @@ def from_dcsc( to_dcsc Matrix.ss.import_hypercsc io.from_scipy_sparse + """ if backend == "suitesparse": return cls.ss.import_hypercsc( @@ -1439,6 +1367,7 @@ def from_scalar(cls, value, nrows, ncols, dtype=None, *, name=None, **opts): Returns ------- Matrix + """ if type(value) is not Scalar: try: @@ -1492,6 +1421,7 @@ def from_dense(cls, values, missing_value=None, *, dtype=None, name=None, **opts Returns ------- Matrix + """ values, dtype = values_to_numpy_buffer(values, dtype, subarray_after=2) if values.ndim == 0: @@ -1551,6 +1481,7 @@ def to_dense(self, fill_value=None, dtype=None, **opts): Returns ------- np.ndarray + """ max_nvals = self._nrows * self._ncols if fill_value is None or self._nvals == max_nvals: @@ -1626,6 +1557,7 @@ def from_dicts( Returns ------- Matrix + """ order = get_order(order) if isinstance(nested_dicts, Sequence): @@ -1735,6 +1667,7 @@ def to_csr(self, dtype=None, *, sort=True): from_csr Matrix.ss.export io.to_scipy_sparse + """ if backend == "suitesparse": info = self.ss.export("csr", sort=sort) @@ -1766,6 +1699,7 @@ def to_csc(self, dtype=None, *, sort=True): from_csc Matrix.ss.export io.to_scipy_sparse + """ if backend == "suitesparse": info = self.ss.export("csc", sort=sort) @@ -1800,6 +1734,7 @@ def to_dcsr(self, dtype=None, *, sort=True): from_dcsc Matrix.ss.export io.to_scipy_sparse + """ if backend == "suitesparse": info = self.ss.export("hypercsr", sort=sort) @@ -1842,6 +1777,7 @@ def to_dcsc(self, dtype=None, *, sort=True): from_dcsc Matrix.ss.export io.to_scipy_sparse + """ if backend == "suitesparse": info = self.ss.export("hypercsc", sort=sort) @@ -1879,6 +1815,7 @@ def to_dicts(self, order="rowwise"): Returns ------- dict + """ order = get_order(order) if order == "rowwise": @@ -1890,10 +1827,11 @@ def to_dicts(self, order="rowwise"): cols = cols.tolist() values = values.tolist() return { - row: dict(zip(cols[start:stop], values[start:stop])) + row: dict(zip(cols[start:stop], values[start:stop], strict=True)) for row, (start, stop) in zip( compressed_rows.tolist(), np.lib.stride_tricks.sliding_window_view(indptr, 2).tolist(), + 
strict=True, ) } # Alternative @@ -1948,6 +1886,7 @@ def ewise_add(self, other, op=monoid.plus): # Functional syntax C << monoid.max(A | B) + """ return self._ewise_add(other, op) @@ -2038,6 +1977,7 @@ def ewise_mult(self, other, op=binary.times): # Functional syntax C << binary.gt(A & B) + """ return self._ewise_mult(other, op) @@ -2132,6 +2072,7 @@ def ewise_union(self, other, op, left_default, right_default): # Functional syntax C << binary.div(A | B, left_default=1, right_default=1) + """ return self._ewise_union(other, op, left_default, right_default) @@ -2285,6 +2226,7 @@ def mxv(self, other, op=semiring.plus_times): # Functional syntax C << semiring.min_plus(A @ v) + """ return self._mxv(other, op) @@ -2345,6 +2287,7 @@ def mxm(self, other, op=semiring.plus_times): # Functional syntax C << semiring.min_plus(A @ B) + """ return self._mxm(other, op) @@ -2409,6 +2352,7 @@ def kronecker(self, other, op=binary.times): .. code-block:: python C << A.kronecker(B, op=binary.times) + """ method_name = "kronecker" other = self._expect_type( @@ -2465,6 +2409,7 @@ def apply(self, op, right=None, *, left=None): # Functional syntax C << op.abs(A) + """ method_name = "apply" extra_message = ( @@ -2613,6 +2558,7 @@ def select(self, op, thunk=None): # Functional syntax C << select.value(A >= 1) + """ method_name = "select" if isinstance(op, str): @@ -2707,6 +2653,7 @@ def reduce_rowwise(self, op=monoid.plus): .. code-block:: python w << A.reduce_rowwise(monoid.plus) + """ method_name = "reduce_rowwise" op = get_typed_op(op, self.dtype, kind="binary|aggregator") @@ -2744,6 +2691,7 @@ def reduce_columnwise(self, op=monoid.plus): .. code-block:: python w << A.reduce_columnwise(monoid.plus) + """ method_name = "reduce_columnwise" op = get_typed_op(op, self.dtype, kind="binary|aggregator") @@ -2762,8 +2710,7 @@ def reduce_columnwise(self, op=monoid.plus): ) def reduce_scalar(self, op=monoid.plus, *, allow_empty=True): - """ - Reduce all values in the Matrix into a single value using ``op``. + """Reduce all values in the Matrix into a single value using ``op``. See the `Reduce <../user_guide/operations.html#reduce>`__ section in the User Guide for more details. @@ -2785,6 +2732,7 @@ def reduce_scalar(self, op=monoid.plus, *, allow_empty=True): .. code-block:: python total << A.reduce_scalar(monoid.plus) + """ method_name = "reduce_scalar" op = get_typed_op(op, self.dtype, kind="binary|aggregator") @@ -2845,6 +2793,7 @@ def reposition(self, row_offset, column_offset, *, nrows=None, ncols=None): .. code-block:: python C = A.reposition(1, 2).new() + """ if nrows is None: nrows = self._nrows @@ -2926,6 +2875,7 @@ def power(self, n, op=semiring.plus_times): C = A.dup() for i in range(1, 4): C << A @ C + """ method_name = "power" if self._nrows != self._ncols: @@ -2970,6 +2920,7 @@ def setdiag(self, values, k=0, *, mask=None, accum=None, **opts): If it is Matrix Mask, then only the diagonal is used as the mask. accum : Monoid or BinaryOp, optional Operator to use to combine existing diagonal values and new values. 
+ """ if (K := maybe_integral(k)) is None: raise TypeError(f"k must be an integer; got bad type: {type(k)}") @@ -3633,6 +3584,7 @@ class MatrixExpression(BaseExpression): ndim = 2 output_type = Matrix _is_transposed = False + __networkx_backend__ = "graphblas" __networkx_plugin__ = "graphblas" def __init__( @@ -3751,7 +3703,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): to_dense = wrapdoc(Matrix.to_dense)(property(automethods.to_dense)) to_dicts = wrapdoc(Matrix.to_dicts)(property(automethods.to_dicts)) to_edgelist = wrapdoc(Matrix.to_edgelist)(property(automethods.to_edgelist)) - to_values = wrapdoc(Matrix.to_values)(property(automethods.to_values)) wait = wrapdoc(Matrix.wait)(property(automethods.wait)) # These raise exceptions __array__ = Matrix.__array__ @@ -3775,6 +3726,7 @@ class MatrixIndexExpr(AmbiguousAssignOrExtract): ndim = 2 output_type = Matrix _is_transposed = False + __networkx_backend__ = "graphblas" __networkx_plugin__ = "graphblas" def __init__(self, parent, resolved_indexes, nrows, ncols): @@ -3852,7 +3804,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): to_dense = wrapdoc(Matrix.to_dense)(property(automethods.to_dense)) to_dicts = wrapdoc(Matrix.to_dicts)(property(automethods.to_dicts)) to_edgelist = wrapdoc(Matrix.to_edgelist)(property(automethods.to_edgelist)) - to_values = wrapdoc(Matrix.to_values)(property(automethods.to_values)) wait = wrapdoc(Matrix.wait)(property(automethods.wait)) # These raise exceptions __array__ = Matrix.__array__ @@ -3876,6 +3827,7 @@ class TransposedMatrix: ndim = 2 _is_scalar = False _is_transposed = True + __networkx_backend__ = "graphblas" __networkx_plugin__ = "graphblas" def __init__(self, matrix): @@ -3927,13 +3879,6 @@ def to_coo(self, dtype=None, *, rows=True, columns=True, values=True, sort=True) ) return cols, rows, vals - @wrapdoc(Matrix.to_values) - def to_values(self, dtype=None, *, rows=True, columns=True, values=True, sort=True): - rows, cols, vals = self._matrix.to_values( - dtype, rows=rows, columns=columns, values=values, sort=sort - ) - return cols, rows, vals - @wrapdoc(Matrix.diag) def diag(self, k=0, dtype=None, *, name=None, **opts): return self._matrix.diag(-k, dtype, name=name, **opts) diff --git a/graphblas/core/operator/base.py b/graphblas/core/operator/base.py index 59482b47d..97b2c9fbd 100644 --- a/graphblas/core/operator/base.py +++ b/graphblas/core/operator/base.py @@ -251,8 +251,7 @@ def __init__(self, parent, name, type_, return_type, gb_obj, gb_name, dtype2=Non def __repr__(self): classname = self.opclass.lower() - if classname.endswith("op"): - classname = classname[:-2] + classname = classname.removesuffix("op") dtype2 = "" if self._type2 is None else f", {self._type2.name}" return f"{classname}.{self.name}[{self.type.name}{dtype2}]" @@ -405,7 +404,8 @@ def _find(cls, funcname): @classmethod def _initialize(cls, include_in_ops=True): - """ + """Initialize operators for this operator type. + include_in_ops determines whether the operators are included in the ``gb.ops`` namespace in addition to the defined module. 
""" diff --git a/graphblas/core/operator/binary.py b/graphblas/core/operator/binary.py index 278ee3183..3ee089fe4 100644 --- a/graphblas/core/operator/binary.py +++ b/graphblas/core/operator/binary.py @@ -663,6 +663,7 @@ def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=Fals Returns ------- BinaryOp or ParameterizedBinaryOp + """ cls._check_supports_udf("register_anonymous") if parameterized: @@ -725,6 +726,7 @@ def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=Fal >>> return x == y or abs(x - y) <= max(rel_tol * max(abs(x), abs(y)), abs_tol) >>> return inner >>> gb.binary.register_new("user_isclose", user_isclose, parameterized=True) + """ cls._check_supports_udf("register_new") module, funcname = cls._remove_nesting(name) diff --git a/graphblas/core/operator/indexunary.py b/graphblas/core/operator/indexunary.py index b6fc74e91..6fdacbcc1 100644 --- a/graphblas/core/operator/indexunary.py +++ b/graphblas/core/operator/indexunary.py @@ -285,6 +285,7 @@ def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=Fals Returns ------- return IndexUnaryOp or ParameterizedIndexUnaryOp + """ cls._check_supports_udf("register_anonymous") if parameterized: @@ -340,6 +341,7 @@ def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=Fal >>> gb.indexunary.register_new("row_mod", lambda x, i, j, thunk: i % max(thunk, 2)) >>> dir(gb.indexunary) [..., 'row_mod', ...] + """ cls._check_supports_udf("register_new") module, funcname = cls._remove_nesting(name) diff --git a/graphblas/core/operator/monoid.py b/graphblas/core/operator/monoid.py index 21d2b7cac..e3f218a90 100644 --- a/graphblas/core/operator/monoid.py +++ b/graphblas/core/operator/monoid.py @@ -270,6 +270,7 @@ def register_anonymous(cls, binaryop, identity, name=None, *, is_idempotent=Fals Returns ------- Monoid or ParameterizedMonoid + """ if type(binaryop) is ParameterizedBinaryOp: return ParameterizedMonoid( @@ -309,6 +310,7 @@ def register_new(cls, name, binaryop, identity, *, is_idempotent=False, lazy=Fal >>> gb.core.operator.Monoid.register_new("max_zero", gb.binary.max_zero, 0) >>> dir(gb.monoid) [..., 'max_zero', ...] + """ module, funcname = cls._remove_nesting(name) if lazy: diff --git a/graphblas/core/operator/select.py b/graphblas/core/operator/select.py index 4dd65ef16..6de4fa89a 100644 --- a/graphblas/core/operator/select.py +++ b/graphblas/core/operator/select.py @@ -208,6 +208,7 @@ def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=Fals Returns ------- SelectOp or ParameterizedSelectOp + """ cls._check_supports_udf("register_anonymous") if parameterized: @@ -264,6 +265,7 @@ def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=Fal >>> gb.select.register_new("upper_left_triangle", lambda x, i, j, thunk: i + j <= thunk) >>> dir(gb.select) [..., 'upper_left_triangle', ...] 
+ """ cls._check_supports_udf("register_new") iop = IndexUnaryOp.register_new( diff --git a/graphblas/core/operator/semiring.py b/graphblas/core/operator/semiring.py index d367461f6..a8d18f1bf 100644 --- a/graphblas/core/operator/semiring.py +++ b/graphblas/core/operator/semiring.py @@ -287,6 +287,7 @@ def register_anonymous(cls, monoid, binaryop, name=None): Returns ------- Semiring or ParameterizedSemiring + """ if type(monoid) is ParameterizedMonoid or type(binaryop) is ParameterizedBinaryOp: return ParameterizedSemiring(name, monoid, binaryop, anonymous=True) @@ -318,6 +319,7 @@ def register_new(cls, name, monoid, binaryop, *, lazy=False): >>> gb.core.operator.Semiring.register_new("max_max", gb.monoid.max, gb.binary.max) >>> dir(gb.semiring) [..., 'max_max', ...] + """ module, funcname = cls._remove_nesting(name) if lazy: diff --git a/graphblas/core/operator/unary.py b/graphblas/core/operator/unary.py index 7484f74d9..26e0ca61c 100644 --- a/graphblas/core/operator/unary.py +++ b/graphblas/core/operator/unary.py @@ -304,6 +304,7 @@ def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=Fals Returns ------- UnaryOp or ParameterizedUnaryOp + """ cls._check_supports_udf("register_anonymous") if parameterized: @@ -349,6 +350,7 @@ def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=Fal >>> gb.core.operator.UnaryOp.register_new("plus_one", lambda x: x + 1) >>> dir(gb.unary) [..., 'plus_one', ...] + """ cls._check_supports_udf("register_new") module, funcname = cls._remove_nesting(name) diff --git a/graphblas/core/operator/utils.py b/graphblas/core/operator/utils.py index 543df793e..1442a9b5e 100644 --- a/graphblas/core/operator/utils.py +++ b/graphblas/core/operator/utils.py @@ -170,6 +170,7 @@ def get_semiring(monoid, binaryop, name=None): semiring.register_anonymous semiring.register_new semiring.from_string + """ monoid, opclass = find_opclass(monoid) switched = False diff --git a/graphblas/core/scalar.py b/graphblas/core/scalar.py index 9cdf3043e..25aef5743 100644 --- a/graphblas/core/scalar.py +++ b/graphblas/core/scalar.py @@ -53,6 +53,7 @@ class Scalar(BaseType): with a proper GrB_Scalar object. name : str, optional Name to give the Scalar. This will be displayed in the ``__repr__``. + """ __slots__ = "_empty", "_is_cscalar" @@ -164,7 +165,7 @@ def __index__(self): return self.__int__ raise AttributeError("Scalar object only has `__index__` for integral dtypes") - def __array__(self, dtype=None): + def __array__(self, dtype=None, *, copy=None): if dtype is None: dtype = self.dtype.np_type return np.array(self.value, dtype=dtype) @@ -196,6 +197,7 @@ def isequal(self, other, *, check_dtype=False): See Also -------- :meth:`isclose` : For equality check of floating point dtypes + """ if type(other) is not Scalar: if other is None: @@ -245,6 +247,7 @@ def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False): Returns ------- bool + """ if type(other) is not Scalar: if other is None: @@ -428,6 +431,7 @@ def dup(self, dtype=None, *, clear=False, is_cscalar=None, name=None): Returns ------- Scalar + """ if is_cscalar is None: is_cscalar = self._is_cscalar @@ -473,6 +477,7 @@ def wait(self, how="materialize"): Use wait to force completion of the Scalar. Has no effect in `blocking mode <../user_guide/init.html#graphblas-modes>`__. 
+ """ how = how.lower() if how == "materialize": @@ -496,6 +501,7 @@ def get(self, default=None): Returns ------- Python scalar + """ return default if self._is_empty else self.value @@ -519,6 +525,7 @@ def from_value(cls, value, dtype=None, *, is_cscalar=False, name=None): Returns ------- Scalar + """ typ = output_type(value) if dtype is None: @@ -628,6 +635,7 @@ def ewise_add(self, other, op=monoid.plus): # Functional syntax c << monoid.max(a | b) + """ return self._ewise_add(other, op) @@ -698,6 +706,7 @@ def ewise_mult(self, other, op=binary.times): # Functional syntax c << binary.gt(a & b) + """ return self._ewise_mult(other, op) @@ -772,6 +781,7 @@ def ewise_union(self, other, op, left_default, right_default): # Functional syntax c << binary.div(a | b, left_default=1, right_default=1) + """ return self._ewise_union(other, op, left_default, right_default) @@ -917,6 +927,7 @@ def apply(self, op, right=None, *, left=None): # Functional syntax b << op.abs(a) + """ expr = self._as_vector().apply(op, right, left=left) return ScalarExpression( diff --git a/graphblas/core/ss/__init__.py b/graphblas/core/ss/__init__.py index c2e83ddcc..10a6fed94 100644 --- a/graphblas/core/ss/__init__.py +++ b/graphblas/core/ss/__init__.py @@ -1,3 +1,5 @@ import suitesparse_graphblas as _ssgb -_IS_SSGB7 = _ssgb.__version__.split(".", 1)[0] == "7" +(version_major, version_minor, version_bug) = map(int, _ssgb.__version__.split(".")[:3]) + +_IS_SSGB7 = version_major == 7 diff --git a/graphblas/core/ss/binary.py b/graphblas/core/ss/binary.py index 6965aeaf1..d53608818 100644 --- a/graphblas/core/ss/binary.py +++ b/graphblas/core/ss/binary.py @@ -71,6 +71,7 @@ def register_new(name, jit_c_definition, left_type, right_type, ret_type): gb.binary.register_new gb.binary.register_anonymous gb.unary.ss.register_new + """ if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( diff --git a/graphblas/core/ss/config.py b/graphblas/core/ss/config.py index 20cf318e8..70a7dd196 100644 --- a/graphblas/core/ss/config.py +++ b/graphblas/core/ss/config.py @@ -99,7 +99,7 @@ def __getitem__(self, key): return {reverse_bitwise[val]} rv = set() for k, v in self._bitwise[key].items(): - if isinstance(k, str) and val & v and bin(v).count("1") == 1: + if isinstance(k, str) and val & v and (v).bit_count() == 1: rv.add(k) return rv if is_bool: diff --git a/graphblas/core/ss/descriptor.py b/graphblas/core/ss/descriptor.py index 52c43b95d..781661b7b 100644 --- a/graphblas/core/ss/descriptor.py +++ b/graphblas/core/ss/descriptor.py @@ -157,6 +157,7 @@ def get_descriptor(**opts): Returns ------- Descriptor or None + """ if not opts or all(val is False or val is None for val in opts.values()): return diff --git a/graphblas/core/ss/indexunary.py b/graphblas/core/ss/indexunary.py index d5f709526..b60837acf 100644 --- a/graphblas/core/ss/indexunary.py +++ b/graphblas/core/ss/indexunary.py @@ -70,6 +70,7 @@ def register_new(name, jit_c_definition, input_type, thunk_type, ret_type): gb.indexunary.register_new gb.indexunary.register_anonymous gb.select.ss.register_new + """ if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( diff --git a/graphblas/core/ss/matrix.py b/graphblas/core/ss/matrix.py index 64914cf02..509c56113 100644 --- a/graphblas/core/ss/matrix.py +++ b/graphblas/core/ss/matrix.py @@ -58,7 +58,7 @@ def head(matrix, n=10, dtype=None, *, sort=False): dtype = matrix.dtype else: dtype = lookup_dtype(dtype) - rows, cols, vals = zip(*itertools.islice(matrix.ss.iteritems(), n)) + rows, cols, vals 
= zip(*itertools.islice(matrix.ss.iteritems(), n), strict=True) return np.array(rows, np.uint64), np.array(cols, np.uint64), np.array(vals, dtype.np_type) @@ -250,8 +250,7 @@ def orientation(self): return "rowwise" def build_diag(self, vector, k=0, **opts): - """ - GxB_Matrix_diag. + """GxB_Matrix_diag. Construct a diagonal Matrix from the given vector. Existing entries in the Matrix are discarded. @@ -279,8 +278,7 @@ def build_diag(self, vector, k=0, **opts): ) def split(self, chunks, *, name=None, **opts): - """ - GxB_Matrix_split. + """GxB_Matrix_split. Split a Matrix into a 2D array of sub-matrices according to ``chunks``. @@ -302,6 +300,7 @@ def split(self, chunks, *, name=None, **opts): -------- Matrix.ss.concat graphblas.ss.concat + """ from ..matrix import Matrix @@ -361,8 +360,7 @@ def _concat(self, tiles, m, n, opts): ) def concat(self, tiles, **opts): - """ - GxB_Matrix_concat. + """GxB_Matrix_concat. Concatenate a 2D list of Matrix objects into the current Matrix. Any existing values in the current Matrix will be discarded. @@ -376,13 +374,13 @@ def concat(self, tiles, **opts): -------- Matrix.ss.split graphblas.ss.concat + """ tiles, m, n, is_matrix = _concat_mn(tiles, is_matrix=True) self._concat(tiles, m, n, opts) def build_scalar(self, rows, columns, value): - """ - GxB_Matrix_build_Scalar. + """GxB_Matrix_build_Scalar. Like ``build``, but uses a scalar for all the values. @@ -390,6 +388,7 @@ def build_scalar(self, rows, columns, value): -------- Matrix.build Matrix.from_coo + """ rows = ints_to_numpy_buffer(rows, np.uint64, name="row indices") columns = ints_to_numpy_buffer(columns, np.uint64, name="column indices") @@ -536,8 +535,7 @@ def iteritems(self, seek=0): lib.GxB_Iterator_free(it_ptr) def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **opts): - """ - GxB_Matrix_export_xxx. + """GxB_Matrix_export_xxx. Parameters ---------- @@ -718,6 +716,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** >>> pieces = A.ss.export() >>> A2 = Matrix.ss.import_any(**pieces) + """ return self._export( format, @@ -729,8 +728,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** ) def unpack(self, format=None, *, sort=False, raw=False, **opts): - """ - GxB_Matrix_unpack_xxx. + """GxB_Matrix_unpack_xxx. ``unpack`` is like ``export``, except that the Matrix remains valid but empty. ``pack_*`` methods are the opposite of ``unpack``. @@ -1179,8 +1177,7 @@ def import_csr( name=None, **opts, ): - """ - GxB_Matrix_import_CSR. + """GxB_Matrix_import_CSR. Create a new Matrix from standard CSR format. @@ -1220,6 +1217,7 @@ def import_csr( Returns ------- Matrix + """ return cls._import_csr( nrows=nrows, @@ -1256,8 +1254,7 @@ def pack_csr( name=None, **opts, ): - """ - GxB_Matrix_pack_CSR. + """GxB_Matrix_pack_CSR. ``pack_csr`` is like ``import_csr`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("csr")`` @@ -1369,8 +1366,7 @@ def import_csc( name=None, **opts, ): - """ - GxB_Matrix_import_CSC. + """GxB_Matrix_import_CSC. Create a new Matrix from standard CSC format. @@ -1410,6 +1406,7 @@ def import_csc( Returns ------- Matrix + """ return cls._import_csc( nrows=nrows, @@ -1446,8 +1443,7 @@ def pack_csc( name=None, **opts, ): - """ - GxB_Matrix_pack_CSC. + """GxB_Matrix_pack_CSC. ``pack_csc`` is like ``import_csc`` except it "packs" data into an existing Matrix. 
This is the opposite of ``unpack("csc")`` @@ -1561,8 +1557,7 @@ def import_hypercsr( name=None, **opts, ): - """ - GxB_Matrix_import_HyperCSR. + """GxB_Matrix_import_HyperCSR. Create a new Matrix from standard HyperCSR format. @@ -1606,6 +1601,7 @@ def import_hypercsr( Returns ------- Matrix + """ return cls._import_hypercsr( nrows=nrows, @@ -1646,8 +1642,7 @@ def pack_hypercsr( name=None, **opts, ): - """ - GxB_Matrix_pack_HyperCSR. + """GxB_Matrix_pack_HyperCSR. ``pack_hypercsr`` is like ``import_hypercsr`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("hypercsr")`` @@ -1785,8 +1780,7 @@ def import_hypercsc( name=None, **opts, ): - """ - GxB_Matrix_import_HyperCSC. + """GxB_Matrix_import_HyperCSC. Create a new Matrix from standard HyperCSC format. @@ -1830,6 +1824,7 @@ def import_hypercsc( Returns ------- Matrix + """ return cls._import_hypercsc( nrows=nrows, @@ -1870,8 +1865,7 @@ def pack_hypercsc( name=None, **opts, ): - """ - GxB_Matrix_pack_HyperCSC. + """GxB_Matrix_pack_HyperCSC. ``pack_hypercsc`` is like ``import_hypercsc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("hypercsc")`` @@ -2006,8 +2000,7 @@ def import_bitmapr( name=None, **opts, ): - """ - GxB_Matrix_import_BitmapR. + """GxB_Matrix_import_BitmapR. Create a new Matrix from values and bitmap (as mask) arrays. @@ -2053,6 +2046,7 @@ def import_bitmapr( Returns ------- Matrix + """ return cls._import_bitmapr( bitmap=bitmap, @@ -2087,8 +2081,7 @@ def pack_bitmapr( name=None, **opts, ): - """ - GxB_Matrix_pack_BitmapR. + """GxB_Matrix_pack_BitmapR. ``pack_bitmapr`` is like ``import_bitmapr`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("bitmapr")`` @@ -2199,8 +2192,7 @@ def import_bitmapc( name=None, **opts, ): - """ - GxB_Matrix_import_BitmapC. + """GxB_Matrix_import_BitmapC. Create a new Matrix from values and bitmap (as mask) arrays. @@ -2246,6 +2238,7 @@ def import_bitmapc( Returns ------- Matrix + """ return cls._import_bitmapc( bitmap=bitmap, @@ -2280,8 +2273,7 @@ def pack_bitmapc( name=None, **opts, ): - """ - GxB_Matrix_pack_BitmapC. + """GxB_Matrix_pack_BitmapC. ``pack_bitmapc`` is like ``import_bitmapc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("bitmapc")`` @@ -2390,8 +2382,7 @@ def import_fullr( name=None, **opts, ): - """ - GxB_Matrix_import_FullR. + """GxB_Matrix_import_FullR. Create a new Matrix from values. @@ -2432,6 +2423,7 @@ def import_fullr( Returns ------- Matrix + """ return cls._import_fullr( values=values, @@ -2462,8 +2454,7 @@ def pack_fullr( name=None, **opts, ): - """ - GxB_Matrix_pack_FullR. + """GxB_Matrix_pack_FullR. ``pack_fullr`` is like ``import_fullr`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("fullr")`` @@ -2549,8 +2540,7 @@ def import_fullc( name=None, **opts, ): - """ - GxB_Matrix_import_FullC. + """GxB_Matrix_import_FullC. Create a new Matrix from values. @@ -2591,6 +2581,7 @@ def import_fullc( Returns ------- Matrix + """ return cls._import_fullc( values=values, @@ -2621,8 +2612,7 @@ def pack_fullc( name=None, **opts, ): - """ - GxB_Matrix_pack_FullC. + """GxB_Matrix_pack_FullC. ``pack_fullc`` is like ``import_fullc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("fullc")`` @@ -2711,8 +2701,7 @@ def import_coo( name=None, **opts, ): - """ - GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar. + """GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar. 
Create a new Matrix from indices and values in coordinate format. @@ -2746,6 +2735,7 @@ def import_coo( Returns ------- Matrix + """ return cls._import_coo( rows=rows, @@ -2784,8 +2774,7 @@ def pack_coo( name=None, **opts, ): - """ - GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar. + """GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar. ``pack_coo`` is like ``import_coo`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("coo")`` @@ -2897,8 +2886,7 @@ def import_coor( name=None, **opts, ): - """ - GxB_Matrix_import_CSR. + """GxB_Matrix_import_CSR. Create a new Matrix from indices and values in coordinate format. Rows must be sorted. @@ -2942,6 +2930,7 @@ def import_coor( Returns ------- Matrix + """ return cls._import_coor( rows=rows, @@ -2980,8 +2969,7 @@ def pack_coor( name=None, **opts, ): - """ - GxB_Matrix_pack_CSR. + """GxB_Matrix_pack_CSR. ``pack_coor`` is like ``import_coor`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("coor")`` @@ -3066,8 +3054,7 @@ def import_cooc( name=None, **opts, ): - """ - GxB_Matrix_import_CSC. + """GxB_Matrix_import_CSC. Create a new Matrix from indices and values in coordinate format. Rows must be sorted. @@ -3111,6 +3098,7 @@ def import_cooc( Returns ------- Matrix + """ return cls._import_cooc( rows=rows, @@ -3149,8 +3137,7 @@ def pack_cooc( name=None, **opts, ): - """ - GxB_Matrix_pack_CSC. + """GxB_Matrix_pack_CSC. ``pack_cooc`` is like ``import_cooc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("cooc")`` @@ -3251,8 +3238,7 @@ def import_any( nvals=None, # optional **opts, ): - """ - GxB_Matrix_import_xxx. + """GxB_Matrix_import_xxx. Dispatch to appropriate import method inferred from inputs. See the other import functions and ``Matrix.ss.export`` for details. @@ -3280,6 +3266,7 @@ def import_any( >>> pieces = A.ss.export() >>> A2 = Matrix.ss.import_any(**pieces) + """ return cls._import_any( values=values, @@ -3349,8 +3336,7 @@ def pack_any( name=None, **opts, ): - """ - GxB_Matrix_pack_xxx. + """GxB_Matrix_pack_xxx. ``pack_any`` is like ``import_any`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack()`` @@ -3664,8 +3650,10 @@ def _import_any( def unpack_hyperhash(self, *, compute=False, name=None, **opts): """Unpacks the hyper_hash of a hypersparse matrix if possible. - Will return None if the matrix is not hypersparse or if the hash is not computed. - Use ``compute=True`` to compute the hyper_hash if the input is hypersparse. + Will return None if the matrix is not hypersparse, if the hash is not computed, + or if the hash is not needed. Use ``compute=True`` to try to compute the hyper_hash + if the input is hypersparse. The hyper_hash is optional in SuiteSparse:GraphBLAS, + so it may not be computed even with ``compute=True``. Use ``pack_hyperhash`` to move a hyper_hash matrix that was previously unpacked back into a matrix. @@ -3707,6 +3695,7 @@ def scan(self, op=monoid.plus, order="rowwise", *, name=None, **opts): Returns ------- Matrix + """ order = get_order(order) parent = self._parent @@ -3735,6 +3724,7 @@ def flatten(self, order="rowwise", *, name=None, **opts): See Also -------- Vector.ss.reshape : copy a Vector to a Matrix. + """ rv = self.reshape(-1, 1, order=order, name=name, **opts) return rv._as_vector() @@ -3771,6 +3761,7 @@ def reshape(self, nrows, ncols=None, order="rowwise", *, inplace=False, name=Non -------- Matrix.ss.flatten : flatten a Matrix into a Vector. 
Vector.ss.reshape : copy a Vector to a Matrix. + """ from ..matrix import Matrix @@ -3825,6 +3816,7 @@ def selectk(self, how, k, order="rowwise", *, name=None): The number of elements to choose from each row **THIS API IS EXPERIMENTAL AND MAY CHANGE** + """ # TODO: largest, smallest, random_weighted order = get_order(order) @@ -4021,6 +4013,7 @@ def sort(self, op=binary.lt, order="rowwise", *, values=True, permutation=True, See Also -------- Matrix.ss.compactify + """ from ..matrix import Matrix @@ -4082,11 +4075,27 @@ def serialize(self, compression="default", level=None, **opts): This method is intended to support all serialization options from SuiteSparse:GraphBLAS. *Warning*: Behavior of serializing UDTs is experimental and may change in a future release. + """ desc = get_descriptor(compression=compression, compression_level=level, **opts) blob_handle = ffi_new("void**") blob_size_handle = ffi_new("GrB_Index*") parent = self._parent + if parent.dtype._is_udt and hasattr(lib, "GrB_Type_get_String"): + # Get the name from the dtype and set it to the name of the matrix so we can + # recreate the UDT. This is a bit hacky and we should restore the original name. + # First get the size of name. + dtype_size = ffi_new("size_t*") + status = lib.GrB_Type_get_SIZE(parent.dtype.gb_obj[0], dtype_size, lib.GrB_NAME) + check_status_carg(status, "Type", parent.dtype.gb_obj[0]) + # Then get the name + dtype_char = ffi_new(f"char[{dtype_size[0]}]") + status = lib.GrB_Type_get_String(parent.dtype.gb_obj[0], dtype_char, lib.GrB_NAME) + check_status_carg(status, "Type", parent.dtype.gb_obj[0]) + # Then set the name + status = lib.GrB_Matrix_set_String(parent._carg, dtype_char, lib.GrB_NAME) + check_status_carg(status, "Matrix", parent._carg) + check_status( lib.GxB_Matrix_serialize( blob_handle, @@ -4121,14 +4130,15 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts): nthreads : int, optional The maximum number of threads to use when deserializing. None, 0 or negative nthreads means to use the default number of threads. + """ if isinstance(data, np.ndarray): data = ints_to_numpy_buffer(data, np.uint8) else: data = np.frombuffer(data, np.uint8) data_obj = ffi.from_buffer("void*", data) - # Get the dtype name first if dtype is None: + # Get the dtype name first (for non-UDTs) cname = ffi_new(f"char[{lib.GxB_MAX_NAME_LEN}]") info = lib.GxB_deserialize_type_name( cname, @@ -4138,6 +4148,22 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts): if info != lib.GrB_SUCCESS: raise _error_code_lookup[info]("Matrix deserialize failed to get the dtype name") dtype_name = b"".join(itertools.takewhile(b"\x00".__ne__, cname)).decode() + if not dtype_name and hasattr(lib, "GxB_Serialized_get_String"): + # Handle UDTs. 
First get the size of name + dtype_size = ffi_new("size_t*") + info = lib.GxB_Serialized_get_SIZE(data_obj, dtype_size, lib.GrB_NAME, data.nbytes) + if info != lib.GrB_SUCCESS: + raise _error_code_lookup[info]( + "Matrix deserialize failed to get the size of name" + ) + # Then get the name + dtype_char = ffi_new(f"char[{dtype_size[0]}]") + info = lib.GxB_Serialized_get_String( + data_obj, dtype_char, lib.GrB_NAME, data.nbytes + ) + if info != lib.GrB_SUCCESS: + raise _error_code_lookup[info]("Matrix deserialize failed to get the name") + dtype_name = ffi.string(dtype_char).decode() dtype = _string_to_dtype(dtype_name) else: dtype = lookup_dtype(dtype) diff --git a/graphblas/core/ss/select.py b/graphblas/core/ss/select.py index ff12f80fa..3ba135eee 100644 --- a/graphblas/core/ss/select.py +++ b/graphblas/core/ss/select.py @@ -66,6 +66,7 @@ def register_new(name, jit_c_definition, input_type, thunk_type): gb.select.register_new gb.select.register_anonymous gb.indexunary.ss.register_new + """ if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( diff --git a/graphblas/core/ss/unary.py b/graphblas/core/ss/unary.py index 5a5c63632..0b7ced3c8 100644 --- a/graphblas/core/ss/unary.py +++ b/graphblas/core/ss/unary.py @@ -63,6 +63,7 @@ def register_new(name, jit_c_definition, input_type, ret_type): gb.unary.register_new gb.unary.register_anonymous gb.binary.ss.register_new + """ if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( diff --git a/graphblas/core/ss/vector.py b/graphblas/core/ss/vector.py index a8bff4ee5..fdde7eb92 100644 --- a/graphblas/core/ss/vector.py +++ b/graphblas/core/ss/vector.py @@ -43,7 +43,7 @@ def head(vector, n=10, dtype=None, *, sort=False): dtype = vector.dtype else: dtype = lookup_dtype(dtype) - indices, vals = zip(*itertools.islice(vector.ss.iteritems(), n)) + indices, vals = zip(*itertools.islice(vector.ss.iteritems(), n), strict=True) return np.array(indices, np.uint64), np.array(vals, dtype.np_type) @@ -145,8 +145,7 @@ def format(self): return format def build_diag(self, matrix, k=0, **opts): - """ - GxB_Vector_diag. + """GxB_Vector_diag. Extract a diagonal from a Matrix or TransposedMatrix into a Vector. Existing entries in the Vector are discarded. @@ -183,8 +182,7 @@ def build_diag(self, matrix, k=0, **opts): ) def split(self, chunks, *, name=None, **opts): - """ - GxB_Matrix_split. + """GxB_Matrix_split. Split a Vector into a 1D array of sub-vectors according to ``chunks``. @@ -202,6 +200,7 @@ def split(self, chunks, *, name=None, **opts): -------- Vector.ss.concat graphblas.ss.concat + """ from ..vector import Vector @@ -249,8 +248,7 @@ def _concat(self, tiles, m, opts): ) def concat(self, tiles, **opts): - """ - GxB_Matrix_concat. + """GxB_Matrix_concat. Concatenate a 1D list of Vector objects into the current Vector. Any existing values in the current Vector will be discarded. @@ -262,13 +260,13 @@ def concat(self, tiles, **opts): -------- Vector.ss.split graphblas.ss.concat + """ tiles, m, n, is_matrix = _concat_mn(tiles, is_matrix=False) self._concat(tiles, m, opts) def build_scalar(self, indices, value): - """ - GxB_Vector_build_Scalar. + """GxB_Vector_build_Scalar. Like ``build``, but uses a scalar for all the values. 
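The serialize/deserialize hunks in this diff stash a stringified numpy dtype in the ``GrB_NAME`` field so a UDT can be reconstructed on load. A rough pure-Python sketch of that name round trip, mirroring ``_dtype_to_string``/``_string_to_dtype`` (the helper names here are illustrative, not the C API):

import ast

import numpy as np

def dtype_to_name(np_dtype):
    # Store a repr that literal_eval can parse back
    return str(np_dtype)

def name_to_dtype(name):
    # Recover the structured dtype from the stored string
    return np.dtype(ast.literal_eval(name))

udt = np.dtype([("x", "<f8"), ("y", "<i8")])
assert name_to_dtype(dtype_to_name(udt)) == udt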
@@ -276,6 +274,7 @@ -------- Vector.build Vector.from_coo + """ indices = ints_to_numpy_buffer(indices, np.uint64, name="indices") scalar = _as_scalar(value, self._parent.dtype, is_cscalar=False) # pragma: is_grbscalar @@ -410,8 +409,7 @@ def iteritems(self, seek=0): lib.GxB_Iterator_free(it_ptr) def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **opts): - """ - GxB_Vextor_export_xxx. + """GxB_Vector_export_xxx. Parameters ---------- @@ -468,6 +466,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** >>> pieces = v.ss.export() >>> v2 = Vector.ss.import_any(**pieces) + """ return self._export( format=format, @@ -479,8 +478,7 @@ ) def unpack(self, format=None, *, sort=False, raw=False, **opts): - """ - GxB_Vector_unpack_xxx. + """GxB_Vector_unpack_xxx. ``unpack`` is like ``export``, except that the Vector remains valid but empty. ``pack_*`` methods are the opposite of ``unpack``. @@ -655,8 +653,7 @@ def import_any( nvals=None, # optional **opts, ): - """ - GxB_Vector_import_xxx. + """GxB_Vector_import_xxx. Dispatch to appropriate import method inferred from inputs. See the other import functions and ``Vector.ss.export`` for details. @@ -679,6 +676,7 @@ >>> pieces = v.ss.export() >>> v2 = Vector.ss.import_any(**pieces) + """ return cls._import_any( values=values, @@ -722,8 +720,7 @@ def pack_any( name=None, **opts, ): - """ - GxB_Vector_pack_xxx. + """GxB_Vector_pack_xxx. ``pack_any`` is like ``import_any`` except it "packs" data into an existing Vector. This is the opposite of ``unpack()`` @@ -844,8 +841,7 @@ def import_sparse( name=None, **opts, ): - """ - GxB_Vector_import_CSC. + """GxB_Vector_import_CSC. Create a new Vector from sparse input. @@ -886,6 +882,7 @@ Returns ------- Vector + """ return cls._import_sparse( size=size, @@ -920,8 +917,7 @@ def pack_sparse( name=None, **opts, ): - """ - GxB_Vector_pack_CSC. + """GxB_Vector_pack_CSC. ``pack_sparse`` is like ``import_sparse`` except it "packs" data into an existing Vector. This is the opposite of ``unpack("sparse")`` @@ -1029,8 +1025,7 @@ def import_bitmap( name=None, **opts, ): - """ - GxB_Vector_import_Bitmap. + """GxB_Vector_import_Bitmap. Create a new Vector from values and bitmap (as mask) arrays. @@ -1071,6 +1066,7 @@ Returns ------- Vector + """ return cls._import_bitmap( bitmap=bitmap, @@ -1103,8 +1099,7 @@ def pack_bitmap( name=None, **opts, ): - """ - GxB_Vector_pack_Bitmap. + """GxB_Vector_pack_Bitmap. ``pack_bitmap`` is like ``import_bitmap`` except it "packs" data into an existing Vector. This is the opposite of ``unpack("bitmap")`` @@ -1214,8 +1209,7 @@ def import_full( name=None, **opts, ): - """ - GxB_Vector_import_Full. + """GxB_Vector_import_Full. Create a new Vector from values. @@ -1252,6 +1246,7 @@ Returns ------- Vector + """ return cls._import_full( values=values, @@ -1280,8 +1275,7 @@ def pack_full( name=None, **opts, ): - """ - GxB_Vector_pack_Full. + """GxB_Vector_pack_Full. ``pack_full`` is like ``import_full`` except it "packs" data into an existing Vector. 
This is the opposite of ``unpack("full")`` @@ -1371,6 +1365,7 @@ def scan(self, op=monoid.plus, *, name=None, **opts): Returns ------- Scalar + """ return prefix_scan(self._parent, op, name=name, within="scan", **opts) @@ -1401,6 +1396,7 @@ def reshape(self, nrows, ncols=None, order="rowwise", *, name=None, **opts): See Also -------- Matrix.ss.flatten : flatten a Matrix into a Vector. + """ return self._parent._as_matrix().ss.reshape(nrows, ncols, order, name=name, **opts) @@ -1420,6 +1416,7 @@ def selectk(self, how, k, *, name=None): The number of elements to choose **THIS API IS EXPERIMENTAL AND MAY CHANGE** + """ how = how.lower() if k < 0: @@ -1588,6 +1585,7 @@ def sort(self, op=binary.lt, *, values=True, permutation=True, **opts): See Also -------- Vector.ss.compactify + """ from ..vector import Vector @@ -1648,11 +1646,27 @@ def serialize(self, compression="default", level=None, **opts): This method is intended to support all serialization options from SuiteSparse:GraphBLAS. *Warning*: Behavior of serializing UDTs is experimental and may change in a future release. + """ desc = get_descriptor(compression=compression, compression_level=level, **opts) blob_handle = ffi_new("void**") blob_size_handle = ffi_new("GrB_Index*") parent = self._parent + if parent.dtype._is_udt and hasattr(lib, "GrB_Type_get_String"): + # Get the name from the dtype and set it to the name of the vector so we can + # recreate the UDT. This is a bit hacky and we should restore the original name. + # First get the size of name. + dtype_size = ffi_new("size_t*") + status = lib.GrB_Type_get_SIZE(parent.dtype.gb_obj[0], dtype_size, lib.GrB_NAME) + check_status_carg(status, "Type", parent.dtype.gb_obj[0]) + # Then get the name + dtype_char = ffi_new(f"char[{dtype_size[0]}]") + status = lib.GrB_Type_get_String(parent.dtype.gb_obj[0], dtype_char, lib.GrB_NAME) + check_status_carg(status, "Type", parent.dtype.gb_obj[0]) + # Then set the name + status = lib.GrB_Vector_set_String(parent._carg, dtype_char, lib.GrB_NAME) + check_status_carg(status, "Vector", parent._carg) + check_status( lib.GxB_Vector_serialize( blob_handle, @@ -1687,6 +1701,7 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts): nthreads : int, optional The maximum number of threads to use when deserializing. None, 0 or negative nthreads means to use the default number of threads. + """ if isinstance(data, np.ndarray): data = ints_to_numpy_buffer(data, np.uint8) @@ -1694,7 +1709,7 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts): data = np.frombuffer(data, np.uint8) data_obj = ffi.from_buffer("void*", data) if dtype is None: - # Get the dtype name first + # Get the dtype name first (for non-UDTs) cname = ffi_new(f"char[{lib.GxB_MAX_NAME_LEN}]") info = lib.GxB_deserialize_type_name( cname, @@ -1704,6 +1719,22 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts): if info != lib.GrB_SUCCESS: raise _error_code_lookup[info]("Vector deserialize failed to get the dtype name") dtype_name = b"".join(itertools.takewhile(b"\x00".__ne__, cname)).decode() + if not dtype_name and hasattr(lib, "GxB_Serialized_get_String"): + # Handle UDTs. 
First get the size of name + dtype_size = ffi_new("size_t*") + info = lib.GxB_Serialized_get_SIZE(data_obj, dtype_size, lib.GrB_NAME, data.nbytes) + if info != lib.GrB_SUCCESS: + raise _error_code_lookup[info]( + "Vector deserialize failed to get the size of name" + ) + # Then get the name + dtype_char = ffi_new(f"char[{dtype_size[0]}]") + info = lib.GxB_Serialized_get_String( + data_obj, dtype_char, lib.GrB_NAME, data.nbytes + ) + if info != lib.GrB_SUCCESS: + raise _error_code_lookup[info]("Vector deserialize failed to get the name") + dtype_name = ffi.string(dtype_char).decode() dtype = _string_to_dtype(dtype_name) else: dtype = lookup_dtype(dtype) diff --git a/graphblas/core/utils.py b/graphblas/core/utils.py index 42fcf0685..e9a29b3a9 100644 --- a/graphblas/core/utils.py +++ b/graphblas/core/utils.py @@ -5,6 +5,8 @@ from ..dtypes import _INDEX, lookup_dtype from . import ffi, lib +_NP2 = np.__version__.startswith("2.") + def libget(name): """Helper to get items from GraphBLAS which might be GrB or GxB.""" @@ -43,7 +45,7 @@ def inner(func_wo_doc): object: object, type: type, } -_output_types.update((k, k) for k in np.cast) +_output_types.update((k, k) for k in set(np.sctypeDict.values())) def output_type(val): @@ -60,7 +62,8 @@ def ints_to_numpy_buffer(array, dtype, *, name="array", copy=False, ownable=Fals and not np.issubdtype(array.dtype, np.bool_) ): raise ValueError(f"{name} must be integers, not {array.dtype.name}") - array = np.array(array, dtype, copy=copy, order=order) + # https://numpy.org/doc/stable/release/2.0.0-notes.html#new-copy-keyword-meaning-for-array-and-asarray-constructors + array = np.array(array, dtype, copy=copy or _NP2 and None, order=order) if ownable and (not array.flags.owndata or not array.flags.writeable): array = array.copy(order) return array @@ -86,13 +89,18 @@ def values_to_numpy_buffer( ------- np.ndarray dtype + """ if dtype is not None: dtype = lookup_dtype(dtype) - array = np.array(array, _get_subdtype(dtype.np_type), copy=copy, order=order) + # https://numpy.org/doc/stable/release/2.0.0-notes.html#new-copy-keyword-meaning-for-array-and-asarray-constructors + array = np.array( + array, _get_subdtype(dtype.np_type), copy=copy or _NP2 and None, order=order + ) else: is_input_np = isinstance(array, np.ndarray) - array = np.array(array, copy=copy, order=order) + # https://numpy.org/doc/stable/release/2.0.0-notes.html#new-copy-keyword-meaning-for-array-and-asarray-constructors + array = np.array(array, copy=copy or _NP2 and None, order=order) if array.dtype.hasobject: raise ValueError("object dtype for values is not allowed") if not is_input_np and array.dtype == np.int32: # pragma: no cover @@ -183,6 +191,7 @@ def normalize_chunks(chunks, shape): [(10,), (5, 15)] >>> normalize_chunks((5, (5, None)), shape) [(5, 5), (5, 15)] + """ if isinstance(chunks, (list, tuple)): pass @@ -200,7 +209,7 @@ def normalize_chunks(chunks, shape): f"chunks argument must be of length {len(shape)} (one for each dimension of a {typ})" ) chunksizes = [] - for size, chunk in zip(shape, chunks): + for size, chunk in zip(shape, chunks, strict=True): if chunk is None: cur_chunks = [size] elif (c := maybe_integral(chunk)) is not None: @@ -310,7 +319,10 @@ def __init__(self, array=None, dtype=_INDEX, *, size=None, name=None): if size is not None: self.array = np.empty(size, dtype=dtype.np_type) else: - self.array = np.array(array, dtype=_get_subdtype(dtype.np_type), copy=False, order="C") + # 
https://numpy.org/doc/stable/release/2.0.0-notes.html#new-copy-keyword-meaning-for-array-and-asarray-constructors + self.array = np.array( + array, dtype=_get_subdtype(dtype.np_type), copy=_NP2 and None, order="C" + ) c_type = dtype.c_type if dtype._is_udt else f"{dtype.c_type}*" self._carg = ffi.cast(c_type, ffi.from_buffer(self.array)) self.dtype = dtype diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py index feb95ed02..8bac4198e 100644 --- a/graphblas/core/vector.py +++ b/graphblas/core/vector.py @@ -1,5 +1,4 @@ import itertools -import warnings import numpy as np @@ -150,6 +149,7 @@ class Vector(BaseType): Size of the Vector. name : str, optional Name to give the Vector. This will be displayed in the ``__repr__``. + """ __slots__ = "_size", "_parent", "ss" @@ -266,6 +266,7 @@ def __delitem__(self, keys, **opts): Examples -------- >>> del v[1:-1] + """ del Updater(self, opts=opts)[keys] @@ -280,6 +281,7 @@ def __getitem__(self, keys): .. code-block:: python sub_v = v[[1, 3, 5]].new() + """ resolved_indexes = IndexerResolver(self, keys) shape = resolved_indexes.shape @@ -299,6 +301,7 @@ def __setitem__(self, keys, expr, **opts): # This makes a dense iso-value vector v[:] = 1 + """ Updater(self, opts=opts)[keys] = expr @@ -311,6 +314,7 @@ def __contains__(self, index): # Check if v[15] is non-empty 15 in v + """ extractor = self[index] if not extractor._is_scalar: @@ -350,6 +354,7 @@ def isequal(self, other, *, check_dtype=False, **opts): See Also -------- :meth:`isclose` : For equality check of floating point dtypes + """ other = self._expect_type(other, Vector, within="isequal", argname="other") if check_dtype and self.dtype != other.dtype: @@ -392,6 +397,7 @@ def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False, **opts Returns ------- bool + """ other = self._expect_type(other, Vector, within="isclose", argname="other") if check_dtype and self.dtype != other.dtype: @@ -456,36 +462,6 @@ def resize(self, size): call("GrB_Vector_resize", [self, size]) self._size = size.value - def to_values(self, dtype=None, *, indices=True, values=True, sort=True): - """Extract the indices and values as a 2-tuple of numpy arrays. - - .. deprecated:: 2022.11.0 - ``Vector.to_values`` will be removed in a future release. - Use ``Vector.to_coo`` instead. Will be removed in version 2023.9.0 or later - - Parameters - ---------- - dtype : - Requested dtype for the output values array. - indices :bool, default=True - Whether to return indices; will return ``None`` for indices if ``False`` - values : bool, default=True - Whether to return values; will return ``None`` for values if ``False`` - sort : bool, default=True - Whether to require sorted indices. - - Returns - ------- - np.ndarray[dtype=uint64] : Indices - np.ndarray : Values - """ - warnings.warn( - "`Vector.to_values(...)` is deprecated; please use `Vector.to_coo(...)` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self.to_coo(dtype, indices=indices, values=values, sort=sort) - def to_coo(self, dtype=None, *, indices=True, values=True, sort=True): """Extract the indices and values as a 2-tuple of numpy arrays. 
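A recurring pattern in the utils.py hunks above is ``copy=copy or _NP2 and None``. NumPy 2 changed ``np.array(x, copy=False)`` to raise when a copy is unavoidable, and ``copy=None`` now means the old copy-only-if-needed behavior. A small sketch of the mapping (illustrative; runs on both major versions):

import numpy as np

data = [1, 2, 3]  # converting a list to an ndarray always requires a copy
wants_copy = False
# False stays False on NumPy 1.x; on NumPy 2.x it becomes None so that
# np.array does not raise when the conversion needs a copy.
copy_arg = wants_copy or (np.__version__.startswith("2.") and None)
arr = np.array(data, copy=copy_arg)
print(arr)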
@@ -510,6 +486,7 @@ def to_coo(self, dtype=None, *, indices=True, values=True, sort=True): ------- np.ndarray[dtype=uint64] : Indices np.ndarray : Values + """ if sort and backend == "suitesparse": self.wait() # sort in SS @@ -609,6 +586,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): Returns ------- Vector + """ if dtype is not None or mask is not None or clear: if dtype is None: @@ -639,6 +617,7 @@ def diag(self, k=0, *, name=None): Returns ------- :class:`~graphblas.Matrix` + """ from .matrix import Matrix @@ -663,6 +642,7 @@ def wait(self, how="materialize"): Use wait to force completion of the Vector. Has no effect in `blocking mode <../user_guide/init.html#graphblas-modes>`__. + """ how = how.lower() if how == "materialize": @@ -687,6 +667,7 @@ def get(self, index, default=None): Returns ------- Python scalar + """ expr = self[index] if expr._is_scalar: @@ -697,43 +678,6 @@ def get(self, index, default=None): "A single index should be given, and the result will be a Python scalar." ) - @classmethod - def from_values(cls, indices, values, dtype=None, *, size=None, dup_op=None, name=None): - """Create a new Vector from indices and values. - - .. deprecated:: 2022.11.0 - ``Vector.from_values`` will be removed in a future release. - Use ``Vector.from_coo`` instead. Will be removed in version 2023.9.0 or later - - Parameters - ---------- - indices : list or np.ndarray - Vector indices. - values : list or np.ndarray or scalar - List of values. If a scalar is provided, all values will be set to this single value. - dtype : - Data type of the Vector. If not provided, the values will be inspected - to choose an appropriate dtype. - size : int, optional - Size of the Vector. If not provided, ``size`` is computed from - the maximum index found in ``indices``. - dup_op : BinaryOp, optional - Function used to combine values if duplicate indices are found. - Leaving ``dup_op=None`` will raise an error if duplicates are found. - name : str, optional - Name to give the Vector. - - Returns - ------- - Vector - """ - warnings.warn( - "`Vector.from_values(...)` is deprecated; please use `Vector.from_coo(...)` instead.", - DeprecationWarning, - stacklevel=2, - ) - return cls.from_coo(indices, values, dtype, size=size, dup_op=dup_op, name=name) - @classmethod def from_coo(cls, indices, values=1.0, dtype=None, *, size=None, dup_op=None, name=None): """Create a new Vector from indices and values. 
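Since the removed ``Vector.to_values``/``Vector.from_values`` methods above were thin deprecation shims that forwarded to ``to_coo``/``from_coo`` with the same arguments, downstream migration is mechanical. A small usage sketch (the indices and values here are purely illustrative):

.. code-block:: python

    import graphblas as gb

    # was: v = gb.Vector.from_values([1, 3, 5], [10.0, 20.0, 30.0], size=7)
    v = gb.Vector.from_coo([1, 3, 5], [10.0, 20.0, 30.0], size=7)

    # was: indices, values = v.to_values()
    indices, values = v.to_coo()  # two numpy arrays; sorted by index by default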
@@ -766,6 +710,7 @@ def from_coo(cls, indices, values=1.0, dtype=None, *, size=None, dup_op=None, na Returns ------- Vector + """ indices = ints_to_numpy_buffer(indices, np.uint64, name="indices") values, dtype = values_to_numpy_buffer(values, dtype, subarray_after=1) @@ -823,10 +768,11 @@ def from_pairs(cls, pairs, dtype=None, *, size=None, dup_op=None, name=None): Returns ------- Vector + """ if isinstance(pairs, np.ndarray): raise TypeError("pairs as NumPy array is not supported; use `Vector.from_coo` instead") - unzipped = list(zip(*pairs)) + unzipped = list(zip(*pairs, strict=True)) if len(unzipped) == 2: indices, values = unzipped elif not unzipped: @@ -874,6 +820,7 @@ def from_scalar(cls, value, size, dtype=None, *, name=None, **opts): Returns ------- Vector + """ if type(value) is not Scalar: try: @@ -926,6 +873,7 @@ def from_dense(cls, values, missing_value=None, *, dtype=None, name=None, **opts Returns ------- Vector + """ values, dtype = values_to_numpy_buffer(values, dtype, subarray_after=1) if values.ndim == 0: @@ -974,6 +922,7 @@ def to_dense(self, fill_value=None, dtype=None, **opts): Returns ------- np.ndarray + """ if fill_value is None or self._nvals == self._size: if self._nvals != self._size: @@ -1044,6 +993,7 @@ def ewise_add(self, other, op=monoid.plus): # Functional syntax w << monoid.max(u | v) + """ return self._ewise_add(other, op) @@ -1135,6 +1085,7 @@ def ewise_mult(self, other, op=binary.times): # Functional syntax w << binary.gt(u & v) + """ return self._ewise_mult(other, op) @@ -1228,6 +1179,7 @@ def ewise_union(self, other, op, left_default, right_default): # Functional syntax w << binary.div(u | v, left_default=1, right_default=1) + """ return self._ewise_union(other, op, left_default, right_default) @@ -1382,6 +1334,7 @@ def vxm(self, other, op=semiring.plus_times): # Functional syntax C << semiring.min_plus(v @ A) + """ return self._vxm(other, op) @@ -1461,6 +1414,7 @@ def apply(self, op, right=None, *, left=None): # Functional syntax w << op.abs(v) + """ method_name = "apply" extra_message = ( @@ -1606,6 +1560,7 @@ def select(self, op, thunk=None): # Functional syntax w << select.value(v >= 1) + """ method_name = "select" if isinstance(op, str): @@ -1700,6 +1655,7 @@ def reduce(self, op=monoid.plus, *, allow_empty=True): .. code-block:: python total << v.reduce(monoid.plus) + """ method_name = "reduce" op = get_typed_op(op, self.dtype, kind="binary|aggregator") @@ -1752,6 +1708,7 @@ def inner(self, other, op=semiring.plus_times): *Note*: This is not a standard GraphBLAS function, but fits with other functions in the `Matrix Multiplication <../user_guide/operations.html#matrix-multiply>`__ family of functions. + """ return self._inner(other, op) @@ -1807,6 +1764,7 @@ def outer(self, other, op=binary.times): C << v.outer(w, op=binary.times) *Note*: This is not a standard GraphBLAS function. + """ from .matrix import MatrixExpression @@ -1855,6 +1813,7 @@ def reposition(self, offset, *, size=None): .. 
code-block:: python w = v.reposition(20).new() + """ if size is None: size = self._size @@ -2115,6 +2074,7 @@ def from_dict(cls, d, dtype=None, *, size=None, name=None): Returns ------- Vector + """ indices = np.fromiter(d.keys(), np.uint64) if dtype is None: @@ -2142,9 +2102,10 @@ def to_dict(self): Returns ------- dict + """ indices, values = self.to_coo(sort=False) - return dict(zip(indices.tolist(), values.tolist())) + return dict(zip(indices.tolist(), values.tolist(), strict=True)) if backend == "suitesparse": @@ -2271,7 +2232,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): to_coo = wrapdoc(Vector.to_coo)(property(automethods.to_coo)) to_dense = wrapdoc(Vector.to_dense)(property(automethods.to_dense)) to_dict = wrapdoc(Vector.to_dict)(property(automethods.to_dict)) - to_values = wrapdoc(Vector.to_values)(property(automethods.to_values)) vxm = wrapdoc(Vector.vxm)(property(automethods.vxm)) wait = wrapdoc(Vector.wait)(property(automethods.wait)) # These raise exceptions @@ -2359,7 +2319,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): to_coo = wrapdoc(Vector.to_coo)(property(automethods.to_coo)) to_dense = wrapdoc(Vector.to_dense)(property(automethods.to_dense)) to_dict = wrapdoc(Vector.to_dict)(property(automethods.to_dict)) - to_values = wrapdoc(Vector.to_values)(property(automethods.to_values)) vxm = wrapdoc(Vector.vxm)(property(automethods.vxm)) wait = wrapdoc(Vector.wait)(property(automethods.wait)) # These raise exceptions diff --git a/graphblas/exceptions.py b/graphblas/exceptions.py index e7f3b3a83..05cac988a 100644 --- a/graphblas/exceptions.py +++ b/graphblas/exceptions.py @@ -1,4 +1,3 @@ -from . import backend as _backend from .core import ffi as _ffi from .core import lib as _lib from .core.utils import _Pointer @@ -85,9 +84,14 @@ class NotImplementedException(GraphblasException): """ +# SuiteSparse errors +class JitError(GraphblasException): + """SuiteSparse:GraphBLAS error using JIT.""" + + # Our errors class UdfParseError(GraphblasException): - """Unable to parse the user-defined function.""" + """SuiteSparse:GraphBLAS unable to parse the user-defined function.""" _error_code_lookup = { @@ -112,8 +116,12 @@ class UdfParseError(GraphblasException): } GrB_SUCCESS = _lib.GrB_SUCCESS GrB_NO_VALUE = _lib.GrB_NO_VALUE -if _backend == "suitesparse": + +# SuiteSparse-specific errors +if hasattr(_lib, "GxB_EXHAUSTED"): _error_code_lookup[_lib.GxB_EXHAUSTED] = StopIteration +if hasattr(_lib, "GxB_JIT_ERROR"): # Added in 9.4 + _error_code_lookup[_lib.GxB_JIT_ERROR] = JitError def check_status(response_code, args): diff --git a/graphblas/io/__init__.py b/graphblas/io/__init__.py index b21b20963..a1b71db40 100644 --- a/graphblas/io/__init__.py +++ b/graphblas/io/__init__.py @@ -1,6 +1,5 @@ from ._awkward import from_awkward, to_awkward from ._matrixmarket import mmread, mmwrite from ._networkx import from_networkx, to_networkx -from ._numpy import from_numpy, to_numpy # deprecated from ._scipy import from_scipy_sparse, to_scipy_sparse from ._sparse import from_pydata_sparse, to_pydata_sparse diff --git a/graphblas/io/_awkward.py b/graphblas/io/_awkward.py index 6c476817f..b30984251 100644 --- a/graphblas/io/_awkward.py +++ b/graphblas/io/_awkward.py @@ -154,6 +154,7 @@ def from_awkward(A, *, name=None): function. If attempting to convert an arbitrary `awkward-array`, make sure that the top-level attributes and parameters contain the expected values. 
+ """ params = A.layout.parameters if missing := {"format", "shape"} - params.keys(): diff --git a/graphblas/io/_matrixmarket.py b/graphblas/io/_matrixmarket.py index 558605328..8cf8738a3 100644 --- a/graphblas/io/_matrixmarket.py +++ b/graphblas/io/_matrixmarket.py @@ -32,6 +32,7 @@ def mmread(source, engine="auto", *, dup_op=None, name=None, **kwargs): Returns ------- :class:`~graphblas.Matrix` + """ try: # scipy is currently needed for *all* engines @@ -95,6 +96,7 @@ def mmwrite( Number of digits to write for real or complex values symmetry : str, optional {"general", "symmetric", "skew-symmetric", "hermetian"} + """ try: # scipy is currently needed for *all* engines diff --git a/graphblas/io/_networkx.py b/graphblas/io/_networkx.py index 2324a11c2..8cf84e576 100644 --- a/graphblas/io/_networkx.py +++ b/graphblas/io/_networkx.py @@ -21,6 +21,7 @@ def from_networkx(G, nodelist=None, dtype=None, weight="weight", name=None): Returns ------- :class:`~graphblas.Matrix` + """ import networkx as nx @@ -45,6 +46,7 @@ def to_networkx(m, edge_attribute="weight"): Returns ------- nx.DiGraph + """ import networkx as nx @@ -53,7 +55,9 @@ def to_networkx(m, edge_attribute="weight"): cols = cols.tolist() G = nx.DiGraph() if edge_attribute is None: - G.add_edges_from(zip(rows, cols)) + G.add_edges_from(zip(rows, cols, strict=True)) else: - G.add_weighted_edges_from(zip(rows, cols, vals.tolist()), weight=edge_attribute) + G.add_weighted_edges_from( + zip(rows, cols, vals.tolist(), strict=True), weight=edge_attribute + ) return G diff --git a/graphblas/io/_numpy.py b/graphblas/io/_numpy.py deleted file mode 100644 index 954d28df7..000000000 --- a/graphblas/io/_numpy.py +++ /dev/null @@ -1,104 +0,0 @@ -from warnings import warn - -from ..core.utils import output_type -from ..core.vector import Vector -from ..dtypes import lookup_dtype -from ..exceptions import GraphblasException -from ._scipy import from_scipy_sparse, to_scipy_sparse - - -def from_numpy(m): # pragma: no cover (deprecated) - """Create a sparse Vector or Matrix from a dense numpy array. - - .. deprecated:: 2023.2.0 - ``from_numpy`` will be removed in a future release. - Use ``Vector.from_dense`` or ``Matrix.from_dense`` instead. - Will be removed in version 2023.10.0 or later - - A value of 0 is considered as "missing". - - - m.ndim == 1 returns a ``Vector`` - - m.ndim == 2 returns a ``Matrix`` - - m.ndim > 2 raises an error - - dtype is inferred from m.dtype - - Parameters - ---------- - m : np.ndarray - Input array - - See Also - -------- - Matrix.from_dense - Vector.from_dense - from_scipy_sparse - - Returns - ------- - Vector or Matrix - """ - warn( - "`graphblas.io.from_numpy` is deprecated; " - "use `Matrix.from_dense` and `Vector.from_dense` instead.", - DeprecationWarning, - stacklevel=2, - ) - if m.ndim > 2: - raise GraphblasException("m.ndim must be <= 2") - - try: - from scipy.sparse import coo_array, csr_array - except ImportError: # pragma: no cover (import) - raise ImportError("scipy is required to import from numpy") from None - - if m.ndim == 1: - A = csr_array(m) - _, size = A.shape - dtype = lookup_dtype(m.dtype) - return Vector.from_coo(A.indices, A.data, size=size, dtype=dtype) - A = coo_array(m) - return from_scipy_sparse(A) - - -def to_numpy(m): # pragma: no cover (deprecated) - """Create a dense numpy array from a sparse Vector or Matrix. - - .. deprecated:: 2023.2.0 - ``to_numpy`` will be removed in a future release. - Use ``Vector.to_dense`` or ``Matrix.to_dense`` instead. 
- Will be removed in version 2023.10.0 or later - - Missing values will become 0 in the output. - - numpy dtype will match the GraphBLAS dtype - - Parameters - ---------- - m : Vector or Matrix - GraphBLAS Vector or Matrix - - See Also - -------- - to_scipy_sparse - Matrix.to_dense - Vector.to_dense - - Returns - ------- - np.ndarray - """ - warn( - "`graphblas.io.to_numpy` is deprecated; " - "use `Matrix.to_dense` and `Vector.to_dense` instead.", - DeprecationWarning, - stacklevel=2, - ) - try: - import scipy # noqa: F401 - except ImportError: # pragma: no cover (import) - raise ImportError("scipy is required to export to numpy") from None - if output_type(m) is Vector: - return to_scipy_sparse(m).toarray()[0] - sparse = to_scipy_sparse(m, "coo") - return sparse.toarray() diff --git a/graphblas/io/_scipy.py b/graphblas/io/_scipy.py index 1eaa691dd..228432eed 100644 --- a/graphblas/io/_scipy.py +++ b/graphblas/io/_scipy.py @@ -22,6 +22,7 @@ def from_scipy_sparse(A, *, dup_op=None, name=None): Returns ------- :class:`~graphblas.Matrix` + """ nrows, ncols = A.shape dtype = lookup_dtype(A.dtype) diff --git a/graphblas/io/_sparse.py b/graphblas/io/_sparse.py index 2bbdad2e6..c0d4beabb 100644 --- a/graphblas/io/_sparse.py +++ b/graphblas/io/_sparse.py @@ -23,6 +23,7 @@ def from_pydata_sparse(s, *, dup_op=None, name=None): ------- :class:`~graphblas.Vector` :class:`~graphblas.Matrix` + """ try: import sparse diff --git a/graphblas/monoid/numpy.py b/graphblas/monoid/numpy.py index 5f6895e5d..b9ff2b502 100644 --- a/graphblas/monoid/numpy.py +++ b/graphblas/monoid/numpy.py @@ -5,6 +5,7 @@ https://numba.readthedocs.io/en/stable/reference/numpysupported.html#math-operations """ + import numpy as _np from .. import _STANDARD_OPERATOR_NAMES diff --git a/graphblas/select/__init__.py b/graphblas/select/__init__.py index aaf8e12d0..b55766ff8 100644 --- a/graphblas/select/__init__.py +++ b/graphblas/select/__init__.py @@ -88,9 +88,7 @@ def _match_expr(parent, expr): def value(expr): - """ - An advanced select method which allows for easily expressing - value comparison logic. + """An advanced select method for easily expressing value comparison logic. Example usage: >>> gb.select.value(A > 0) @@ -102,9 +100,7 @@ def value(expr): def row(expr): - """ - An advanced select method which allows for easily expressing - Matrix row index comparison logic. + """An advanced select method for easily expressing Matrix row index comparison logic. Example usage: >>> gb.select.row(A <= 5) @@ -116,9 +112,7 @@ def row(expr): def column(expr): - """ - An advanced select method which allows for easily expressing - Matrix column index comparison logic. + """An advanced select method for easily expressing Matrix column index comparison logic. Example usage: >>> gb.select.column(A <= 5) @@ -130,8 +124,7 @@ def column(expr): def index(expr): - """ - An advanced select method which allows for easily expressing + """An advanced select method which allows for easily expressing Vector index comparison logic. Example usage: diff --git a/graphblas/semiring/numpy.py b/graphblas/semiring/numpy.py index 97b90874b..10a680ea0 100644 --- a/graphblas/semiring/numpy.py +++ b/graphblas/semiring/numpy.py @@ -5,6 +5,7 @@ https://numba.readthedocs.io/en/stable/reference/numpysupported.html#math-operations """ + import itertools as _itertools from .. 
import _STANDARD_OPERATOR_NAMES diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py index 29a67e08b..b42ea72b4 100644 --- a/graphblas/ss/_core.py +++ b/graphblas/ss/_core.py @@ -22,8 +22,7 @@ class _graphblas_ss: def diag(x, k=0, dtype=None, *, name=None, **opts): - """ - GxB_Matrix_diag, GxB_Vector_diag. + """GxB_Matrix_diag, GxB_Vector_diag. Extract a diagonal Vector from a Matrix, or construct a diagonal Matrix from a Vector. Unlike ``Matrix.diag`` and ``Vector.diag``, this function @@ -71,8 +70,7 @@ def diag(x, k=0, dtype=None, *, name=None, **opts): def concat(tiles, dtype=None, *, name=None, **opts): - """ - GxB_Matrix_concat. + """GxB_Matrix_concat. Concatenate a 2D list of Matrix objects into a new Matrix, or a 1D list of Vector objects into a new Vector. To concatenate into existing objects, diff --git a/graphblas/tests/conftest.py b/graphblas/tests/conftest.py index ce9e6488f..964325e0d 100644 --- a/graphblas/tests/conftest.py +++ b/graphblas/tests/conftest.py @@ -3,6 +3,7 @@ import functools import itertools import platform +import sys from pathlib import Path import numpy as np @@ -68,9 +69,11 @@ def save_records(): for key in dir(gb.semiring) if key != "ss" and isinstance( - getattr(gb.semiring, key) - if key not in gb.semiring._deprecated - else gb.semiring._deprecated[key], + ( + getattr(gb.semiring, key) + if key not in gb.semiring._deprecated + else gb.semiring._deprecated[key] + ), (gb.core.operator.Semiring, gb.core.operator.ParameterizedSemiring), ) ) @@ -79,9 +82,11 @@ def save_records(): for key in dir(gb.binary) if key != "ss" and isinstance( - getattr(gb.binary, key) - if key not in gb.binary._deprecated - else gb.binary._deprecated[key], + ( + getattr(gb.binary, key) + if key not in gb.binary._deprecated + else gb.binary._deprecated[key] + ), (gb.core.operator.BinaryOp, gb.core.operator.ParameterizedBinaryOp), ) ) @@ -152,3 +157,10 @@ def compute(x): def shouldhave(module, opname): """Whether an "operator" module should have the given operator.""" return supports_udfs or hasattr(module, opname) + + +def dprint(*args, **kwargs): # pragma: no cover (debug) + """Print to stderr for debugging purposes.""" + kwargs["file"] = sys.stderr + kwargs["flush"] = True + print(*args, **kwargs) diff --git a/graphblas/tests/test_descriptor.py b/graphblas/tests/test_descriptor.py index 9209a8055..6ec9df36a 100644 --- a/graphblas/tests/test_descriptor.py +++ b/graphblas/tests/test_descriptor.py @@ -2,8 +2,7 @@ def test_caching(): - """ - Test that building a descriptor is actually caching rather than building + """Test that building a descriptor is actually caching rather than building a new object for each call. """ tocr = descriptor.lookup( diff --git a/graphblas/tests/test_dtype.py b/graphblas/tests/test_dtype.py index 5797dda10..ecbca707f 100644 --- a/graphblas/tests/test_dtype.py +++ b/graphblas/tests/test_dtype.py @@ -9,6 +9,7 @@ import graphblas as gb from graphblas import core, dtypes from graphblas.core import lib +from graphblas.core.utils import _NP2 from graphblas.dtypes import lookup_dtype suitesparse = gb.backend == "suitesparse" @@ -224,6 +225,13 @@ def test_record_dtype_from_dict(): def test_dtype_to_from_string(): types = [dtypes.BOOL, dtypes.FP64] for c in string.ascii_letters: + if c == "T": + # See NEP 55 about StringDtype "T". Notably, this doesn't work: + # >>> np.dtype(np.dtype("T").str) + continue + if _NP2 and c == "a": + # Data type alias 'a' was deprecated in NumPy 2.0. Use the 'S' alias instead. 
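+            # (Aside: np.dtype("a8") is equivalent to np.dtype("S8"), but constructing
+            # an "a"-alias dtype under NumPy 2 emits a DeprecationWarning, and this
+            # test suite escalates warnings to errors via pyproject's
+            # `filterwarnings = ["error"]`, so the alias must be skipped here.)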
+ continue try: dtype = np.dtype(c) types.append(dtype) @@ -241,7 +249,7 @@ def test_dtype_to_from_string(): def test_has_complex(): - """Only SuiteSparse has complex (with Windows support in Python after v7.4.3.1)""" + """Only SuiteSparse has complex (with Windows support in Python after v7.4.3.1).""" if not suitesparse: assert not dtypes._supports_complex return diff --git a/graphblas/tests/test_infix.py b/graphblas/tests/test_infix.py index e688086b9..601f282a7 100644 --- a/graphblas/tests/test_infix.py +++ b/graphblas/tests/test_infix.py @@ -346,7 +346,7 @@ def test_inplace_infix(s1, v1, v2, A1, A2): @autocompute def test_infix_expr_value_types(): - """Test bug where `infix_expr._value` was used as MatrixExpression or Matrix""" + """Test bug where `infix_expr._value` was used as MatrixExpression or Matrix.""" from graphblas.core.matrix import MatrixExpression A = Matrix(int, 3, 3) diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py index 6ad92a950..7e786f0da 100644 --- a/graphblas/tests/test_io.py +++ b/graphblas/tests/test_io.py @@ -38,17 +38,6 @@ suitesparse = gb.backend == "suitesparse" -@pytest.mark.skipif("not ss") -def test_deprecated(): - a = np.array([0.0, 2.0, 4.1]) - with pytest.warns(DeprecationWarning): - v = gb.io.from_numpy(a) - assert v.isequal(gb.Vector.from_coo([1, 2], [2.0, 4.1]), check_dtype=True) - with pytest.warns(DeprecationWarning): - a2 = gb.io.to_numpy(v) - np.testing.assert_array_equal(a, a2) - - @pytest.mark.skipif("not ss") def test_vector_to_from_numpy(): a = np.array([0.0, 2.0, 4.1]) @@ -157,7 +146,7 @@ def test_matrix_to_from_networkx(): M = gb.io.from_networkx(G, nodelist=range(7)) if suitesparse: assert M.ss.is_iso - rows, cols = zip(*edges) + rows, cols = zip(*edges, strict=True) expected = gb.Matrix.from_coo(rows, cols, 1) assert expected.isequal(M) # Test empty diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index 233fc9a9b..24f0e73d7 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -2603,12 +2603,14 @@ def test_iter(A): zip( [3, 0, 3, 5, 6, 0, 6, 1, 6, 2, 4, 1], [0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6], + strict=True, ) ) assert set(A.T) == set( zip( [0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6], [3, 0, 3, 5, 6, 0, 6, 1, 6, 2, 4, 1], + strict=True, ) ) @@ -2731,8 +2733,8 @@ def test_ss_split(A): for results in [A.ss.split([4, 3]), A.ss.split([[4, None], 3], name="split")]: row_boundaries = [0, 4, 7] col_boundaries = [0, 3, 6, 7] - for i, (i1, i2) in enumerate(zip(row_boundaries[:-1], row_boundaries[1:])): - for j, (j1, j2) in enumerate(zip(col_boundaries[:-1], col_boundaries[1:])): + for i, (i1, i2) in enumerate(itertools.pairwise(row_boundaries)): + for j, (j1, j2) in enumerate(itertools.pairwise(col_boundaries)): expected = A[i1:i2, j1:j2].new() assert expected.isequal(results[i][j]) with pytest.raises(DimensionMismatch): @@ -2952,7 +2954,6 @@ def test_expr_is_like_matrix(A): "from_dicts", "from_edgelist", "from_scalar", - "from_values", "resize", "setdiag", "update", @@ -3018,7 +3019,6 @@ def test_index_expr_is_like_matrix(A): "from_dicts", "from_edgelist", "from_scalar", - "from_values", "resize", "setdiag", } @@ -3070,7 +3070,7 @@ def test_ss_flatten(A): [3, 2, 3, 1, 5, 3, 7, 8, 3, 1, 7, 4], ] # row-wise - indices = [row * A.ncols + col for row, col in zip(data[0], data[1])] + indices = [row * A.ncols + col for row, col in zip(data[0], data[1], strict=True)] expected = Vector.from_coo(indices, data[2], size=A.nrows * A.ncols) for fmt in ["csr", "hypercsr", "bitmapr"]: B = 
Matrix.ss.import_any(**A.ss.export(format=fmt)) @@ -3089,7 +3089,7 @@ def test_ss_flatten(A): assert C.isequal(B) # column-wise - indices = [col * A.nrows + row for row, col in zip(data[0], data[1])] + indices = [col * A.nrows + row for row, col in zip(data[0], data[1], strict=True)] expected = Vector.from_coo(indices, data[2], size=A.nrows * A.ncols) for fmt in ["csc", "hypercsc", "bitmapc"]: B = Matrix.ss.import_any(**A.ss.export(format=fmt)) @@ -3557,15 +3557,6 @@ def compare(A, expected, isequal=True, **kwargs): A.ss.compactify("bad_how") -def test_deprecated(A): - with pytest.warns(DeprecationWarning): - A.to_values() - with pytest.warns(DeprecationWarning): - A.T.to_values() - with pytest.warns(DeprecationWarning): - A.from_values([1], [2], [3]) - - def test_ndim(A): assert A.ndim == 2 assert A.ewise_mult(A).ndim == 2 @@ -3637,9 +3628,9 @@ def test_ss_iteration(A): assert not list(B.ss.itervalues()) assert not list(B.ss.iteritems()) rows, columns, values = A.to_coo() - assert sorted(zip(rows, columns)) == sorted(A.ss.iterkeys()) + assert sorted(zip(rows, columns, strict=True)) == sorted(A.ss.iterkeys()) assert sorted(values) == sorted(A.ss.itervalues()) - assert sorted(zip(rows, columns, values)) == sorted(A.ss.iteritems()) + assert sorted(zip(rows, columns, values, strict=True)) == sorted(A.ss.iteritems()) N = rows.size A = Matrix.ss.import_bitmapr(**A.ss.export("bitmapr")) @@ -4083,10 +4074,11 @@ def test_ss_pack_hyperhash(A): Y = C.ss.unpack_hyperhash() Y = C.ss.unpack_hyperhash(compute=True) assert C.ss.unpack_hyperhash() is None - assert Y.nrows == C.nrows - C.ss.pack_hyperhash(Y) - assert Y.gb_obj[0] == gb.core.NULL - assert C.ss.unpack_hyperhash() is not None + if Y is not None: # hyperhash may or may not be computed + assert Y.nrows == C.nrows + C.ss.pack_hyperhash(Y) + assert Y.gb_obj[0] == gb.core.NULL + assert C.ss.unpack_hyperhash() is not None # May or may not be computed def test_to_dicts_from_dicts(A): diff --git a/graphblas/tests/test_numpyops.py b/graphblas/tests/test_numpyops.py index 25c52d7fd..999c6d5e0 100644 --- a/graphblas/tests/test_numpyops.py +++ b/graphblas/tests/test_numpyops.py @@ -5,6 +5,7 @@ import numpy as np import pytest +from packaging.version import parse import graphblas as gb import graphblas.binary.numpy as npbinary @@ -112,6 +113,15 @@ def test_npunary(): match(accum=gb.binary.lor) << gb_result.apply(npunary.isnan) compare = match.reduce(gb.monoid.land).new() if not compare: # pragma: no cover (debug) + import numba + + if ( + unary_name in {"sign"} + and np.__version__.startswith("2.") + and parse(numba.__version__) < parse("0.61.0") + ): + # numba <0.61.0 does not match numpy 2.0 + continue print(unary_name, gb_input.dtype) print(compute(gb_result)) print(np_result) diff --git a/graphblas/tests/test_op.py b/graphblas/tests/test_op.py index c7d1ce97c..41fae80ae 100644 --- a/graphblas/tests/test_op.py +++ b/graphblas/tests/test_op.py @@ -1450,8 +1450,6 @@ def test_deprecated(): gb.op.secondj with pytest.warns(DeprecationWarning, match="please use"): gb.agg.argmin - with pytest.warns(DeprecationWarning, match="please use"): - import graphblas.core.agg # noqa: F401 @pytest.mark.slow diff --git a/graphblas/tests/test_scalar.py b/graphblas/tests/test_scalar.py index 3c7bffa9a..e93511914 100644 --- a/graphblas/tests/test_scalar.py +++ b/graphblas/tests/test_scalar.py @@ -50,7 +50,7 @@ def test_dup(s): s_empty = Scalar(dtypes.FP64) s_unempty = Scalar.from_value(0.0) if s_empty.is_cscalar: - # NumPy wraps around + # NumPy <2 wraps around; >=2 raises 
OverflowError uint_data = [ ("UINT8", 2**8 - 2), ("UINT16", 2**16 - 2), @@ -73,6 +73,10 @@ def test_dup(s): ("FP32", -2.5), *uint_data, ]: + if dtype.startswith("UINT") and s_empty.is_cscalar and not np.__version__.startswith("1."): + with pytest.raises(OverflowError, match="out of bounds for uint"): + s4.dup(dtype=dtype, name="s5") + continue s5 = s4.dup(dtype=dtype, name="s5") assert s5.dtype == dtype assert s5.value == val diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index 3c974c50d..4cea0b563 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -1,6 +1,8 @@ import os import pathlib +import platform import sys +import sysconfig import numpy as np import pytest @@ -26,11 +28,48 @@ @pytest.fixture(scope="module", autouse=True) def _setup_jit(): + """Set up the SuiteSparse:GraphBLAS JIT.""" + if _IS_SSGB7: + # SuiteSparse JIT was added in SSGB 8 + yield + return + + if not os.environ.get("GITHUB_ACTIONS"): + # Try to run the tests with defaults from sysconfig if not running in CI + prev = gb.ss.config["jit_c_control"] + cc = sysconfig.get_config_var("CC") + cflags = sysconfig.get_config_var("CFLAGS") + include = sysconfig.get_path("include") + libs = sysconfig.get_config_var("LIBS") + if not (cc is None or cflags is None or include is None or libs is None): + gb.ss.config["jit_c_control"] = "on" + gb.ss.config["jit_c_compiler_name"] = cc + gb.ss.config["jit_c_compiler_flags"] = f"{cflags} -I{include}" + gb.ss.config["jit_c_libraries"] = libs + else: + # Should we skip or try to run if sysconfig vars aren't set? + gb.ss.config["jit_c_control"] = "on" # "off" + try: + yield + finally: + gb.ss.config["jit_c_control"] = prev + return + + if ( + sys.platform == "darwin" + or sys.platform == "linux" + and "conda" not in gb.ss.config["jit_c_compiler_name"] + ): + # XXX TODO: tests for SuiteSparse JIT are not passing on linux when using wheels or on osx + # This should be understood and fixed! + gb.ss.config["jit_c_control"] = "off" + yield + return + # Configuration values below were obtained from the output of the JIT config # in CI, but with paths changed to use `{conda_prefix}` where appropriate. - if "CONDA_PREFIX" not in os.environ or _IS_SSGB7: - return conda_prefix = os.environ["CONDA_PREFIX"] + prev = gb.ss.config["jit_c_control"] gb.ss.config["jit_c_control"] = "on" if sys.platform == "linux": gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/x86_64-conda-linux-gnu-cc" @@ -59,7 +98,7 @@ def _setup_jit(): gb.ss.config["jit_c_compiler_flags"] = ( "-march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE " f"-fstack-protector-strong -O2 -pipe -isystem {conda_prefix}/include -DGBNCPUFEAT " - "-Wno-pointer-sign -O3 -DNDEBUG -fopenmp=libomp -fPIC -arch x86_64" + f"-Wno-pointer-sign -O3 -DNDEBUG -fopenmp=libomp -fPIC -arch {platform.machine()}" ) gb.ss.config["jit_c_linker_flags"] = ( "-Wl,-pie -Wl,-headerpad_max_install_names -Wl,-dead_strip_dylibs " @@ -72,6 +111,7 @@ def _setup_jit(): # This probably means we're testing a `python-suitesparse-graphblas` wheel # in a conda environment. This is not yet working. gb.ss.config["jit_c_control"] = "off" + yield return gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" @@ -86,6 +126,12 @@ def _setup_jit(): if not pathlib.Path(gb.ss.config["jit_c_compiler_name"]).exists(): # Can't use the JIT if we don't have a compiler! 
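        # (Aside: `jit_c_compiler_name` is set above to an absolute path inside the
        # conda environment, so a plain pathlib existence check is sufficient; no
        # PATH lookup is needed.)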
gb.ss.config["jit_c_control"] = "off" + yield + return + try: + yield + finally: + gb.ss.config["jit_c_control"] = prev @pytest.fixture diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index 8a2cd0824..db80cdf71 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -29,6 +29,8 @@ suitesparse = backend == "suitesparse" +if suitesparse: + ss_version_major = gb.core.ss.version_major @pytest.fixture @@ -1675,7 +1677,6 @@ def test_expr_is_like_vector(v): "from_dict", "from_pairs", "from_scalar", - "from_values", "resize", "update", } @@ -1725,7 +1726,6 @@ def test_index_expr_is_like_vector(v): "from_dict", "from_pairs", "from_scalar", - "from_values", "resize", } ignore = {"__sizeof__", "_ewise_add", "_ewise_mult", "_ewise_union", "_inner", "_vxm"} @@ -2012,13 +2012,6 @@ def test_ss_split(v): assert x2.name == "split_1" -def test_deprecated(v): - with pytest.warns(DeprecationWarning): - v.to_values() - with pytest.warns(DeprecationWarning): - Vector.from_values([1], [2]) - - def test_ndim(A, v): assert v.ndim == 1 assert v.ewise_mult(v).ndim == 1 @@ -2214,7 +2207,10 @@ def test_udt(): long_dtype = np.dtype([("x", np.bool_), ("y" * 1000, np.float64)], align=True) if suitesparse: - with pytest.warns(UserWarning, match="too large"): + if ss_version_major < 9: + with pytest.warns(UserWarning, match="too large"): + long_udt = dtypes.register_anonymous(long_dtype) + else: long_udt = dtypes.register_anonymous(long_dtype) else: # UDTs don't currently have a name in vanilla GraphBLAS @@ -2225,13 +2221,19 @@ def test_udt(): if suitesparse: vv = Vector.ss.deserialize(v.ss.serialize(), dtype=long_udt) assert v.isequal(vv, check_dtype=True) - with pytest.raises(SyntaxError): - # The size of the UDT name is limited + if ss_version_major < 9: + with pytest.raises(SyntaxError): + # The size of the UDT name is limited + Vector.ss.deserialize(v.ss.serialize()) + else: Vector.ss.deserialize(v.ss.serialize()) # May be able to look up non-anonymous dtypes by name if their names are too long named_long_dtype = np.dtype([("x", np.bool_), ("y" * 1000, np.float64)], align=False) if suitesparse: - with pytest.warns(UserWarning, match="too large"): + if ss_version_major < 9: + with pytest.warns(UserWarning, match="too large"): + named_long_udt = dtypes.register_new("LongUDT", named_long_dtype) + else: named_long_udt = dtypes.register_new("LongUDT", named_long_dtype) else: named_long_udt = dtypes.register_new("LongUDT", named_long_dtype) @@ -2279,7 +2281,7 @@ def test_ss_iteration(v): # This is what I would expect assert sorted(indices) == sorted(v.ss.iterkeys()) assert sorted(values) == sorted(v.ss.itervalues()) - assert sorted(zip(indices, values)) == sorted(v.ss.iteritems()) + assert sorted(zip(indices, values, strict=True)) == sorted(v.ss.iteritems()) N = indices.size v = Vector.ss.import_bitmap(**v.ss.export("bitmap")) diff --git a/graphblas/unary/numpy.py b/graphblas/unary/numpy.py index 9b742d8bc..0c36565ec 100644 --- a/graphblas/unary/numpy.py +++ b/graphblas/unary/numpy.py @@ -5,6 +5,7 @@ https://numba.readthedocs.io/en/stable/reference/numpysupported.html#math-operations """ + import numpy as _np from .. 
import _STANDARD_OPERATOR_NAMES diff --git a/graphblas/viz.py b/graphblas/viz.py index f0367e119..b6d5f6ba7 100644 --- a/graphblas/viz.py +++ b/graphblas/viz.py @@ -79,6 +79,7 @@ def spy(M, *, centered=False, show=True, figure=None, axes=None, figsize=None, * See Also -------- datashade + """ mpl, plt, ss = _get_imports(["mpl", "plt", "ss"], "spy") A = to_scipy_sparse(M, "coo") @@ -129,6 +130,7 @@ def datashade(M, agg="count", *, width=None, height=None, opts_kwargs=None, **kw See Also -------- spy + """ np, pd, bk, hv, hp, ds = _get_imports(["np", "pd", "bk", "hv", "hp", "ds"], "datashade") if "df" not in kwargs: diff --git a/pyproject.toml b/pyproject.toml index 04ef28645..1bad95118 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,70 +1,68 @@ [build-system] build-backend = "setuptools.build_meta" -requires = [ - "setuptools >=64", - "setuptools-git-versioning", -] +requires = ["setuptools >=64", "setuptools-git-versioning"] [project] name = "python-graphblas" dynamic = ["version"] description = "Python library for GraphBLAS: high-performance sparse linear algebra for scalable graph analytics" readme = "README.md" -requires-python = ">=3.9" -license = {file = "LICENSE"} +requires-python = ">=3.10" +license = { file = "LICENSE" } authors = [ - {name = "Erik Welch", email = "erik.n.welch@gmail.com"}, - {name = "Jim Kitchen"}, - {name = "Python-graphblas contributors"}, + { name = "Erik Welch", email = "erik.n.welch@gmail.com" }, + { name = "Jim Kitchen" }, + { name = "Python-graphblas contributors" }, ] maintainers = [ - {name = "Erik Welch", email = "erik.n.welch@gmail.com"}, - {name = "Jim Kitchen", email = "jim22k@gmail.com"}, - {name = "Sultan Orazbayev", email = "contact@econpoint.com"}, + { name = "Erik Welch", email = "erik.n.welch@gmail.com" }, + { name = "Jim Kitchen", email = "jim22k@gmail.com" }, + { name = "Sultan Orazbayev", email = "contact@econpoint.com" }, ] keywords = [ - "graphblas", - "graph", - "sparse", - "matrix", - "lagraph", - "suitesparse", - "Networks", - "Graph Theory", - "Mathematics", - "network", - "discrete mathematics", - "math", + "graphblas", + "graph", + "sparse", + "matrix", + "lagraph", + "suitesparse", + "Networks", + "Graph Theory", + "Mathematics", + "network", + "discrete mathematics", + "math", ] classifiers = [ - "Development Status :: 5 - Production/Stable", - "License :: OSI Approved :: Apache Software License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: POSIX :: Linux", - "Operating System :: Microsoft :: Windows", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3 :: Only", - "Intended Audience :: Developers", - "Intended Audience :: Other Audience", - "Intended Audience :: Science/Research", - "Topic :: Scientific/Engineering", - "Topic :: Scientific/Engineering :: Information Analysis", - "Topic :: Scientific/Engineering :: Mathematics", - "Topic :: Software Development :: Libraries :: Python Modules", + "Development Status :: 5 - Production/Stable", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + "Operating System :: Microsoft :: Windows", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: 
Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3 :: Only", + "Intended Audience :: Developers", + "Intended Audience :: Other Audience", + "Intended Audience :: Science/Research", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Information Analysis", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Software Development :: Libraries :: Python Modules", ] dependencies = [ - "numpy >=1.21", - "donfig >=0.6", - "pyyaml >=5.4", - # These won't be installed by default after 2024.3.0 - # Use e.g. "python-graphblas[suitesparse]" or "python-graphblas[default]" instead - "suitesparse-graphblas >=7.4.0.0, <9", - "numba >=0.55; python_version<'3.12'", # make optional where numba is not supported + "numpy >=1.23", + "donfig >=0.6", + "pyyaml >=5.4", + # These won't be installed by default after 2024.3.0 + # once pep-771 is supported: https://peps.python.org/pep-0771/ + # Use e.g. "python-graphblas[suitesparse]" or "python-graphblas[default]" instead + "suitesparse-graphblas >=7.4.0.0, <10", + "numba >=0.55; python_version<'3.14'", # make optional where numba is not supported ] [project.urls] @@ -74,56 +72,41 @@ repository = "https://github.com/python-graphblas/python-graphblas" changelog = "https://github.com/python-graphblas/python-graphblas/releases" [project.optional-dependencies] -suitesparse = [ - "suitesparse-graphblas >=7.4.0.0, <9", -] -networkx = [ - "networkx >=2.8", -] -numba = [ - "numba >=0.55", -] -pandas = [ - "pandas >=1.2", -] -scipy = [ - "scipy >=1.9", -] -suitesparse-udf = [ # udf requires numba - "python-graphblas[suitesparse,numba]", -] -repr = [ - "python-graphblas[pandas]", +suitesparse = ["suitesparse-graphblas >=7.4.0.0, <10"] +networkx = ["networkx >=2.8"] +numba = ["numba >=0.55"] +pandas = ["pandas >=1.5"] +scipy = ["scipy >=1.9"] +suitesparse-udf = [ # udf requires numba + "python-graphblas[suitesparse,numba]", ] +repr = ["python-graphblas[pandas]"] io = [ - "python-graphblas[networkx,scipy]", - "python-graphblas[numba]; python_version<'3.12'", - "awkward >=1.9", - "sparse >=0.13; python_version<'3.12'", # make optional, b/c sparse needs numba - "fast-matrix-market >=1.4.5", + "python-graphblas[networkx,scipy]", + "python-graphblas[numba]; python_version<'3.14'", + "awkward >=2.0", + "sparse >=0.14; python_version<'3.13'", # make optional, b/c sparse needs numba + "fast-matrix-market >=1.4.5; python_version<'3.13'", # py3.13 not supported yet ] -viz = [ - "python-graphblas[networkx,scipy]", - "matplotlib >=3.5", -] -datashade = [ # datashade requires numba - "python-graphblas[numba,pandas,scipy]", - "datashader >=0.12", - "hvplot >=0.7", +viz = ["python-graphblas[networkx,scipy]", "matplotlib >=3.6"] +datashade = [ # datashade requires numba + "python-graphblas[numba,pandas,scipy]", + "datashader >=0.14", + "hvplot >=0.8", ] test = [ - "python-graphblas[suitesparse,pandas,scipy]", - "packaging >=21", - "pytest >=6.2", - "tomli >=1", + "python-graphblas[suitesparse,pandas,scipy]", + "packaging >=21", + "pytest >=6.2", + "tomli >=1", ] default = [ - "python-graphblas[suitesparse,pandas,scipy]", - "python-graphblas[numba]; python_version<'3.12'", # make optional where numba is not supported + "python-graphblas[suitesparse,pandas,scipy]", + "python-graphblas[numba]; python_version<'3.14'", # make optional where numba is not supported ] all = [ - "python-graphblas[default,io,viz,test]", - "python-graphblas[datashade]; python_version<'3.12'", # 
make optional, b/c datashade needs numba + "python-graphblas[default,io,viz,test]", + "python-graphblas[datashade]; python_version<'3.14'", # make optional, b/c datashade needs numba ] [tool.setuptools] @@ -132,22 +115,22 @@ all = [ # $ find graphblas/ -name __init__.py -print | sort | sed -e 's/\/__init__.py//g' -e 's/\//./g' # $ python -c 'import tomli ; [print(x) for x in sorted(tomli.load(open("pyproject.toml", "rb"))["tool"]["setuptools"]["packages"])]' packages = [ - "graphblas", - "graphblas.agg", - "graphblas.binary", - "graphblas.core", - "graphblas.core.operator", - "graphblas.core.ss", - "graphblas.dtypes", - "graphblas.indexunary", - "graphblas.io", - "graphblas.monoid", - "graphblas.op", - "graphblas.semiring", - "graphblas.select", - "graphblas.ss", - "graphblas.tests", - "graphblas.unary", + "graphblas", + "graphblas.agg", + "graphblas.binary", + "graphblas.core", + "graphblas.core.operator", + "graphblas.core.ss", + "graphblas.dtypes", + "graphblas.indexunary", + "graphblas.io", + "graphblas.monoid", + "graphblas.op", + "graphblas.semiring", + "graphblas.select", + "graphblas.ss", + "graphblas.tests", + "graphblas.unary", ] [tool.setuptools-git-versioning] @@ -157,7 +140,7 @@ dirty_template = "{tag}+{ccount}.g{sha}.dirty" [tool.black] line-length = 100 -target-version = ["py39", "py310", "py311", "py312"] +target-version = ["py310", "py311", "py312", "py313"] [tool.isort] sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] @@ -171,53 +154,54 @@ line_length = 100 [tool.pytest.ini_options] minversion = "6.0" testpaths = "graphblas/tests" -xfail_strict = false # 2023-07-23: awkward and numpy 1.25 sometimes conflict +xfail_strict = false # 2023-07-23: awkward and numpy 1.25 sometimes conflict addopts = [ - "--strict-config", # Force error if config is mispelled - "--strict-markers", # Force error if marker is mispelled (must be defined in config) - "-ra", # Print summary of all fails/errors -] -markers = [ - "slow: Skipped unless --runslow passed", + "--strict-config", # Force error if config is mispelled + "--strict-markers", # Force error if marker is mispelled (must be defined in config) + "-ra", # Print summary of all fails/errors ] +markers = ["slow: Skipped unless --runslow passed"] log_cli_level = "info" filterwarnings = [ - # See: https://docs.python.org/3/library/warnings.html#describing-warning-filters - # and: https://docs.pytest.org/en/7.2.x/how-to/capture-warnings.html#controlling-warnings - "error", + # See: https://docs.python.org/3/library/warnings.html#describing-warning-filters + # and: https://docs.pytest.org/en/7.2.x/how-to/capture-warnings.html#controlling-warnings + "error", - # sparse 0.14.0 (2022-02-24) began raising this warning; it has been reported and fixed upstream. - "ignore:coords should be an ndarray. This will raise a ValueError:DeprecationWarning:sparse._coo.core", + # sparse 0.14.0 (2022-02-24) began raising this warning; it has been reported and fixed upstream. + "ignore:coords should be an ndarray. This will raise a ValueError:DeprecationWarning:sparse._coo.core", - # setuptools v67.3.0 deprecated `pkg_resources.declare_namespace` on 13 Feb 2023. See: - # https://setuptools.pypa.io/en/latest/history.html#v67-3-0 - # MAINT: check if this is still necessary in 2025 - "ignore:Deprecated call to `pkg_resources.declare_namespace:DeprecationWarning:pkg_resources", + # setuptools v67.3.0 deprecated `pkg_resources.declare_namespace` on 13 Feb 2023. 
See: + # https://setuptools.pypa.io/en/latest/history.html#v67-3-0 + # MAINT: check if this is still necessary in 2025 + "ignore:Deprecated call to `pkg_resources.declare_namespace:DeprecationWarning:pkg_resources", - # This deprecation warning was added in setuptools v67.5.0 (8 Mar 2023). See: - # https://setuptools.pypa.io/en/latest/history.html#v67-5-0 - "ignore:pkg_resources is deprecated as an API:DeprecationWarning:", + # This deprecation warning was added in setuptools v67.5.0 (8 Mar 2023). See: + # https://setuptools.pypa.io/en/latest/history.html#v67-5-0 + "ignore:pkg_resources is deprecated as an API:DeprecationWarning:", - # sre_parse deprecated in 3.11; this is triggered by awkward 0.10 - "ignore:module 'sre_parse' is deprecated:DeprecationWarning:", - "ignore:module 'sre_constants' is deprecated:DeprecationWarning:", + # sre_parse deprecated in 3.11; this is triggered by awkward 0.10 + "ignore:module 'sre_parse' is deprecated:DeprecationWarning:", + "ignore:module 'sre_constants' is deprecated:DeprecationWarning:", - # numpy 1.25.0 (2023-06-17) deprecated `np.find_common_type`; many other dependencies use it. - # See if we can remove this filter in 2025. - "ignore:np.find_common_type is deprecated:DeprecationWarning:", + # numpy 1.25.0 (2023-06-17) deprecated `np.find_common_type`; many other dependencies use it. + # See if we can remove this filter in 2025. + "ignore:np.find_common_type is deprecated:DeprecationWarning:", - # pypy gives this warning - "ignore:can't resolve package from __spec__ or __package__:ImportWarning:", + # pypy gives this warning + "ignore:can't resolve package from __spec__ or __package__:ImportWarning:", - # Python 3.12 introduced this deprecation, which is triggered by pandas 2.1.1 - "ignore:datetime.datetime.utcfromtimestamp:DeprecationWarning:dateutil", + # Python 3.12 introduced this deprecation, which is triggered by pandas 2.1.1 + "ignore:datetime.datetime.utcfromtimestamp:DeprecationWarning:dateutil", + + # Pandas 2.2 warns that pyarrow will become a required dependency in pandas 3.0 + "ignore:\\nPyarrow will become a required dependency of pandas:DeprecationWarning:", ] [tool.coverage.run] branch = true source = ["graphblas"] omit = [ - "graphblas/viz.py", # TODO: test and get coverage for viz.py + "graphblas/viz.py", # TODO: test and get coverage for viz.py ] [tool.coverage.report] @@ -227,9 +211,9 @@ fail_under = 0 skip_covered = true skip_empty = true exclude_lines = [ - "pragma: no cover", - "raise AssertionError", - "raise NotImplementedError", + "pragma: no cover", + "raise AssertionError", + "raise NotImplementedError", ] [tool.codespell] @@ -238,242 +222,278 @@ ignore-words-list = "coo,ba" [tool.ruff] # https://github.com/charliermarsh/ruff/ line-length = 100 -target-version = "py39" +target-version = "py310" + +[tool.ruff.format] +exclude = ["*.ipynb"] # Consider enabling auto-formatting of notebooks + +[tool.ruff.lint] +exclude = ["*.ipynb"] # Consider enabling auto-formatting of notebooks unfixable = [ - "F841" # unused-variable (Note: can leave useless expression) + "F841", # unused-variable (Note: can leave useless expression) + "B905", # zip-without-explicit-strict (Note: prefer `zip(x, y, strict=True)`) ] select = [ - # Have we enabled too many checks that they'll become a nuisance? We'll see... - "F", # pyflakes - "E", # pycodestyle Error - "W", # pycodestyle Warning - # "C90", # mccabe (Too strict, but maybe we should make things less complex) - # "I", # isort (Should we replace `isort` with this?) 
- "N", # pep8-naming - "D", # pydocstyle - "UP", # pyupgrade - "YTT", # flake8-2020 - # "ANN", # flake8-annotations (We don't use annotations yet) - "S", # bandit - # "BLE", # flake8-blind-except (Maybe consider) - # "FBT", # flake8-boolean-trap (Why?) - "B", # flake8-bugbear - "A", # flake8-builtins - "COM", # flake8-commas - "C4", # flake8-comprehensions - "DTZ", # flake8-datetimez - "T10", # flake8-debugger - # "DJ", # flake8-django (We don't use django) - # "EM", # flake8-errmsg (Perhaps nicer, but too much work) - "EXE", # flake8-executable - "ISC", # flake8-implicit-str-concat - # "ICN", # flake8-import-conventions (Doesn't allow "_" prefix such as `_np`) - "G", # flake8-logging-format - "INP", # flake8-no-pep420 - "PIE", # flake8-pie - "T20", # flake8-print - # "PYI", # flake8-pyi (We don't have stub files yet) - "PT", # flake8-pytest-style - "Q", # flake8-quotes - "RSE", # flake8-raise - "RET", # flake8-return - # "SLF", # flake8-self (We can use our own private variables--sheesh!) - "SIM", # flake8-simplify - # "TID", # flake8-tidy-imports (Rely on isort and our own judgement) - # "TCH", # flake8-type-checking (Note: figure out type checking later) - # "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) - "PTH", # flake8-use-pathlib (Often better, but not always) - # "ERA", # eradicate (We like code in comments!) - # "PD", # pandas-vet (Intended for scripts that use pandas, not libraries) - "PGH", # pygrep-hooks - "PL", # pylint - "PLC", # pylint Convention - "PLE", # pylint Error - "PLR", # pylint Refactor - "PLW", # pylint Warning - "TRY", # tryceratops - "NPY", # NumPy-specific rules - "RUF", # ruff-specific rules - "ALL", # Try new categories by default (making the above list unnecessary) + # Have we enabled too many checks that they'll become a nuisance? We'll see... + "F", # pyflakes + "E", # pycodestyle Error + "W", # pycodestyle Warning + # "C90", # mccabe (Too strict, but maybe we should make things less complex) + # "I", # isort (Should we replace `isort` with this?) + "N", # pep8-naming + "D", # pydocstyle + "UP", # pyupgrade + "YTT", # flake8-2020 + # "ANN", # flake8-annotations (We don't use annotations yet) + "S", # bandit + # "BLE", # flake8-blind-except (Maybe consider) + # "FBT", # flake8-boolean-trap (Why?) + "B", # flake8-bugbear + "A", # flake8-builtins + "COM", # flake8-commas + "C4", # flake8-comprehensions + "DTZ", # flake8-datetimez + "T10", # flake8-debugger + # "DJ", # flake8-django (We don't use django) + # "EM", # flake8-errmsg (Perhaps nicer, but too much work) + "EXE", # flake8-executable + "ISC", # flake8-implicit-str-concat + # "ICN", # flake8-import-conventions (Doesn't allow "_" prefix such as `_np`) + "G", # flake8-logging-format + "INP", # flake8-no-pep420 + "PIE", # flake8-pie + "T20", # flake8-print + # "PYI", # flake8-pyi (We don't have stub files yet) + "PT", # flake8-pytest-style + "Q", # flake8-quotes + "RSE", # flake8-raise + "RET", # flake8-return + # "SLF", # flake8-self (We can use our own private variables--sheesh!) + "SIM", # flake8-simplify + # "TID", # flake8-tidy-imports (Rely on isort and our own judgement) + # "TCH", # flake8-type-checking (Note: figure out type checking later) + # "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) + "PTH", # flake8-use-pathlib (Often better, but not always) + # "ERA", # eradicate (We like code in comments!) 
+ # "PD", # pandas-vet (Intended for scripts that use pandas, not libraries) + "PGH", # pygrep-hooks + "PL", # pylint + "PLC", # pylint Convention + "PLE", # pylint Error + "PLR", # pylint Refactor + "PLW", # pylint Warning + "TRY", # tryceratops + "NPY", # NumPy-specific rules + "RUF", # ruff-specific rules + "ALL", # Try new categories by default (making the above list unnecessary) ] external = [ - # noqa codes that ruff doesn't know about: https://github.com/charliermarsh/ruff#external - "F811", + # noqa codes that ruff doesn't know about: https://github.com/charliermarsh/ruff#external + "F811", ] ignore = [ - # Would be nice to fix these - "D100", # Missing docstring in public module - "D101", # Missing docstring in public class - "D102", # Missing docstring in public method - "D103", # Missing docstring in public function - "D104", # Missing docstring in public package - "D105", # Missing docstring in magic method - # "D107", # Missing docstring in `__init__` - "D205", # 1 blank line required between summary line and description - "D401", # First line of docstring should be in imperative mood: - # "D417", # Missing argument description in the docstring: - "PLE0605", # Invalid format for `__all__`, must be `tuple` or `list` (Note: broken in v0.0.237) - - # Maybe consider - # "SIM300", # Yoda conditions are discouraged, use ... instead (Note: we're not this picky) - # "SIM401", # Use dict.get ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer) - "TRY004", # Prefer `TypeError` exception for invalid type (Note: good advice, but not worth the nuisance) - "TRY200", # Use `raise from` to specify exception cause (Note: sometimes okay to raise original exception) - "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` (Note: no annotations yet) - "PERF401", # Use a list comprehension to create a transformed list (Note: poorly implemented atm) - - # Intentionally ignored - "COM812", # Trailing comma missing - "D203", # 1 blank line required before class docstring (Note: conflicts with D211, which is preferred) - "D400", # First line should end with a period (Note: prefer D415, which also allows "?" and "!") - "N801", # Class name ... should use CapWords convention (Note:we have a few exceptions to this) - "N802", # Function name ... should be lowercase - "N803", # Argument name ... should be lowercase (Maybe okay--except in tests) - "N806", # Variable ... in function should be lowercase - "N807", # Function name should not start and end with `__` - "N818", # Exception name ... should be named with an Error suffix (Note: good advice) - "PERF203", # `try`-`except` within a loop incurs performance overhead (Note: too strict) - "PLC0205", # Class `__slots__` should be a non-string iterable (Note: string is fine) - "PLR0124", # Name compared with itself, consider replacing `x == x` (Note: too strict) - "PLR0911", # Too many return statements - "PLR0912", # Too many branches - "PLR0913", # Too many arguments to function call - "PLR0915", # Too many statements - "PLR2004", # Magic number used in comparison, consider replacing magic with a constant variable - "PLW0603", # Using the global statement to update ... is discouraged (Note: yeah, discouraged, but too strict) - "PLW2901", # Outer for loop variable ... 
overwritten by inner assignment target (Note: good advice, but too strict) - "RET502", # Do not implicitly `return None` in function able to return non-`None` value - "RET503", # Missing explicit `return` at the end of function able to return non-`None` value - "RET504", # Unnecessary variable assignment before `return` statement - "S110", # `try`-`except`-`pass` detected, consider logging the exception (Note: good advice, but we don't log) - "S112", # `try`-`except`-`continue` detected, consider logging the exception (Note: good advice, but we don't log) - "S603", # `subprocess` call: check for execution of untrusted input (Note: not important for us) - "S607", # Starting a process with a partial executable path (Note: not important for us) - "SIM102", # Use a single `if` statement instead of nested `if` statements (Note: often necessary) - "SIM105", # Use contextlib.suppress(...) instead of try-except-pass (Note: try-except-pass is much faster) - "SIM108", # Use ternary operator ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer) - "TRY003", # Avoid specifying long messages outside the exception class (Note: why?) - - # Ignored categories - "C90", # mccabe (Too strict, but maybe we should make things less complex) - "I", # isort (Should we replace `isort` with this?) - "ANN", # flake8-annotations (We don't use annotations yet) - "BLE", # flake8-blind-except (Maybe consider) - "FBT", # flake8-boolean-trap (Why?) - "DJ", # flake8-django (We don't use django) - "EM", # flake8-errmsg (Perhaps nicer, but too much work) - "ICN", # flake8-import-conventions (Doesn't allow "_" prefix such as `_np`) - "PYI", # flake8-pyi (We don't have stub files yet) - "SLF", # flake8-self (We can use our own private variables--sheesh!) - "TID", # flake8-tidy-imports (Rely on isort and our own judgement) - "TCH", # flake8-type-checking (Note: figure out type checking later) - "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) - "TD", # flake8-todos (Maybe okay to add some of these) - "FIX", # flake8-fixme (like flake8-todos) - "ERA", # eradicate (We like code in comments!) - "PD", # pandas-vet (Intended for scripts that use pandas, not libraries) + # Would be nice to fix these + "D100", # Missing docstring in public module + "D101", # Missing docstring in public class + "D102", # Missing docstring in public method + "D103", # Missing docstring in public function + "D104", # Missing docstring in public package + "D105", # Missing docstring in magic method + "D107", # Missing docstring in `__init__` + "D205", # 1 blank line required between summary line and description + "D401", # First line of docstring should be in imperative mood: + "D417", # D417 Missing argument description in the docstring for ...: ... + "PLE0605", # Invalid format for `__all__`, must be `tuple` or `list` (Note: broken in v0.0.237) + + # Maybe consider + # "SIM300", # Yoda conditions are discouraged, use ... instead (Note: we're not this picky) + # "SIM401", # Use dict.get ... 
instead of if-else-block (Note: if-else better for coverage and sometimes clearer) + "B904", # Use `raise from` to specify exception cause (Note: sometimes okay to raise original exception) + "TRY004", # Prefer `TypeError` exception for invalid type (Note: good advice, but not worth the nuisance) + "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` (Note: no annotations yet) + "RUF021", # parenthesize-chained-operators (Note: results don't look good yet) + "RUF023", # unsorted-dunder-slots (Note: maybe fine, but noisy changes) + "PERF401", # Use a list comprehension to create a transformed list (Note: poorly implemented atm) + + # Intentionally ignored + "COM812", # Trailing comma missing + "D203", # 1 blank line required before class docstring (Note: conflicts with D211, which is preferred) + "D213", # (Note: conflicts with D212, which is preferred) + "D400", # First line should end with a period (Note: prefer D415, which also allows "?" and "!") + "N801", # Class name ... should use CapWords convention (Note:we have a few exceptions to this) + "N802", # Function name ... should be lowercase + "N803", # Argument name ... should be lowercase (Maybe okay--except in tests) + "N806", # Variable ... in function should be lowercase + "N807", # Function name should not start and end with `__` + "N818", # Exception name ... should be named with an Error suffix (Note: good advice) + "PERF203", # `try`-`except` within a loop incurs performance overhead (Note: too strict) + "PLC0205", # Class `__slots__` should be a non-string iterable (Note: string is fine) + "PLR0124", # Name compared with itself, consider replacing `x == x` (Note: too strict) + "PLR0911", # Too many return statements + "PLR0912", # Too many branches + "PLR0913", # Too many arguments to function call + "PLR0915", # Too many statements + "PLR2004", # Magic number used in comparison, consider replacing magic with a constant variable + "PLW0603", # Using the global statement to update ... is discouraged (Note: yeah, discouraged, but too strict) + "PLW0642", # Reassigned `self` variable in instance method (Note: too strict for us) + "PLW2901", # Outer for loop variable ... overwritten by inner assignment target (Note: good advice, but too strict) + "RET502", # Do not implicitly `return None` in function able to return non-`None` value + "RET503", # Missing explicit `return` at the end of function able to return non-`None` value + "RET504", # Unnecessary variable assignment before `return` statement + "S110", # `try`-`except`-`pass` detected, consider logging the exception (Note: good advice, but we don't log) + "S112", # `try`-`except`-`continue` detected, consider logging the exception (Note: good advice, but we don't log) + "S603", # `subprocess` call: check for execution of untrusted input (Note: not important for us) + "S607", # Starting a process with a partial executable path (Note: not important for us) + "SIM102", # Use a single `if` statement instead of nested `if` statements (Note: often necessary) + "SIM105", # Use contextlib.suppress(...) instead of try-except-pass (Note: try-except-pass is much faster) + "SIM108", # Use ternary operator ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer) + "TRY003", # Avoid specifying long messages outside the exception class (Note: why?) 
+ "UP038", # Use `X | Y` in `isinstance` call instead of `(X, Y)` (Note: using `|` is slower atm) + + # Ignored categories + "C90", # mccabe (Too strict, but maybe we should make things less complex) + "I", # isort (Should we replace `isort` with this?) + "ANN", # flake8-annotations (We don't use annotations yet) + "BLE", # flake8-blind-except (Maybe consider) + "FBT", # flake8-boolean-trap (Why?) + "DJ", # flake8-django (We don't use django) + "EM", # flake8-errmsg (Perhaps nicer, but too much work) + "ICN", # flake8-import-conventions (Doesn't allow "_" prefix such as `_np`) + "PYI", # flake8-pyi (We don't have stub files yet) + "SLF", # flake8-self (We can use our own private variables--sheesh!) + "TID", # flake8-tidy-imports (Rely on isort and our own judgement) + "TCH", # flake8-type-checking (Note: figure out type checking later) + "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) + "TD", # flake8-todos (Maybe okay to add some of these) + "FIX", # flake8-fixme (like flake8-todos) + "ERA", # eradicate (We like code in comments!) + "PD", # pandas-vet (Intended for scripts that use pandas, not libraries) ] -[tool.ruff.per-file-ignores] -"graphblas/core/agg.py" = ["F401", "F403"] # Deprecated -"graphblas/core/operator/base.py" = ["S102"] # exec is used for UDF -"graphblas/core/ss/matrix.py" = ["NPY002"] # numba doesn't support rng generator yet -"graphblas/core/ss/vector.py" = ["NPY002"] # numba doesn't support rng generator yet -"graphblas/core/utils.py" = ["PLE0302"] # `__set__` is used as a property -"graphblas/ss/_core.py" = ["N999"] # We want _core.py to be underscopre +[tool.ruff.lint.per-file-ignores] +"graphblas/core/operator/__init__.py" = ["A005"] +"graphblas/io/__init__.py" = ["A005"] # shadows a standard-library module +"graphblas/core/operator/base.py" = ["S102"] # exec is used for UDF +"graphblas/core/ss/matrix.py" = [ + "NPY002", # numba doesn't support rng generator yet + "PLR1730", +] +"graphblas/core/ss/vector.py" = [ + "NPY002", # numba doesn't support rng generator yet +] +"graphblas/core/utils.py" = ["PLE0302"] # `__set__` is used as a property +"graphblas/ss/_core.py" = ["N999"] # We want _core.py to be underscopre # Allow useless expressions, assert, pickle, RNG, print, no docstring, and yoda in tests -"graphblas/tests/*py" = ["B018", "S101", "S301", "S311", "T201", "D103", "D100", "SIM300"] -"graphblas/tests/test_formatting.py" = ["E501"] # Allow long lines -"graphblas/**/__init__.py" = ["F401"] # Allow unused imports (w/o defining `__all__`) -"scripts/*.py" = ["INP001"] # Not a package -"scripts/create_pickle.py" = ["F403", "F405"] # Allow `from foo import *` -"docs/*.py" = ["INP001"] # Not a package +"graphblas/tests/*py" = [ + "B018", + "S101", + "S301", + "S311", + "T201", + "D103", + "D100", + "SIM300", +] +"graphblas/tests/test_formatting.py" = ["E501"] # Allow long lines +"graphblas/**/__init__.py" = [ + "F401", # Allow unused imports (w/o defining `__all__`) +] +"scripts/*.py" = ["INP001"] # Not a package +"scripts/create_pickle.py" = ["F403", "F405"] # Allow `from foo import *` +"docs/*.py" = ["INP001"] # Not a package -[tool.ruff.flake8-builtins] +[tool.ruff.lint.flake8-builtins] builtins-ignorelist = ["copyright", "format", "min", "max"] +builtins-allowed-modules = ["select"] -[tool.ruff.flake8-pytest-style] +[tool.ruff.lint.flake8-pytest-style] fixture-parentheses = false mark-parentheses = false -[tool.ruff.pydocstyle] +[tool.lint.ruff.pydocstyle] convention = "numpy" +[tool.bandit] +exclude_dirs = ["graphblas/tests", "scripts"] 
+skips = [
+    "B110",  # Try, Except, Pass detected. (Note: it would be nice to not have this pattern)
+]
+
 [tool.pylint.messages_control]
 # To run a single check, do: pylint graphblas --disable E,W,R,C,I --enable assignment-from-no-return
 max-line-length = 100
-py-version = "3.9"
+py-version = "3.10"
 enable = ["I"]
 disable = [
-  # Error
-  "assignment-from-no-return",
-
-  # Warning
-  "arguments-differ",
-  "arguments-out-of-order",
-  "expression-not-assigned",
-  "fixme",
-  "global-statement",
-  "non-parent-init-called",
-  "redefined-builtin",
-  "redefined-outer-name",
-  "super-init-not-called",
-  "unbalanced-tuple-unpacking",
-  "unnecessary-lambda",
-  "unspecified-encoding",
-  "unused-argument",
-  "unused-variable",
-
-  # Refactor
-  "cyclic-import",
-  "duplicate-code",
-  "inconsistent-return-statements",
-  "too-few-public-methods",
-
-  # Convention
-  "missing-class-docstring",
-  "missing-function-docstring",
-  "missing-module-docstring",
-  "too-many-lines",
-
-  # Intentionally turned off
-  # error
-  "class-variable-slots-conflict",
-  "invalid-unary-operand-type",
-  "no-member",
-  "no-name-in-module",
-  "not-an-iterable",
-  "too-many-function-args",
-  "unexpected-keyword-arg",
-  # warning
-  "broad-except",
-  "pointless-statement",
-  "protected-access",
-  "undefined-loop-variable",
-  "unused-import",
-  # refactor
-  "comparison-with-itself",
-  "too-many-arguments",
-  "too-many-boolean-expressions",
-  "too-many-branches",
-  "too-many-instance-attributes",
-  "too-many-locals",
-  "too-many-nested-blocks",
-  "too-many-public-methods",
-  "too-many-return-statements",
-  "too-many-statements",
-  # convention
-  "import-outside-toplevel",
-  "invalid-name",
-  "line-too-long",
-  "singleton-comparison",
-  "single-string-used-for-slots",
-  "unidiomatic-typecheck",
-  "unnecessary-dunder-call",
-  "wrong-import-order",
-  "wrong-import-position",
-  # informative
-  "locally-disabled",
-  "suppressed-message",
+    # Error
+    "assignment-from-no-return",
+
+    # Warning
+    "arguments-differ",
+    "arguments-out-of-order",
+    "expression-not-assigned",
+    "fixme",
+    "global-statement",
+    "non-parent-init-called",
+    "redefined-builtin",
+    "redefined-outer-name",
+    "super-init-not-called",
+    "unbalanced-tuple-unpacking",
+    "unnecessary-lambda",
+    "unspecified-encoding",
+    "unused-argument",
+    "unused-variable",
+
+    # Refactor
+    "cyclic-import",
+    "duplicate-code",
+    "inconsistent-return-statements",
+    "too-few-public-methods",
+
+    # Convention
+    "missing-class-docstring",
+    "missing-function-docstring",
+    "missing-module-docstring",
+    "too-many-lines",
+
+    # Intentionally turned off
+    # error
+    "class-variable-slots-conflict",
+    "invalid-unary-operand-type",
+    "no-member",
+    "no-name-in-module",
+    "not-an-iterable",
+    "too-many-function-args",
+    "unexpected-keyword-arg",
+    # warning
+    "broad-except",
+    "pointless-statement",
+    "protected-access",
+    "undefined-loop-variable",
+    "unused-import",
+    # refactor
+    "comparison-with-itself",
+    "too-many-arguments",
+    "too-many-boolean-expressions",
+    "too-many-branches",
+    "too-many-instance-attributes",
+    "too-many-locals",
+    "too-many-nested-blocks",
+    "too-many-public-methods",
+    "too-many-return-statements",
+    "too-many-statements",
+    # convention
+    "import-outside-toplevel",
+    "invalid-name",
+    "line-too-long",
+    "singleton-comparison",
+    "single-string-used-for-slots",
+    "unidiomatic-typecheck",
+    "unnecessary-dunder-call",
+    "wrong-import-order",
+    "wrong-import-position",
+    # informative
+    "locally-disabled",
+    "suppressed-message",
 ]
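The pyproject.toml changes above relocate the ruff settings under the `tool.ruff.lint` namespace and introduce a `[tool.bandit]` table. A minimal sketch of a local sanity check that the migrated configuration is actually picked up (the pylint invocation is the one suggested by the comment in the config itself; `bandit -c pyproject.toml` assumes the `bandit[toml]` extra is installed):

    # Ruff discovers [tool.ruff.lint.*] in pyproject.toml automatically.
    ruff check graphblas/ scripts/ docs/

    # Bandit reads the [tool.bandit] table when given the TOML config explicitly
    # (assumes bandit was installed with the `toml` extra).
    bandit -c pyproject.toml -r graphblas/

    # Run a single pylint check, per the comment in [tool.pylint.messages_control].
    pylint graphblas --disable E,W,R,C,I --enable assignment-from-no-return
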
diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh
index 958bf2210..5aa88e045 100755
--- a/scripts/check_versions.sh
+++ b/scripts/check_versions.sh
@@ -3,15 +3,15 @@
 # Use, adjust, copy/paste, etc. as necessary to answer your questions.
 # This may be helpful when updating dependency versions in CI.
 # Tip: add `--json` for more information.
-conda search 'flake8-bugbear[channel=conda-forge]>=23.12.2'
+conda search 'flake8-bugbear[channel=conda-forge]>=24.12.12'
 conda search 'flake8-simplify[channel=conda-forge]>=0.21.0'
-conda search 'numpy[channel=conda-forge]>=1.26.2'
-conda search 'pandas[channel=conda-forge]>=2.1.4'
-conda search 'scipy[channel=conda-forge]>=1.11.4'
-conda search 'networkx[channel=conda-forge]>=3.2.1'
-conda search 'awkward[channel=conda-forge]>=2.5.0'
-conda search 'sparse[channel=conda-forge]>=0.14.0'
-conda search 'fast_matrix_market[channel=conda-forge]>=1.7.5'
-conda search 'numba[channel=conda-forge]>=0.58.1'
-conda search 'pyyaml[channel=conda-forge]>=6.0.1'
-# conda search 'python[channel=conda-forge]>=3.9 *pypy*'
+conda search 'numpy[channel=conda-forge]>=2.2.3'
+conda search 'pandas[channel=conda-forge]>=2.2.3'
+conda search 'scipy[channel=conda-forge]>=1.15.2'
+conda search 'networkx[channel=conda-forge]>=3.4.2'
+conda search 'awkward[channel=conda-forge]>=2.7.4'
+conda search 'sparse[channel=conda-forge]>=0.15.5'
+conda search 'fast_matrix_market[channel=conda-forge]>=1.7.6'
+conda search 'numba[channel=conda-forge]>=0.61.0'
+conda search 'pyyaml[channel=conda-forge]>=6.0.2'
+# conda search 'python[channel=conda-forge]>=3.10 *pypy*'
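Since check_versions.sh repeats the same `conda search` pattern once per dependency, the lookups could also be scripted in bulk. A minimal sketch, assuming `jq` is installed and that conda's `--json` output keys its build records by package name (the package list mirrors the script above; adjust as needed):

    #!/usr/bin/env bash
    # Print the newest conda-forge build of each pinned dependency.
    for pkg in numpy pandas scipy networkx awkward sparse fast_matrix_market numba pyyaml; do
        conda search "${pkg}[channel=conda-forge]" --json \
            | jq -r --arg p "$pkg" '.[$p][-1] | "\(.name) \(.version)"'
    done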