diff --git a/.codecov.yml b/.codecov.yml
index 1720ac027..1894009c1 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -1,3 +1,16 @@
-comment: off
+coverage:
+ status:
+ project:
+ default:
+ informational: true
+ patch:
+ default:
+ informational: true
+ changes: false
+comment:
+ layout: "header, diff"
+ behavior: default
+github_checks:
+ annotations: false
ignore:
- graphblas/viz.py
diff --git a/.flake8 b/.flake8
index 80124c9e8..0dede3f1d 100644
--- a/.flake8
+++ b/.flake8
@@ -12,6 +12,5 @@ extend-ignore =
per-file-ignores =
scripts/create_pickle.py:F403,F405,
graphblas/tests/*.py:T201,
- graphblas/core/agg.py:F401,F403,
graphblas/core/ss/matrix.py:SIM113,
graphblas/**/__init__.py:F401,
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index b18fd2935..5ace4600a 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,6 +1,6 @@
version: 2
updates:
- - package-ecosystem: 'github-actions'
- directory: '/'
+ - package-ecosystem: "github-actions"
+ directory: "/"
schedule:
- interval: 'weekly'
+ interval: "weekly"
diff --git a/.github/workflows/debug.yml b/.github/workflows/debug.yml
index 794746f77..6c2b202b1 100644
--- a/.github/workflows/debug.yml
+++ b/.github/workflows/debug.yml
@@ -5,7 +5,7 @@ on:
workflow_dispatch:
inputs:
debug_enabled:
- description: 'Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)'
+ description: "Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)"
required: false
default: false
@@ -15,7 +15,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- pyver: [3.8]
+ pyver: ["3.10"]
testopts:
- "--blocking"
# - "--non-blocking --record --runslow"
@@ -26,9 +26,10 @@ jobs:
# - "conda-forge"
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
fetch-depth: 0
+ persist-credentials: false
- name: Setup conda env
run: |
source "$CONDA/etc/profile.d/conda.sh"
diff --git a/.github/workflows/imports.yml b/.github/workflows/imports.yml
index 2b0b0ed9f..e24d0d4db 100644
--- a/.github/workflows/imports.yml
+++ b/.github/workflows/imports.yml
@@ -14,7 +14,7 @@ jobs:
pyver: ${{ steps.pyver.outputs.selected }}
steps:
- name: RNG for os
- uses: ddradar/choose-random-action@v2.0.2
+ uses: ddradar/choose-random-action@v3.0.0
id: os
with:
contents: |
@@ -26,14 +26,14 @@ jobs:
1
1
- name: RNG for Python version
- uses: ddradar/choose-random-action@v2.0.2
+ uses: ddradar/choose-random-action@v3.0.0
id: pyver
with:
contents: |
- 3.8
- 3.9
3.10
3.11
+ 3.12
+ 3.13
weights: |
1
1
@@ -45,14 +45,19 @@ jobs:
# runs-on: ${{ matrix.os }}
# strategy:
# matrix:
- # python-version: ["3.8", "3.9", "3.10", "3.11"]
+ # python-version: ["3.10", "3.11", "3.12", "3.13"]
# os: ["ubuntu-latest", "macos-latest", "windows-latest"]
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ persist-credentials: false
+ - uses: actions/setup-python@v5
with:
python-version: ${{ needs.rngs.outputs.pyver }}
# python-version: ${{ matrix.python-version }}
- run: python -m pip install --upgrade pip
+ # - run: pip install --pre suitesparse-graphblas # Use if we need pre-release
- run: pip install -e .[default]
- - run: ./scripts/test_imports.sh
+ - name: Run test imports
+ run: ./scripts/test_imports.sh
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 5ef2b1033..655a576e5 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -1,10 +1,12 @@
+# Rely on pre-commit.ci instead
name: Lint via pre-commit
on:
- pull_request:
- push:
- branches-ignore:
- - main
+ workflow_dispatch:
+ # pull_request:
+ # push:
+ # branches-ignore:
+ # - main
permissions:
contents: read
@@ -14,8 +16,11 @@ jobs:
name: pre-commit-hooks
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ persist-credentials: false
+ - uses: actions/setup-python@v5
with:
python-version: "3.10"
- - uses: pre-commit/action@v3.0.0
+ - uses: pre-commit/action@v3.0.1
diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml
index abf3fefa6..32926c5c8 100644
--- a/.github/workflows/publish_pypi.yml
+++ b/.github/workflows/publish_pypi.yml
@@ -3,7 +3,7 @@ name: Publish to PyPI
on:
push:
tags:
- - '20*'
+ - "20*"
jobs:
build_and_deploy:
@@ -14,20 +14,21 @@ jobs:
shell: bash -l {0}
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
fetch-depth: 0
+ persist-credentials: false
- name: Set up Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
- python-version: "3.8"
+ python-version: "3.10"
- name: Install build dependencies
run: |
python -m pip install --upgrade pip
python -m pip install build twine
- name: Build wheel and sdist
run: python -m build --sdist --wheel
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
with:
name: releases
path: dist
@@ -35,7 +36,7 @@ jobs:
- name: Check with twine
run: python -m twine check --strict dist/*
- name: Publish to PyPI
- uses: pypa/gh-action-pypi-publish@v1.8.5
+ uses: pypa/gh-action-pypi-publish@v1.12.4
with:
user: __token__
password: ${{ secrets.PYPI_TOKEN }}
diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml
index c20530fbe..af7525928 100644
--- a/.github/workflows/test_and_build.yml
+++ b/.github/workflows/test_and_build.yml
@@ -17,6 +17,10 @@ on:
branches:
- main
+# concurrency:
+# group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+# cancel-in-progress: true
+
jobs:
rngs:
# To achieve consistent coverage, we need a little bit of correlated collaboration.
@@ -46,7 +50,7 @@ jobs:
backend: ${{ steps.backend.outputs.selected }}
steps:
- name: RNG for mapnumpy
- uses: ddradar/choose-random-action@v2.0.2
+ uses: ddradar/choose-random-action@v3.0.0
id: mapnumpy
with:
contents: |
@@ -60,7 +64,7 @@ jobs:
1
1
- name: RNG for backend
- uses: ddradar/choose-random-action@v2.0.2
+ uses: ddradar/choose-random-action@v3.0.0
id: backend
with:
contents: |
@@ -80,43 +84,44 @@ jobs:
run:
shell: bash -l {0}
strategy:
- # To "stress test" in CI, set `fail-fast` to `false` and perhaps add more items to `matrix.slowtask`
- fail-fast: true
+ # To "stress test" in CI, set `fail-fast` to `false` and use `repeat` in matrix below
+ fail-fast: false
# The build matrix is [os]x[slowtask] and then randomly chooses [pyver] and [sourcetype].
# This should ensure we'll have full code coverage (i.e., no chance of getting unlucky),
# since we need to run all slow tests on Windows and non-Windows OSes.
matrix:
os: ["ubuntu-latest", "macos-latest", "windows-latest"]
slowtask: ["pytest_normal", "pytest_bizarro", "notebooks"]
+ # repeat: [1, 2, 3] # For stress testing
env:
# Wheels on OS X come with an OpenMP that conflicts with OpenMP from conda-forge.
# Setting this is a workaround.
KMP_DUPLICATE_LIB_OK: ${{ contains(matrix.os, 'macos') && 'TRUE' || 'FALSE' }}
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
fetch-depth: 0
+ persist-credentials: false
- name: RNG for Python version
- uses: ddradar/choose-random-action@v2.0.2
+ uses: ddradar/choose-random-action@v3.0.0
id: pyver
with:
- # We should support major Python versions for at least 36-42 months
+ # We should support major Python versions for at least 36 months as per SPEC 0
# We may be able to support pypy if anybody asks for it
- # 3.8.16 0_73_pypy
# 3.9.16 0_73_pypy
contents: |
- 3.8
- 3.9
3.10
3.11
+ 3.12
+ 3.13
weights: |
1
1
1
1
- name: RNG for source of python-suitesparse-graphblas
- uses: ddradar/choose-random-action@v2.0.2
+ uses: ddradar/choose-random-action@v3.0.0
id: sourcetype
with:
# Weights must be natural numbers, so set weights to very large to skip one
@@ -131,28 +136,14 @@ jobs:
1
1
1
- - name: Setup mamba
- uses: conda-incubator/setup-miniconda@v2
- id: setup_mamba
- continue-on-error: true
- with:
- miniforge-variant: Mambaforge
- miniforge-version: latest
- use-mamba: true
- python-version: ${{ steps.pyver.outputs.selected }}
- channels: conda-forge,${{ contains(steps.pyver.outputs.selected, 'pypy') && 'defaults' || 'nodefaults' }}
- channel-priority: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'flexible' || 'strict' }}
- activate-environment: graphblas
- auto-activate-base: false
- name: Setup conda
- uses: conda-incubator/setup-miniconda@v2
+ uses: conda-incubator/setup-miniconda@v3
id: setup_conda
- if: steps.setup_mamba.outcome == 'failure'
- continue-on-error: false
with:
auto-update-conda: true
python-version: ${{ steps.pyver.outputs.selected }}
- channels: conda-forge,${{ contains(steps.pyver.outputs.selected, 'pypy') && 'defaults' || 'nodefaults' }}
+ channels: conda-forge${{ contains(steps.pyver.outputs.selected, 'pypy') && ',defaults' || '' }}
+ conda-remove-defaults: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'false' || 'true' }}
channel-priority: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'flexible' || 'strict' }}
activate-environment: graphblas
auto-activate-base: false
@@ -163,69 +154,159 @@ jobs:
#
# First let's randomly get versions of dependencies to install.
# Consider removing old versions when they become problematic or very old (>=2 years).
- nxver=$(python -c 'import random ; print(random.choice(["=2.7", "=2.8", "=3.0", "=3.1", ""]))')
- yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))')
- sparsever=$(python -c 'import random ; print(random.choice(["=0.13", "=0.14", ""]))')
- fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", ""]))')
- if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.8') }} == true ]]; then
- npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))')
- spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))')
- pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))')
- akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", ""]))')
- elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then
- npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))')
- spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))')
- pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))')
- akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", ""]))')
- elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then
- npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))')
- spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))')
- pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", ""]))')
- akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", ""]))')
- else # Python 3.11
- npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", ""]))')
- spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", ""]))')
- pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", ""]))')
- akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", ""]))')
+
+ # Randomly choosing versions of dependencies based on Python version works surprisingly well...
+ if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then
+ nxver=$(python -c 'import random ; print(random.choice(["=2.8", "=3.0", "=3.1", "=3.2", "=3.3", "=3.4", ""]))')
+ npver=$(python -c 'import random ; print(random.choice(["=1.24", "=1.25", "=1.26", "=2.0", "=2.1", "=2.2", ""]))')
+ spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", "=1.13", "=1.14", "=1.15", ""]))')
+ pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", "=2.2", ""]))')
+ akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", "=2.6", "=2.7", ""]))')
+ fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))')
+ yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))')
+ sparsever=$(python -c 'import random ; print(random.choice(["=0.14", "=0.15", ""]))')
+ elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]]; then
+ nxver=$(python -c 'import random ; print(random.choice(["=2.8", "=3.0", "=3.1", "=3.2", "=3.3", "=3.4", ""]))')
+ npver=$(python -c 'import random ; print(random.choice(["=1.24", "=1.25", "=1.26", "=2.0", "=2.1", "=2.2", ""]))')
+ spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", "=1.13", "=1.14", "=1.15", ""]))')
+ pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", "=2.2", ""]))')
+ akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", "=2.6", "=2.7", ""]))')
+ fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))')
+ yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))')
+ sparsever=$(python -c 'import random ; print(random.choice(["=0.14", "=0.15", ""]))')
+ elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]]; then
+ nxver=$(python -c 'import random ; print(random.choice(["=3.2", "=3.3", "=3.4", ""]))')
+ npver=$(python -c 'import random ; print(random.choice(["=1.26", "=2.0", "=2.1", "=2.2", ""]))')
+ spver=$(python -c 'import random ; print(random.choice(["=1.11", "=1.12", "=1.13", "=1.14", "=1.15", ""]))')
+ pdver=$(python -c 'import random ; print(random.choice(["=2.1", "=2.2", ""]))')
+ akver=$(python -c 'import random ; print(random.choice(["=2.4", "=2.5", "=2.6", "=2.7", ""]))')
+ fmmver=$(python -c 'import random ; print(random.choice(["=1.7", ""]))')
+ yamlver=$(python -c 'import random ; print(random.choice(["=6.0", ""]))')
+ sparsever=$(python -c 'import random ; print(random.choice(["=0.14", "=0.15", ""]))')
+ else # Python 3.13
+ nxver=$(python -c 'import random ; print(random.choice(["=3.4", ""]))')
+ npver=$(python -c 'import random ; print(random.choice(["=2.1", "=2.2", ""]))')
+ spver=$(python -c 'import random ; print(random.choice(["=1.14", "=1.15", ""]))')
+ pdver=$(python -c 'import random ; print(random.choice(["=2.2", ""]))')
+ akver=$(python -c 'import random ; print(random.choice(["=2.7", ""]))')
+ fmmver=NA # Not yet supported
+ yamlver=$(python -c 'import random ; print(random.choice(["=6.0", ""]))')
+ sparsever=NA # Not yet supported
fi
- if [[ ${{ steps.sourcetype.outputs.selected }} == "source" || ${{ steps.sourcetype.outputs.selected }} == "upstream" ]]; then
+
+ # But there may be edge cases of incompatibility we need to handle (more handled below)
+ if [[ ${{ steps.sourcetype.outputs.selected }} == "source" ]]; then
# TODO: there are currently issues with some numpy versions when
- # installing python-suitesparse-grphblas from source or upstream.
+ # installing python-suitesparse-graphblas from source.
npver=""
spver=""
pdver=""
fi
+
# We can have a tight coupling with python-suitesparse-graphblas.
# That is, we don't need to support versions of it that are two years old.
# But, it's still useful for us to test with different versions!
psg=""
- if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then
- psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", ""]))')
+ if [[ ${{ steps.sourcetype.outputs.selected}} == "upstream" ]] ; then
+ # Upstream needs to build with numpy 2
+ psgver=""
+ if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]]; then
+ npver=$(python -c 'import random ; print(random.choice(["=2.1", "=2.2", ""]))')
+ else
+ npver=$(python -c 'import random ; print(random.choice(["=2.0", "=2.1", "=2.2", ""]))')
+ fi
+ elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]] ; then
+ if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then
+ psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", "=9.4.5.0", ""]))')
+ psg=python-suitesparse-graphblas${psgver}
+ else
+ psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", "==9.4.5.0", ""]))')
+ fi
+ elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]] ; then
+ if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then
+ if [[ $npver == =1.* ]] ; then
+ psgver=$(python -c 'import random ; print(random.choice(["=8.2.0.1", "=8.2.1.0"]))')
+ else
+ psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", "=9.4.5.0", ""]))')
+ fi
+ psg=python-suitesparse-graphblas${psgver}
+ else
+ if [[ $npver == =1.* ]] ; then
+ psgver=$(python -c 'import random ; print(random.choice(["==8.2.0.1", "==8.2.1.0"]))')
+ else
+ psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", "==9.4.5.0", ""]))')
+ fi
+ fi
+ # python-suitesparse-graphblas support is the same for Python 3.10 and 3.11
+ elif [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then
+ if [[ $npver == =1.* ]] ; then
+ psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", "=8.2.0.1", "=8.2.1.0"]))')
+ else
+ psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", "=9.4.5.0", ""]))')
+ fi
psg=python-suitesparse-graphblas${psgver}
elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then
- psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", ""]))')
+ if [[ $npver == =1.* ]] ; then
+ psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0"]))')
+ else
+ psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", "==9.4.5.0", ""]))')
+ fi
elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then
# These should be exact versions
- psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", ""]))')
- else
- psgver=""
+ if [[ $npver == =1.* ]] ; then
+ psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0"]))')
+ else
+ psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", "==9.4.5.0", ""]))')
+ fi
fi
- if [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then
- numbaver=$(python -c 'import random ; print(random.choice(["=0.57", ""]))')
- elif [[ ${npver} == "=1.21" ]] ; then
- numbaver=$(python -c 'import random ; print(random.choice(["=0.55", "=0.56", "=0.57", ""]))')
+
+ # Numba is tightly coupled to numpy versions
+ if [[ ${npver} == "=1.26" ]] ; then
+ numbaver=$(python -c 'import random ; print(random.choice(["=0.58", "=0.59", "=0.60", "=0.61", ""]))')
+ if [[ ${spver} == "=1.9" ]] ; then
+ spver=$(python -c 'import random ; print(random.choice(["=1.10", "=1.11", ""]))')
+ fi
+ elif [[ ${npver} == "=1.25" ]] ; then
+ numbaver=$(python -c 'import random ; print(random.choice(["=0.58", "=0.59", "=0.60", "=0.61", ""]))')
+ elif [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then
+ numbaver=$(python -c 'import random ; print(random.choice(["=0.57", "=0.58", "=0.59", "=0.60", "=0.61", ""]))')
else
- numbaver=$(python -c 'import random ; print(random.choice(["=0.56", "=0.57", ""]))')
+ numbaver=""
+ fi
+ # Only numba >=0.59 supports Python 3.12
+ if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]] ; then
+ numbaver=$(python -c 'import random ; print(random.choice(["=0.59", "=0.60", "=0.61", ""]))')
+ fi
+
+ # Handle NumPy 2
+ if [[ $npver != =1.* ]] ; then
+ # Only pandas >=2.2.2 supports NumPy 2
+ pdver=$(python -c 'import random ; print(random.choice(["=2.2", ""]))')
+
+ # Only awkward >=2.6.3 supports NumPy 2
+ if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]] ; then
+ akver=$(python -c 'import random ; print(random.choice(["=2.7", ""]))')
+ else
+ akver=$(python -c 'import random ; print(random.choice(["=2.6", "=2.7", ""]))')
+ fi
+
+ # Only scipy >=1.13 supports NumPy 2
+ if [[ $spver == "=1.9" || $spver == "=1.10" || $spver == "=1.11" || $spver == "=1.12" ]] ; then
+ spver="=1.13"
+ fi
fi
+
fmm=fast_matrix_market${fmmver}
awkward=awkward${akver}
+
+ # Don't install numba and sparse for some versions
if [[ ${{ contains(steps.pyver.outputs.selected, 'pypy') ||
- startsWith(steps.pyver.outputs.selected, '3.12') }} == true ||
+ startsWith(steps.pyver.outputs.selected, '3.14') }} == true ||
( ${{ matrix.slowtask != 'notebooks'}} == true && (
( ${{ matrix.os == 'windows-latest' }} == true && $(python -c 'import random ; print(random.random() < .2)') == True ) ||
( ${{ matrix.os == 'windows-latest' }} == false && $(python -c 'import random ; print(random.random() < .4)') == True ))) ]]
then
- # Some packages aren't available for pypy or Python 3.12; randomly otherwise (if not running notebooks)
+ # Some packages aren't available for pypy or Python 3.14; randomly otherwise (if not running notebooks)
echo "skipping numba"
numba=""
numbaver=NA
@@ -242,21 +323,44 @@ jobs:
pdver=""
yamlver=""
fi
+ elif [[ ${npver} == =2.* ]] ; then
+ # Don't install numba for unsupported versions of numpy
+ numba=""
+ numbaver=NA
+ sparse=""
+ sparsever=NA
else
numba=numba${numbaver}
sparse=sparse${sparsever}
fi
- echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psgver${psgver}"
- # TODO: remove `-c numba` when numba 0.57 is properly released on conda-forge
- $(command -v mamba || command -v conda) install -c numba packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli \
+ # sparse does not yet support Python 3.13
+ if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]] ; then
+ sparse=""
+ sparsever=NA
+ fi
+ # fast_matrix_market does not yet support Python 3.13 or osx-arm64
+ if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ||
+ ${{ matrix.os == 'macos-latest' }} == true ]]
+ then
+ fmm=""
+ fmmver=NA
+ fi
+
+ echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psg${psgver}"
+
+ set -x # echo on
+ $(command -v mamba || command -v conda) install -c nodefaults \
+ packaging pytest coverage pytest-randomly cffi donfig tomli c-compiler make \
pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \
networkx${nxver} ${numba} ${fmm} ${psg} \
${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \
- ${{ matrix.slowtask == 'notebooks' && 'matplotlib nbconvert jupyter "ipython>=7"' || '' }} \
+ ${{ matrix.slowtask == 'notebooks' && 'matplotlib nbconvert jupyter "ipython>=7" drawsvg' || '' }} \
${{ steps.sourcetype.outputs.selected == 'upstream' && 'cython' || '' }} \
- ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4.0"' || '' }} \
- ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }}
+ ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4,<9.5"' || '' }} \
+ ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} \
+ ${{ matrix.os == 'windows-latest' && 'cmake' || 'm4' }} \
+ # ${{ matrix.os != 'windows-latest' && 'pytest-forked' || '' }} # to investigate crashes
- name: Build extension module
run: |
if [[ ${{ steps.sourcetype.outputs.selected }} == "wheel" ]]; then
@@ -277,6 +381,16 @@ jobs:
pip install --no-deps git+https://github.com/GraphBLAS/python-suitesparse-graphblas.git@main#egg=suitesparse-graphblas
fi
pip install --no-deps -e .
+ - name: python-suitesparse-graphblas tests
+ run: |
+ # Don't use our conftest.py ; allow `test_print_jit_config` to fail if it doesn't exist
+ (cd ..
+ pytest --pyargs suitesparse_graphblas -s -k test_print_jit_config || true
+ pytest -v --pyargs suitesparse_graphblas || true)
+ - name: Print platform and sysconfig variables
+ run: |
+ python -c "import platform ; print(platform.uname())"
+ python -c "import pprint, sysconfig ; pprint.pprint(sysconfig.get_config_vars())"
- name: Unit tests
run: |
A=${{ needs.rngs.outputs.mapnumpy == 'A' || '' }} ; B=${{ needs.rngs.outputs.mapnumpy == 'B' || '' }}
@@ -304,7 +418,9 @@ jobs:
if [[ $H && $normal ]] ; then if [[ $macos ]] ; then echo " $vanilla" ; elif [[ $windows ]] ; then echo " $suitesparse" ; fi ; fi)$( \
if [[ $H && $bizarro ]] ; then if [[ $macos ]] ; then echo " $suitesparse" ; elif [[ $windows ]] ; then echo " $vanilla" ; fi ; fi)
echo ${args}
- pytest -v --pyargs suitesparse_graphblas
+ set -x # echo on
+ # pytest ${{ matrix.os != 'windows-latest' && '--forked' || '' }} \ # to investigate crashes
+ # --color=yes --randomly -v -s ${args} \
coverage run -m pytest --color=yes --randomly -v ${args} \
${{ matrix.slowtask == 'pytest_normal' && '--runslow' || '' }}
- name: Unit tests (bizarro scalars)
@@ -340,6 +456,9 @@ jobs:
if [[ $H && $normal ]] ; then if [[ $macos ]] ; then echo " $suitesparse" ; elif [[ $windows ]] ; then echo " $vanilla" ; fi ; fi)$( \
if [[ $H && $bizarro ]] ; then if [[ $macos ]] ; then echo " $vanilla" ; elif [[ $windows ]] ; then echo " $suitesparse" ; fi ; fi)
echo ${args}
+ set -x # echo on
+ # pytest ${{ matrix.os != 'windows-latest' && '--forked' || '' }} \ # to investigate crashes
+ # --color=yes --randomly -v -s ${args} \
coverage run -a -m pytest --color=yes --randomly -v ${args} \
${{ matrix.slowtask == 'pytest_bizarro' && '--runslow' || '' }}
git checkout . # Undo changes to scalar default
@@ -353,11 +472,14 @@ jobs:
echo "from graphblas.agg import count" > script.py
coverage run -a script.py
echo "from graphblas import agg" > script.py # Does this still cover?
- echo "from graphblas.core import agg" >> script.py
+ echo "from graphblas.core.operator import agg" >> script.py
coverage run -a script.py
# Tests lazy loading of lib, ffi, and NULL in gb.core
echo "from graphblas.core import base" > script.py
coverage run -a script.py
+ # Test another code pathway for loading lib
+ echo "from graphblas.core import lib" > script.py
+ coverage run -a script.py
rm script.py
# Tests whose coverage depends on order of tests :/
# TODO: understand why these are order-dependent and try to fix
@@ -377,32 +499,12 @@ jobs:
coverage run -a -m graphblas.core.automethods
coverage run -a -m graphblas.core.infixmethods
git diff --exit-code
- - name: Coverage1
- id: coverageAttempt1
- continue-on-error: true
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- COVERALLS_FLAG_NAME: ${{ matrix.os }}/${{ matrix.slowtask }}
- COVERALLS_PARALLEL: true
+ - name: Coverage
run: |
coverage xml
coverage report --show-missing
- coveralls --service=github
- # Retry upload if first attempt failed.
- # This happens somewhat randomly and for irregular reasons.
- # Logic is a duplicate of previous step.
- - name: Coverage2
- id: coverageAttempt2
- if: steps.coverageAttempt1.outcome == 'failure'
- continue-on-error: false
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- COVERALLS_FLAG_NAME: ${{ matrix.os }}/${{ matrix.slowtask }}
- COVERALLS_PARALLEL: true
- run: |
- coveralls --service=github
- name: codecov
- uses: codecov/codecov-action@v3
+ uses: codecov/codecov-action@v5
- name: Notebooks Execution check
if: matrix.slowtask == 'notebooks'
run: |
@@ -410,18 +512,3 @@ jobs:
if python -c 'import numba' 2> /dev/null ; then
jupyter nbconvert --to notebook --execute notebooks/*ipynb
fi
-
- finish:
- needs: build_and_test
- if: always()
- runs-on: ubuntu-latest
- steps:
- - uses: actions/setup-python@v4
- with:
- python-version: "3.10"
- - run: python -m pip install --upgrade pip
- - run: pip install coveralls
- - name: Coveralls Finished
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: coveralls --finish
diff --git a/.github/zizmor.yml b/.github/zizmor.yml
new file mode 100644
index 000000000..61f32c2e0
--- /dev/null
+++ b/.github/zizmor.yml
@@ -0,0 +1,16 @@
+rules:
+ use-trusted-publishing:
+ # TODO: we should update to use trusted publishing
+ ignore:
+ - publish_pypi.yml
+ excessive-permissions:
+ # It is probably good practice to use narrow permissions
+ ignore:
+ - debug.yml
+ - imports.yml
+ - publish_pypi.yml
+ - test_and_build.yml
+ template-injection:
+ # We use templates pretty heavily
+ ignore:
+ - test_and_build.yml
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8f4fac317..43e28b8fe 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -5,92 +5,100 @@
# To update: `pre-commit autoupdate`
# - &flake8_dependencies below needs updated manually
ci:
- # See: https://pre-commit.ci/#configuration
- autofix_prs: false
- autoupdate_schedule: monthly
- skip: [pylint, no-commit-to-branch]
-fail_fast: true
+ # See: https://pre-commit.ci/#configuration
+ autofix_prs: false
+ autoupdate_schedule: quarterly
+ autoupdate_commit_msg: "chore: update pre-commit hooks"
+ autofix_commit_msg: "style: pre-commit fixes"
+ skip: [pylint, no-commit-to-branch]
+fail_fast: false
default_language_version:
- python: python3
+ python: python3
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.4.0
+ rev: v5.0.0
hooks:
- id: check-added-large-files
+ - id: check-case-conflict
+ - id: check-merge-conflict
+ # - id: check-symlinks
- id: check-ast
- id: check-toml
- id: check-yaml
+ - id: check-executables-have-shebangs
+ - id: check-vcs-permalinks
+ - id: destroyed-symlinks
+ - id: detect-private-key
- id: debug-statements
- id: end-of-file-fixer
+ exclude_types: [svg]
- id: mixed-line-ending
- id: trailing-whitespace
+ - id: name-tests-test
+ args: ["--pytest-test-first"]
- repo: https://github.com/abravalheri/validate-pyproject
- rev: v0.12.2
+ rev: v0.23
hooks:
- id: validate-pyproject
name: Validate pyproject.toml
# I don't yet trust ruff to do what autoflake does
- repo: https://github.com/PyCQA/autoflake
- rev: v2.1.1
+ rev: v2.3.1
hooks:
- id: autoflake
args: [--in-place]
# We can probably remove `isort` if we come to trust `ruff --fix`,
# but we'll need to figure out the configuration to do this in `ruff`
- repo: https://github.com/pycqa/isort
- rev: 5.12.0
+ rev: 6.0.0
hooks:
- id: isort
# Let's keep `pyupgrade` even though `ruff --fix` probably does most of it
- repo: https://github.com/asottile/pyupgrade
- rev: v3.3.2
+ rev: v3.19.1
hooks:
- id: pyupgrade
- args: [--py38-plus]
+ args: [--py310-plus]
- repo: https://github.com/MarcoGorelli/auto-walrus
- rev: v0.2.2
+ rev: 0.3.4
hooks:
- id: auto-walrus
args: [--line-length, "100"]
- repo: https://github.com/psf/black
- rev: 23.3.0
+ rev: 25.1.0
hooks:
- id: black
- id: black-jupyter
- - repo: https://github.com/charliermarsh/ruff-pre-commit
- rev: v0.0.264
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.9.6
hooks:
- id: ruff
args: [--fix-only, --show-fixes]
# Let's keep `flake8` even though `ruff` does much of the same.
# `flake8-bugbear` and `flake8-simplify` have caught things missed by `ruff`.
- repo: https://github.com/PyCQA/flake8
- rev: 6.0.0
+ rev: 7.1.2
hooks:
- id: flake8
- additional_dependencies: &flake8_dependencies
- # These versions need updated manually
- - flake8==6.0.0
- - flake8-bugbear==23.3.23
- - flake8-simplify==0.20.0
- - repo: https://github.com/asottile/yesqa
- rev: v1.4.0
- hooks:
- - id: yesqa
- additional_dependencies: *flake8_dependencies
+ args: ["--config=.flake8"]
+ additional_dependencies:
+ &flake8_dependencies # These versions need updated manually
+ - flake8==7.1.2
+ - flake8-bugbear==24.12.12
+ - flake8-simplify==0.21.0
- repo: https://github.com/codespell-project/codespell
- rev: v2.2.4
+ rev: v2.4.1
hooks:
- id: codespell
types_or: [python, rst, markdown]
additional_dependencies: [tomli]
files: ^(graphblas|docs)/
- - repo: https://github.com/charliermarsh/ruff-pre-commit
- rev: v0.0.264
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.9.6
hooks:
- id: ruff
- repo: https://github.com/sphinx-contrib/sphinx-lint
- rev: v0.6.7
+ rev: v1.0.0
hooks:
- id: sphinx-lint
args: [--enable, all, "--disable=line-too-long,leaked-markup"]
@@ -101,6 +109,40 @@ repos:
hooks:
- id: pyroma
args: [-n, "10", .]
+ - repo: https://github.com/shellcheck-py/shellcheck-py
+ rev: "v0.10.0.1"
+ hooks:
+ - id: shellcheck
+ - repo: https://github.com/rbubley/mirrors-prettier
+ rev: v3.5.1
+ hooks:
+ - id: prettier
+ - repo: https://github.com/ComPWA/taplo-pre-commit
+ rev: v0.9.3
+ hooks:
+ - id: taplo-format
+ - repo: https://github.com/rhysd/actionlint
+ rev: v1.7.7
+ hooks:
+ - id: actionlint
+ - repo: https://github.com/python-jsonschema/check-jsonschema
+ rev: 0.31.1
+ hooks:
+ - id: check-dependabot
+ - id: check-github-workflows
+ - id: check-readthedocs
+ - repo: https://github.com/adrienverge/yamllint
+ rev: v1.35.1
+ hooks:
+ - id: yamllint
+ - repo: https://github.com/woodruffw/zizmor-pre-commit
+ rev: v1.3.1
+ hooks:
+ - id: zizmor
+ - repo: meta
+ hooks:
+ - id: check-hooks-apply
+ - id: check-useless-excludes
- repo: local
hooks:
# Add `--hook-stage manual` to pre-commit command to run (very slow)
@@ -114,9 +156,9 @@ repos:
args: [graphblas/]
pass_filenames: false
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.4.0
+ rev: v5.0.0
hooks:
- - id: no-commit-to-branch # no commit directly to main
+ - id: no-commit-to-branch # no commit directly to main
#
# Maybe:
#
@@ -133,8 +175,10 @@ repos:
# additional_dependencies: [tomli]
#
# - repo: https://github.com/PyCQA/bandit
-# rev: 1.7.4
+# rev: 1.8.2
# hooks:
# - id: bandit
+# args: ["-c", "pyproject.toml"]
+# additional_dependencies: ["bandit[toml]"]
#
-# blacken-docs, blackdoc mypy, pydocstringformatter, velin, flynt, yamllint
+# blacken-docs, blackdoc, mypy, pydocstringformatter, velin, flynt
diff --git a/.yamllint.yaml b/.yamllint.yaml
new file mode 100644
index 000000000..54e656293
--- /dev/null
+++ b/.yamllint.yaml
@@ -0,0 +1,6 @@
+---
+extends: default
+rules:
+ document-start: disable
+ line-length: disable
+ truthy: disable
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 7cfcb10f9..eebd2c372 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -13,13 +13,13 @@ educational level, family status, culture, or political belief.
Examples of unacceptable behavior by participants include:
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing other's private information, such as physical or electronic
+- The use of sexualized language or imagery
+- Personal attacks
+- Trolling or insulting/derogatory comments
+- Public or private harassment
+- Publishing other's private information, such as physical or electronic
addresses, without explicit permission
-* Other unethical or unprofessional conduct
+- Other unethical or unprofessional conduct
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
@@ -52,12 +52,12 @@ that is deemed necessary and appropriate to the circumstances. Maintainers are
obligated to maintain confidentiality with regard to the reporter of an
incident.
-This Code of Conduct is adapted from the [Numba Code of Conduct][numba], which is based on the [Contributor Covenant][homepage],
+This Code of Conduct is adapted from the [Numba Code of Conduct][numba], which is based on the [Contributor Covenant][homepage],
version 1.3.0, available at
-[http://contributor-covenant.org/version/1/3/0/][version],
+[https://contributor-covenant.org/version/1/3/0/][version],
and the [Swift Code of Conduct][swift].
[numba]: https://github.com/numba/numba-governance/blob/accepted/code-of-conduct.md
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/3/0/
+[homepage]: https://contributor-covenant.org
+[version]: https://contributor-covenant.org/version/1/3/0/
[swift]: https://swift.org/community/#code-of-conduct
diff --git a/LICENSE b/LICENSE
index 74a8ba6c6..21c605c21 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
Apache License
Version 2.0, January 2004
- http://www.apache.org/licenses/
+ https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
@@ -186,13 +186,13 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright 2020 Anaconda, Inc
+ Copyright 2020-2023 Anaconda, Inc. and contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/MANIFEST.in b/MANIFEST.in
index e2ff9c410..27cd3f0c4 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -2,11 +2,9 @@ recursive-include graphblas *.py
prune docs
prune scripts
include setup.py
+include conftest.py
include README.md
include LICENSE
include MANIFEST.in
include graphblas/graphblas.yaml
include graphblas/tests/pickle*.pkl
-include docs/_static/img/logo-name-medium.svg
-include docs/_static/img/draw-example.png
-include docs/_static/img/repr-matrix.png
diff --git a/README.md b/README.md
index 083483fe2..1080314c7 100644
--- a/README.md
+++ b/README.md
@@ -1,17 +1,19 @@
-
+
+[](https://numfocus.org)
+[](https://github.com/pyOpenSci/software-review/issues/81)
+[](https://discord.com/invite/vur45CbwMz)
+
[](https://anaconda.org/conda-forge/python-graphblas)
[](https://pypi.python.org/pypi/python-graphblas/)
[](https://pypi.python.org/pypi/python-graphblas/)
[](https://github.com/python-graphblas/python-graphblas/blob/main/LICENSE)
-[](https://github.com/python-graphblas/python-graphblas/actions)
+[](https://github.com/python-graphblas/python-graphblas/actions)
[](https://python-graphblas.readthedocs.io/en/latest/)
-[](https://coveralls.io/r/python-graphblas/python-graphblas)
-
+[](https://codecov.io/gh/python-graphblas/python-graphblas)
[](https://doi.org/10.5281/zenodo.7328791)
[](https://mybinder.org/v2/gh/python-graphblas/python-graphblas/HEAD?filepath=notebooks%2FIntro%20to%20GraphBLAS%20%2B%20SSSP%20example.ipynb)
-[](https://discord.com/invite/vur45CbwMz)
Python library for GraphBLAS: high-performance sparse linear algebra for scalable graph analytics.
For algorithms, see
@@ -24,23 +26,28 @@ For algorithms, see
- **Source:** [https://github.com/python-graphblas/python-graphblas](https://github.com/python-graphblas/python-graphblas)
- **Bug reports:** [https://github.com/python-graphblas/python-graphblas/issues](https://github.com/python-graphblas/python-graphblas/issues)
- **Github discussions:** [https://github.com/python-graphblas/python-graphblas/discussions](https://github.com/python-graphblas/python-graphblas/discussions)
-- **Weekly community call:** [https://github.com/python-graphblas/python-graphblas/issues/247](https://github.com/python-graphblas/python-graphblas/issues/247)
+- **Weekly community call:** [python-graphblas#247](https://github.com/python-graphblas/python-graphblas/issues/247) or [https://scientific-python.org/calendars/](https://scientific-python.org/calendars/)
- **Chat via Discord:** [https://discord.com/invite/vur45CbwMz](https://discord.com/invite/vur45CbwMz) in the [#graphblas channel](https://discord.com/channels/786703927705862175/1024732940233605190)
-
-
+
+
## Install
+
Install the latest version of Python-graphblas via conda:
+
```
$ conda install -c conda-forge python-graphblas
```
+
or pip:
+
```
-$ pip install python-graphblas[default]
+$ pip install 'python-graphblas[default]'
```
+
This will also install the [SuiteSparse:GraphBLAS](https://github.com/DrTimothyAldenDavis/GraphBLAS) compiled C library.
We currently support the [GraphBLAS C API 2.0 specification](https://graphblas.org/docs/GraphBLAS_API_C_v2.0.0.pdf).
@@ -55,6 +62,7 @@ The following are not required by python-graphblas, but may be needed for certai
- `fast-matrix-market` - for faster read/write of Matrix Market files with `gb.io.mmread` and `gb.io.mmwrite`.
## Description
+
Currently works with [SuiteSparse:GraphBLAS](https://github.com/DrTimothyAldenDavis/GraphBLAS), but the goal is to make it work with all implementations of the GraphBLAS spec.
The approach taken with this library is to follow the C-API 2.0 specification as closely as possible while making improvements
@@ -68,10 +76,12 @@ with how Python handles assignment, so instead we (ab)use the left-shift `<<` no
assignment. This opens up all kinds of nice possibilities.
This is an example of how the mapping works:
+
```C
// C call
GrB_Matrix_mxm(M, mask, GrB_PLUS_INT64, GrB_MIN_PLUS_INT64, A, B, NULL)
```
+
```python
# Python call
M(mask.V, accum=binary.plus) << A.mxm(B, semiring.min_plus)
@@ -89,10 +99,12 @@ is a much better approach, even if it doesn't feel very Pythonic.
Descriptor flags are set on the appropriate elements to keep logic close to what it affects. Here is the same call
with descriptor bits set. `ttcsr` indicates transpose the first and second matrices, complement the structure of the mask,
and do a replacement on the output.
+
```C
// C call
GrB_Matrix_mxm(M, mask, GrB_PLUS_INT64, GrB_MIN_PLUS_INT64, A, B, desc.ttcsr)
```
+
```python
# Python call
M(~mask.S, accum=binary.plus, replace=True) << A.T.mxm(B.T, semiring.min_plus)
@@ -102,16 +114,20 @@ The objects receiving the flag operations (A.T, ~mask, etc) are also delayed obj
do no computation, allowing the correct descriptor bits to be set in a single GraphBLAS call.
**If no mask or accumulator is used, the call looks like this**:
+
```python
M << A.mxm(B, semiring.min_plus)
```
+
The use of `<<` to indicate updating is actually just syntactic sugar for a real `.update()` method. The above
expression could be written as:
+
```python
M.update(A.mxm(B, semiring.min_plus))
```
## Operations
+
```python
M(mask, accum) << A.mxm(B, semiring) # mxm
w(mask, accum) << A.mxv(v, semiring) # mxv
@@ -121,14 +137,18 @@ M(mask, accum) << A.ewise_mult(B, binaryop) # eWiseMult
M(mask, accum) << A.kronecker(B, binaryop) # kronecker
M(mask, accum) << A.T # transpose
```
+
## Extract
+
```python
M(mask, accum) << A[rows, cols] # rows and cols are a list or a slice
w(mask, accum) << A[rows, col_index] # extract column
w(mask, accum) << A[row_index, cols] # extract row
s = A[row_index, col_index].value # extract single element
```
+
## Assign
+
```python
M(mask, accum)[rows, cols] << A # rows and cols are a list or a slice
M(mask, accum)[rows, col_index] << v # assign column
@@ -138,31 +158,42 @@ M[row_index, col_index] << s # assign scalar to single element
# (mask and accum not allowed)
del M[row_index, col_index] # remove single element
```
+
## Apply
+
```python
M(mask, accum) << A.apply(unaryop)
M(mask, accum) << A.apply(binaryop, left=s) # bind-first
M(mask, accum) << A.apply(binaryop, right=s) # bind-second
```
+
## Reduce
+
```python
v(mask, accum) << A.reduce_rowwise(op) # reduce row-wise
v(mask, accum) << A.reduce_columnwise(op) # reduce column-wise
s(accum) << A.reduce_scalar(op)
s(accum) << v.reduce(op)
```
+
## Creating new Vectors / Matrices
+
```python
A = Matrix.new(dtype, num_rows, num_cols) # new_type
B = A.dup() # dup
A = Matrix.from_coo([row_indices], [col_indices], [values]) # build
```
+
## New from delayed
+
Delayed objects can be used to create a new object using the `.new()` method
+
```python
C = A.mxm(B, semiring).new()
```
+
## Properties
+
```python
size = v.size # size
nrows = M.nrows # nrows
@@ -170,23 +201,30 @@ ncols = M.ncols # ncols
nvals = M.nvals # nvals
rindices, cindices, vals = M.to_coo() # extractTuples
```
+
## Initialization
+
There is a mechanism to initialize `graphblas` with a context prior to use. This allows for setting the backend to
use as well as the blocking/non-blocking mode. If the context is not initialized, a default initialization will
be performed automatically.
+
```python
import graphblas as gb
+
# Context initialization must happen before any other imports
-gb.init('suitesparse', blocking=True)
+gb.init("suitesparse", blocking=True)
# Now we can import other items from graphblas
from graphblas import binary, semiring
from graphblas import Matrix, Vector, Scalar
```
+
## Performant User Defined Functions
+
Python-graphblas requires `numba` which enables compiling user-defined Python functions to native C for use in GraphBLAS.
Example customized UnaryOp:
+
```python
from graphblas import unary
@@ -195,22 +233,42 @@ def force_odd_func(x):
return x + 1
return x
-unary.register_new('force_odd', force_odd_func)
+unary.register_new("force_odd", force_odd_func)
v = Vector.from_coo([0, 1, 3], [1, 2, 3])
w = v.apply(unary.force_odd).new()
w # indexes=[0, 1, 3], values=[1, 3, 3]
```
+
Similar methods exist for BinaryOp, Monoid, and Semiring.
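+
+For instance, a custom BinaryOp follows the same pattern. A minimal sketch (the `absdiff` name and function are hypothetical, shown for illustration only):
+
+```python
+from graphblas import Vector, binary
+
+def absdiff_func(x, y):
+    # absolute difference of the two inputs
+    return abs(x - y)
+
+binary.register_new("absdiff", absdiff_func)
+
+v = Vector.from_coo([0, 1], [5, 2])
+w = Vector.from_coo([0, 1], [3, 9])
+u = v.ewise_mult(w, binary.absdiff).new()  # values=[2, 7]
+```
+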
+## Relation to other network analysis libraries
+
+Python-graphblas aims to provide an efficient and consistent expression
+of graph operations using linear algebra. This allows the development of
+high-performance implementations of existing and new graph algorithms
+(also see [`graphblas-algorithms`](https://github.com/python-graphblas/graphblas-algorithms)).
+
+While end-to-end analysis can be done using `python-graphblas`, users
+might find that other libraries in the Python ecosystem provide a more
+convenient high-level interface for data pre-processing and transformation
+(e.g. `pandas`, `scipy.sparse`), visualization (e.g. `networkx`, `igraph`),
+interactive exploration and analysis (e.g. `networkx`, `igraph`) or for
+algorithms that are not (yet) implemented in `graphblas-algorithms` (e.g.
+`networkx`, `igraph`, `scipy.sparse.csgraph`). To facilitate communication with
+other libraries, `graphblas.io` contains multiple connectors, see the
+following section.
+
## Import/Export connectors to the Python ecosystem
+
`graphblas.io` contains functions for converting to and from:
+
```python
import graphblas as gb
# scipy.sparse matrices
A = gb.io.from_scipy_sparse(m)
-m = gb.io.to_scipy_sparse(m, format='csr')
+m = gb.io.to_scipy_sparse(m, format="csr")
# networkx graphs
A = gb.io.from_networkx(g)
diff --git a/binder/environment.yml b/binder/environment.yml
index ef72a4d2b..9548f2126 100644
--- a/binder/environment.yml
+++ b/binder/environment.yml
@@ -1,10 +1,12 @@
name: graphblas
channels:
- - conda-forge
+ - conda-forge
dependencies:
- - python=3.10
- - python-graphblas
- - matplotlib
- - networkx
- - pandas
- - scipy
+ - python=3.11
+ - python-graphblas
+ - matplotlib
+ - networkx
+ - pandas
+ - scipy
+ - drawsvg
+ - cairosvg
diff --git a/docs/_static/custom.css b/docs/_static/custom.css
index 1b14402cd..f7dd59b74 100644
--- a/docs/_static/custom.css
+++ b/docs/_static/custom.css
@@ -1,78 +1,78 @@
-
/* Main Page Stylings */
.intro-card {
- background-color: var(--pst-color-background);
- margin-bottom: 30px;
+ background-color: var(--pst-color-background);
+ margin-bottom: 30px;
}
.intro-card:hover {
- box-shadow: 0.2rem 0.5rem 1rem var(--pst-color-link) !important;
+ box-shadow: 0.2rem 0.5rem 1rem var(--pst-color-link) !important;
}
.intro-card .card-header {
- background-color: inherit;
+ background-color: inherit;
}
.intro-card .card-header .card-text {
- font-weight: bold;
+ font-weight: bold;
}
.intro-card .card-body {
- margin-top: 0;
+ margin-top: 0;
}
.intro-card .card-body .card-text:first-child {
- margin-bottom: 0;
+ margin-bottom: 0;
}
.shadow {
- box-shadow: 0.2rem 0.5rem 1rem var(--pst-color-text-muted) !important;
+ box-shadow: 0.2rem 0.5rem 1rem var(--pst-color-text-muted) !important;
}
.table {
- font-size: smaller;
- width: inherit;
+ font-size: smaller;
+ width: inherit;
}
-.table td, .table th {
- padding: 0 .75rem;
+.table td,
+.table th {
+ padding: 0 0.75rem;
}
.table.inline {
- display: inline-table;
- margin-right: 30px;
+ display: inline-table;
+ margin-right: 30px;
}
p.rubric {
- border-bottom: none;
+ border-bottom: none;
}
button.navbar-btn.rounded-circle {
- padding: 0.25rem;
+ padding: 0.25rem;
}
button.navbar-btn.search-button {
- color: var(--pst-color-text-muted);
- padding: 0;
+ color: var(--pst-color-text-muted);
+ padding: 0;
}
-button.navbar-btn:hover
-{
- color: var(--pst-color-primary);
+button.navbar-btn:hover {
+ color: var(--pst-color-primary);
}
button.theme-switch-button {
- font-size: calc(var(--pst-font-size-icon) - .1rem);
- border: none;
+ font-size: calc(var(--pst-font-size-icon) - 0.1rem);
+ border: none;
}
button span.theme-switch:hover {
- color: var(--pst-color-primary);
+ color: var(--pst-color-primary);
}
/* Styling for Jupyter Notebook ReST Exports */
-.dataframe tbody th, .dataframe tbody td {
- padding: 10px;
+.dataframe tbody th,
+.dataframe tbody td {
+ padding: 10px;
}
diff --git a/docs/_static/img/logo-horizontal-dark.svg b/docs/_static/img/logo-horizontal-dark.svg
new file mode 100644
index 000000000..be9e5ccca
--- /dev/null
+++ b/docs/_static/img/logo-horizontal-dark.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/_static/img/logo-horizontal-light.svg b/docs/_static/img/logo-horizontal-light.svg
new file mode 100644
index 000000000..5894eed9a
--- /dev/null
+++ b/docs/_static/img/logo-horizontal-light.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/_static/img/logo-horizontal-medium-big.svg b/docs/_static/img/logo-horizontal-medium-big.svg
new file mode 100644
index 000000000..649c2aef3
--- /dev/null
+++ b/docs/_static/img/logo-horizontal-medium-big.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/_static/img/logo-horizontal-medium.svg b/docs/_static/img/logo-horizontal-medium.svg
new file mode 100644
index 000000000..038781a3f
--- /dev/null
+++ b/docs/_static/img/logo-horizontal-medium.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/_static/img/logo-name-light.svg b/docs/_static/img/logo-name-light.svg
index e9d9738ee..3331ae561 100644
--- a/docs/_static/img/logo-name-light.svg
+++ b/docs/_static/img/logo-name-light.svg
@@ -1 +1 @@
-
+
diff --git a/docs/_static/img/logo-name-medium-big.svg b/docs/_static/img/logo-name-medium-big.svg
new file mode 100644
index 000000000..7bb245898
--- /dev/null
+++ b/docs/_static/img/logo-name-medium-big.svg
@@ -0,0 +1 @@
+
diff --git a/docs/_static/img/logo-name-medium.svg b/docs/_static/img/logo-name-medium.svg
index 2c718ba26..3128fda35 100644
--- a/docs/_static/img/logo-name-medium.svg
+++ b/docs/_static/img/logo-name-medium.svg
@@ -1 +1 @@
-
+
diff --git a/docs/_static/img/logo-vertical-dark.svg b/docs/_static/img/logo-vertical-dark.svg
new file mode 100644
index 000000000..25dcefc17
--- /dev/null
+++ b/docs/_static/img/logo-vertical-dark.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/_static/img/logo-vertical-light.svg b/docs/_static/img/logo-vertical-light.svg
new file mode 100644
index 000000000..1cb22644d
--- /dev/null
+++ b/docs/_static/img/logo-vertical-light.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/_static/img/logo-vertical-medium.svg b/docs/_static/img/logo-vertical-medium.svg
new file mode 100644
index 000000000..db2fcaefe
--- /dev/null
+++ b/docs/_static/img/logo-vertical-medium.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/_static/img/python-graphblas-logo.svg b/docs/_static/img/python-graphblas-logo.svg
new file mode 100644
index 000000000..2422973ff
--- /dev/null
+++ b/docs/_static/img/python-graphblas-logo.svg
@@ -0,0 +1 @@
+
diff --git a/docs/_static/matrix.css b/docs/_static/matrix.css
new file mode 100644
index 000000000..1937178e5
--- /dev/null
+++ b/docs/_static/matrix.css
@@ -0,0 +1,104 @@
+/* Based on the stylesheet used by matrepr (https://github.com/alugowski/matrepr) and modified for sphinx */
+
+table.matrix {
+ border-collapse: collapse;
+ border: 0px;
+}
+
+/* Disable a horizontal line from the default stylesheet */
+.table.matrix > :not(caption) > * > * {
+ border-bottom-width: 0px;
+}
+
+/* row indices */
+table.matrix > tbody tr th {
+ font-size: smaller;
+ font-weight: bolder;
+ vertical-align: middle;
+ text-align: right;
+}
+/* row indices are often made bold in the source data; here make them match the boldness of the th column label style */
+table.matrix strong {
+ font-weight: bold;
+}
+
+/* column indices */
+table.matrix > thead tr th {
+ font-size: smaller;
+ font-weight: bolder;
+ vertical-align: middle;
+ text-align: center;
+}
+
+/* cells */
+table.matrix > tbody tr td {
+ vertical-align: middle;
+ text-align: center;
+ position: relative;
+}
+
+/* left border */
+table.matrix > tbody tr td:first-of-type {
+ border-left: solid 2px var(--pst-color-text-base);
+}
+/* right border */
+table.matrix > tbody tr td:last-of-type {
+ border-right: solid 2px var(--pst-color-text-base);
+}
+
+/* prevents empty cells from collapsing, especially empty rows */
+table.matrix > tbody tr td:empty::before {
+ /* basically fills empty cells with non-breaking spaces */
+ content: "\00a0\00a0\00a0";
+ visibility: hidden;
+}
+table.matrix > tbody tr td:empty::after {
+ content: "\00a0\00a0\00a0";
+ visibility: hidden;
+}
+
+/* matrix bracket ticks */
+table.matrix > tbody > tr:first-child > td:first-of-type::before {
+ content: "";
+ width: 4px;
+ position: absolute;
+ top: 0;
+ bottom: 0;
+ visibility: visible;
+ left: 0;
+ right: auto;
+ border-top: solid 2px var(--pst-color-text-base);
+}
+table.matrix > tbody > tr:last-child > td:first-of-type::before {
+ content: "";
+ width: 4px;
+ position: absolute;
+ top: 0;
+ bottom: 0;
+ visibility: visible;
+ left: 0;
+ right: auto;
+ border-bottom: solid 2px var(--pst-color-text-base);
+}
+table.matrix > tbody > tr:first-child > td:last-of-type::after {
+ content: "";
+ width: 4px;
+ position: absolute;
+ top: 0;
+ bottom: 0;
+ visibility: visible;
+ left: auto;
+ right: 0;
+ border-top: solid 2px var(--pst-color-text-base);
+}
+table.matrix > tbody > tr:last-child > td:last-of-type::after {
+ content: "";
+ width: 4px;
+ position: absolute;
+ top: 0;
+ bottom: 0;
+ visibility: visible;
+ left: auto;
+ right: 0;
+ border-bottom: solid 2px var(--pst-color-text-base);
+}
diff --git a/docs/api_reference/io.rst b/docs/api_reference/io.rst
index 1b42c0648..1cfc98516 100644
--- a/docs/api_reference/io.rst
+++ b/docs/api_reference/io.rst
@@ -10,15 +10,18 @@ These methods require `networkx <https://networkx.org/>`_ to be installed.
.. autofunction:: graphblas.io.to_networkx
-Numpy
+NumPy
~~~~~
-These methods require `scipy `_ to be installed, as some
-of the scipy.sparse machinery is used during the conversion process.
+These methods convert to and from dense arrays. For more, see :ref:`IO in the user guide `.
-.. autofunction:: graphblas.io.from_numpy
+.. automethod:: graphblas.core.matrix.Matrix.from_dense
-.. autofunction:: graphblas.io.to_numpy
+.. automethod:: graphblas.core.matrix.Matrix.to_dense
+
+.. automethod:: graphblas.core.vector.Vector.from_dense
+
+.. automethod:: graphblas.core.vector.Vector.to_dense
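+
+A minimal sketch of the dense round trip (assuming the ``missing_value`` and ``fill_value`` keyword arguments):
+
+.. code-block:: python
+
+    import numpy as np
+    import graphblas as gb
+
+    # Zeros become missing elements in the sparse Matrix
+    A = gb.Matrix.from_dense(np.array([[1, 0], [0, 2]]), missing_value=0)
+    # Missing elements are filled back in when converting to dense
+    arr = A.to_dense(fill_value=0)
+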
Scipy Sparse
~~~~~~~~~~~~
@@ -49,7 +52,24 @@ These methods require `scipy <https://scipy.org/>`_ to be installed.
.. autofunction:: graphblas.io.mmwrite
+Awkward Array
+~~~~~~~~~~~~~
+
+`Awkward Array <https://awkward-array.org/>`_ is a library for nested,
+variable-sized data, including arbitrary-length lists, records, mixed types,
+and missing data, using NumPy-like idioms. Note that the intended use of the
+``awkward-array``-related ``io`` functions is to convert ``graphblas`` objects to awkward,
+perform necessary computations/transformations and, if required, convert the
+awkward array back to ``graphblas`` format. To facilitate this conversion process,
+``graphblas.io.to_awkward`` adds a top-level attribute ``format``, describing the
+format of the ``graphblas`` object (this attribute is used by the
+``graphblas.io.from_awkward`` function to reconstruct the ``graphblas`` object).
+
+.. autofunction:: graphblas.io.to_awkward
+
+.. autofunction:: graphblas.io.from_awkward
+
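+A minimal round-trip sketch (the concrete awkward layout is an implementation detail of the library):
+
+.. code-block:: python
+
+    import graphblas as gb
+
+    A = gb.Matrix.from_coo([0, 1], [1, 0], [1.0, 2.0])
+    ak_array = gb.io.to_awkward(A)  # carries the ``format`` attribute described above
+    A2 = gb.io.from_awkward(ak_array)  # uses ``format`` to rebuild the Matrix
+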
Visualization
~~~~~~~~~~~~~
-.. autofunction:: graphblas.io.draw
+.. autofunction:: graphblas.viz.draw
diff --git a/docs/conf.py b/docs/conf.py
index 3e1a8c85b..283f6d047 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -19,7 +19,7 @@
# -- Project information -----------------------------------------------------
project = "python-graphblas"
-copyright = "2022, Anaconda, Inc"
+copyright = "2020-2023, Anaconda, Inc. and contributors"
author = "Anaconda, Inc"
# The full version, including alpha/beta/rc tags
@@ -36,7 +36,7 @@
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "numpydoc", "sphinx_panels", "nbsphinx"]
-html_css_files = ["custom.css"]
+html_css_files = ["custom.css", "matrix.css"]
html_js_files = ["custom.js"]
# Add any paths that contain templates here, relative to this directory.
@@ -55,14 +55,16 @@
#
html_theme = "pydata_sphinx_theme"
+html_favicon = "_static/img/python-graphblas-logo.svg"
+
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {
"logo": {
- "image_light": "_static/img/logo-name-light.svg",
- "image_dark": "_static/img/logo-name-dark.svg",
+ "image_light": "_static/img/logo-horizontal-light.svg",
+ "image_dark": "_static/img/logo-horizontal-dark.svg",
},
"github_url": "https://github.com/python-graphblas/python-graphblas",
}
diff --git a/docs/contributor_guide/index.rst b/docs/contributor_guide/index.rst
index e8078f933..3b94f2f35 100644
--- a/docs/contributor_guide/index.rst
+++ b/docs/contributor_guide/index.rst
@@ -58,7 +58,7 @@ Here are instructions for two popular environment managers:
::
# Create a conda environment named ``graphblas-dev`` using environment.yml in the repository root
- conda create -f environment.yml
+ conda env create -f environment.yml
# Activate it
conda activate graphblas-dev
# Install python-graphblas from source
diff --git a/docs/env.yml b/docs/env.yml
index c0c4c8999..78a50afbe 100644
--- a/docs/env.yml
+++ b/docs/env.yml
@@ -1,23 +1,23 @@
name: python-graphblas-docs
channels:
- - conda-forge
- - nodefaults
+ - conda-forge
+ - nodefaults
dependencies:
- - python=3.10
- - pip
- # python-graphblas dependencies
- - donfig
- - numba
- - python-suitesparse-graphblas>=7.4.0.0
- - pyyaml
- # extra dependencies
- - matplotlib
- - networkx
- - pandas
- - scipy>=1.7.0
- # docs dependencies
- - commonmark # For RTD
- - nbsphinx
- - numpydoc
- - pydata-sphinx-theme=0.13.1
- - sphinx-panels=0.6.0
+ - python=3.10
+ - pip
+ # python-graphblas dependencies
+ - donfig
+ - numba
+ - python-suitesparse-graphblas>=7.4.0.0
+ - pyyaml
+ # extra dependencies
+ - matplotlib
+ - networkx
+ - pandas
+ - scipy>=1.7.0
+ # docs dependencies
+ - commonmark # For RTD
+ - nbsphinx
+ - numpydoc
+ - pydata-sphinx-theme=0.13.1
+ - sphinx-panels=0.6.0
diff --git a/docs/getting_started/faq.rst b/docs/getting_started/faq.rst
index 1e60a1bd4..2609e7929 100644
--- a/docs/getting_started/faq.rst
+++ b/docs/getting_started/faq.rst
@@ -101,11 +101,10 @@ Bugs are not considered deprecations and may be fixed immediately.
What is the version support policy?
+++++++++++++++++++++++++++++++++++
-Each major Python version will be supported for at least 36 to 42 months.
+Each major Python version will be supported for at least 36 months.
Major dependencies such as NumPy should be supported for at least 24 months.
-This is motivated by these guidelines:
+We aim to follow SPEC 0:
-- https://numpy.org/neps/nep-0029-deprecation_policy.html
- https://scientific-python.org/specs/spec-0000/
``python-graphblas`` itself follows a "single trunk" versioning strategy.
diff --git a/docs/getting_started/primer.rst b/docs/getting_started/primer.rst
index 710dca702..b5bec26ee 100644
--- a/docs/getting_started/primer.rst
+++ b/docs/getting_started/primer.rst
@@ -89,26 +89,13 @@ makes for faster graph algorithms.
# networkx-style storage of an undirected graph
G = {
- 0: {1: {'weight': 5.6},
- 2: {'weight': 2.3},
- 3: {'weight': 4.6}},
- 1: {0: {'weight': 5.6},
- 2: {'weight': 1.9},
- 3: {'weight': 6.2}},
- 2: {0: {'weight': 2.3},
- 1: {'weight': 1.9},
- 3: {'weight': 3.0}},
- 3: {0: {'weight': 4.6},
- 1: {'weight': 6.2},
- 2: {'weight': 3.0},
- 4: {'weight': 1.4}},
- 4: {3: {'weight': 1.4},
- 5: {'weight': 4.4},
- 6: {'weight': 1.0}},
- 5: {4: {'weight': 4.4},
- 6: {'weight': 2.8}},
- 6: {4: {'weight': 1.0},
- 5: {'weight': 2.8}}
+ 0: {1: {"weight": 5.6}, 2: {"weight": 2.3}, 3: {"weight": 4.6}},
+ 1: {0: {"weight": 5.6}, 2: {"weight": 1.9}, 3: {"weight": 6.2}},
+ 2: {0: {"weight": 2.3}, 1: {"weight": 1.9}, 3: {"weight": 3.0}},
+ 3: {0: {"weight": 4.6}, 1: {"weight": 6.2}, 2: {"weight": 3.0}, 4: {"weight": 1.4}},
+ 4: {3: {"weight": 1.4}, 5: {"weight": 4.4}, 6: {"weight": 1.0}},
+ 5: {4: {"weight": 4.4}, 6: {"weight": 2.8}},
+ 6: {4: {"weight": 1.0}, 5: {"weight": 2.8}},
}
An alternative way to store a graph is as an adjacency matrix. Each node becomes both a row
@@ -240,7 +227,9 @@ node 0.
[0, 0, 1, 1, 2],
[1, 2, 2, 3, 3],
[2.0, 5.0, 1.5, 4.25, 0.5],
- nrows=4, ncols=4)
+ nrows=4,
+ ncols=4
+ )
v = Vector.from_coo([start_node], [0.0], size=4)
# Compute SSSP
@@ -274,7 +263,7 @@ and showing that linear algebra can be used to compute graph algorithms with the
of semirings.
This is a somewhat new field of research, so many academic papers and talks are being given every year.
-`Graphblas.org <http://graphblas.org>`_ remains the best source for keeping up-to-date with the latest
+`Graphblas.org <https://graphblas.org>`_ remains the best source for keeping up-to-date with the latest
developments in this area.
Many people will benefit from faster graph algorithms written in GraphBLAS, but for those that want
diff --git a/docs/make.bat b/docs/make.bat
index 2119f5109..153be5e2f 100644
--- a/docs/make.bat
+++ b/docs/make.bat
@@ -21,7 +21,7 @@ if errorlevel 9009 (
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
- echo.http://sphinx-doc.org/
+ echo.https://www.sphinx-doc.org/
exit /b 1
)
diff --git a/docs/user_guide/collections.rst b/docs/user_guide/collections.rst
index 2ce759bf4..de7469c6d 100644
--- a/docs/user_guide/collections.rst
+++ b/docs/user_guide/collections.rst
@@ -145,7 +145,7 @@ The shape and dtype remain unchanged, but the collection will be fully sparse (i
to_coo
~~~~~~
-To go from a collection back to the index and values, ``.to_coo()`` can be called. Numpy arrays
+To go from a collection back to the index and values, ``.to_coo()`` can be called. NumPy arrays
will be returned in a tuple.
.. code-block:: python
diff --git a/docs/user_guide/init.rst b/docs/user_guide/init.rst
index 62f81b50f..ffb6a3463 100644
--- a/docs/user_guide/init.rst
+++ b/docs/user_guide/init.rst
@@ -8,8 +8,9 @@ GraphBLAS must be initialized before it can be used. This is done with the
.. code-block:: python
import graphblas as gb
+
# Context initialization must happen before any other imports
- gb.init('suitesparse', blocking=False)
+ gb.init("suitesparse", blocking=False)
# Now we can import other items from graphblas
from graphblas import binary, semiring
diff --git a/docs/user_guide/io.rst b/docs/user_guide/io.rst
index c13fda5d6..f27b40bd3 100644
--- a/docs/user_guide/io.rst
+++ b/docs/user_guide/io.rst
@@ -4,6 +4,8 @@ Input/Output
There are several ways to get data into and out of python-graphblas.
+.. _from-to-values:
+
From/To Values
--------------
@@ -29,6 +31,7 @@ array will match the collection dtype.
v = gb.Vector.from_coo([1, 3, 6], [2, 3, 4], float, size=10)
.. csv-table::
+ :class: matrix
:header: 0,1,2,3,4,5,6,7,8,9
,2.0,,3.0,,,4.0,,,
diff --git a/docs/user_guide/operations.rst b/docs/user_guide/operations.rst
index 9ee76ab4c..18d0352d7 100644
--- a/docs/user_guide/operations.rst
+++ b/docs/user_guide/operations.rst
@@ -8,7 +8,7 @@ Matrix Multiply
The GraphBLAS spec contains three methods for matrix multiplication, depending on whether
the inputs are Matrix or Vector.
- - **mxm** -- Matrix-Matrix multplications
+ - **mxm** -- Matrix-Matrix multiplication
- **mxv** -- Matrix-Vector multiplication
- **vxm** -- Vector-Matrix multiplication
@@ -26,19 +26,28 @@ a Vector is treated as an nx1 column matrix.
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 1, 1, 2], [1, 2, 2, 3, 3],
- [2., 5., 1.5, 4.25, 0.5], nrows=4, ncols=4)
- B = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2, 3, 3], [1, 2, 0, 1, 1, 2, 0, 1],
- [3., 2., 9., 6., 3., 1., 0., 5.])
+ A = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2],
+ [1, 2, 2, 3, 3],
+ [2., 5., 1.5, 4.25, 0.5],
+ nrows=4,
+ ncols=4
+ )
+ B = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2, 2, 3, 3],
+ [1, 2, 0, 1, 1, 2, 0, 1],
+ [3., 2., 9., 6., 3., 1., 0., 5.]
+ )
C = gb.Matrix(float, A.nrows, B.ncols)
# These are equivalent
- C << A.mxm(B, op='min_plus') # method style
+ C << A.mxm(B, op="min_plus") # method style
C << gb.semiring.min_plus(A @ B) # functional style
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1,2,3
+ :stub-columns: 1
**0**,,2.0,5.0,
**1**,,,1.5,4.25
@@ -46,8 +55,9 @@ a Vector is treated as an nx1 column matrix.
**3**,,,,
.. csv-table:: B
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,3.0,2.0
**1**,9.0,6.0,
@@ -55,8 +65,9 @@ a Vector is treated as an nx1 column matrix.
**3**,0.0,5.0,
.. csv-table:: C << min_plus(A @ B)
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,11.0,8.0,6.0
**1**,4.25,4.5,2.5
@@ -67,18 +78,24 @@ a Vector is treated as an nx1 column matrix.
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 1, 1, 2], [1, 2, 2, 3, 3],
- [2., 5., 1.5, 4.25, 0.5], nrows=4, ncols=4)
+ A = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2],
+ [1, 2, 2, 3, 3],
+ [2., 5., 1.5, 4.25, 0.5],
+ nrows=4,
+ ncols=4
+ )
v = gb.Vector.from_coo([0, 1, 3], [10., 20., 40.])
w = gb.Vector(float, A.nrows)
# These are equivalent
- w << A.mxv(v, op='plus_times') # method style
+ w << A.mxv(v, op="plus_times") # method style
w << gb.semiring.plus_times(A @ v) # functional style
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1,2,3
+ :stub-columns: 1
**0**,,2.0,5.0,
**1**,,,1.5,4.25
@@ -86,13 +103,13 @@ a Vector is treated as an nx1 column matrix.
**3**,,,,
.. csv-table:: v
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3
10.0,20.0,,40.0
.. csv-table:: w << plus_times(A @ v)
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3
40.0,170.0,20.0,
@@ -102,23 +119,27 @@ a Vector is treated as an nx1 column matrix.
.. code-block:: python
v = gb.Vector.from_coo([0, 1, 3], [10., 20., 40.])
- B = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2, 3, 3], [1, 2, 0, 1, 1, 2, 0, 1],
- [3., 2., 9., 6., 3., 1., 0., 5.])
+ B = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2, 2, 3, 3],
+ [1, 2, 0, 1, 1, 2, 0, 1],
+ [3., 2., 9., 6., 3., 1., 0., 5.]
+ )
u = gb.Vector(float, B.ncols)
# These are equivalent
- u << v.vxm(B, op='plus_plus') # method style
+ u << v.vxm(B, op="plus_plus") # method style
u << gb.semiring.plus_plus(v @ B) # functional style
.. csv-table:: v
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3
10.0,20.0,,40.0
.. csv-table:: B
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,3.0,2.0
**1**,9.0,6.0,
@@ -126,7 +147,7 @@ a Vector is treated as an nx1 column matrix.
**3**,0.0,5.0,
.. csv-table:: u << plus_plus(v @ B)
- :class: inline
+ :class: inline matrix
:header: 0,1,2
69.0,84.0,12.0
@@ -148,35 +169,44 @@ Example usage:
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 1, 1, 2], [1, 2, 0, 2, 1],
- [2.0, 5.0, 1.5, 4.0, 0.5])
- B = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 1, 2],
- [3., -2., 0., 6., 3., 1.])
+ A = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2],
+ [1, 2, 0, 2, 1],
+ [2., 5., 1.5, 4., 0.5]
+ )
+ B = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2, 2],
+ [1, 2, 0, 1, 1, 2],
+ [3., -2., 0., 6., 3., 1.]
+ )
C = gb.Matrix(float, A.nrows, A.ncols)
# These are equivalent
- C << A.ewise_mult(B, op='min') # method style
+ C << A.ewise_mult(B, op="min") # method style
C << gb.binary.min(A & B) # functional style
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,2.0,5.0
**1**,1.5,,4.0
**2**,,0.5,
.. csv-table:: B
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,3.0,-2.0
**1**,0.0,6.0,
**2**,,3.0,1.0
.. csv-table:: C << min(A & B)
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,2.0,-2.0
**1**,0.0,,
@@ -225,35 +255,45 @@ should be used with the functional syntax, ``left_default`` and ``right_default`
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 0, 1, 1], [0, 1, 2, 0, 2],
- [9.0, 2.0, 5.0, 1.5, 4.0], nrows=3)
- B = gb.Matrix.from_coo([0, 0, 0, 2, 2, 2], [0, 1, 2, 0, 1, 2],
- [4., 0., -2., 6., 3., 1.])
+ A = gb.Matrix.from_coo(
+ [0, 0, 0, 1, 1],
+ [0, 1, 2, 0, 2],
+ [9., 2., 5., 1.5, 4.],
+ nrows=3
+ )
+ B = gb.Matrix.from_coo(
+ [0, 0, 0, 2, 2, 2],
+ [0, 1, 2, 0, 1, 2],
+ [4., 0., -2., 6., 3., 1.]
+ )
C = gb.Matrix(float, A.nrows, A.ncols)
# These are equivalent
- C << A.ewise_add(B, op='minus') # method style
+ C << A.ewise_add(B, op="minus") # method style
C << gb.binary.minus(A | B) # functional style
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,9.0,2.0,5.0
**1**,1.5,,4.0
**2**,,,
.. csv-table:: B
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,4.0,0.0,-2.0
**1**,,,
**2**,6.0,3.0,1.0
.. csv-table:: C << A.ewise_add(B, 'minus')
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,5.0,2.0,7.0
**1**,1.5,,4.0
@@ -263,35 +303,45 @@ should be used with the functional syntax, ``left_default`` and ``right_default`
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 0, 1, 1], [0, 1, 2, 0, 2],
- [9.0, 2.0, 5.0, 1.5, 4.0], nrows=3)
- B = gb.Matrix.from_coo([0, 0, 0, 2, 2, 2], [0, 1, 2, 0, 1, 2],
- [4., 0., -2., 6., 3., 1.])
+ A = gb.Matrix.from_coo(
+ [0, 0, 0, 1, 1],
+ [0, 1, 2, 0, 2],
+ [9., 2., 5., 1.5, 4.],
+ nrows=3
+ )
+ B = gb.Matrix.from_coo(
+ [0, 0, 0, 2, 2, 2],
+ [0, 1, 2, 0, 1, 2],
+ [4., 0., -2., 6., 3., 1.]
+ )
C = gb.Matrix(float, A.nrows, A.ncols)
# These are equivalent
- C << A.ewise_union(B, op='minus', left_default=0, right_default=0) # method style
+ C << A.ewise_union(B, op="minus", left_default=0, right_default=0) # method style
C << gb.binary.minus(A | B, left_default=0, right_default=0) # functional style
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,9.0,2.0,5.0
**1**,1.5,,4.0
**2**,,,
.. csv-table:: B
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,4.0,0.0,-2.0
**1**,,,
**2**,6.0,3.0,1.0
.. csv-table:: C << A.ewise_union(B, 'minus', 0, 0)
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,5.0,2.0,7.0
**1**,1.5,,4.0
@@ -326,13 +376,13 @@ Vector Slice Example:
w << v[:4]
.. csv-table:: v
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3,4,5,6
10.0,2.0,,40.0,-5.0,,24.0
.. csv-table:: w << v[:4]
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3
10.0,2.0,,40.0
@@ -341,23 +391,28 @@ Matrix List Example:
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 0, 2],
- [2.0, 5.0, 1.5, 4.0, 0.5, -7.0])
+ A = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2, 2],
+ [1, 2, 0, 1, 0, 2],
+ [2., 5., 1.5, 4., 0.5, -7.]
+ )
C = gb.Matrix(float, 2, A.ncols)
C << A[[0, 2], :]
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,2.0,5.0
**1**,1.5,4.0,
**2**,0.5,,-7.0
.. csv-table:: C << A[[0, 2], :]
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,2.0,5.0
**1**,0.5,,-7.0
@@ -382,31 +437,39 @@ Matrix-Matrix Assignment Example:
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 0, 2],
- [2.0, 5.0, 1.5, 4.0, 0.5, -7.0])
- B = gb.Matrix.from_coo([0, 0, 1, 1], [0, 1, 0, 1],
- [-99., -98., -97., -96.])
-
+ A = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2, 2],
+ [1, 2, 0, 1, 0, 2],
+ [2., 5., 1.5, 4., 0.5, -7.]
+ )
+ B = gb.Matrix.from_coo(
+ [0, 0, 1, 1],
+ [0, 1, 0, 1],
+ [-99., -98., -97., -96.]
+ )
A[::2, ::2] << B
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,2.0,5.0
**1**,1.5,4.0,
**2**,0.5,,-7.0
.. csv-table:: B
- :class: inline
+ :class: inline matrix
:header: ,0,1
+ :stub-columns: 1
**0**,-99.0,-98.0
**1**,-97.0,-96.0
.. csv-table:: A[::2, ::2] << B
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,-99.0,2.0,-98.0
**1**,1.5,4.0,
@@ -416,29 +479,34 @@ Matrix-Vector Assignment Example:
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 0, 2],
- [2.0, 5.0, 1.5, 4.0, 0.5, -7.0])
+ A = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2, 2],
+ [1, 2, 0, 1, 0, 2],
+ [2., 5., 1.5, 4., 0.5, -7.]
+ )
v = gb.Vector.from_coo([2], [-99.])
A[1, :] << v
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,2.0,5.0
**1**,1.5,4.0,
**2**,0.5,,-7.0
.. csv-table:: v
- :class: inline
+ :class: inline matrix
:header: 0,1,2
,,-99.0
.. csv-table:: A[1, :] << v
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,2.0,5.0
**1**,,,-99.0
@@ -453,13 +521,13 @@ Vector-Scalar Assignment Example:
v[:4] << 99
.. csv-table:: v
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3,4,5,6
10,2,,40,-5,,24
.. csv-table:: v[:4] << 99
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3,4,5,6
99,99,99,99,-5,,24
@@ -488,13 +556,13 @@ function with the collection as the argument.
w << gb.unary.minv(v)
.. csv-table:: v
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3
10.0,20.0,,40.0
.. csv-table:: w << minv(v)
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3
0.1,0.05,,0.025
@@ -511,13 +579,13 @@ function with the collection as the argument.
w << gb.indexunary.index(v)
.. csv-table:: v
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3
10.0,20.0,,40.0
.. csv-table:: w << index(v)
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3
0,1,,3
@@ -530,18 +598,18 @@ function with the collection as the argument.
w = gb.Vector(float, v.size)
# These are all equivalent
- w << v.apply('minus', right=15)
+ w << v.apply("minus", right=15)
w << gb.binary.minus(v, right=15)
w << v - 15
.. csv-table:: v
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3
10.0,20.0,,40.0
.. csv-table:: w << v.apply('minus', right=15)
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3
-5.0,5.0,,25.0
@@ -557,25 +625,30 @@ Upper Triangle Example:
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 1, 2],
- [2.0, 5.0, 1.5, 4.0, 0.5, -7.0])
+ A = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2, 2],
+ [1, 2, 0, 2, 1, 2],
+ [2., 5., 1.5, 4., 0.5, -7.]
+ )
C = gb.Matrix(float, A.nrows, A.ncols)
# These are equivalent
- C << A.select('triu')
+ C << A.select("triu")
C << gb.select.triu(A)
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,2.0,5.0
**1**,1.5,,4.0
**2**,,0.5,-7.0
.. csv-table:: C << select.triu(A)
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,2.0,5.0
**1**,,,4.0
@@ -589,17 +662,17 @@ Select by Value Example:
w = gb.Vector(float, v.size)
# These are equivalent
- w << v.select('>=', 5)
+ w << v.select(">=", 5)
w << gb.select.value(v >= 5)
.. csv-table:: v
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3,4,5,6
10.0,2.0,,40.0,-5.0,,24.0
.. csv-table:: w << select.value(v >= 5)
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3,4,5,6
10.0,,,40.0,,,24.0
@@ -618,22 +691,26 @@ A monoid or aggregator is used to perform the reduction.
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 3, 0, 1, 0, 1],
- [2.0, 5.0, 1.5, 4.0, 0.5, -7.0])
+ A = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2, 2],
+ [1, 3, 0, 1, 0, 1],
+ [2., 5., 1.5, 4., 0.5, -7.]
+ )
w = gb.Vector(float, A.ncols)
- w << A.reduce_columnwise('times')
+ w << A.reduce_columnwise("times")
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1,2,3
+ :stub-columns: 1
**0**,,2.0,,5.0
**1**,1.5,4.0,,
**2**,0.5,-7.0,,
.. csv-table:: w << A.reduce_columnwise('times')
- :class: inline
+ :class: inline matrix
:header: ,0,1,2,3
,0.75,-56.0,,5.0
@@ -642,22 +719,26 @@ A monoid or aggregator is used to perform the reduction.
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 3, 0, 1, 0, 1],
- [2.0, 5.0, 1.5, 4.0, 0.5, -7.0])
+ A = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2, 2],
+ [1, 3, 0, 1, 0, 1],
+ [2., 5., 1.5, 4., 0.5, -7.]
+ )
s = gb.Scalar(float)
- s << A.reduce_scalar('max')
+ s << A.reduce_scalar("max")
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1,2,3
+ :stub-columns: 1
**0**,,2.0,,5.0
**1**,1.5,4.0,,
**2**,0.5,-7.0,,
.. csv-table:: s << A.reduce_scalar('max')
- :class: inline
+ :class: inline matrix
:header: ,,,,
5.0
@@ -670,17 +751,17 @@ A monoid or aggregator is used to perform the reduction.
s = gb.Scalar(int)
# These are equivalent
- s << v.reduce('argmin')
+ s << v.reduce("argmin")
s << gb.agg.argmin(v)
.. csv-table:: v
- :class: inline
+ :class: inline matrix
:header: 0,1,2,3,4,5,6
10.0,2.0,,40.0,-5.0,,24.0
.. csv-table:: s << argmin(v)
- :class: inline
+ :class: inline matrix
:header: ,,,
4
@@ -695,23 +776,28 @@ To force the transpose to be computed by itself, use it by itself as the right-h
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 3, 0, 1, 0, 2],
- [2.0, 5.0, 1.5, 4.0, 0.5, -7.0])
+ A = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2, 2],
+ [1, 3, 0, 1, 0, 2],
+ [2., 5., 1.5, 4., 0.5, -7.]
+ )
C = gb.Matrix(float, A.ncols, A.nrows)
C << A.T
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1,2,3
+ :stub-columns: 1
**0**,,2.0,,5.0
**1**,1.5,4.0,,
**2**,0.5,,-7.0,
.. csv-table:: C << A.T
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,1.5,0.5
**1**,2.0,4.0,
@@ -728,31 +814,41 @@ The Kronecker product uses a binary operator.
.. code-block:: python
- A = gb.Matrix.from_coo([0, 0, 1], [0, 1, 0], [1., -2., 3.])
- B = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 0, 2],
- [2.0, 5.0, 1.5, 4.0, 0.5, -7.0])
+ A = gb.Matrix.from_coo(
+ [0, 0, 1],
+ [0, 1, 0],
+ [1., -2., 3.]
+ )
+ B = gb.Matrix.from_coo(
+ [0, 0, 1, 1, 2, 2],
+ [1, 2, 0, 1, 0, 2],
+ [2., 5., 1.5, 4., 0.5, -7.]
+ )
C = gb.Matrix(float, A.nrows * B.nrows, A.ncols * B.ncols)
- C << A.kronecker(B, 'times')
+ C << A.kronecker(B, "times")
.. csv-table:: A
- :class: inline
+ :class: inline matrix
:header: ,0,1
+ :stub-columns: 1
**0**,1.0,-2.0
**1**,3.0,
.. csv-table:: B
- :class: inline
+ :class: inline matrix
:header: ,0,1,2
+ :stub-columns: 1
**0**,,2.0,5.0
**1**,1.5,4.0,
**2**,0.5,,-7.0
.. csv-table:: C << A.kronecker(B, 'times')
- :class: inline
+ :class: inline matrix
:header: ,0,1,2,3,4,5
+ :stub-columns: 1
**0**,,2.0,5.0,,-4.0,-10.0
**1**,1.5,4.0,,-3.0,-8.0,
diff --git a/docs/user_guide/operators.rst b/docs/user_guide/operators.rst
index 84fe9312c..8bb5e9fa8 100644
--- a/docs/user_guide/operators.rst
+++ b/docs/user_guide/operators.rst
@@ -89,9 +89,12 @@ registered from numpy are located in ``graphblas.binary.numpy``.
Monoids
-------
-Monoids extend the concept of a binary operator to require a single domain for all inputs and
-the output. Monoids are also associative, so the order of the inputs does not matter. And finally,
-monoids have a default identity such that ``A op identity == A``.
+Monoids extend the concept of a binary operator to require a single domain for all inputs and the output.
+Monoids are also associative, so the order of operations does not matter
+(for example, ``(a + b) + c == a + (b + c)``).
+GraphBLAS primarily uses *commutative monoids* (for example, ``a + b == b + a``),
+and all standard monoids in python-graphblas commute.
+And finally, monoids have a default identity such that ``A op identity == A``.
Monoids are commonly used for reductions, collapsing all elements down to a single value.
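+
+For example, a brief sketch (the ``identity`` attribute on typed monoids is an assumption here):
+
+.. code-block:: python
+
+ import graphblas as gb
+
+ v = gb.Vector.from_coo([0, 1, 3], [10, 20, 40])
+ s = v.reduce(gb.monoid.plus).new() # Scalar with value 70; grouping does not matter
+ gb.monoid.plus["INT64"].identity # 0, since x plus 0 == x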
@@ -273,7 +276,7 @@ Example usage:
minval = v.reduce(gb.monoid.min).value
# This will force the FP32 version of min to be used, possibly type casting the elements
- minvalFP32 = v.reduce(gb.monoid.min['FP32']).value
+ minvalFP32 = v.reduce(gb.monoid.min["FP32"]).value
The gb.op Namespace
@@ -311,12 +314,14 @@ each symbol. Each is detailed below.
The following objects will be used to demonstrate the behavior.
.. csv-table:: Vector v
+ :class: matrix
:header: 0,1,2,3,4,5
1.0,,2.0,3.5,,9.0
.. csv-table:: Vector w
+ :class: matrix
:header: 0,1,2,3,4,5
7.0,5.2,,3.0,,2.5
@@ -340,6 +345,7 @@ Addition performs an element-wise union between collections, adding overlapping
v + w
.. csv-table::
+ :class: matrix
:header: 0,1,2,3,4,5
8.0,5.2,2.0,6.5,,11.5
@@ -355,6 +361,7 @@ and negating any standalone elements from the right-hand object.
v - w
.. csv-table::
+ :class: matrix
:header: 0,1,2,3,4,5
-6.0,-5.2,2.0,0.5,,6.5
@@ -370,6 +377,7 @@ overlapping elements.
v * w
.. csv-table::
+ :class: matrix
:header: 0,1,2,3,4,5
7.0,,,10.5,,22.5
@@ -389,6 +397,7 @@ elements and always results in a floating-point dtype.
v / w
.. csv-table::
+ :class: matrix
:header: 0,1,2,3,4,5
0.142857,,,1.166667,,3.6
@@ -404,6 +413,7 @@ Dividing by zero with floor division will raise a ``ZeroDivisionError``.
v // w
.. csv-table::
+ :class: matrix
:header: 0,1,2,3,4,5
0.0,,,1.0,,3.0
@@ -419,6 +429,7 @@ of dividing overlapping elements.
v % w
.. csv-table::
+ :class: matrix
:header: 0,1,2,3,4,5
1.0,,,0.5,,1.5
@@ -431,9 +442,10 @@ the power of y for overlapping elements.
.. code-block:: python
- v ** w
+ v**w
.. csv-table::
+ :class: matrix
:header: 0,1,2,3,4,5
1.0,,,42.875,,243.0
@@ -452,6 +464,7 @@ rather than ``all(A == B)``
v > w
.. csv-table::
+ :class: matrix
:header: 0,1,2,3,4,5
False,,,True,,True
@@ -461,6 +474,7 @@ rather than ``all(A == B)``
v == w
.. csv-table::
+ :class: matrix
:header: 0,1,2,3,4,5
False,,,False,,False
diff --git a/docs/user_guide/recorder.rst b/docs/user_guide/recorder.rst
index ee6d2bbb9..3355d93ce 100644
--- a/docs/user_guide/recorder.rst
+++ b/docs/user_guide/recorder.rst
@@ -25,7 +25,9 @@ Instead, only the calls from the last iteration will be returned.
[0, 0, 1, 1, 2],
[1, 2, 2, 3, 3],
[2.0, 5.0, 1.5, 4.25, 0.5],
- nrows=4, ncols=4)
+ nrows=4,
+ ncols=4
+ )
v = Vector.from_coo([start_node], [0.0], size=4)
# Compute SSSP
diff --git a/docs/user_guide/udf.rst b/docs/user_guide/udf.rst
index 6c72535fc..e7b984b44 100644
--- a/docs/user_guide/udf.rst
+++ b/docs/user_guide/udf.rst
@@ -21,12 +21,13 @@ Example user-defined UnaryOp:
return x + 1
return x
- unary.register_new('force_odd', force_odd_func)
+ unary.register_new("force_odd", force_odd_func)
v = Vector.from_coo([0, 1, 3, 4, 5], [1, 2, 3, 8, 14])
w = v.apply(unary.force_odd).new()
.. csv-table:: w
+ :class: matrix
:header: 0,1,2,3,4,5
1,3,,3,9,15
@@ -48,6 +49,7 @@ Example lambda usage:
v.apply(lambda x: x % 5 - 2).new()
.. csv-table::
+ :class: matrix
:header: 0,1,2,3,4,5
-1,0,,1,1,2
diff --git a/environment.yml b/environment.yml
index 875ec5cbd..2bae0b76e 100644
--- a/environment.yml
+++ b/environment.yml
@@ -11,97 +11,100 @@
# It is okay to comment out sections below that you don't need such as viz or building docs.
name: graphblas-dev
channels:
- - conda-forge
- - nodefaults # Only install packages from conda-forge for faster solving
+ - conda-forge
+ - nodefaults # Only install packages from conda-forge for faster solving
dependencies:
- - python
- - donfig
- - numba
- - python-suitesparse-graphblas
- - pyyaml
- # For repr
- - pandas
- # For I/O
- - awkward
- - fast_matrix_market
- - networkx
- - scipy
- - sparse
- # For viz
- - datashader
- - hvplot
- - matplotlib
- # For linting
- - pre-commit
- # For testing
- - packaging
- - pytest-cov
- - tomli
- # For debugging
- - icecream
- - ipykernel
- - ipython
- # For type annotations
- - mypy
- # For building docs
- - nbsphinx
- - numpydoc
- - pydata-sphinx-theme
- - sphinx-panels
- # EXTRA (optional; uncomment as desired)
- # - autoflake
- # - black
- # - black-jupyter
- # - build
- # - codespell
- # - commonmark
- # - cython
- # - cytoolz
- # - distributed
- # - flake8
- # - flake8-bugbear
- # - flake8-comprehensions
- # - flake8-print
- # - flake8-quotes
- # - flake8-simplify
- # - gcc
- # - gh
- # - graph-tool
- # - xorg-libxcursor # for graph-tool
- # - grayskull
- # - h5py
- # - hiveplot
- # - igraph
- # - ipycytoscape
- # - isort
- # - jupyter
- # - jupyterlab
- # - line_profiler
- # - lxml
- # - make
- # - memory_profiler
- # - nbqa
- # - netcdf4
- # - networkit
- # - nxviz
- # - pycodestyle
- # - pydot
- # - pygraphviz
- # - pylint
- # - pytest-runner
- # - pytest-xdist
- # - python-graphviz
- # - python-igraph
- # - python-louvain
- # - pyupgrade
- # - ruff
- # - scalene
- # - setuptools-git-versioning
- # - snakeviz
- # - sphinx-lint
- # - sympy
- # - tuna
- # - twine
- # - vim
- # - yesqa
- # - zarr
+ - python
+ - donfig
+ - numba
+ - python-suitesparse-graphblas
+ - pyyaml
+ # For repr
+ - pandas
+ # For I/O
+ - awkward
+ - networkx
+ - scipy
+ - sparse
+ # For viz
+ - datashader
+ - hvplot
+ - matplotlib
+ # For linting
+ - pre-commit
+ # For testing
+ - packaging
+ - pytest-cov
+ - tomli
+ # For debugging
+ - icecream
+ - ipykernel
+ - ipython
+ # For type annotations
+ - mypy
+ # For building docs
+ - nbsphinx
+ - numpydoc
+ - pydata-sphinx-theme
+ - sphinx-panels
+ # For building logo
+ - drawsvg
+ - cairosvg
+ # EXTRA (optional; uncomment as desired)
+ # - autoflake
+ # - black
+ # - black-jupyter
+ # - codespell
+ # - commonmark
+ # - cython
+ # - cytoolz
+ # - distributed
+ # - flake8
+ # - flake8-bugbear
+ # - flake8-comprehensions
+ # - flake8-print
+ # - flake8-quotes
+ # - flake8-simplify
+ # - gcc
+ # - gh
+ # - git
+ # - graph-tool
+ # - xorg-libxcursor # for graph-tool
+ # - grayskull
+ # - h5py
+ # - hiveplot
+ # - igraph
+ # - ipycytoscape
+ # - isort
+ # - jupyter
+ # - jupyterlab
+ # - line_profiler
+ # - lxml
+ # - make
+ # - memory_profiler
+ # - nbqa
+ # - netcdf4
+ # - networkit
+ # - nxviz
+ # - pycodestyle
+ # - pydot
+ # - pygraphviz
+ # - pylint
+ # - pytest-runner
+ # - pytest-xdist
+ # - python-graphviz
+ # - python-igraph
+ # - python-louvain
+ # - pyupgrade
+ # - rich
+ # - ruff
+ # - scalene
+ # - scikit-network
+ # - setuptools-git-versioning
+ # - snakeviz
+ # - sphinx-lint
+ # - sympy
+ # - tuna
+ # - twine
+ # - vim
+ # - zarr
diff --git a/graphblas/__init__.py b/graphblas/__init__.py
index a9895cb6a..63110eeeb 100644
--- a/graphblas/__init__.py
+++ b/graphblas/__init__.py
@@ -39,6 +39,7 @@ def get_config():
backend = None
_init_params = None
_SPECIAL_ATTRS = {
+ "MAX_SIZE", # The maximum size of Vector and Matrix dimensions (GrB_INDEX_MAX + 1)
"Matrix",
"Recorder",
"Scalar",
@@ -205,6 +206,10 @@ def _load(name):
if name in {"Matrix", "Vector", "Scalar", "Recorder"}:
module = _import_module(f".core.{name.lower()}", __name__)
globals()[name] = getattr(module, name)
+ elif name == "MAX_SIZE":
+ from .core import lib
+
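+ # For SuiteSparse:GraphBLAS, GrB_INDEX_MAX is 2**60 - 1, so MAX_SIZE is 2**60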
+ globals()[name] = lib.GrB_INDEX_MAX + 1
else:
# Everything else is a module
globals()[name] = _import_module(f".{name}", __name__)
diff --git a/graphblas/agg/__init__.py b/graphblas/agg/__init__.py
index c1319facb..da7c13591 100644
--- a/graphblas/agg/__init__.py
+++ b/graphblas/agg/__init__.py
@@ -1,4 +1,4 @@
-"""`graphblas.agg` is an experimental module for exploring Aggregators.
+"""``graphblas.agg`` is an experimental module for exploring Aggregators.
Aggregators may be used in reduce methods:
- Matrix.reduce_rowwise
@@ -59,9 +59,9 @@
- ss.argmax
.. deprecated:: 2023.1.0
- Aggregators `first`, `last`, `first_index`, `last_index`, `argmin`, and `argmax` are
- deprecated in the `agg` namespace such as `agg.first`. Use them from `agg.ss` namespace
- instead such as `agg.ss.first`. Will be removed in version 2023.9.0 or later.
+ Aggregators ``first``, ``last``, ``first_index``, ``last_index``, ``argmin``, and ``argmax``
+ are deprecated in the ``agg`` namespace such as ``agg.first``. Use them from ``agg.ss``
+ namespace instead such as ``agg.ss.first``. Will be removed in version 2023.9.0 or later.
# Possible aggregators:
# - absolute_deviation, sum(abs(x - mean(x))), sum_absminus(x, mean(x))
@@ -73,7 +73,8 @@
# - bxnor monoid: even bits
# - bnor monoid: odd bits
"""
-# All items are dynamically added by classes in core/agg.py
+
+# All items are dynamically added by classes in core/operator/agg.py
# This module acts as a container of Aggregator instances
_deprecated = {}
diff --git a/graphblas/binary/numpy.py b/graphblas/binary/numpy.py
index 7c03977e4..bb22d0b07 100644
--- a/graphblas/binary/numpy.py
+++ b/graphblas/binary/numpy.py
@@ -5,6 +5,7 @@
https://numba.readthedocs.io/en/stable/reference/numpysupported.html#math-operations
"""
+
import numpy as _np
from .. import _STANDARD_OPERATOR_NAMES
diff --git a/graphblas/binary/ss.py b/graphblas/binary/ss.py
index e45cbcda0..0c294e322 100644
--- a/graphblas/binary/ss.py
+++ b/graphblas/binary/ss.py
@@ -1,3 +1,6 @@
from ..core import operator
+from ..core.ss.binary import register_new # noqa: F401
+
+_delayed = {}
del operator
diff --git a/graphblas/core/agg.py b/graphblas/core/agg.py
deleted file mode 100644
index b9f1977ab..000000000
--- a/graphblas/core/agg.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""graphblas.core.agg namespace is deprecated; please use graphblas.core.operator.agg instead.
-
-.. deprecated:: 2023.3.0
-`graphblas.core.agg` will be removed in a future release.
-Use `graphblas.core.operator.agg` instead.
-Will be removed in version 2023.11.0 or later.
-
-"""
-import warnings
-
-from .operator.agg import * # pylint: disable=wildcard-import,unused-wildcard-import
-
-warnings.warn(
- "graphblas.core.agg namespace is deprecated; please use graphblas.core.operator.agg instead.",
- DeprecationWarning,
- stacklevel=1,
-)
diff --git a/graphblas/core/automethods.py b/graphblas/core/automethods.py
index 98dc61137..600a6e139 100644
--- a/graphblas/core/automethods.py
+++ b/graphblas/core/automethods.py
@@ -1,12 +1,13 @@
"""Define functions to use as property methods on expressions.
-These will automatically compute the value and avoid the need for `.new()`.
+These will automatically compute the value and avoid the need for ``.new()``.
To automatically create the functions, run:
$ python -m graphblas.core.automethods
"""
+
from .. import config
@@ -213,6 +214,10 @@ def outer(self):
return self._get_value("outer")
+def power(self):
+ return self._get_value("power")
+
+
def reduce(self):
return self._get_value("reduce")
@@ -277,10 +282,6 @@ def to_edgelist(self):
return self._get_value("to_edgelist")
-def to_values(self):
- return self._get_value("to_values")
-
-
def value(self):
return self._get_value("value")
@@ -394,7 +395,6 @@ def _main():
"ss",
"to_coo",
"to_dense",
- "to_values",
}
vector = {
"_as_matrix",
@@ -410,6 +410,7 @@ def _main():
"kronecker",
"mxm",
"mxv",
+ "power",
"reduce_columnwise",
"reduce_rowwise",
"reduce_scalar",
diff --git a/graphblas/core/base.py b/graphblas/core/base.py
index a4e48b612..24a49ba1a 100644
--- a/graphblas/core/base.py
+++ b/graphblas/core/base.py
@@ -263,23 +263,31 @@ def __call__(
)
def __or__(self, other):
- from .infix import _ewise_infix_expr
+ from .infix import _ewise_infix_expr, _ewise_mult_expr_types
+ if isinstance(other, _ewise_mult_expr_types):
+ raise TypeError("XXX")
return _ewise_infix_expr(self, other, method="ewise_add", within="__or__")
def __ror__(self, other):
- from .infix import _ewise_infix_expr
+ from .infix import _ewise_infix_expr, _ewise_mult_expr_types
+ if isinstance(other, _ewise_mult_expr_types):
+ raise TypeError("Cannot use | with an unevaluated & expression; compute the & part first (e.g. with .new())")
return _ewise_infix_expr(other, self, method="ewise_add", within="__ror__")
def __and__(self, other):
- from .infix import _ewise_infix_expr
+ from .infix import _ewise_add_expr_types, _ewise_infix_expr
+ if isinstance(other, _ewise_add_expr_types):
+ raise TypeError("Cannot use & with an unevaluated | expression; compute the | part first (e.g. with .new())")
return _ewise_infix_expr(self, other, method="ewise_mult", within="__and__")
def __rand__(self, other):
- from .infix import _ewise_infix_expr
+ from .infix import _ewise_add_expr_types, _ewise_infix_expr
+ if isinstance(other, _ewise_add_expr_types):
+ raise TypeError("Cannot use & with an unevaluated | expression; compute the | part first (e.g. with .new())")
return _ewise_infix_expr(other, self, method="ewise_mult", within="__rand__")
def __matmul__(self, other):
@@ -348,7 +356,7 @@ def _update(self, expr, mask=None, accum=None, replace=False, input_mask=None, *
return
if opts:
# Ignore opts for now
- descriptor_lookup(**opts)
+ desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context)
self.value = expr
return
@@ -371,7 +379,7 @@ def _update(self, expr, mask=None, accum=None, replace=False, input_mask=None, *
else:
if opts:
# Ignore opts for now
- descriptor_lookup(**opts)
+ desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context)
self.value = expr
return
else:
@@ -505,7 +513,7 @@ def _name_html(self):
_expect_op = _expect_op
# Don't let non-scalars be coerced to numpy arrays
- def __array__(self, dtype=None):
+ def __array__(self, dtype=None, *, copy=None):
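+ # `copy` is accepted for NumPy 2 compatibility; this method unconditionally raises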
raise TypeError(
f"{type(self).__name__} can't be directly converted to a numpy array; "
f"perhaps use `{self.name}.to_coo()` method instead."
@@ -571,7 +579,7 @@ def _new(self, dtype, mask, name, is_cscalar=None, **opts):
):
if opts:
# Ignore opts for now
- descriptor_lookup(**opts)
+ desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context)
if self._is_scalar and self._value._is_cscalar != is_cscalar:
return self._value.dup(is_cscalar=is_cscalar, name=name)
rv = self._value
diff --git a/graphblas/core/descriptor.py b/graphblas/core/descriptor.py
index 1e195e3fe..11f634afd 100644
--- a/graphblas/core/descriptor.py
+++ b/graphblas/core/descriptor.py
@@ -26,6 +26,7 @@ def __init__(
self.mask_structure = mask_structure
self.transpose_first = transpose_first
self.transpose_second = transpose_second
+ self._context = None # Used by SuiteSparse:GraphBLAS 8
@property
def _carg(self):
diff --git a/graphblas/dtypes.py b/graphblas/core/dtypes.py
similarity index 67%
rename from graphblas/dtypes.py
rename to graphblas/core/dtypes.py
index 920610b95..2d4178b14 100644
--- a/graphblas/dtypes.py
+++ b/graphblas/core/dtypes.py
@@ -1,20 +1,17 @@
-import warnings as _warnings
+import warnings
+from ast import literal_eval
-import numpy as _np
-from numpy import find_common_type as _find_common_type
-from numpy import promote_types as _promote_types
+import numpy as np
+from numpy import promote_types, result_type
-from . import backend
-from .core import NULL as _NULL
-from .core import _has_numba
-from .core import ffi as _ffi
-from .core import lib as _lib
+from .. import backend, dtypes
+from ..core import NULL, _has_numba, ffi, lib
if _has_numba:
- import numba as _numba
+ import numba
# Default assumption unless FC32/FC64 are found in lib
-_supports_complex = hasattr(_lib, "GrB_FC64") or hasattr(_lib, "GxB_FC64")
+_supports_complex = hasattr(lib, "GrB_FC64") or hasattr(lib, "GxB_FC64")
class DataType:
@@ -26,7 +23,7 @@ def __init__(self, name, gb_obj, gb_name, c_type, numba_type, np_type):
self.gb_name = gb_name
self.c_type = c_type
self.numba_type = numba_type
- self.np_type = _np.dtype(np_type)
+ self.np_type = np.dtype(np_type) if np_type is not None else None
def __repr__(self):
return self.name
@@ -62,7 +59,7 @@ def _carg(self):
@property
def _is_anonymous(self):
- return globals().get(self.name) is not self
+ return getattr(dtypes, self.name, None) is not self
@property
def _is_udt(self):
@@ -80,27 +77,29 @@ def _deserialize(name, dtype, is_anonymous):
def register_new(name, dtype):
if not name.isidentifier():
raise ValueError(f"`name` argument must be a valid Python identifier; got: {name!r}")
- if name in _registry or name in globals():
+ if name in _registry or hasattr(dtypes, name):
raise ValueError(f"{name!r} name for dtype is unavailable")
rv = register_anonymous(dtype, name)
_registry[name] = rv
- globals()[name] = rv
+ setattr(dtypes, name, rv)
return rv
def register_anonymous(dtype, name=None):
try:
- dtype = _np.dtype(dtype)
+ dtype = np.dtype(dtype)
except TypeError:
if isinstance(dtype, dict):
# Allow dtypes such as `{'x': int, 'y': float}` for convenience
- dtype = _np.dtype([(key, lookup_dtype(val).np_type) for key, val in dtype.items()])
+ dtype = np.dtype(
+ [(key, lookup_dtype(val).np_type) for key, val in dtype.items()], align=True
+ )
elif isinstance(dtype, str) and "[" in dtype and dtype.endswith("]"):
# Allow dtypes such as `"INT64[3, 4]"` for convenience
base_dtype, shape = dtype.split("[", 1)
base_dtype = lookup_dtype(base_dtype)
- shape = _np.lib.format.safe_eval(f"[{shape}")
- dtype = _np.dtype((base_dtype.np_type, shape))
+ shape = literal_eval(f"[{shape}")
+ dtype = np.dtype((base_dtype.np_type, shape))
else:
raise
if dtype in _registry:
@@ -114,36 +113,46 @@ def register_anonymous(dtype, name=None):
if dtype.hasobject:
raise ValueError("dtype must not allow Python objects")
- from .exceptions import check_status_carg
+ from ..exceptions import check_status_carg
+
+ gb_obj = ffi.new("GrB_Type*")
- gb_obj = _ffi.new("GrB_Type*")
- if backend == "suitesparse":
+ if hasattr(lib, "GrB_Type_set_String"):
+ # We name this so that we can serialize and deserialize UDTs
+ # We don't yet have C definitions
+ np_repr = _dtype_to_string(dtype)
+ status = lib.GrB_Type_new(gb_obj, dtype.itemsize)
+ check_status_carg(status, "Type", gb_obj[0])
+ val_obj = ffi.new("char[]", np_repr.encode())
+ status = lib.GrB_Type_set_String(gb_obj[0], val_obj, lib.GrB_NAME)
+ elif backend == "suitesparse":
+ # For SuiteSparse < 9
# We name this so that we can serialize and deserialize UDTs
# We don't yet have C definitions
np_repr = _dtype_to_string(dtype).encode()
- if len(np_repr) > _lib.GxB_MAX_NAME_LEN:
+ if len(np_repr) > lib.GxB_MAX_NAME_LEN:
msg = (
f"UDT repr is too large to serialize ({len(repr(dtype).encode())} > "
- f"{_lib.GxB_MAX_NAME_LEN})."
+ f"{lib.GxB_MAX_NAME_LEN})."
)
if name is not None:
- np_repr = name.encode()[: _lib.GxB_MAX_NAME_LEN]
+ np_repr = name.encode()[: lib.GxB_MAX_NAME_LEN]
else:
- np_repr = np_repr[: _lib.GxB_MAX_NAME_LEN]
- _warnings.warn(
+ np_repr = np_repr[: lib.GxB_MAX_NAME_LEN]
+ warnings.warn(
f"{msg}. It will use the following name, "
f"and the dtype may need to be specified when deserializing: {np_repr}",
stacklevel=2,
)
- status = _lib.GxB_Type_new(gb_obj, dtype.itemsize, np_repr, _NULL)
+ status = lib.GxB_Type_new(gb_obj, dtype.itemsize, np_repr, NULL)
else:
- status = _lib.GrB_Type_new(gb_obj, dtype.itemsize)
+ status = lib.GrB_Type_new(gb_obj, dtype.itemsize)
check_status_carg(status, "Type", gb_obj[0])
# For now, let's use "opaque" unsigned bytes for the c type.
if name is None:
name = _default_name(dtype)
- numba_type = _numba.typeof(dtype).dtype if _has_numba else None
+ numba_type = numba.typeof(dtype).dtype if _has_numba else None
rv = DataType(name, gb_obj, None, f"uint8_t[{dtype.itemsize}]", numba_type, dtype)
_registry[gb_obj] = rv
_registry[dtype] = rv
@@ -155,153 +164,153 @@ def register_anonymous(dtype, name=None):
BOOL = DataType(
"BOOL",
- _lib.GrB_BOOL,
+ lib.GrB_BOOL,
"GrB_BOOL",
"_Bool",
- _numba.types.bool_ if _has_numba else None,
- _np.bool_,
+ numba.types.bool_ if _has_numba else None,
+ np.bool_,
)
INT8 = DataType(
- "INT8", _lib.GrB_INT8, "GrB_INT8", "int8_t", _numba.types.int8 if _has_numba else None, _np.int8
+ "INT8", lib.GrB_INT8, "GrB_INT8", "int8_t", numba.types.int8 if _has_numba else None, np.int8
)
UINT8 = DataType(
"UINT8",
- _lib.GrB_UINT8,
+ lib.GrB_UINT8,
"GrB_UINT8",
"uint8_t",
- _numba.types.uint8 if _has_numba else None,
- _np.uint8,
+ numba.types.uint8 if _has_numba else None,
+ np.uint8,
)
INT16 = DataType(
"INT16",
- _lib.GrB_INT16,
+ lib.GrB_INT16,
"GrB_INT16",
"int16_t",
- _numba.types.int16 if _has_numba else None,
- _np.int16,
+ numba.types.int16 if _has_numba else None,
+ np.int16,
)
UINT16 = DataType(
"UINT16",
- _lib.GrB_UINT16,
+ lib.GrB_UINT16,
"GrB_UINT16",
"uint16_t",
- _numba.types.uint16 if _has_numba else None,
- _np.uint16,
+ numba.types.uint16 if _has_numba else None,
+ np.uint16,
)
INT32 = DataType(
"INT32",
- _lib.GrB_INT32,
+ lib.GrB_INT32,
"GrB_INT32",
"int32_t",
- _numba.types.int32 if _has_numba else None,
- _np.int32,
+ numba.types.int32 if _has_numba else None,
+ np.int32,
)
UINT32 = DataType(
"UINT32",
- _lib.GrB_UINT32,
+ lib.GrB_UINT32,
"GrB_UINT32",
"uint32_t",
- _numba.types.uint32 if _has_numba else None,
- _np.uint32,
+ numba.types.uint32 if _has_numba else None,
+ np.uint32,
)
INT64 = DataType(
"INT64",
- _lib.GrB_INT64,
+ lib.GrB_INT64,
"GrB_INT64",
"int64_t",
- _numba.types.int64 if _has_numba else None,
- _np.int64,
+ numba.types.int64 if _has_numba else None,
+ np.int64,
)
# _Index (like UINT64) is for internal use only and shouldn't be exposed to the user
_INDEX = DataType(
"UINT64",
- _lib.GrB_UINT64,
+ lib.GrB_UINT64,
"GrB_Index",
"GrB_Index",
- _numba.types.uint64 if _has_numba else None,
- _np.uint64,
+ numba.types.uint64 if _has_numba else None,
+ np.uint64,
)
UINT64 = DataType(
"UINT64",
- _lib.GrB_UINT64,
+ lib.GrB_UINT64,
"GrB_UINT64",
"uint64_t",
- _numba.types.uint64 if _has_numba else None,
- _np.uint64,
+ numba.types.uint64 if _has_numba else None,
+ np.uint64,
)
FP32 = DataType(
"FP32",
- _lib.GrB_FP32,
+ lib.GrB_FP32,
"GrB_FP32",
"float",
- _numba.types.float32 if _has_numba else None,
- _np.float32,
+ numba.types.float32 if _has_numba else None,
+ np.float32,
)
FP64 = DataType(
"FP64",
- _lib.GrB_FP64,
+ lib.GrB_FP64,
"GrB_FP64",
"double",
- _numba.types.float64 if _has_numba else None,
- _np.float64,
+ numba.types.float64 if _has_numba else None,
+ np.float64,
)
-if _supports_complex and hasattr(_lib, "GxB_FC32"):
+if _supports_complex and hasattr(lib, "GxB_FC32"):
FC32 = DataType(
"FC32",
- _lib.GxB_FC32,
+ lib.GxB_FC32,
"GxB_FC32",
"float _Complex",
- _numba.types.complex64 if _has_numba else None,
- _np.complex64,
+ numba.types.complex64 if _has_numba else None,
+ np.complex64,
)
-if _supports_complex and hasattr(_lib, "GrB_FC32"): # pragma: no cover (unused)
+if _supports_complex and hasattr(lib, "GrB_FC32"): # pragma: no cover (unused)
FC32 = DataType(
"FC32",
- _lib.GrB_FC32,
+ lib.GrB_FC32,
"GrB_FC32",
"float _Complex",
- _numba.types.complex64 if _has_numba else None,
- _np.complex64,
+ numba.types.complex64 if _has_numba else None,
+ np.complex64,
)
-if _supports_complex and hasattr(_lib, "GxB_FC64"):
+if _supports_complex and hasattr(lib, "GxB_FC64"):
FC64 = DataType(
"FC64",
- _lib.GxB_FC64,
+ lib.GxB_FC64,
"GxB_FC64",
"double _Complex",
- _numba.types.complex128 if _has_numba else None,
- _np.complex128,
+ numba.types.complex128 if _has_numba else None,
+ np.complex128,
)
-if _supports_complex and hasattr(_lib, "GrB_FC64"): # pragma: no cover (unused)
+if _supports_complex and hasattr(lib, "GrB_FC64"): # pragma: no cover (unused)
FC64 = DataType(
"FC64",
- _lib.GrB_FC64,
+ lib.GrB_FC64,
"GrB_FC64",
"double _Complex",
- _numba.types.complex128 if _has_numba else None,
- _np.complex128,
+ numba.types.complex128 if _has_numba else None,
+ np.complex128,
)
# Used for testing user-defined functions
_sample_values = {
- INT8: _np.int8(1),
- UINT8: _np.uint8(1),
- INT16: _np.int16(1),
- UINT16: _np.uint16(1),
- INT32: _np.int32(1),
- UINT32: _np.uint32(1),
- INT64: _np.int64(1),
- UINT64: _np.uint64(1),
- FP32: _np.float32(0.5),
- FP64: _np.float64(0.5),
- BOOL: _np.bool_(True),
+ INT8: np.int8(1),
+ UINT8: np.uint8(1),
+ INT16: np.int16(1),
+ UINT16: np.uint16(1),
+ INT32: np.int32(1),
+ UINT32: np.uint32(1),
+ INT64: np.int64(1),
+ UINT64: np.uint64(1),
+ FP32: np.float32(0.5),
+ FP64: np.float64(0.5),
+ BOOL: np.bool_(True),
}
if _supports_complex:
_sample_values.update(
{
- FC32: _np.complex64(complex(0, 0.5)),
- FC64: _np.complex128(complex(0, 0.5)),
+ FC32: np.complex64(complex(0, 0.5)),
+ FC64: np.complex128(complex(0, 0.5)),
}
)
@@ -377,8 +386,7 @@ def lookup_dtype(key, value=None):
def unify(type1, type2, *, is_left_scalar=False, is_right_scalar=False):
- """
- Returns a type that can hold both type1 and type2.
+ """Returns a type that can hold both type1 and type2.
For example:
unify(INT32, INT64) -> INT64
@@ -389,19 +397,11 @@ def unify(type1, type2, *, is_left_scalar=False, is_right_scalar=False):
if type1 is type2:
return type1
if is_left_scalar:
- scalar_types = [type1.np_type]
- array_types = []
- elif not is_right_scalar:
- # Using `promote_types` is faster than `find_common_type`
- return lookup_dtype(_promote_types(type1.np_type, type2.np_type))
- else:
- scalar_types = []
- array_types = [type1.np_type]
- if is_right_scalar:
- scalar_types.append(type2.np_type)
- else:
- array_types.append(type2.np_type)
- return lookup_dtype(_find_common_type(array_types, scalar_types))
+        if not is_right_scalar:
+            return lookup_dtype(result_type(np.array(0, type1.np_type), type2.np_type))
+    elif is_right_scalar:
+        return lookup_dtype(result_type(type1.np_type, np.array(0, type2.np_type)))
+    return lookup_dtype(promote_types(type1.np_type, type2.np_type))
def _default_name(dtype):
@@ -431,7 +431,7 @@ def _dtype_to_string(dtype):
>>> dtype == new_dtype
True
"""
- if isinstance(dtype, _np.dtype) and dtype not in _registry:
+ if isinstance(dtype, np.dtype) and dtype not in _registry:
np_type = dtype
else:
dtype = lookup_dtype(dtype)
@@ -440,11 +440,11 @@ def _dtype_to_string(dtype):
np_type = dtype.np_type
s = str(np_type)
try:
- if _np.dtype(_np.lib.format.safe_eval(s)) == np_type: # pragma: no branch (safety)
+ if np.dtype(literal_eval(s)) == np_type: # pragma: no branch (safety)
return s
except Exception:
pass
- if _np.dtype(np_type.str) != np_type: # pragma: no cover (safety)
+ if np.dtype(np_type.str) != np_type: # pragma: no cover (safety)
raise ValueError(f"Unable to reliably convert dtype to string and back: {dtype}")
return repr(np_type.str)
@@ -459,5 +459,5 @@ def _string_to_dtype(s):
return lookup_dtype(s)
except Exception:
pass
- np_type = _np.dtype(_np.lib.format.safe_eval(s))
+ np_type = np.dtype(literal_eval(s))
return lookup_dtype(np_type)
diff --git a/graphblas/core/expr.py b/graphblas/core/expr.py
index 48839bcff..efec2db5f 100644
--- a/graphblas/core/expr.py
+++ b/graphblas/core/expr.py
@@ -147,13 +147,13 @@ def py_indices(self):
return self.indices[0]._py_index()
def parse_indices(self, indices, shape):
- """
- Returns
+ """Returns
-------
[(rows, rowsize), (cols, colsize)] for Matrix
[(idx, idx_size)] for Vector
Within each tuple, if the index is of type int, the size will be None
+
"""
if len(shape) == 1:
if type(indices) is tuple:
@@ -312,8 +312,8 @@ def update(self, expr, **opts):
Updater(self.parent, opts=opts)._setitem(self.resolved_indexes, expr, is_submask=False)
def new(self, dtype=None, *, mask=None, input_mask=None, name=None, **opts):
- """
- Force extraction of the indexes into a new object
+ """Force extraction of the indexes into a new object.
+
dtype and mask are the only controllable parameters.
"""
if input_mask is not None:
@@ -421,7 +421,7 @@ def _setitem(self, resolved_indexes, obj, *, is_submask):
# Fast path using assignElement
if self.opts:
# Ignore opts for now
- descriptor_lookup(**self.opts)
+ desc = descriptor_lookup(**self.opts) # noqa: F841 (keep desc in scope for context)
self.parent._assign_element(resolved_indexes, obj)
else:
mask = self.kwargs.get("mask")
diff --git a/graphblas/core/formatting.py b/graphblas/core/formatting.py
index aefb87f94..0b6252101 100644
--- a/graphblas/core/formatting.py
+++ b/graphblas/core/formatting.py
@@ -630,7 +630,7 @@ def create_header(type_name, keys, vals, *, lower_border=False, name="", quote=T
name = f'"{name}"'
key_text = []
val_text = []
- for key, val in zip(keys, vals):
+ for key, val in zip(keys, vals, strict=True):
width = max(len(key), len(val)) + 2
key_text.append(key.rjust(width))
val_text.append(val.rjust(width))
diff --git a/graphblas/core/infix.py b/graphblas/core/infix.py
index bd1d10a92..24c109639 100644
--- a/graphblas/core/infix.py
+++ b/graphblas/core/infix.py
@@ -1,8 +1,9 @@
from .. import backend, binary
from ..dtypes import BOOL
+from ..exceptions import DimensionMismatch
from ..monoid import land, lor
from ..semiring import any_pair
-from . import automethods, utils
+from . import automethods, recorder, utils
from .base import _expect_op, _expect_type
from .expr import InfixExprBase
from .mask import Mask
@@ -125,6 +126,19 @@ class ScalarEwiseAddExpr(ScalarInfixExpr):
_to_expr = _ewise_add_to_expr
+ # Allow e.g. `plus(x | y | z)`
+ __or__ = Scalar.__or__
+ __ror__ = Scalar.__ror__
+ _ewise_add = Scalar._ewise_add
+ _ewise_union = Scalar._ewise_union
+
+ # Don't allow e.g. `plus(x | y & z)`
+ def __and__(self, other):
+ raise TypeError("Mixing & and | is not allowed, e.g. plus(x | y & z); compute one part first")
+
+ def __rand__(self, other):
+ raise TypeError("Mixing & and | is not allowed, e.g. plus(x | y & z); compute one part first")
+
class ScalarEwiseMultExpr(ScalarInfixExpr):
__slots__ = ()
@@ -134,6 +148,18 @@ class ScalarEwiseMultExpr(ScalarInfixExpr):
_to_expr = _ewise_mult_to_expr
+ # Allow e.g. `plus(x & y & z)`
+ __and__ = Scalar.__and__
+ __rand__ = Scalar.__rand__
+ _ewise_mult = Scalar._ewise_mult
+
+ # Don't allow e.g. `plus(x | y & z)`
+ def __or__(self, other):
+ raise TypeError("Mixing & and | is not allowed, e.g. plus(x & y | z); compute one part first")
+
+ def __ror__(self, other):
+ raise TypeError("Mixing & and | is not allowed, e.g. plus(x & y | z); compute one part first")
+
class ScalarMatMulExpr(ScalarInfixExpr):
__slots__ = ()
@@ -210,7 +236,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
to_coo = wrapdoc(Vector.to_coo)(property(automethods.to_coo))
to_dense = wrapdoc(Vector.to_dense)(property(automethods.to_dense))
to_dict = wrapdoc(Vector.to_dict)(property(automethods.to_dict))
- to_values = wrapdoc(Vector.to_values)(property(automethods.to_values))
vxm = wrapdoc(Vector.vxm)(property(automethods.vxm))
wait = wrapdoc(Vector.wait)(property(automethods.wait))
# These raise exceptions
@@ -238,6 +263,15 @@ class VectorEwiseAddExpr(VectorInfixExpr):
_to_expr = _ewise_add_to_expr
+ # Allow e.g. `plus(x | y | z)`
+ __or__ = Vector.__or__
+ __ror__ = Vector.__ror__
+ _ewise_add = Vector._ewise_add
+ _ewise_union = Vector._ewise_union
+ # Don't allow e.g. `plus(x | y & z)`
+ __and__ = ScalarEwiseAddExpr.__and__ # raises
+ __rand__ = ScalarEwiseAddExpr.__rand__ # raises
+
class VectorEwiseMultExpr(VectorInfixExpr):
__slots__ = ()
@@ -247,6 +281,14 @@ class VectorEwiseMultExpr(VectorInfixExpr):
_to_expr = _ewise_mult_to_expr
+ # Allow e.g. `plus(x & y & z)`
+ __and__ = Vector.__and__
+ __rand__ = Vector.__rand__
+ _ewise_mult = Vector._ewise_mult
+ # Don't allow e.g. `plus(x | y & z)`
+ __or__ = ScalarEwiseMultExpr.__or__ # raises
+ __ror__ = ScalarEwiseMultExpr.__ror__ # raises
+
class VectorMatMulExpr(VectorInfixExpr):
__slots__ = "method_name"
@@ -258,6 +300,11 @@ def __init__(self, left, right, *, method_name, size):
self.method_name = method_name
self._size = size
+ __matmul__ = Vector.__matmul__
+ __rmatmul__ = Vector.__rmatmul__
+ _inner = Vector._inner
+ _vxm = Vector._vxm
+
utils._output_types[VectorEwiseAddExpr] = Vector
utils._output_types[VectorEwiseMultExpr] = Vector
@@ -269,6 +316,7 @@ class MatrixInfixExpr(InfixExprBase):
ndim = 2
output_type = MatrixExpression
_is_transposed = False
+ __networkx_backend__ = "graphblas"
__networkx_plugin__ = "graphblas"
def __init__(self, left, right):
@@ -330,6 +378,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
mxv = wrapdoc(Matrix.mxv)(property(automethods.mxv))
name = wrapdoc(Matrix.name)(property(automethods.name)).setter(automethods._set_name)
nvals = wrapdoc(Matrix.nvals)(property(automethods.nvals))
+ power = wrapdoc(Matrix.power)(property(automethods.power))
reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(automethods.reduce_columnwise))
reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(automethods.reduce_rowwise))
reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(automethods.reduce_scalar))
@@ -347,7 +396,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
to_dense = wrapdoc(Matrix.to_dense)(property(automethods.to_dense))
to_dicts = wrapdoc(Matrix.to_dicts)(property(automethods.to_dicts))
to_edgelist = wrapdoc(Matrix.to_edgelist)(property(automethods.to_edgelist))
- to_values = wrapdoc(Matrix.to_values)(property(automethods.to_values))
wait = wrapdoc(Matrix.wait)(property(automethods.wait))
# These raise exceptions
__array__ = Matrix.__array__
@@ -374,6 +422,15 @@ class MatrixEwiseAddExpr(MatrixInfixExpr):
_to_expr = _ewise_add_to_expr
+ # Allow e.g. `plus(x | y | z)`
+ __or__ = Matrix.__or__
+ __ror__ = Matrix.__ror__
+ _ewise_add = Matrix._ewise_add
+ _ewise_union = Matrix._ewise_union
+ # Don't allow e.g. `plus(x | y & z)`
+ __and__ = VectorEwiseAddExpr.__and__ # raises
+ __rand__ = VectorEwiseAddExpr.__rand__ # raises
+
class MatrixEwiseMultExpr(MatrixInfixExpr):
__slots__ = ()
@@ -383,6 +440,14 @@ class MatrixEwiseMultExpr(MatrixInfixExpr):
_to_expr = _ewise_mult_to_expr
+ # Allow e.g. `plus(x & y & z)`
+ __and__ = Matrix.__and__
+ __rand__ = Matrix.__rand__
+ _ewise_mult = Matrix._ewise_mult
+ # Don't allow e.g. `plus(x | y & z)`
+ __or__ = VectorEwiseMultExpr.__or__ # raises
+ __ror__ = VectorEwiseMultExpr.__ror__ # raises
+
class MatrixMatMulExpr(MatrixInfixExpr):
__slots__ = ()
@@ -395,49 +460,73 @@ def __init__(self, left, right, *, nrows, ncols):
self._nrows = nrows
self._ncols = ncols
+ __matmul__ = Matrix.__matmul__
+ __rmatmul__ = Matrix.__rmatmul__
+ _mxm = Matrix._mxm
+ _mxv = Matrix._mxv
+
utils._output_types[MatrixEwiseAddExpr] = Matrix
utils._output_types[MatrixEwiseMultExpr] = Matrix
utils._output_types[MatrixMatMulExpr] = Matrix
+def _dummy(obj):
+    with recorder.skip_record:
+        return output_type(obj)(BOOL, *obj.shape, name="")
+
+
+def _mismatched(left, right, method, op):
+ # Create dummy expression to raise on incompatible dimensions
+ getattr(_dummy(left) if isinstance(left, InfixExprBase) else left, method)(
+ _dummy(right) if isinstance(right, InfixExprBase) else right, op
+ )
+ raise DimensionMismatch # pragma: no cover
+
+
def _ewise_infix_expr(left, right, *, method, within):
left_type = output_type(left)
right_type = output_type(right)
types = {Vector, Matrix, TransposedMatrix}
if left_type in types and right_type in types:
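+        # A Vector mixed elementwise with a Matrix broadcasts: a left Vector
+        # must match the Matrix's row count, a right Vector its column count.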
- # Create dummy expression to check compatibility of dimensions, etc.
- expr = getattr(left, method)(right, binary.any)
- if expr.output_type is Vector:
- if method == "ewise_mult":
- return VectorEwiseMultExpr(left, right)
- return VectorEwiseAddExpr(left, right)
+ if left_type is Vector:
+ if right_type is Vector:
+ if left._size != right._size:
+ _mismatched(left, right, method, binary.first)
+ if method == "ewise_mult":
+ return VectorEwiseMultExpr(left, right)
+ return VectorEwiseAddExpr(left, right)
+ if left._size != right._nrows:
+ _mismatched(left, right, method, binary.first)
+ elif right_type is Vector:
+ if left._ncols != right._size:
+ _mismatched(left, right, method, binary.first)
+ elif left.shape != right.shape:
+ _mismatched(left, right, method, binary.first)
if method == "ewise_mult":
return MatrixEwiseMultExpr(left, right)
return MatrixEwiseAddExpr(left, right)
+
if within == "__or__" and isinstance(right, Mask):
return right.__ror__(left)
if within == "__and__" and isinstance(right, Mask):
return right.__rand__(left)
if left_type in types:
left._expect_type(right, tuple(types), within=within, argname="right")
- elif right_type in types:
+ if right_type in types:
right._expect_type(left, tuple(types), within=within, argname="left")
- elif left_type is Scalar:
- # Create dummy expression to check compatibility of dimensions, etc.
- expr = getattr(left, method)(right, binary.any)
+ if left_type is Scalar:
if method == "ewise_mult":
return ScalarEwiseMultExpr(left, right)
return ScalarEwiseAddExpr(left, right)
- elif right_type is Scalar:
- # Create dummy expression to check compatibility of dimensions, etc.
- expr = getattr(right, method)(left, binary.any)
+ if right_type is Scalar:
if method == "ewise_mult":
return ScalarEwiseMultExpr(right, left)
return ScalarEwiseAddExpr(right, left)
- else: # pragma: no cover (sanity)
- raise TypeError(f"Bad types for ewise infix: {type(left).__name__}, {type(right).__name__}")
+ raise TypeError( # pragma: no cover (sanity)
+ f"Bad types for ewise infix: {type(left).__name__}, {type(right).__name__}"
+ )
def _matmul_infix_expr(left, right, *, within):
@@ -446,55 +535,55 @@ def _matmul_infix_expr(left, right, *, within):
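+    # Check conformability eagerly so infix `@` raises right away:
+    # vxm needs left.size == right.nrows; inner needs equal sizes;
+    # mxv needs left.ncols == right.size; mxm needs left.ncols == right.nrows.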
if left_type is Vector:
if right_type is Matrix or right_type is TransposedMatrix:
- method = "vxm"
- elif right_type is Vector:
- method = "inner"
- else:
- right = left._expect_type(
- right,
- (Matrix, TransposedMatrix),
- within=within,
- argname="right",
- )
- elif left_type is Matrix or left_type is TransposedMatrix:
+ if left._size != right._nrows:
+ _mismatched(left, right, "vxm", any_pair[BOOL])
+ return VectorMatMulExpr(left, right, method_name="vxm", size=right._ncols)
if right_type is Vector:
- method = "mxv"
- elif right_type is Matrix or right_type is TransposedMatrix:
- method = "mxm"
- else:
- right = left._expect_type(
- right,
- (Vector, Matrix, TransposedMatrix),
- within=within,
- argname="right",
- )
- elif right_type is Vector:
- left = right._expect_type(
+ if left._size != right._size:
+ _mismatched(left, right, "inner", any_pair[BOOL])
+ return ScalarMatMulExpr(left, right)
+ left._expect_type(
+ right,
+ (Matrix, TransposedMatrix, Vector),
+ within=within,
+ argname="right",
+ )
+ if left_type is Matrix or left_type is TransposedMatrix:
+ if right_type is Vector:
+ if left._ncols != right._size:
+ _mismatched(left, right, "mxv", any_pair[BOOL])
+ return VectorMatMulExpr(left, right, method_name="mxv", size=left._nrows)
+ if right_type is Matrix or right_type is TransposedMatrix:
+ if left._ncols != right._nrows:
+ _mismatched(left, right, "mxm", any_pair[BOOL])
+ return MatrixMatMulExpr(left, right, nrows=left._nrows, ncols=right._ncols)
+ left._expect_type(
+ right,
+ (Vector, Matrix, TransposedMatrix),
+ within=within,
+ argname="right",
+ )
+ if right_type is Vector:
+ right._expect_type(
left,
(Matrix, TransposedMatrix),
within=within,
argname="left",
)
- elif right_type is Matrix or right_type is TransposedMatrix:
- left = right._expect_type(
+ if right_type is Matrix or right_type is TransposedMatrix:
+ right._expect_type(
left,
(Vector, Matrix, TransposedMatrix),
within=within,
argname="left",
)
- else: # pragma: no cover (sanity)
- raise TypeError(
- f"Bad types for matmul infix: {type(left).__name__}, {type(right).__name__}"
- )
+ raise TypeError( # pragma: no cover (sanity)
+ f"Bad types for matmul infix: {type(left).__name__}, {type(right).__name__}"
+ )
- # Create dummy expression to check compatibility of dimensions, etc.
- expr = getattr(left, method)(right, any_pair[bool])
- if expr.output_type is Vector:
- return VectorMatMulExpr(left, right, method_name=method, size=expr._size)
- if expr.output_type is Matrix:
- return MatrixMatMulExpr(left, right, nrows=expr._nrows, ncols=expr._ncols)
- return ScalarMatMulExpr(left, right)
+_ewise_add_expr_types = (MatrixEwiseAddExpr, VectorEwiseAddExpr, ScalarEwiseAddExpr)
+_ewise_mult_expr_types = (MatrixEwiseMultExpr, VectorEwiseMultExpr, ScalarEwiseMultExpr)
# Import infixmethods, which has side effects
from . import infixmethods # noqa: E402, F401 isort:skip
diff --git a/graphblas/core/mask.py b/graphblas/core/mask.py
index 9ad209095..3bda2188a 100644
--- a/graphblas/core/mask.py
+++ b/graphblas/core/mask.py
@@ -35,7 +35,7 @@ def new(self, dtype=None, *, complement=False, mask=None, name=None, **opts):
"""Return a new object with True values determined by the mask(s).
By default, the result is True wherever the mask(s) would have been applied,
- and empty otherwise. If `complement` is True, then these are switched:
+ and empty otherwise. If ``complement`` is True, then these are switched:
the result is empty where the mask(s) would have been applied, and True otherwise.
In other words, these are equivalent if complement is False (and mask keyword is None):
@@ -48,14 +48,14 @@ def new(self, dtype=None, *, complement=False, mask=None, name=None, **opts):
>>> C(self) << expr
>>> C(~result.S) << expr # equivalent when complement is True
- This can also efficiently merge two masks by using the `mask=` argument.
+ This can also efficiently merge two masks by using the ``mask=`` argument.
This is equivalent to the following (but uses more efficient recipes):
>>> val = Matrix(...)
>>> val(self) << True
>>> val(mask, replace=True) << val
- If `complement=` argument is True, then the *complement* will be returned.
+        If the ``complement=`` argument is True, then the *complement* will be returned.
This is equivalent to the following (but uses more efficient recipes):
>>> val = Matrix(...)
@@ -83,7 +83,7 @@ def new(self, dtype=None, *, complement=False, mask=None, name=None, **opts):
def __and__(self, other, **opts):
"""Return the intersection of two masks as a new mask.
- `new_mask = mask1 & mask2` is equivalent to the following:
+ ``new_mask = mask1 & mask2`` is equivalent to the following:
>>> val = Matrix(bool, nrows, ncols)
>>> val(mask1) << True
@@ -109,7 +109,7 @@ def __and__(self, other, **opts):
def __or__(self, other, **opts):
"""Return the union of two masks as a new mask.
- `new_mask = mask1 | mask2` is equivalent to the following:
+ ``new_mask = mask1 | mask2`` is equivalent to the following:
>>> val = Matrix(bool, nrows, ncols)
>>> val(mask1) << True
diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py
index 0183893fd..bf20cc953 100644
--- a/graphblas/core/matrix.py
+++ b/graphblas/core/matrix.py
@@ -1,5 +1,4 @@
import itertools
-import warnings
from collections.abc import Sequence
import numpy as np
@@ -10,9 +9,16 @@
from . import _supports_udfs, automethods, ffi, lib, utils
from .base import BaseExpression, BaseType, _check_mask, call
from .descriptor import lookup as descriptor_lookup
-from .expr import _ALL_INDICES, AmbiguousAssignOrExtract, IndexerResolver, Updater
+from .expr import _ALL_INDICES, AmbiguousAssignOrExtract, IndexerResolver, InfixExprBase, Updater
from .mask import Mask, StructuralMask, ValueMask
-from .operator import UNKNOWN_OPCLASS, find_opclass, get_semiring, get_typed_op, op_from_string
+from .operator import (
+ UNKNOWN_OPCLASS,
+ _get_typed_op_from_exprs,
+ find_opclass,
+ get_semiring,
+ get_typed_op,
+ op_from_string,
+)
from .scalar import (
_COMPLETE,
_MATERIALIZE,
@@ -28,6 +34,7 @@
class_property,
get_order,
ints_to_numpy_buffer,
+ maybe_integral,
normalize_values,
output_type,
values_to_numpy_buffer,
@@ -66,13 +73,13 @@ def _m_mult_v(updater, left, right, op):
updater << left.mxm(right.diag(name="M_temp"), get_semiring(monoid.any, op))
-def _m_union_m(updater, left, right, left_default, right_default, op, dtype):
+def _m_union_m(updater, left, right, left_default, right_default, op):
mask = updater.kwargs.get("mask")
opts = updater.opts
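+    # Recipe: expand each operand to the union pattern (fill positions present
+    # only in the other operand with this side's default), then apply `op`
+    # elementwise. The temporaries use the operator's input dtypes.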
- new_left = left.dup(dtype, clear=True)
+ new_left = left.dup(op.type, clear=True)
new_left(mask=mask, **opts) << binary.second(right, left_default)
new_left(mask=mask, **opts) << binary.first(left | new_left)
- new_right = right.dup(dtype, clear=True)
+ new_right = right.dup(op.type2, clear=True)
new_right(mask=mask, **opts) << binary.second(left, right_default)
new_right(mask=mask, **opts) << binary.first(right | new_right)
updater << op(new_left & new_right)
@@ -91,6 +98,72 @@ def _reposition(updater, indices, chunk):
updater[indices] = chunk
+def _power(updater, A, n, op):
+ opts = updater.opts
+ if n == 0:
+ v = Vector.from_scalar(op.binaryop.monoid.identity, A._nrows, A.dtype, name="v_diag")
+ updater << v.diag(name="M_diag")
+ return
+ if n == 1:
+ updater << A
+ return
+ # Use repeated squaring: compute A^2, A^4, A^8, etc., and combine terms as needed.
+ # See `numpy.linalg.matrix_power` for a simpler implementation to understand how this works.
+ # We reuse `result` and `square` outputs, and use `square_expr` so masks can be applied.
+ result = square = square_expr = None
+ n, bit = divmod(n, 2)
+ while True:
+ if bit != 0:
+ # Need to multiply `square_expr` or `A` into the result
+ if square_expr is not None:
+ # Need to evaluate `square_expr`; either into final result, or into `square`
+ if n == 0 and result is None:
+ # Handle `updater << A @ A` without an intermediate value
+ updater << square_expr
+ return
+ if square is None:
+ # Create `square = A @ A`
+ square = square_expr.new(name="Squares", **opts)
+ else:
+ # Compute `square << square @ square`
+ square(**opts) << square_expr
+ square_expr = None
+ if result is None:
+ # First time needing the intermediate result!
+ if square is None:
+ # Use `A` if possible to avoid unnecessary copying
+ # We will detect and handle `result is A` below
+ result = A
+ else:
+ # Copy square as intermediate result
+ result = square.dup(name="Power", **opts)
+ elif n == 0:
+ # All done! No more terms to compute
+ updater << op(result @ square)
+ return
+ elif result is A:
+ # Now we need to create a new matrix for the intermediate result
+ result = op(result @ square).new(name="Power", **opts)
+ else:
+ # Main branch: multiply `square` into `result`
+ result(**opts) << op(result @ square)
+ n, bit = divmod(n, 2)
+ if square_expr is not None:
+ # We need to perform another squaring, so evaluate current `square_expr` first
+ if square is None:
+ # Create `square`
+ square = square_expr.new(name="Squares", **opts)
+ else:
+ # Compute `square`
+ square << square_expr
+ if square is None:
+ # First iteration! Create expression for first square
+ square_expr = op(A @ A)
+ else:
+ # Expression for repeated squaring
+ square_expr = op(square @ square)
+
+
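+# A minimal pure-Python sketch of the exponentiation-by-squaring idea used by
+# `_power` above (illustrative only, for n >= 1; `matmul` and `matrix_power`
+# are hypothetical names). `_power` additionally reuses output buffers,
+# handles masks and descriptors, and avoids copying while the result is `A`:
+#
+#     def matrix_power(A, n, matmul):
+#         result, square = None, A
+#         while n:
+#             n, bit = divmod(n, 2)
+#             if bit:  # this bit of n contributes a factor
+#                 result = square if result is None else matmul(result, square)
+#             if n:  # more bits remain, so square again
+#                 square = matmul(square, square)
+#         return result
+
+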
class Matrix(BaseType):
"""Create a new GraphBLAS Sparse Matrix.
@@ -104,12 +177,14 @@ class Matrix(BaseType):
Number of columns.
name : str, optional
Name to give the Matrix. This will be displayed in the ``__repr__``.
+
"""
__slots__ = "_nrows", "_ncols", "_parent", "ss"
ndim = 2
_is_transposed = False
_name_counter = itertools.count()
+ __networkx_backend__ = "graphblas"
__networkx_plugin__ = "graphblas"
def __new__(cls, dtype=FP64, nrows=0, ncols=0, *, name=None):
@@ -155,8 +230,6 @@ def _as_vector(self, *, name=None):
This is SuiteSparse-specific and may change in the future.
This does not copy the matrix.
"""
- from .vector import Vector
-
if self._ncols != 1:
raise ValueError(
f"Matrix must have a single column (not {self._ncols}) to be cast to a Vector"
@@ -225,6 +298,7 @@ def __delitem__(self, keys, **opts):
Examples
--------
>>> del M[1, 5]
+
"""
del Updater(self, opts=opts)[keys]
@@ -239,6 +313,7 @@ def __getitem__(self, keys):
.. code-block:: python
subM = M[[1, 3, 5], :].new()
+
"""
resolved_indexes = IndexerResolver(self, keys)
shape = resolved_indexes.shape
@@ -260,6 +335,7 @@ def __setitem__(self, keys, expr, **opts):
.. code-block:: python
M[0, 0:3] = 17
+
"""
Updater(self, opts=opts)[keys] = expr
@@ -271,6 +347,7 @@ def __contains__(self, index):
.. code-block:: python
(10, 15) in M
+
"""
extractor = self[index]
if not extractor._is_scalar:
@@ -284,7 +361,7 @@ def __contains__(self, index):
def __iter__(self):
"""Iterate over (row, col) indices which are present in the matrix."""
rows, columns, _ = self.to_coo(values=False)
- return zip(rows.flat, columns.flat)
+ return zip(rows.flat, columns.flat, strict=True)
def __sizeof__(self):
if backend == "suitesparse":
@@ -310,6 +387,7 @@ def isequal(self, other, *, check_dtype=False, **opts):
See Also
--------
:meth:`isclose` : For equality check of floating point dtypes
+
"""
other = self._expect_type(
other, (Matrix, TransposedMatrix), within="isequal", argname="other"
@@ -355,7 +433,8 @@ def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False, **opts
Returns
-------
bool
- Whether all values of the Matrix are close to the values in `other`.
+ Whether all values of the Matrix are close to the values in ``other``.
+
"""
other = self._expect_type(
other, (Matrix, TransposedMatrix), within="isclose", argname="other"
@@ -443,42 +522,6 @@ def resize(self, nrows, ncols):
self._nrows = nrows.value
self._ncols = ncols.value
- def to_values(self, dtype=None, *, rows=True, columns=True, values=True, sort=True):
- """Extract the indices and values as a 3-tuple of numpy arrays
- corresponding to the COO format of the Matrix.
-
- .. deprecated:: 2022.11.0
- `Matrix.to_values` will be removed in a future release.
- Use `Matrix.to_coo` instead. Will be removed in version 2023.9.0 or later
-
- Parameters
- ----------
- dtype :
- Requested dtype for the output values array.
- rows : bool, default=True
- Whether to return rows; will return `None` for rows if `False`
- columns :bool, default=True
- Whether to return columns; will return `None` for columns if `False`
- values : bool, default=True
- Whether to return values; will return `None` for values if `False`
- sort : bool, default=True
- Whether to require sorted indices.
- If internally stored rowwise, the sorting will be first by rows, then by column.
- If internally stored columnwise, the sorting will be first by column, then by row.
-
- Returns
- -------
- np.ndarray[dtype=uint64] : Rows
- np.ndarray[dtype=uint64] : Columns
- np.ndarray : Values
- """
- warnings.warn(
- "`Matrix.to_values(...)` is deprecated; please use `Matrix.to_coo(...)` instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return self.to_coo(dtype, rows=rows, columns=columns, values=values, sort=sort)
-
def to_coo(self, dtype=None, *, rows=True, columns=True, values=True, sort=True):
"""Extract the indices and values as a 3-tuple of numpy arrays
corresponding to the COO format of the Matrix.
@@ -488,11 +531,11 @@ def to_coo(self, dtype=None, *, rows=True, columns=True, values=True, sort=True)
dtype :
Requested dtype for the output values array.
rows : bool, default=True
- Whether to return rows; will return `None` for rows if `False`
+ Whether to return rows; will return ``None`` for rows if ``False``
         columns : bool, default=True
- Whether to return columns; will return `None` for columns if `False`
+ Whether to return columns; will return ``None`` for columns if ``False``
values : bool, default=True
- Whether to return values; will return `None` for values if `False`
+ Whether to return values; will return ``None`` for values if ``False``
sort : bool, default=True
Whether to require sorted indices.
If internally stored rowwise, the sorting will be first by rows, then by column.
@@ -509,6 +552,7 @@ def to_coo(self, dtype=None, *, rows=True, columns=True, values=True, sort=True)
np.ndarray[dtype=uint64] : Rows
np.ndarray[dtype=uint64] : Columns
np.ndarray : Values
+
"""
if sort and backend == "suitesparse":
self.wait() # sort in SS
@@ -559,7 +603,7 @@ def to_edgelist(self, dtype=None, *, values=True, sort=True):
dtype :
Requested dtype for the output values array.
values : bool, default=True
- Whether to return values; will return `None` for values if `False`
+ Whether to return values; will return ``None`` for values if ``False``
sort : bool, default=True
Whether to require sorted indices.
If internally stored rowwise, the sorting will be first by rows, then by column.
@@ -575,6 +619,7 @@ def to_edgelist(self, dtype=None, *, values=True, sort=True):
-------
np.ndarray[dtype=uint64] : Edgelist
np.ndarray : Values
+
"""
rows, columns, values = self.to_coo(dtype, values=values, sort=sort)
return (np.column_stack([rows, columns]), values)
@@ -585,7 +630,7 @@ def build(self, rows, columns, values, *, dup_op=None, clear=False, nrows=None,
The typical use case is to create a new Matrix and insert values
at the same time using :meth:`from_coo`.
- All the arguments are used identically in :meth:`from_coo`, except for `clear`, which
+ All the arguments are used identically in :meth:`from_coo`, except for ``clear``, which
indicates whether to clear the Matrix prior to adding the new values.
"""
# TODO: accept `dtype` keyword to match the dtype of `values`?
@@ -655,6 +700,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
Returns
-------
Matrix
+
"""
if dtype is not None or mask is not None or clear:
if dtype is None:
@@ -665,7 +711,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
else:
if opts:
# Ignore opts for now
- descriptor_lookup(**opts)
+ desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context)
new_mat = ffi_new("GrB_Matrix*")
rv = Matrix._from_obj(new_mat, self.dtype, self._nrows, self._ncols, name=name)
call("GrB_Matrix_dup", [_Pointer(rv), self])
@@ -686,6 +732,7 @@ def diag(self, k=0, dtype=None, *, name=None, **opts):
Returns
-------
:class:`~graphblas.Vector`
+
"""
if backend == "suitesparse":
from ..ss._core import diag
@@ -729,6 +776,7 @@ def wait(self, how="materialize"):
Use wait to force completion of the Matrix.
Has no effect in `blocking mode <../user_guide/init.html#graphblas-modes>`__.
+
"""
how = how.lower()
if how == "materialize":
@@ -755,6 +803,7 @@ def get(self, row, col, default=None):
Returns
-------
Python scalar
+
"""
expr = self[row, col]
if expr._is_scalar:
@@ -765,61 +814,6 @@ def get(self, row, col, default=None):
"Indices should get a single element, which will be extracted as a Python scalar."
)
- @classmethod
- def from_values(
- cls,
- rows,
- columns,
- values,
- dtype=None,
- *,
- nrows=None,
- ncols=None,
- dup_op=None,
- name=None,
- ):
- """Create a new Matrix from row and column indices and values.
-
- .. deprecated:: 2022.11.0
- `Matrix.from_values` will be removed in a future release.
- Use `Matrix.from_coo` instead. Will be removed in version 2023.9.0 or later
-
- Parameters
- ----------
- rows : list or np.ndarray
- Row indices.
- columns : list or np.ndarray
- Column indices.
- values : list or np.ndarray or scalar
- List of values. If a scalar is provided, all values will be set to this single value.
- dtype :
- Data type of the Matrix. If not provided, the values will be inspected
- to choose an appropriate dtype.
- nrows : int, optional
- Number of rows in the Matrix. If not provided, ``nrows`` is computed
- from the maximum row index found in ``rows``.
- ncols : int, optional
- Number of columns in the Matrix. If not provided, ``ncols`` is computed
- from the maximum column index found in ``columns``.
- dup_op : :class:`~graphblas.core.operator.BinaryOp`, optional
- Function used to combine values if duplicate indices are found.
- Leaving ``dup_op=None`` will raise an error if duplicates are found.
- name : str, optional
- Name to give the Matrix.
-
- Returns
- -------
- Matrix
- """
- warnings.warn(
- "`Matrix.from_values(...)` is deprecated; please use `Matrix.from_coo(...)` instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return cls.from_coo(
- rows, columns, values, dtype, nrows=nrows, ncols=ncols, dup_op=dup_op, name=name
- )
-
@classmethod
def from_coo(
cls,
@@ -867,6 +861,7 @@ def from_coo(
Returns
-------
Matrix
+
"""
rows = ints_to_numpy_buffer(rows, np.uint64, name="row indices")
columns = ints_to_numpy_buffer(columns, np.uint64, name="column indices")
@@ -946,6 +941,7 @@ def from_edgelist(
Returns
-------
Matrix
+
"""
edgelist_values = None
if isinstance(edgelist, np.ndarray):
@@ -966,7 +962,7 @@ def from_edgelist(
rows = edgelist[:, 0]
cols = edgelist[:, 1]
else:
- unzipped = list(zip(*edgelist))
+ unzipped = list(zip(*edgelist, strict=True))
if len(unzipped) == 2:
rows, cols = unzipped
elif len(unzipped) == 3:
@@ -1086,7 +1082,7 @@ def from_csr(
Parameters
----------
indptr : list or np.ndarray
- Pointers for each row into col_indices and values; `indptr.size == nrows + 1`.
+ Pointers for each row into col_indices and values; ``indptr.size == nrows + 1``.
col_indices : list or np.ndarray
Column indices.
values : list or np.ndarray or scalar, default 1.0
@@ -1115,6 +1111,7 @@ def from_csr(
to_csr
Matrix.ss.import_csr
io.from_scipy_sparse
+
"""
return cls._from_csx(_CSR_FORMAT, indptr, col_indices, values, dtype, ncols, nrows, name)
@@ -1133,7 +1130,7 @@ def from_csc(
Parameters
----------
indptr : list or np.ndarray
- Pointers for each column into row_indices and values; `indptr.size == ncols + 1`.
+ Pointers for each column into row_indices and values; ``indptr.size == ncols + 1``.
         row_indices : list or np.ndarray
             Row indices.
values : list or np.ndarray or scalar, default 1.0
@@ -1162,6 +1159,7 @@ def from_csc(
to_csc
Matrix.ss.import_csc
io.from_scipy_sparse
+
"""
return cls._from_csx(_CSC_FORMAT, indptr, row_indices, values, dtype, nrows, ncols, name)
@@ -1222,6 +1220,7 @@ def from_dcsr(
to_dcsr
Matrix.ss.import_hypercsr
io.from_scipy_sparse
+
"""
if backend == "suitesparse":
return cls.ss.import_hypercsr(
@@ -1306,6 +1305,7 @@ def from_dcsc(
to_dcsc
Matrix.ss.import_hypercsc
io.from_scipy_sparse
+
"""
if backend == "suitesparse":
return cls.ss.import_hypercsc(
@@ -1367,6 +1367,7 @@ def from_scalar(cls, value, nrows, ncols, dtype=None, *, name=None, **opts):
Returns
-------
Matrix
+
"""
if type(value) is not Scalar:
try:
@@ -1420,6 +1421,7 @@ def from_dense(cls, values, missing_value=None, *, dtype=None, name=None, **opts
Returns
-------
Matrix
+
"""
values, dtype = values_to_numpy_buffer(values, dtype, subarray_after=2)
if values.ndim == 0:
@@ -1479,6 +1481,7 @@ def to_dense(self, fill_value=None, dtype=None, **opts):
Returns
-------
np.ndarray
+
"""
max_nvals = self._nrows * self._ncols
if fill_value is None or self._nvals == max_nvals:
@@ -1554,6 +1557,7 @@ def from_dicts(
Returns
-------
Matrix
+
"""
order = get_order(order)
if isinstance(nested_dicts, Sequence):
@@ -1663,6 +1667,7 @@ def to_csr(self, dtype=None, *, sort=True):
from_csr
Matrix.ss.export
io.to_scipy_sparse
+
"""
if backend == "suitesparse":
info = self.ss.export("csr", sort=sort)
@@ -1694,6 +1699,7 @@ def to_csc(self, dtype=None, *, sort=True):
from_csc
Matrix.ss.export
io.to_scipy_sparse
+
"""
if backend == "suitesparse":
info = self.ss.export("csc", sort=sort)
@@ -1728,6 +1734,7 @@ def to_dcsr(self, dtype=None, *, sort=True):
from_dcsc
Matrix.ss.export
io.to_scipy_sparse
+
"""
if backend == "suitesparse":
info = self.ss.export("hypercsr", sort=sort)
@@ -1770,6 +1777,7 @@ def to_dcsc(self, dtype=None, *, sort=True):
from_dcsc
Matrix.ss.export
io.to_scipy_sparse
+
"""
if backend == "suitesparse":
info = self.ss.export("hypercsc", sort=sort)
@@ -1807,6 +1815,7 @@ def to_dicts(self, order="rowwise"):
Returns
-------
dict
+
"""
order = get_order(order)
if order == "rowwise":
@@ -1818,10 +1827,11 @@ def to_dicts(self, order="rowwise"):
cols = cols.tolist()
values = values.tolist()
return {
- row: dict(zip(cols[start:stop], values[start:stop]))
+ row: dict(zip(cols[start:stop], values[start:stop], strict=True))
for row, (start, stop) in zip(
compressed_rows.tolist(),
np.lib.stride_tricks.sliding_window_view(indptr, 2).tolist(),
+ strict=True,
)
}
# Alternative
@@ -1876,18 +1886,41 @@ def ewise_add(self, other, op=monoid.plus):
# Functional syntax
C << monoid.max(A | B)
+
"""
+ return self._ewise_add(other, op)
+
+ def _ewise_add(self, other, op=monoid.plus, is_infix=False):
method_name = "ewise_add"
- other = self._expect_type(
- other,
- (Matrix, TransposedMatrix, Vector),
- within=method_name,
- argname="other",
- op=op,
- )
- op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
- # Per the spec, op may be a semiring, but this is weird, so don't.
- self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
+ if is_infix:
+ from .infix import MatrixEwiseAddExpr, VectorEwiseAddExpr
+
+ other = self._expect_type(
+ other,
+ (Matrix, TransposedMatrix, Vector, MatrixEwiseAddExpr, VectorEwiseAddExpr),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ op = _get_typed_op_from_exprs(op, self, other, kind="binary")
+ # Per the spec, op may be a semiring, but this is weird, so don't.
+ self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
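+            # Infix operands (e.g. from `plus(x | y | z)`) arrive unevaluated;
+            # materialize them with the typed op before dispatching below.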
+ if isinstance(self, MatrixEwiseAddExpr):
+ self = op(self).new()
+ if isinstance(other, InfixExprBase):
+ other = op(other).new()
+ else:
+ other = self._expect_type(
+ other,
+ (Matrix, TransposedMatrix, Vector),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
+ # Per the spec, op may be a semiring, but this is weird, so don't.
+ self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
+
if other.ndim == 1:
# Broadcast rowwise from the right
if self._ncols != other._size:
@@ -1944,14 +1977,41 @@ def ewise_mult(self, other, op=binary.times):
# Functional syntax
C << binary.gt(A & B)
+
"""
+ return self._ewise_mult(other, op)
+
+ def _ewise_mult(self, other, op=binary.times, is_infix=False):
method_name = "ewise_mult"
- other = self._expect_type(
- other, (Matrix, TransposedMatrix, Vector), within=method_name, argname="other", op=op
- )
- op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
- # Per the spec, op may be a semiring, but this is weird, so don't.
- self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
+ if is_infix:
+ from .infix import MatrixEwiseMultExpr, VectorEwiseMultExpr
+
+ other = self._expect_type(
+ other,
+ (Matrix, TransposedMatrix, Vector, MatrixEwiseMultExpr, VectorEwiseMultExpr),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ op = _get_typed_op_from_exprs(op, self, other, kind="binary")
+ # Per the spec, op may be a semiring, but this is weird, so don't.
+ self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
+ if isinstance(self, MatrixEwiseMultExpr):
+ self = op(self).new()
+ if isinstance(other, InfixExprBase):
+ other = op(other).new()
+ else:
+ other = self._expect_type(
+ other,
+ (Matrix, TransposedMatrix, Vector),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
+ # Per the spec, op may be a semiring, but this is weird, so don't.
+ self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
+
if other.ndim == 1:
# Broadcast rowwise from the right
if self._ncols != other._size:
@@ -2012,12 +2072,35 @@ def ewise_union(self, other, op, left_default, right_default):
# Functional syntax
C << binary.div(A | B, left_default=1, right_default=1)
+
"""
+ return self._ewise_union(other, op, left_default, right_default)
+
+ def _ewise_union(self, other, op, left_default, right_default, is_infix=False):
method_name = "ewise_union"
- other = self._expect_type(
- other, (Matrix, TransposedMatrix, Vector), within=method_name, argname="other", op=op
- )
- dtype = self.dtype if self.dtype._is_udt else None
+ if is_infix:
+ from .infix import MatrixEwiseAddExpr, VectorEwiseAddExpr
+
+ other = self._expect_type(
+ other,
+ (Matrix, TransposedMatrix, Vector, MatrixEwiseAddExpr, VectorEwiseAddExpr),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ temp_op = _get_typed_op_from_exprs(op, self, other, kind="binary")
+ else:
+ other = self._expect_type(
+ other,
+ (Matrix, TransposedMatrix, Vector),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ temp_op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
+
+ left_dtype = temp_op.type
+ dtype = left_dtype if left_dtype._is_udt else None
if type(left_default) is not Scalar:
try:
left = Scalar.from_value(
@@ -2034,6 +2117,8 @@ def ewise_union(self, other, op, left_default, right_default):
)
else:
left = _as_scalar(left_default, dtype, is_cscalar=False) # pragma: is_grbscalar
+ right_dtype = temp_op.type2
+ dtype = right_dtype if right_dtype._is_udt else None
if type(right_default) is not Scalar:
try:
right = Scalar.from_value(
@@ -2050,12 +2135,29 @@ def ewise_union(self, other, op, left_default, right_default):
)
else:
right = _as_scalar(right_default, dtype, is_cscalar=False) # pragma: is_grbscalar
- scalar_dtype = unify(left.dtype, right.dtype)
- nonscalar_dtype = unify(self.dtype, other.dtype)
- op = get_typed_op(op, scalar_dtype, nonscalar_dtype, is_left_scalar=True, kind="binary")
+
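+        # Type the operator from both pairings (self with the right default, and
+        # the left default with other); if the two typed ops disagree, unify the
+        # input dtypes and re-type the operator once.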
+ if is_infix:
+ op1 = _get_typed_op_from_exprs(op, self, right, kind="binary")
+ op2 = _get_typed_op_from_exprs(op, left, other, kind="binary")
+ else:
+ op1 = get_typed_op(op, self.dtype, right.dtype, kind="binary")
+ op2 = get_typed_op(op, left.dtype, other.dtype, kind="binary")
+ if op1 is not op2:
+ left_dtype = unify(op1.type, op2.type, is_right_scalar=True)
+ right_dtype = unify(op1.type2, op2.type2, is_left_scalar=True)
+ op = get_typed_op(op, left_dtype, right_dtype, kind="binary")
+ else:
+ op = op1
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
if op.opclass == "Monoid":
op = op.binaryop
+
+ if is_infix:
+ if isinstance(self, MatrixEwiseAddExpr):
+ self = op(self, left_default=left, right_default=right).new()
+ if isinstance(other, InfixExprBase):
+ other = op(other, left_default=left, right_default=right).new()
+
expr_repr = "{0.name}.{method_name}({2.name}, {op}, {1._expr_name}, {3._expr_name})"
if other.ndim == 1:
# Broadcast rowwise from the right
@@ -2085,11 +2187,10 @@ def ewise_union(self, other, op, left_default, right_default):
expr_repr=expr_repr,
)
else:
- dtype = unify(scalar_dtype, nonscalar_dtype, is_left_scalar=True)
expr = MatrixExpression(
method_name,
None,
- [self, left, other, right, _m_union_m, (self, other, left, right, op, dtype)],
+ [self, left, other, right, _m_union_m, (self, other, left, right, op)],
expr_repr=expr_repr,
nrows=self._nrows,
ncols=self._ncols,
@@ -2125,11 +2226,29 @@ def mxv(self, other, op=semiring.plus_times):
# Functional syntax
C << semiring.min_plus(A @ v)
+
"""
+ return self._mxv(other, op)
+
+ def _mxv(self, other, op=semiring.plus_times, is_infix=False):
method_name = "mxv"
- other = self._expect_type(other, Vector, within=method_name, argname="other", op=op)
- op = get_typed_op(op, self.dtype, other.dtype, kind="semiring")
- self._expect_op(op, "Semiring", within=method_name, argname="op")
+ if is_infix:
+ from .infix import MatrixMatMulExpr, VectorMatMulExpr
+
+ other = self._expect_type(
+ other, (Vector, VectorMatMulExpr), within=method_name, argname="other", op=op
+ )
+ op = _get_typed_op_from_exprs(op, self, other, kind="semiring")
+ self._expect_op(op, "Semiring", within=method_name, argname="op")
+ if isinstance(self, MatrixMatMulExpr):
+ self = op(self).new()
+ if isinstance(other, VectorMatMulExpr):
+ other = op(other).new()
+ else:
+ other = self._expect_type(other, Vector, within=method_name, argname="other", op=op)
+ op = get_typed_op(op, self.dtype, other.dtype, kind="semiring")
+ self._expect_op(op, "Semiring", within=method_name, argname="op")
+
expr = VectorExpression(
method_name,
"GrB_mxv",
@@ -2168,13 +2287,35 @@ def mxm(self, other, op=semiring.plus_times):
# Functional syntax
C << semiring.min_plus(A @ B)
+
"""
+ return self._mxm(other, op)
+
+ def _mxm(self, other, op=semiring.plus_times, is_infix=False):
method_name = "mxm"
- other = self._expect_type(
- other, (Matrix, TransposedMatrix), within=method_name, argname="other", op=op
- )
- op = get_typed_op(op, self.dtype, other.dtype, kind="semiring")
- self._expect_op(op, "Semiring", within=method_name, argname="op")
+ if is_infix:
+ from .infix import MatrixMatMulExpr
+
+ other = self._expect_type(
+ other,
+ (Matrix, TransposedMatrix, MatrixMatMulExpr),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ op = _get_typed_op_from_exprs(op, self, other, kind="semiring")
+ self._expect_op(op, "Semiring", within=method_name, argname="op")
+ if isinstance(self, MatrixMatMulExpr):
+ self = op(self).new()
+ if isinstance(other, MatrixMatMulExpr):
+ other = op(other).new()
+ else:
+ other = self._expect_type(
+ other, (Matrix, TransposedMatrix), within=method_name, argname="other", op=op
+ )
+ op = get_typed_op(op, self.dtype, other.dtype, kind="semiring")
+ self._expect_op(op, "Semiring", within=method_name, argname="op")
+
expr = MatrixExpression(
method_name,
"GrB_mxm",
@@ -2211,6 +2352,7 @@ def kronecker(self, other, op=binary.times):
.. code-block:: python
C << A.kronecker(B, op=binary.times)
+
"""
method_name = "kronecker"
other = self._expect_type(
@@ -2267,6 +2409,7 @@ def apply(self, op, right=None, *, left=None):
# Functional syntax
C << op.abs(A)
+
"""
method_name = "apply"
extra_message = (
@@ -2415,6 +2558,7 @@ def select(self, op, thunk=None):
# Functional syntax
C << select.value(A >= 1)
+
"""
method_name = "select"
if isinstance(op, str):
@@ -2509,6 +2653,7 @@ def reduce_rowwise(self, op=monoid.plus):
.. code-block:: python
w << A.reduce_rowwise(monoid.plus)
+
"""
method_name = "reduce_rowwise"
op = get_typed_op(op, self.dtype, kind="binary|aggregator")
@@ -2546,6 +2691,7 @@ def reduce_columnwise(self, op=monoid.plus):
.. code-block:: python
w << A.reduce_columnwise(monoid.plus)
+
"""
method_name = "reduce_columnwise"
op = get_typed_op(op, self.dtype, kind="binary|aggregator")
@@ -2564,8 +2710,7 @@ def reduce_columnwise(self, op=monoid.plus):
)
def reduce_scalar(self, op=monoid.plus, *, allow_empty=True):
- """
- Reduce all values in the Matrix into a single value using ``op``.
+ """Reduce all values in the Matrix into a single value using ``op``.
See the `Reduce <../user_guide/operations.html#reduce>`__
section in the User Guide for more details.
@@ -2587,6 +2732,7 @@ def reduce_scalar(self, op=monoid.plus, *, allow_empty=True):
.. code-block:: python
total << A.reduce_scalar(monoid.plus)
+
"""
method_name = "reduce_scalar"
op = get_typed_op(op, self.dtype, kind="binary|aggregator")
@@ -2647,6 +2793,7 @@ def reposition(self, row_offset, column_offset, *, nrows=None, ncols=None):
.. code-block:: python
C = A.reposition(1, 2).new()
+
"""
if nrows is None:
nrows = self._nrows
@@ -2690,6 +2837,185 @@ def reposition(self, row_offset, column_offset, *, nrows=None, ncols=None):
dtype=self.dtype,
)
+ def power(self, n, op=semiring.plus_times):
+ """Raise a square Matrix to the (positive integer) power ``n``.
+
+ Matrix power is computed by repeated matrix squaring and matrix multiplication.
+        For a graph as an adjacency matrix, matrix power with the default ``plus_times``
+        semiring computes the number of walks connecting each pair of nodes.
+        Values in the result can grow very quickly with large matrices and larger ``n``.
+
+ Parameters
+ ----------
+ n : int
+ The exponent must be a nonnegative integer. If n=0, the result will be a diagonal
+ matrix with values equal to the identity of the semiring's binary operator.
+ For example, ``plus_times`` will have diagonal values of 1, which is the
+ identity of ``times``. The binary operator must be associated with a monoid
+ when n=0 so the identity can be determined; otherwise, ValueError is raised.
+ op : :class:`~graphblas.core.operator.Semiring`
+ Semiring used in the computation
+
+ Returns
+ -------
+ MatrixExpression
+
+ Examples
+ --------
+ .. code-block:: python
+
+ C << A.power(4, op=semiring.plus_times)
+
+ # Is equivalent to:
+ tmp = (A @ A).new()
+            C << tmp @ tmp
+
+ # And is more efficient than the naive implementation:
+ C = A.dup()
+ for i in range(1, 4):
+ C << A @ C
+
+ """
+ method_name = "power"
+ if self._nrows != self._ncols:
+ raise DimensionMismatch(f"power only works for square Matrix; shape is {self.shape}")
+ if (N := maybe_integral(n)) is None:
+ raise TypeError(f"n must be a nonnegative integer; got bad type: {type(n)}")
+ if N < 0:
+ raise ValueError(f"n must be a nonnegative integer; got: {N}")
+ op = get_typed_op(op, self.dtype, kind="semiring")
+ self._expect_op(op, "Semiring", within=method_name, argname="op")
+ if N == 0 and op.binaryop.monoid is None:
+ raise ValueError(
+ f"Binary operator of {op} semiring does not have a monoid with an identity. "
+ "When n=0, the result is a diagonal matrix with values equal to the "
+ "identity of the binaryop, so the binaryop must be associated with a monoid."
+ )
+ return MatrixExpression(
+ "power",
+ None,
+ [self, _power, (self, N, op)], # [*expr_args, func, args]
+ expr_repr=f"{{0.name}}.power({N}, op={op})",
+ nrows=self._nrows,
+ ncols=self._ncols,
+ dtype=self.dtype,
+ )
+
+ def setdiag(self, values, k=0, *, mask=None, accum=None, **opts):
+        """Set the k'th diagonal with a Scalar, Vector, or array.
+
+ This is not a built-in GraphBLAS operation. It is implemented as a recipe.
+
+ Parameters
+ ----------
+ values : Vector or list or np.ndarray or scalar
+ New values to assign to the diagonal. The length of Vector and array
+ values must match the size of the diagonal being assigned to.
+ k : int, default=0
+ Which diagonal or off-diagonal to set. For example, set the elements
+ ``A[i, i+k] = values[i]``. The default, k=0, is the main diagonal.
+ mask : Mask, optional
+ Vector or Matrix Mask to control which diagonal elements to set.
+            If it is a Matrix Mask, then only its k'th diagonal is used as the mask.
+ accum : Monoid or BinaryOp, optional
+ Operator to use to combine existing diagonal values and new values.
+
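+        Examples
+        --------
+        .. code-block:: python
+
+            # A sketch of typical usage (here ``v`` is any Vector whose length
+            # matches the diagonal being set)
+            A.setdiag(1)       # set the main diagonal to 1
+            A.setdiag(v, k=1)  # set the first superdiagonal from v
+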
+ """
+ if (K := maybe_integral(k)) is None:
+ raise TypeError(f"k must be an integer; got bad type: {type(k)}")
+ k = K
+ if k < 0:
+ if (size := min(self._nrows + k, self._ncols)) <= 0 and k <= -self._nrows:
+ raise IndexError(
+ f"k={k} is too small; the k'th diagonal is out of range. "
+ f"Valid k for Matrix with shape {self._nrows}x{self._ncols}: "
+ f"{-self._nrows} {'<' if self._nrows else '<='} k "
+ f"{'<' if self._ncols else '<='} {self._ncols}"
+ )
+ elif (size := min(self._ncols - k, self._nrows)) <= 0 and k > 0 and k >= self._ncols:
+ raise IndexError(
+ f"k={k} is too large; the k'th diagonal is out of range. "
+ f"Valid k for Matrix with shape {self._nrows}x{self._ncols}: "
+ f"{-self._nrows} {'<' if self._nrows else '<='} k "
+ f"{'<' if self._ncols else '<='} {self._ncols}"
+ )
+
+ # Convert `values` to Vector if necessary (i.e., it's scalar or array)
+ is_scalar = clear_diag = False
+ if output_type(values) is Vector:
+ v = values
+ clear_diag = accum is None and v._nvals != v._size
+ elif type(values) is Scalar:
+ is_scalar = True
+ else:
+ dtype = self.dtype if self.dtype._is_udt else None
+ try:
+ # Try to make it a Scalar
+ values = Scalar.from_value(values, dtype, is_cscalar=None, name="")
+ is_scalar = True
+ except (TypeError, ValueError):
+ try:
+ # Else try to make it a numpy array
+ values, dtype = values_to_numpy_buffer(values, dtype)
+ except Exception:
+ self._expect_type(
+ values,
+ (Scalar, Vector, np.ndarray),
+ within="setdiag",
+ argname="values",
+ extra_message="Literal scalars also accepted.",
+ )
+ else:
+ v = Vector.from_dense(values, dtype=dtype, **opts)
+
+ if is_scalar:
+ v = Vector.from_scalar(values, size, **opts)
+ elif v._size != size:
+ raise DimensionMismatch(
+ f"Dimensions not compatible for assigning length {v._size} Vector "
+                f"to {k}'th diagonal of Matrix with shape {self._nrows}x{self._ncols}. "
+                f"The Vector should be size {size}."
+ )
+
+ if mask is not None:
+ mask = _check_mask(mask)
+ if mask.parent.ndim == 2:
+ if mask.parent.shape != self.shape:
+ raise DimensionMismatch(
+ "Matrix mask in setdiag is the wrong shape; "
+ f"expected shape {self._nrows}x{self._ncols}, "
+ f"got {mask.parent._nrows}x{mask.parent._ncols}"
+ )
+ if mask.complement:
+ mval = type(mask)(mask.parent.diag(k)).new(**opts)
+ mask = mval.S
+ M = mval.diag()
+ else:
+ M = select.diag(mask.parent, k).new(**opts)
+ elif mask.parent._size != size:
+ raise DimensionMismatch(
+ "Vector mask in setdiag is the wrong length; "
+ f"expected size {size}, got size {mask.parent._size}."
+ )
+ else:
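+            # Vector mask: materialize a complemented mask, then lift the vector
+            # mask to a Matrix mask of the same kind via its diagonal.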
+ if mask.complement:
+ mask = mask.new(**opts).S
+ M = mask.parent.diag()
+ if M.shape != self.shape:
+ M.resize(self._nrows, self._ncols)
+ mask = type(mask)(M)
+
+ if clear_diag:
+ self(mask=mask, **opts) << select.offdiag(self, k)
+
+ Diag = v.diag(k)
+ if Diag.shape != self.shape:
+ Diag.resize(self._nrows, self._ncols)
+ if mask is None:
+ mask = Diag.S
+ self(accum=accum, mask=mask, **opts) << Diag
+
##################################
# Extract and Assign index methods
##################################
@@ -2707,7 +3033,7 @@ def _extract_element(
result = Scalar(dtype, is_cscalar=is_cscalar, name=name)
if opts:
# Ignore opts for now
- descriptor_lookup(**opts)
+ desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context)
if is_cscalar:
dtype_name = "UDT" if dtype._is_udt else dtype.name
if (
@@ -3258,6 +3584,7 @@ class MatrixExpression(BaseExpression):
ndim = 2
output_type = Matrix
_is_transposed = False
+ __networkx_backend__ = "graphblas"
__networkx_plugin__ = "graphblas"
def __init__(
@@ -3358,6 +3685,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
mxv = wrapdoc(Matrix.mxv)(property(automethods.mxv))
name = wrapdoc(Matrix.name)(property(automethods.name)).setter(automethods._set_name)
nvals = wrapdoc(Matrix.nvals)(property(automethods.nvals))
+ power = wrapdoc(Matrix.power)(property(automethods.power))
reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(automethods.reduce_columnwise))
reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(automethods.reduce_rowwise))
reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(automethods.reduce_scalar))
@@ -3375,7 +3703,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
to_dense = wrapdoc(Matrix.to_dense)(property(automethods.to_dense))
to_dicts = wrapdoc(Matrix.to_dicts)(property(automethods.to_dicts))
to_edgelist = wrapdoc(Matrix.to_edgelist)(property(automethods.to_edgelist))
- to_values = wrapdoc(Matrix.to_values)(property(automethods.to_values))
wait = wrapdoc(Matrix.wait)(property(automethods.wait))
# These raise exceptions
__array__ = Matrix.__array__
@@ -3399,6 +3726,7 @@ class MatrixIndexExpr(AmbiguousAssignOrExtract):
ndim = 2
output_type = Matrix
_is_transposed = False
+ __networkx_backend__ = "graphblas"
__networkx_plugin__ = "graphblas"
def __init__(self, parent, resolved_indexes, nrows, ncols):
@@ -3458,6 +3786,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
mxv = wrapdoc(Matrix.mxv)(property(automethods.mxv))
name = wrapdoc(Matrix.name)(property(automethods.name)).setter(automethods._set_name)
nvals = wrapdoc(Matrix.nvals)(property(automethods.nvals))
+ power = wrapdoc(Matrix.power)(property(automethods.power))
reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(automethods.reduce_columnwise))
reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(automethods.reduce_rowwise))
reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(automethods.reduce_scalar))
@@ -3475,7 +3804,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
to_dense = wrapdoc(Matrix.to_dense)(property(automethods.to_dense))
to_dicts = wrapdoc(Matrix.to_dicts)(property(automethods.to_dicts))
to_edgelist = wrapdoc(Matrix.to_edgelist)(property(automethods.to_edgelist))
- to_values = wrapdoc(Matrix.to_values)(property(automethods.to_values))
wait = wrapdoc(Matrix.wait)(property(automethods.wait))
# These raise exceptions
__array__ = Matrix.__array__
@@ -3499,6 +3827,7 @@ class TransposedMatrix:
ndim = 2
_is_scalar = False
_is_transposed = True
+ __networkx_backend__ = "graphblas"
__networkx_plugin__ = "graphblas"
def __init__(self, matrix):
@@ -3550,13 +3879,6 @@ def to_coo(self, dtype=None, *, rows=True, columns=True, values=True, sort=True)
)
return cols, rows, vals
- @wrapdoc(Matrix.to_values)
- def to_values(self, dtype=None, *, rows=True, columns=True, values=True, sort=True):
- rows, cols, vals = self._matrix.to_values(
- dtype, rows=rows, columns=columns, values=values, sort=sort
- )
- return cols, rows, vals
-
@wrapdoc(Matrix.diag)
def diag(self, k=0, dtype=None, *, name=None, **opts):
return self._matrix.diag(-k, dtype, name=name, **opts)
@@ -3619,6 +3941,13 @@ def to_dicts(self, order="rowwise"):
reduce_columnwise = Matrix.reduce_columnwise
reduce_scalar = Matrix.reduce_scalar
reposition = Matrix.reposition
+ power = Matrix.power
+
+ _ewise_add = Matrix._ewise_add
+ _ewise_mult = Matrix._ewise_mult
+ _ewise_union = Matrix._ewise_union
+ _mxv = Matrix._mxv
+ _mxm = Matrix._mxm
# Operator sugar
__or__ = Matrix.__or__
diff --git a/graphblas/core/operator/__init__.py b/graphblas/core/operator/__init__.py
index 509e84a04..d59c835b3 100644
--- a/graphblas/core/operator/__init__.py
+++ b/graphblas/core/operator/__init__.py
@@ -6,6 +6,7 @@
from .semiring import ParameterizedSemiring, Semiring
from .unary import ParameterizedUnaryOp, UnaryOp
from .utils import (
+ _get_typed_op_from_exprs,
aggregator_from_string,
binary_from_string,
get_semiring,
diff --git a/graphblas/core/operator/agg.py b/graphblas/core/operator/agg.py
index 09d644c32..6b463a8a6 100644
--- a/graphblas/core/operator/agg.py
+++ b/graphblas/core/operator/agg.py
@@ -76,9 +76,9 @@ def __init__(
@property
def types(self):
if self._types is None:
- if type(self._semiring) is str:
+ if isinstance(self._semiring, str):
self._semiring = semiring.from_string(self._semiring)
- if type(self._types_orig[0]) is str: # pragma: no branch
+ if isinstance(self._types_orig[0], str): # pragma: no branch
self._types_orig[0] = semiring.from_string(self._types_orig[0])
self._types = _get_types(
self._types_orig, None if self._initval_orig is None else self._initdtype
diff --git a/graphblas/core/operator/base.py b/graphblas/core/operator/base.py
index a40438f14..97b2c9fbd 100644
--- a/graphblas/core/operator/base.py
+++ b/graphblas/core/operator/base.py
@@ -111,7 +111,9 @@ def _call_op(op, left, right=None, thunk=None, **kwargs):
if right is None and thunk is None:
if isinstance(left, InfixExprBase):
# op(A & B), op(A | B), op(A @ B)
- return getattr(left.left, left.method_name)(left.right, op, **kwargs)
+ return getattr(left.left, f"_{left.method_name}")(
+ left.right, op, is_infix=True, **kwargs
+ )
if find_opclass(op)[1] == "Semiring":
raise TypeError(
f"Bad type when calling {op!r}. Got type: {type(left)}.\n"
@@ -249,8 +251,7 @@ def __init__(self, parent, name, type_, return_type, gb_obj, gb_name, dtype2=Non
def __repr__(self):
classname = self.opclass.lower()
- if classname.endswith("op"):
- classname = classname[:-2]
+ classname = classname.removesuffix("op")
dtype2 = "" if self._type2 is None else f", {self._type2.name}"
return f"{classname}.{self.name}[{self.type.name}{dtype2}]"
@@ -336,15 +337,22 @@ def __getitem__(self, type_):
raise KeyError(f"{self.name} does not work with {type_}")
else:
return self._typed_ops[type_]
- if not _supports_udfs:
- raise KeyError(f"{self.name} does not work with {type_}")
         # This is a UDT or is able to operate on UDTs such as `first` and `any`
dtype = lookup_dtype(type_)
return self._compile_udt(dtype, dtype)
- def _add(self, op):
- self._typed_ops[op.type] = op
- self.types[op.type] = op.return_type
+ def _add(self, op, *, is_jit=False):
+ if is_jit:
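+            # JIT-compiled ops are registered per dtype pair alongside the UDT
+            # ops, not in the regular typed-op table.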
+ if hasattr(op, "type2") or hasattr(op, "thunk_type"):
+ dtypes = (op.type, op._type2)
+ else:
+ dtypes = op.type
+ self.types[dtypes] = op.return_type # This is a different use of .types
+ self._udt_types[dtypes] = op.return_type
+ self._udt_ops[dtypes] = op
+ else:
+ self._typed_ops[op.type] = op
+ self.types[op.type] = op.return_type
def __delitem__(self, type_):
type_ = lookup_dtype(type_)
@@ -396,9 +404,10 @@ def _find(cls, funcname):
@classmethod
def _initialize(cls, include_in_ops=True):
- """
+ """Initialize operators for this operator type.
+
include_in_ops determines whether the operators are included in the
- `gb.ops` namespace in addition to the defined module.
+ ``gb.ops`` namespace in addition to the defined module.
"""
if cls._initialized: # pragma: no cover (safety)
return
diff --git a/graphblas/core/operator/binary.py b/graphblas/core/operator/binary.py
index 406405a80..3ee089fe4 100644
--- a/graphblas/core/operator/binary.py
+++ b/graphblas/core/operator/binary.py
@@ -19,12 +19,12 @@
UINT16,
UINT32,
UINT64,
- _sample_values,
_supports_complex,
lookup_dtype,
)
from ...exceptions import UdfParseError, check_status_carg
from .. import _has_numba, _supports_udfs, ffi, lib
+from ..dtypes import _sample_values
from ..expr import InfixExprBase
from .base import (
_SS_OPERATORS,
@@ -94,7 +94,9 @@ def __call__(self, left, right=None, *, left_default=None, right_default=None):
f">>> {self}(x | y, left_default=0, right_default=0)\n\nwhere x and y "
"are Vectors or Matrices, and left_default and right_default are scalars."
)
- return left.left.ewise_union(left.right, self, left_default, right_default)
+ return left.left._ewise_union(
+ left.right, self, left_default, right_default, is_infix=True
+ )
return _call_op(self, left, right)
@property
@@ -200,7 +202,7 @@ def monoid(self):
@property
def commutes_to(self):
- if type(self._commutes_to) is str:
+ if isinstance(self._commutes_to, str):
self._commutes_to = BinaryOp._find(self._commutes_to)
return self._commutes_to
@@ -506,7 +508,7 @@ def binary_wrapper(z, x, y): # pragma: no cover (numba)
type_.gb_obj,
),
"BinaryOp",
- new_binary,
+ new_binary[0],
)
op = TypedUserBinaryOp(new_type_obj, name, type_, ret_type, new_binary[0])
new_type_obj._add(op)
@@ -523,8 +525,8 @@ def _compile_udt(self, dtype, dtype2):
if dtypes in self._udt_types:
return self._udt_ops[dtypes]
- nt = numba.types
- if self.name == "eq" and not self._anonymous:
+ if self.name == "eq" and not self._anonymous and _has_numba:
+ nt = numba.types
# assert dtype.np_type == dtype2.np_type
itemsize = dtype.np_type.itemsize
mask = _udt_mask(dtype.np_type)
@@ -561,7 +563,8 @@ def binary_wrapper(z_ptr, x_ptr, y_ptr): # pragma: no cover (numba)
# z_ptr[0] = True
z_ptr[0] = (x[mask] == y[mask]).all()
- elif self.name == "ne" and not self._anonymous:
+ elif self.name == "ne" and not self._anonymous and _has_numba:
+ nt = numba.types
# assert dtype.np_type == dtype2.np_type
itemsize = dtype.np_type.itemsize
mask = _udt_mask(dtype.np_type)
@@ -597,6 +600,8 @@ def binary_wrapper(z_ptr, x_ptr, y_ptr): # pragma: no cover (numba)
# z_ptr[0] = False
z_ptr[0] = (x[mask] != y[mask]).any()
+ elif self._numba_func is None:
+ raise KeyError(f"{self.name} does not work with {dtypes} types")
else:
numba_func = self._numba_func
sig = (dtype.numba_type, dtype2.numba_type)
@@ -611,7 +616,7 @@ def binary_wrapper(z_ptr, x_ptr, y_ptr): # pragma: no cover (numba)
new_binary, binary_wrapper.cffi, ret_type._carg, dtype._carg, dtype2._carg
),
"BinaryOp",
- new_binary,
+ new_binary[0],
)
op = TypedUserBinaryOp(
self,
@@ -658,6 +663,7 @@ def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=Fals
Returns
-------
BinaryOp or ParameterizedBinaryOp
+
"""
cls._check_supports_udf("register_anonymous")
if parameterized:
@@ -720,6 +726,7 @@ def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=Fal
>>> return x == y or abs(x - y) <= max(rel_tol * max(abs(x), abs(y)), abs_tol)
>>> return inner
>>> gb.binary.register_new("user_isclose", user_isclose, parameterized=True)
+
"""
cls._check_supports_udf("register_new")
module, funcname = cls._remove_nesting(name)
diff --git a/graphblas/core/operator/indexunary.py b/graphblas/core/operator/indexunary.py
index f6637ae6d..6fdacbcc1 100644
--- a/graphblas/core/operator/indexunary.py
+++ b/graphblas/core/operator/indexunary.py
@@ -3,9 +3,10 @@
from types import FunctionType
from ... import _STANDARD_OPERATOR_NAMES, indexunary, select
-from ...dtypes import BOOL, FP64, INT8, INT64, UINT64, _sample_values, lookup_dtype
+from ...dtypes import BOOL, FP64, INT8, INT64, UINT64, lookup_dtype
from ...exceptions import UdfParseError, check_status_carg
from .. import _has_numba, ffi, lib
+from ..dtypes import _sample_values
from .base import OpBase, ParameterizedUdf, TypedOpBase, _call_op, _deserialize_parameterized
if _has_numba:
@@ -24,6 +25,10 @@ def __call__(self, val, thunk=None):
thunk = False # most basic form of 0 when unifying dtypes
return _call_op(self, val, right=thunk)
+ @property
+ def thunk_type(self):
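+        # dtype of the thunk argument; falls back to the op's value dtype when
+        # no second dtype was given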
+ return self.type if self._type2 is None else self._type2
+
class TypedUserIndexUnaryOp(TypedOpBase):
__slots__ = ()
@@ -40,6 +45,7 @@ def orig_func(self):
def _numba_func(self):
return self.parent._numba_func
+ thunk_type = TypedBuiltinIndexUnaryOp.thunk_type
__call__ = TypedBuiltinIndexUnaryOp.__call__
@@ -193,7 +199,7 @@ def indexunary_wrapper(z, x, row, col, y): # pragma: no cover (numba)
type_.gb_obj,
),
"IndexUnaryOp",
- new_indexunary,
+ new_indexunary[0],
)
op = cls._typed_user_class(new_type_obj, name, type_, ret_type, new_indexunary[0])
new_type_obj._add(op)
@@ -209,6 +215,8 @@ def _compile_udt(self, dtype, dtype2):
dtypes = (dtype, dtype2)
if dtypes in self._udt_types:
return self._udt_ops[dtypes]
+ if self._numba_func is None:
+ raise KeyError(f"{self.name} does not work with {dtypes} types")
numba_func = self._numba_func
sig = (dtype.numba_type, UINT64.numba_type, UINT64.numba_type, dtype2.numba_type)
@@ -225,7 +233,7 @@ def _compile_udt(self, dtype, dtype2):
new_indexunary, indexunary_wrapper.cffi, ret_type._carg, dtype._carg, dtype2._carg
),
"IndexUnaryOp",
- new_indexunary,
+ new_indexunary[0],
)
op = TypedUserIndexUnaryOp(
self,
@@ -277,6 +285,7 @@ def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=Fals
Returns
-------
         IndexUnaryOp or ParameterizedIndexUnaryOp
+
"""
cls._check_supports_udf("register_anonymous")
if parameterized:
@@ -332,6 +341,7 @@ def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=Fal
>>> gb.indexunary.register_new("row_mod", lambda x, i, j, thunk: i % max(thunk, 2))
>>> dir(gb.indexunary)
[..., 'row_mod', ...]
+
"""
cls._check_supports_udf("register_new")
module, funcname = cls._remove_nesting(name)
diff --git a/graphblas/core/operator/monoid.py b/graphblas/core/operator/monoid.py
index fc327b4a7..e3f218a90 100644
--- a/graphblas/core/operator/monoid.py
+++ b/graphblas/core/operator/monoid.py
@@ -19,10 +19,9 @@
)
from ...exceptions import check_status_carg
from .. import ffi, lib
-from ..expr import InfixExprBase
from ..utils import libget
-from .base import OpBase, ParameterizedUdf, TypedOpBase, _call_op, _hasop
-from .binary import BinaryOp, ParameterizedBinaryOp
+from .base import OpBase, ParameterizedUdf, TypedOpBase, _hasop
+from .binary import BinaryOp, ParameterizedBinaryOp, TypedBuiltinBinaryOp
ffi_new = ffi.new
@@ -36,25 +35,6 @@ def __init__(self, parent, name, type_, return_type, gb_obj, gb_name):
super().__init__(parent, name, type_, return_type, gb_obj, gb_name)
self._identity = None
- def __call__(self, left, right=None, *, left_default=None, right_default=None):
- if left_default is not None or right_default is not None:
- if (
- left_default is None
- or right_default is None
- or right is not None
- or not isinstance(left, InfixExprBase)
- or left.method_name != "ewise_add"
- ):
- raise TypeError(
- "Specifying `left_default` or `right_default` keyword arguments implies "
- "performing `ewise_union` operation with infix notation.\n"
- "There is only one valid way to do this:\n\n"
- f">>> {self}(x | y, left_default=0, right_default=0)\n\nwhere x and y "
- "are Vectors or Matrices, and left_default and right_default are scalars."
- )
- return left.left.ewise_union(left.right, self, left_default, right_default)
- return _call_op(self, left, right)
-
@property
def identity(self):
if self._identity is None:
@@ -84,6 +64,8 @@ def is_idempotent(self):
"""True if ``monoid(x, x) == x`` for any x."""
return self.parent.is_idempotent
+ __call__ = TypedBuiltinBinaryOp.__call__
+
class TypedUserMonoid(TypedOpBase):
__slots__ = "binaryop", "identity"
@@ -288,6 +270,7 @@ def register_anonymous(cls, binaryop, identity, name=None, *, is_idempotent=Fals
Returns
-------
Monoid or ParameterizedMonoid
+
"""
if type(binaryop) is ParameterizedBinaryOp:
return ParameterizedMonoid(
@@ -327,6 +310,7 @@ def register_new(cls, name, binaryop, identity, *, is_idempotent=False, lazy=Fal
>>> gb.core.operator.Monoid.register_new("max_zero", gb.binary.max_zero, 0)
>>> dir(gb.monoid)
[..., 'max_zero', ...]
+
"""
module, funcname = cls._remove_nesting(name)
if lazy:
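Editor's note: with the bespoke ``__call__`` removed, typed monoids share ``TypedBuiltinBinaryOp.__call__``, so both infix forms keep working unchanged. A hedged sketch (``from_coo`` and the functional syntax are documented API):

    import graphblas as gb
    from graphblas import Vector

    a = Vector.from_coo([0, 2], [1, 3])
    b = Vector.from_coo([1, 2], [10, 30])

    c = gb.monoid.max(a | b).new()  # ewise_add via infix
    d = gb.binary.div(a | b, left_default=1, right_default=1).new()  # ewise_union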
diff --git a/graphblas/core/operator/select.py b/graphblas/core/operator/select.py
index 4c9cd4639..6de4fa89a 100644
--- a/graphblas/core/operator/select.py
+++ b/graphblas/core/operator/select.py
@@ -1,9 +1,17 @@
import inspect
from ... import _STANDARD_OPERATOR_NAMES, select
-from ...dtypes import BOOL
+from ...dtypes import BOOL, UINT64
+from ...exceptions import check_status_carg
+from .. import _has_numba, ffi, lib
from .base import OpBase, ParameterizedUdf, TypedOpBase, _call_op, _deserialize_parameterized
-from .indexunary import IndexUnaryOp
+from .indexunary import IndexUnaryOp, TypedBuiltinIndexUnaryOp
+
+if _has_numba:
+ import numba
+
+ from .base import _get_udt_wrapper
+ffi_new = ffi.new
class TypedBuiltinSelectOp(TypedOpBase):
@@ -15,13 +23,15 @@ def __call__(self, val, thunk=None):
thunk = False # most basic form of 0 when unifying dtypes
return _call_op(self, val, thunk=thunk)
+ thunk_type = TypedBuiltinIndexUnaryOp.thunk_type
+
class TypedUserSelectOp(TypedOpBase):
__slots__ = ()
opclass = "SelectOp"
- def __init__(self, parent, name, type_, return_type, gb_obj):
- super().__init__(parent, name, type_, return_type, gb_obj, f"{name}_{type_}")
+ def __init__(self, parent, name, type_, return_type, gb_obj, dtype2=None):
+ super().__init__(parent, name, type_, return_type, gb_obj, f"{name}_{type_}", dtype2=dtype2)
@property
def orig_func(self):
@@ -31,6 +41,7 @@ def orig_func(self):
def _numba_func(self):
return self.parent._numba_func
+ thunk_type = TypedBuiltinSelectOp.thunk_type
__call__ = TypedBuiltinSelectOp.__call__
@@ -120,6 +131,44 @@ def _from_indexunary(cls, iop):
obj.types[type_] = op.return_type
return obj
+ def _compile_udt(self, dtype, dtype2):
+ if dtype2 is None: # pragma: no cover
+ dtype2 = dtype
+ dtypes = (dtype, dtype2)
+ if dtypes in self._udt_types:
+ return self._udt_ops[dtypes]
+ if self._numba_func is None:
+ raise KeyError(f"{self.name} does not work with {dtypes} types")
+
+ # It would be nice if we could reuse compiling done for IndexUnaryOp
+ numba_func = self._numba_func
+ sig = (dtype.numba_type, UINT64.numba_type, UINT64.numba_type, dtype2.numba_type)
+ numba_func.compile(sig) # Should we catch and give additional error message?
+ select_wrapper, wrapper_sig = _get_udt_wrapper(
+ numba_func, BOOL, dtype, dtype2, include_indexes=True
+ )
+
+ select_wrapper = numba.cfunc(wrapper_sig, nopython=True)(select_wrapper)
+ new_select = ffi_new("GrB_IndexUnaryOp*")
+ check_status_carg(
+ lib.GrB_IndexUnaryOp_new(
+ new_select, select_wrapper.cffi, BOOL._carg, dtype._carg, dtype2._carg
+ ),
+ "IndexUnaryOp",
+ new_select[0],
+ )
+ op = TypedUserSelectOp(
+ self,
+ self.name,
+ dtype,
+ BOOL,
+ new_select[0],
+ dtype2=dtype2,
+ )
+ self._udt_types[dtypes] = BOOL
+ self._udt_ops[dtypes] = op
+ return op
+
@classmethod
def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=False):
"""Register a SelectOp without registering it in the ``graphblas.select`` namespace.
@@ -159,6 +208,7 @@ def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=Fals
Returns
-------
SelectOp or ParameterizedSelectOp
+
"""
cls._check_supports_udf("register_anonymous")
if parameterized:
@@ -215,6 +265,7 @@ def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=Fal
>>> gb.select.register_new("upper_left_triangle", lambda x, i, j, thunk: i + j <= thunk)
>>> dir(gb.select)
[..., 'upper_left_triangle', ...]
+
"""
cls._check_supports_udf("register_new")
iop = IndexUnaryOp.register_new(
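Editor's note: the new ``SelectOp._compile_udt`` mirrors the IndexUnaryOp version so user-defined select ops can now be compiled for UDTs as well. A sketch using the docstring's own example (assumes numba-backed UDF support):

    import graphblas as gb
    from graphblas import Matrix

    gb.select.register_new("upper_left_triangle", lambda x, i, j, thunk: i + j <= thunk)

    A = Matrix.from_coo([0, 1, 2], [0, 1, 2], [1.0, 2.0, 3.0])
    B = gb.select.upper_left_triangle(A, 2).new()  # keep entries where i + j <= 2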
diff --git a/graphblas/core/operator/semiring.py b/graphblas/core/operator/semiring.py
index 035a1c43b..a8d18f1bf 100644
--- a/graphblas/core/operator/semiring.py
+++ b/graphblas/core/operator/semiring.py
@@ -228,7 +228,7 @@ def _build(cls, name, monoid, binaryop, *, anonymous=False):
check_status_carg(
lib.GrB_Semiring_new(new_semiring, monoid[binary_out].gb_obj, binary_func.gb_obj),
"Semiring",
- new_semiring,
+ new_semiring[0],
)
ret_type = monoid[binary_out].return_type
op = TypedUserSemiring(
@@ -254,7 +254,7 @@ def _compile_udt(self, dtype, dtype2):
ret_type = monoid.return_type
new_semiring = ffi_new("GrB_Semiring*")
status = lib.GrB_Semiring_new(new_semiring, monoid.gb_obj, binaryop.gb_obj)
- check_status_carg(status, "Semiring", new_semiring)
+ check_status_carg(status, "Semiring", new_semiring[0])
op = TypedUserSemiring(
new_semiring,
self.name,
@@ -287,6 +287,7 @@ def register_anonymous(cls, monoid, binaryop, name=None):
Returns
-------
Semiring or ParameterizedSemiring
+
"""
if type(monoid) is ParameterizedMonoid or type(binaryop) is ParameterizedBinaryOp:
return ParameterizedSemiring(name, monoid, binaryop, anonymous=True)
@@ -318,6 +319,7 @@ def register_new(cls, name, monoid, binaryop, *, lazy=False):
>>> gb.core.operator.Semiring.register_new("max_max", gb.monoid.max, gb.binary.max)
>>> dir(gb.semiring)
[..., 'max_max', ...]
+
"""
module, funcname = cls._remove_nesting(name)
if lazy:
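Editor's note: for reference, the semiring registration exercised by the docstring example (documented API; the ``mxm`` usage is an illustrative sketch):

    import graphblas as gb
    from graphblas import Matrix

    gb.core.operator.Semiring.register_new("max_max", gb.monoid.max, gb.binary.max)

    A = Matrix.from_coo([0, 1], [1, 0], [2.0, 3.0])
    C = A.mxm(A, gb.semiring.max_max).new()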
diff --git a/graphblas/core/operator/unary.py b/graphblas/core/operator/unary.py
index a02445836..26e0ca61c 100644
--- a/graphblas/core/operator/unary.py
+++ b/graphblas/core/operator/unary.py
@@ -15,12 +15,12 @@
UINT16,
UINT32,
UINT64,
- _sample_values,
_supports_complex,
lookup_dtype,
)
from ...exceptions import UdfParseError, check_status_carg
from .. import _has_numba, ffi, lib
+from ..dtypes import _sample_values
from ..utils import output_type
from .base import (
_SS_OPERATORS,
@@ -239,7 +239,7 @@ def unary_wrapper(z, x):
new_unary, unary_wrapper.cffi, ret_type.gb_obj, type_.gb_obj
),
"UnaryOp",
- new_unary,
+ new_unary[0],
)
op = TypedUserUnaryOp(new_type_obj, name, type_, ret_type, new_unary[0])
new_type_obj._add(op)
@@ -252,6 +252,8 @@ def unary_wrapper(z, x):
def _compile_udt(self, dtype, dtype2):
if dtype in self._udt_types:
return self._udt_ops[dtype]
+ if self._numba_func is None:
+ raise KeyError(f"{self.name} does not work with {dtype}")
numba_func = self._numba_func
sig = (dtype.numba_type,)
@@ -264,7 +266,7 @@ def _compile_udt(self, dtype, dtype2):
check_status_carg(
lib.GrB_UnaryOp_new(new_unary, unary_wrapper.cffi, ret_type._carg, dtype._carg),
"UnaryOp",
- new_unary,
+ new_unary[0],
)
op = TypedUserUnaryOp(self, self.name, dtype, ret_type, new_unary[0])
self._udt_types[dtype] = ret_type
@@ -302,6 +304,7 @@ def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=Fals
Returns
-------
UnaryOp or ParameterizedUnaryOp
+
"""
cls._check_supports_udf("register_anonymous")
if parameterized:
@@ -347,6 +350,7 @@ def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=Fal
>>> gb.core.operator.UnaryOp.register_new("plus_one", lambda x: x + 1)
>>> dir(gb.unary)
[..., 'plus_one', ...]
+
"""
cls._check_supports_udf("register_new")
module, funcname = cls._remove_nesting(name)
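Editor's note: a sketch of the UnaryOp path, using the docstring's ``plus_one`` example (assumes numba-backed UDF support; ops without a numba function now raise ``KeyError`` from ``_compile_udt``):

    import graphblas as gb
    from graphblas import Vector

    gb.core.operator.UnaryOp.register_new("plus_one", lambda x: x + 1)

    v = Vector.from_coo([0, 1], [1, 2])
    w = gb.unary.plus_one(v).new()  # [2, 3]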
diff --git a/graphblas/core/operator/utils.py b/graphblas/core/operator/utils.py
index 00bc86cea..1442a9b5e 100644
--- a/graphblas/core/operator/utils.py
+++ b/graphblas/core/operator/utils.py
@@ -2,6 +2,7 @@
from ... import backend, binary, config, indexunary, monoid, op, select, semiring, unary
from ...dtypes import UINT64, lookup_dtype, unify
+from ..expr import InfixExprBase
from .base import (
_SS_OPERATORS,
OpBase,
@@ -74,6 +75,9 @@ def get_typed_op(op, dtype, dtype2=None, *, is_left_scalar=False, is_right_scala
from .agg import Aggregator, TypedAggregator
if isinstance(op, Aggregator):
+ # agg._any_dtype basically serves the same purpose as op._custom_dtype
+ if op._any_dtype is not None and op._any_dtype is not True:
+ return op[op._any_dtype]
return op[dtype]
if isinstance(op, TypedAggregator):
return op
@@ -132,6 +136,30 @@ def get_typed_op(op, dtype, dtype2=None, *, is_left_scalar=False, is_right_scala
raise TypeError(f"Unable to get typed operator from object with type {type(op)}")
+def _get_typed_op_from_exprs(op, left, right, *, kind=None):
+ if isinstance(left, InfixExprBase):
+ left_op = _get_typed_op_from_exprs(op, left.left, left.right, kind=kind)
+ left_dtype = left_op.type
+ else:
+ left_op = None
+ left_dtype = left.dtype
+ if isinstance(right, InfixExprBase):
+ right_op = _get_typed_op_from_exprs(op, right.left, right.right, kind=kind)
+ if right_op is left_op:
+ return right_op
+ right_dtype = right_op.type2
+ else:
+ right_dtype = right.dtype
+ return get_typed_op(
+ op,
+ left_dtype,
+ right_dtype,
+ is_left_scalar=left._is_scalar,
+ is_right_scalar=right._is_scalar,
+ kind=kind,
+ )
+
+
def get_semiring(monoid, binaryop, name=None):
"""Get or create a Semiring object from a monoid and binaryop.
@@ -142,6 +170,7 @@ def get_semiring(monoid, binaryop, name=None):
semiring.register_anonymous
semiring.register_new
semiring.from_string
+
"""
monoid, opclass = find_opclass(monoid)
switched = False
@@ -340,7 +369,7 @@ def _from_string(string, module, mapping, example):
)
if base in mapping:
op = mapping[base]
- if type(op) is str:
+ if isinstance(op, str):
op = mapping[base] = module.from_string(op)
elif hasattr(module, base):
op = getattr(module, base)
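Editor's note: the ``isinstance(op, str)`` change means any str subclass now triggers the lazy ``from_string`` conversion. A hedged sketch of the string lookup and explicit typing this code serves (``from_string`` and dtype indexing are documented API; the exact strings are illustrative):

    import graphblas as gb

    op = gb.semiring.from_string("min.plus")  # lazily resolved, then cached
    typed = gb.binary.plus[gb.dtypes.INT64]   # explicit typed operator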
diff --git a/graphblas/core/recorder.py b/graphblas/core/recorder.py
index 2268c31eb..ca776f697 100644
--- a/graphblas/core/recorder.py
+++ b/graphblas/core/recorder.py
@@ -34,7 +34,7 @@ def gbstr(arg):
class Recorder:
"""Record GraphBLAS C calls.
- The recorder can use `.start()` and `.stop()` to enable/disable recording,
+ The recorder can use ``.start()`` and ``.stop()`` to enable/disable recording,
or it can be used as a context manager.
For example,
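Editor's note: a hedged sketch of the context-manager form referenced here (``gb.Recorder`` is the documented entry point; printing the recorder to view recorded calls is an assumption):

    import graphblas as gb
    from graphblas import Vector

    with gb.Recorder() as rec:
        v = Vector.from_coo([0, 1], [1, 2])
        w = v.ewise_add(v).new()
    print(rec)  # shows the recorded GraphBLAS C calls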
diff --git a/graphblas/core/scalar.py b/graphblas/core/scalar.py
index a7a251a1d..25aef5743 100644
--- a/graphblas/core/scalar.py
+++ b/graphblas/core/scalar.py
@@ -3,7 +3,7 @@
import numpy as np
from .. import backend, binary, config, monoid
-from ..dtypes import _INDEX, FP64, lookup_dtype, unify
+from ..dtypes import _INDEX, FP64, _index_dtypes, lookup_dtype, unify
from ..exceptions import EmptyObject, check_status
from . import _has_numba, _supports_udfs, automethods, ffi, lib, utils
from .base import BaseExpression, BaseType, call
@@ -30,12 +30,12 @@ def _scalar_index(name):
return self
-def _s_union_s(updater, left, right, left_default, right_default, op, dtype):
+def _s_union_s(updater, left, right, left_default, right_default, op):
opts = updater.opts
- new_left = left.dup(dtype, clear=True)
+ new_left = left.dup(op.type, clear=True)
new_left(**opts) << binary.second(right, left_default)
new_left(**opts) << binary.first(left | new_left)
- new_right = right.dup(dtype, clear=True)
+ new_right = right.dup(op.type2, clear=True)
new_right(**opts) << binary.second(left, right_default)
new_right(**opts) << binary.first(right | new_right)
updater << op(new_left & new_right)
@@ -53,6 +53,7 @@ class Scalar(BaseType):
with a proper GrB_Scalar object.
name : str, optional
Name to give the Scalar. This will be displayed in the ``__repr__``.
+
"""
__slots__ = "_empty", "_is_cscalar"
@@ -158,9 +159,13 @@ def __int__(self):
def __complex__(self):
return complex(self.value)
- __index__ = __int__
+ @property
+ def __index__(self):
+ if self.dtype in _index_dtypes:
+ return self.__int__
+ raise AttributeError("Scalar object only has `__index__` for integral dtypes")
- def __array__(self, dtype=None):
+ def __array__(self, dtype=None, *, copy=None):
if dtype is None:
dtype = self.dtype.np_type
return np.array(self.value, dtype=dtype)
@@ -192,6 +197,7 @@ def isequal(self, other, *, check_dtype=False):
See Also
--------
:meth:`isclose` : For equality check of floating point dtypes
+
"""
if type(other) is not Scalar:
if other is None:
@@ -241,6 +247,7 @@ def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False):
Returns
-------
bool
+
"""
if type(other) is not Scalar:
if other is None:
@@ -424,6 +431,7 @@ def dup(self, dtype=None, *, clear=False, is_cscalar=None, name=None):
Returns
-------
Scalar
+
"""
if is_cscalar is None:
is_cscalar = self._is_cscalar
@@ -469,6 +477,7 @@ def wait(self, how="materialize"):
Use wait to force completion of the Scalar.
Has no effect in `blocking mode <../user_guide/init.html#graphblas-modes>`__.
+
"""
how = how.lower()
if how == "materialize":
@@ -492,6 +501,7 @@ def get(self, default=None):
Returns
-------
Python scalar
+
"""
return default if self._is_empty else self.value
@@ -515,6 +525,7 @@ def from_value(cls, value, dtype=None, *, is_cscalar=False, name=None):
Returns
-------
Scalar
+
"""
typ = output_type(value)
if dtype is None:
@@ -624,8 +635,25 @@ def ewise_add(self, other, op=monoid.plus):
# Functional syntax
c << monoid.max(a | b)
+
"""
+ return self._ewise_add(other, op)
+
+ def _ewise_add(self, other, op=monoid.plus, is_infix=False):
method_name = "ewise_add"
+ if is_infix:
+ from .infix import ScalarEwiseAddExpr
+
+ # This is a little different from how we handle ewise_add for Vector and
+ # Matrix where we are super-careful to handle dtypes well to support UDTs.
+ # For Scalar, we're going to let dtypes in expressions resolve themselves.
+ # Scalars are more challenging, because they may be literal scalars.
+ # Also, we have not yet resolved `op` here, so errors may be different.
+ if isinstance(self, ScalarEwiseAddExpr):
+ self = op(self).new()
+ if isinstance(other, ScalarEwiseAddExpr):
+ other = op(other).new()
+
if type(other) is not Scalar:
dtype = self.dtype if self.dtype._is_udt else None
try:
@@ -678,8 +706,25 @@ def ewise_mult(self, other, op=binary.times):
# Functional syntax
c << binary.gt(a & b)
+
"""
+ return self._ewise_mult(other, op)
+
+ def _ewise_mult(self, other, op=binary.times, is_infix=False):
method_name = "ewise_mult"
+ if is_infix:
+ from .infix import ScalarEwiseMultExpr
+
+ # This is a little different from how we handle ewise_mult for Vector and
+ # Matrix where we are super-careful to handle dtypes well to support UDTs.
+ # For Scalar, we're going to let dtypes in expressions resolve themselves.
+ # Scalars are more challenging, because they may be literal scalars.
+ # Also, we have not yet resolved `op` here, so errors may be different.
+ if isinstance(self, ScalarEwiseMultExpr):
+ self = op(self).new()
+ if isinstance(other, ScalarEwiseMultExpr):
+ other = op(other).new()
+
if type(other) is not Scalar:
dtype = self.dtype if self.dtype._is_udt else None
try:
@@ -736,9 +781,27 @@ def ewise_union(self, other, op, left_default, right_default):
# Functional syntax
c << binary.div(a | b, left_default=1, right_default=1)
+
"""
+ return self._ewise_union(other, op, left_default, right_default)
+
+ def _ewise_union(self, other, op, left_default, right_default, is_infix=False):
method_name = "ewise_union"
- dtype = self.dtype if self.dtype._is_udt else None
+ if is_infix:
+ from .infix import ScalarEwiseAddExpr
+
+ # This is a little different from how we handle ewise_union for Vector and
+ # Matrix where we are super-careful to handle dtypes well to support UDTs.
+ # For Scalar, we're going to let dtypes in expressions resolve themselves.
+ # Scalars are more challenging, because they may be literal scalars.
+ # Also, we have not yet resolved `op` here, so errors may be different.
+ if isinstance(self, ScalarEwiseAddExpr):
+ self = op(self, left_default=left_default, right_default=right_default).new()
+ if isinstance(other, ScalarEwiseAddExpr):
+ other = op(other, left_default=left_default, right_default=right_default).new()
+
+ right_dtype = self.dtype
+ dtype = right_dtype if right_dtype._is_udt else None
if type(other) is not Scalar:
try:
other = Scalar.from_value(other, dtype, is_cscalar=False, name="")
@@ -751,6 +814,13 @@ def ewise_union(self, other, op, left_default, right_default):
extra_message="Literal scalars also accepted.",
op=op,
)
+ else:
+ other = _as_scalar(other, dtype, is_cscalar=False) # pragma: is_grbscalar
+
+ temp_op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
+
+ left_dtype = temp_op.type
+ dtype = left_dtype if left_dtype._is_udt else None
if type(left_default) is not Scalar:
try:
left = Scalar.from_value(
@@ -767,6 +837,8 @@ def ewise_union(self, other, op, left_default, right_default):
)
else:
left = _as_scalar(left_default, dtype, is_cscalar=False) # pragma: is_grbscalar
+ right_dtype = temp_op.type2
+ dtype = right_dtype if right_dtype._is_udt else None
if type(right_default) is not Scalar:
try:
right = Scalar.from_value(
@@ -783,9 +855,15 @@ def ewise_union(self, other, op, left_default, right_default):
)
else:
right = _as_scalar(right_default, dtype, is_cscalar=False) # pragma: is_grbscalar
- defaults_dtype = unify(left.dtype, right.dtype)
- args_dtype = unify(self.dtype, other.dtype)
- op = get_typed_op(op, defaults_dtype, args_dtype, kind="binary")
+
+ op1 = get_typed_op(op, self.dtype, right.dtype, kind="binary")
+ op2 = get_typed_op(op, left.dtype, other.dtype, kind="binary")
+ if op1 is not op2:
+ left_dtype = unify(op1.type, op2.type, is_right_scalar=True)
+ right_dtype = unify(op1.type2, op2.type2, is_left_scalar=True)
+ op = get_typed_op(op, left_dtype, right_dtype, kind="binary")
+ else:
+ op = op1
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
if op.opclass == "Monoid":
op = op.binaryop
@@ -801,11 +879,10 @@ def ewise_union(self, other, op, left_default, right_default):
scalar_as_vector=True,
)
else:
- dtype = unify(defaults_dtype, args_dtype)
expr = ScalarExpression(
method_name,
None,
- [self, left, other, right, _s_union_s, (self, other, left, right, op, dtype)],
+ [self, left, other, right, _s_union_s, (self, other, left, right, op)],
op=op,
expr_repr=expr_repr,
is_cscalar=False,
@@ -850,6 +927,7 @@ def apply(self, op, right=None, *, left=None):
# Functional syntax
b << op.abs(a)
+
"""
expr = self._as_vector().apply(op, right, left=left)
return ScalarExpression(
@@ -1056,7 +1134,7 @@ def _as_scalar(scalar, dtype=None, *, is_cscalar):
def _dict_to_record(np_type, d):
- """Converts e.g. `{"x": 1, "y": 2.3}` to `(1, 2.3)`."""
+ """Converts e.g. ``{"x": 1, "y": 2.3}`` to ``(1, 2.3)``."""
rv = []
for name, (dtype, _) in np_type.fields.items():
val = d[name]
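Editor's note: the ``__index__`` change is user-visible; only integral Scalars can now be used where Python requires an index. A minimal sketch:

    from graphblas import Scalar

    s = Scalar.from_value(2)    # INT64
    print([10, 20, 30][s])      # 30; __index__ exists for integral dtypes
    f = Scalar.from_value(2.5)  # FP64
    # [10, 20, 30][f] now raises: __index__ is absent for non-integral dtypes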
diff --git a/graphblas/core/ss/__init__.py b/graphblas/core/ss/__init__.py
index e69de29bb..10a6fed94 100644
--- a/graphblas/core/ss/__init__.py
+++ b/graphblas/core/ss/__init__.py
@@ -0,0 +1,5 @@
+import suitesparse_graphblas as _ssgb
+
+(version_major, version_minor, version_bug) = map(int, _ssgb.__version__.split(".")[:3])
+
+_IS_SSGB7 = version_major == 7
diff --git a/graphblas/core/ss/binary.py b/graphblas/core/ss/binary.py
new file mode 100644
index 000000000..d53608818
--- /dev/null
+++ b/graphblas/core/ss/binary.py
@@ -0,0 +1,128 @@
+from ... import backend
+from ...dtypes import lookup_dtype
+from ...exceptions import check_status_carg
+from .. import NULL, ffi, lib
+from ..operator.base import TypedOpBase
+from ..operator.binary import BinaryOp, TypedUserBinaryOp
+from . import _IS_SSGB7
+
+ffi_new = ffi.new
+
+
+class TypedJitBinaryOp(TypedOpBase):
+ __slots__ = "_monoid", "_jit_c_definition"
+ opclass = "BinaryOp"
+
+ def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition, dtype2=None):
+ super().__init__(parent, name, type_, return_type, gb_obj, name, dtype2=dtype2)
+ self._monoid = None
+ self._jit_c_definition = jit_c_definition
+
+ @property
+ def jit_c_definition(self):
+ return self._jit_c_definition
+
+ monoid = TypedUserBinaryOp.monoid
+ commutes_to = TypedUserBinaryOp.commutes_to
+ _semiring_commutes_to = TypedUserBinaryOp._semiring_commutes_to
+ is_commutative = TypedUserBinaryOp.is_commutative
+ type2 = TypedUserBinaryOp.type2
+ __call__ = TypedUserBinaryOp.__call__
+
+
+def register_new(name, jit_c_definition, left_type, right_type, ret_type):
+ """Register a new BinaryOp using the SuiteSparse:GraphBLAS JIT compiler.
+
+ This creates a BinaryOp by compiling the C string definition of the function.
+ It requires a shell call to a C compiler. The resulting operator will be as
+ fast as if it were built into SuiteSparse:GraphBLAS and does not have the
+ overhead of additional function calls as when using ``gb.binary.register_new``.
+
+ This is an advanced feature that requires a C compiler and proper configuration.
+ Configuration is handled by ``gb.ss.config``; see its docstring for details.
+ By default, the JIT caches results in ``~/.SuiteSparse/``. For more information,
+ see the SuiteSparse:GraphBLAS user guide.
+
+ Only one type signature may be registered at a time, but repeated calls using
+ the same name with different input types are allowed.
+
+ Parameters
+ ----------
+ name : str
+ The name of the operator. This will show up as ``gb.binary.ss.{name}``.
+ The name may contain periods, ".", which will result in nested objects
+ such as ``gb.binary.ss.x.y.z`` for name ``"x.y.z"``.
+ jit_c_definition : str
+ The C definition as a string of the user-defined function. For example:
+ ``"void absdiff (double *z, double *x, double *y) { (*z) = fabs ((*x) - (*y)) ; }"``.
+ left_type : dtype
+ The dtype of the left operand of the binary operator.
+ right_type : dtype
+ The dtype of the right operand of the binary operator.
+ ret_type : dtype
+ The dtype of the result of the binary operator.
+
+ Returns
+ -------
+ BinaryOp
+
+ See Also
+ --------
+ gb.binary.register_new
+ gb.binary.register_anonymous
+ gb.unary.ss.register_new
+
+ """
+ if backend != "suitesparse": # pragma: no cover (safety)
+ raise RuntimeError(
+ "`gb.binary.ss.register_new` invalid when not using 'suitesparse' backend"
+ )
+ if _IS_SSGB7:
+ # JIT was introduced in SuiteSparse:GraphBLAS 8.0
+ import suitesparse_graphblas as ssgb
+
+ raise RuntimeError(
+ "JIT was added to SuiteSparse:GraphBLAS in version 8; "
+ f"current version is {ssgb.__version__}"
+ )
+ left_type = lookup_dtype(left_type)
+ right_type = lookup_dtype(right_type)
+ ret_type = lookup_dtype(ret_type)
+ name = name if name.startswith("ss.") else f"ss.{name}"
+ module, funcname = BinaryOp._remove_nesting(name, strict=False)
+ if hasattr(module, funcname):
+ rv = getattr(module, funcname)
+ if not isinstance(rv, BinaryOp):
+ BinaryOp._remove_nesting(name)
+ if (
+ (left_type, right_type) in rv.types
+ or rv._udt_types is not None
+ and (left_type, right_type) in rv._udt_types
+ ):
+ raise TypeError(
+ f"BinaryOp gb.binary.{name} already defined for "
+ f"({left_type}, {right_type}) input types"
+ )
+ else:
+ # We use `is_udt=True` to make dtype handling flexible and explicit.
+ rv = BinaryOp(name, is_udt=True)
+ gb_obj = ffi_new("GrB_BinaryOp*")
+ check_status_carg(
+ lib.GxB_BinaryOp_new(
+ gb_obj,
+ NULL,
+ ret_type._carg,
+ left_type._carg,
+ right_type._carg,
+ ffi_new("char[]", funcname.encode()),
+ ffi_new("char[]", jit_c_definition.encode()),
+ ),
+ "BinaryOp",
+ gb_obj[0],
+ )
+ op = TypedJitBinaryOp(
+ rv, funcname, left_type, ret_type, gb_obj[0], jit_c_definition, dtype2=right_type
+ )
+ rv._add(op, is_jit=True)
+ setattr(module, funcname, rv)
+ return rv
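Editor's note: a hedged usage sketch of the JIT binary registration above, reusing the docstring's own C definition (requires SuiteSparse:GraphBLAS >= 8 and a configured C compiler):

    import graphblas as gb
    from graphblas import Vector

    absdiff = gb.binary.ss.register_new(
        "absdiff",
        "void absdiff (double *z, double *x, double *y) { (*z) = fabs ((*x) - (*y)) ; }",
        "FP64", "FP64", "FP64",
    )
    a = Vector.from_coo([0, 1], [1.0, 5.0])
    b = Vector.from_coo([0, 1], [4.0, 2.0])
    c = a.ewise_mult(b, gb.binary.ss.absdiff).new()  # [3.0, 3.0]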
diff --git a/graphblas/core/ss/config.py b/graphblas/core/ss/config.py
index ca91cc198..70a7dd196 100644
--- a/graphblas/core/ss/config.py
+++ b/graphblas/core/ss/config.py
@@ -1,10 +1,9 @@
from collections.abc import MutableMapping
-from numbers import Integral
from ...dtypes import lookup_dtype
from ...exceptions import _error_code_lookup, check_status
from .. import NULL, ffi, lib
-from ..utils import values_to_numpy_buffer
+from ..utils import maybe_integral, values_to_numpy_buffer
class BaseConfig(MutableMapping):
@@ -12,6 +11,9 @@ class BaseConfig(MutableMapping):
# Subclasses should redefine these
_get_function = None
_set_function = None
+ _context_get_function = "GxB_Context_get"
+ _context_set_function = "GxB_Context_set"
+ _context_keys = set()
_null_valid = {}
_options = {}
_defaults = {}
@@ -28,7 +30,7 @@ class BaseConfig(MutableMapping):
"GxB_Format_Value",
}
- def __init__(self, parent=None):
+ def __init__(self, parent=None, context=None):
cls = type(self)
if not cls._initialized:
cls._reverse_enumerations = {}
@@ -51,6 +53,7 @@ def __init__(self, parent=None):
rd[k] = k
cls._initialized = True
self._parent = parent
+ self._context = context
def __delitem__(self, key):
raise TypeError("Configuration options can't be deleted.")
@@ -61,19 +64,27 @@ def __getitem__(self, key):
raise KeyError(key)
key_obj, ctype = self._options[key]
is_bool = ctype == "bool"
+ if is_context := (key in self._context_keys):
+ get_function_base = self._context_get_function
+ else:
+ get_function_base = self._get_function
if ctype in self._int32_ctypes:
ctype = "int32_t"
- get_function_name = f"{self._get_function}_INT32"
+ get_function_name = f"{get_function_base}_INT32"
elif ctype.startswith("int64_t"):
- get_function_name = f"{self._get_function}_INT64"
+ get_function_name = f"{get_function_base}_INT64"
elif ctype.startswith("double"):
- get_function_name = f"{self._get_function}_FP64"
+ get_function_name = f"{get_function_base}_FP64"
+ elif ctype.startswith("char"):
+ get_function_name = f"{get_function_base}_CHAR"
else: # pragma: no cover (sanity)
raise ValueError(ctype)
get_function = getattr(lib, get_function_name)
is_array = "[" in ctype
val_ptr = ffi.new(ctype if is_array else f"{ctype}*")
- if self._parent is None:
+ if is_context:
+ info = get_function(self._context._carg, key_obj, val_ptr)
+ elif self._parent is None:
info = get_function(key_obj, val_ptr)
else:
info = get_function(self._parent._carg, key_obj, val_ptr)
@@ -88,11 +99,13 @@ def __getitem__(self, key):
return {reverse_bitwise[val]}
rv = set()
for k, v in self._bitwise[key].items():
- if isinstance(k, str) and val & v and bin(v).count("1") == 1:
+ if isinstance(k, str) and val & v and v.bit_count() == 1:
rv.add(k)
return rv
if is_bool:
return bool(val_ptr[0])
+ if ctype.startswith("char"):
+ return ffi.string(val_ptr[0]).decode()
return val_ptr[0]
raise _error_code_lookup[info](f"Failed to get info for {key!r}") # pragma: no cover
@@ -103,15 +116,21 @@ def __setitem__(self, key, val):
if key in self._read_only:
raise ValueError(f"Config option {key!r} is read-only")
key_obj, ctype = self._options[key]
+ if is_context := (key in self._context_keys):
+ set_function_base = self._context_set_function
+ else:
+ set_function_base = self._set_function
if ctype in self._int32_ctypes:
ctype = "int32_t"
- set_function_name = f"{self._set_function}_INT32"
+ set_function_name = f"{set_function_base}_INT32"
elif ctype == "double":
- set_function_name = f"{self._set_function}_FP64"
+ set_function_name = f"{set_function_base}_FP64"
elif ctype.startswith("int64_t["):
- set_function_name = f"{self._set_function}_INT64_ARRAY"
+ set_function_name = f"{set_function_base}_INT64_ARRAY"
elif ctype.startswith("double["):
- set_function_name = f"{self._set_function}_FP64_ARRAY"
+ set_function_name = f"{set_function_base}_FP64_ARRAY"
+ elif ctype.startswith("char"):
+ set_function_name = f"{set_function_base}_CHAR"
else: # pragma: no cover (sanity)
raise ValueError(ctype)
set_function = getattr(lib, set_function_name)
@@ -127,8 +146,8 @@ def __setitem__(self, key, val):
bitwise = self._bitwise[key]
if isinstance(val, str):
val = bitwise[val.lower()]
- elif isinstance(val, Integral):
- val = bitwise.get(val, val)
+ elif (x := maybe_integral(val)) is not None:
+ val = bitwise.get(x, x)
else:
bits = 0
for x in val:
@@ -154,9 +173,19 @@ def __setitem__(self, key, val):
f"expected {size}, got {vals.size}: {val}"
)
val_obj = ffi.from_buffer(ctype, vals)
+ elif ctype.startswith("char"):
+ val_obj = ffi.new("char[]", val.encode())
else:
val_obj = ffi.cast(ctype, val)
- if self._parent is None:
+ if is_context:
+ if self._context is None:
+ from .context import Context
+
+ self._context = Context(engage=False)
+ self._context._engage() # Disengage when context goes out of scope
+ self._parent._context = self._context # Set context to descriptor
+ info = set_function(self._context._carg, key_obj, val_obj)
+ elif self._parent is None:
info = set_function(key_obj, val_obj)
else:
info = set_function(self._parent._carg, key_obj, val_obj)
@@ -174,7 +203,12 @@ def __len__(self):
return len(self._options)
def __repr__(self):
- return "{" + ",\n ".join(f"{k!r}: {v!r}" for k, v in self.items()) + "}"
+ return (
+ type(self).__name__
+ + "({"
+ + ",\n ".join(f"{k!r}: {v!r}" for k, v in self.items())
+ + "})"
+ )
def _ipython_key_completions_(self): # pragma: no cover (ipython)
return list(self)
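Editor's note: a small sketch of the config surface these changes extend (the ``nthreads`` key on the global config is an assumption based on the SuiteSparse global options):

    import graphblas as gb

    gb.ss.config["nthreads"] = 8   # routed through the INT32 setter
    print(gb.ss.config["nthreads"])
    print(gb.ss.config)            # repr now includes the class name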
diff --git a/graphblas/core/ss/context.py b/graphblas/core/ss/context.py
new file mode 100644
index 000000000..f93d1ec1c
--- /dev/null
+++ b/graphblas/core/ss/context.py
@@ -0,0 +1,147 @@
+import threading
+
+from ...exceptions import InvalidValue, check_status, check_status_carg
+from .. import ffi, lib
+from . import _IS_SSGB7
+from .config import BaseConfig
+
+ffi_new = ffi.new
+if _IS_SSGB7:
+ # Context was introduced in SuiteSparse:GraphBLAS 8.0
+ import suitesparse_graphblas as ssgb
+
+ raise ImportError(
+ "Context was added to SuiteSparse:GraphBLAS in version 8; "
+ f"current version is {ssgb.__version__}"
+ )
+
+
+class Context(BaseConfig):
+ _context_keys = {"chunk", "gpu_id", "nthreads"}
+ _options = {
+ "chunk": (lib.GxB_CONTEXT_CHUNK, "double"),
+ "gpu_id": (lib.GxB_CONTEXT_GPU_ID, "int"),
+ "nthreads": (lib.GxB_CONTEXT_NTHREADS, "int"),
+ }
+ _defaults = {
+ "nthreads": 0,
+ "chunk": 0,
+ "gpu_id": -1, # -1 means no GPU
+ }
+
+ def __init__(self, engage=True, *, stack=True, nthreads=None, chunk=None, gpu_id=None):
+ super().__init__()
+ self.gb_obj = ffi_new("GxB_Context*")
+ check_status_carg(lib.GxB_Context_new(self.gb_obj), "Context", self.gb_obj[0])
+ if stack:
+ context = threadlocal.context
+ self["nthreads"] = context["nthreads"] if nthreads is None else nthreads
+ self["chunk"] = context["chunk"] if chunk is None else chunk
+ self["gpu_id"] = context["gpu_id"] if gpu_id is None else gpu_id
+ else:
+ if nthreads is not None:
+ self["nthreads"] = nthreads
+ if chunk is not None:
+ self["chunk"] = chunk
+ if gpu_id is not None:
+ self["gpu_id"] = gpu_id
+ self._prev_context = None
+ if engage:
+ self.engage()
+
+ @classmethod
+ def _from_obj(cls, gb_obj=None):
+ self = object.__new__(cls)
+ self.gb_obj = gb_obj
+ self._prev_context = None
+ super().__init__(self)
+ return self
+
+ @property
+ def _carg(self):
+ return self.gb_obj[0]
+
+ def dup(self, engage=True, *, nthreads=None, chunk=None, gpu_id=None):
+ if nthreads is None:
+ nthreads = self["nthreads"]
+ if chunk is None:
+ chunk = self["chunk"]
+ if gpu_id is None:
+ gpu_id = self["gpu_id"]
+ return type(self)(engage, stack=False, nthreads=nthreads, chunk=chunk, gpu_id=gpu_id)
+
+ def __del__(self):
+ gb_obj = getattr(self, "gb_obj", None)
+ if gb_obj is not None and lib is not None: # pragma: no branch (safety)
+ try:
+ self.disengage()
+ except InvalidValue:
+ pass
+ lib.GxB_Context_free(gb_obj)
+
+ def engage(self):
+ if self._prev_context is None and (context := threadlocal.context) is not self:
+ self._prev_context = context
+ check_status(lib.GxB_Context_engage(self._carg), self)
+ threadlocal.context = self
+
+ def _engage(self):
+ """Like engage, but don't set to threadlocal.context.
+
+ This is useful if you want to disengage when the object is deleted by going out of scope.
+ """
+ if self._prev_context is None and (context := threadlocal.context) is not self:
+ self._prev_context = context
+ check_status(lib.GxB_Context_engage(self._carg), self)
+
+ def disengage(self):
+ prev_context = self._prev_context
+ self._prev_context = None
+ if threadlocal.context is self:
+ if prev_context is not None:
+ threadlocal.context = prev_context
+ prev_context.engage()
+ else:
+ threadlocal.context = global_context
+ check_status(lib.GxB_Context_disengage(self._carg), self)
+ elif prev_context is not None and threadlocal.context is prev_context:
+ prev_context.engage()
+ else:
+ check_status(lib.GxB_Context_disengage(self._carg), self)
+
+ def __enter__(self):
+ self.engage()
+ return self
+
+ def __exit__(self, exc_type, exc, exc_tb):
+ self.disengage()
+
+ @property
+ def _context(self):
+ return self
+
+ @_context.setter
+ def _context(self, val):
+ if val is not None and val is not self:
+ raise AttributeError("'_context' attribute is read-only")
+
+
+class GlobalContext(Context):
+ @property
+ def _carg(self):
+ return self.gb_obj
+
+ def __del__(self): # pragma: no cover (safety)
+ pass
+
+
+global_context = GlobalContext._from_obj(lib.GxB_CONTEXT_WORLD)
+
+
+class ThreadLocal(threading.local):
+ """Hold the active context for the current thread."""
+
+ context = global_context
+
+
+threadlocal = ThreadLocal()
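Editor's note: a hedged sketch of Context usage (requires SuiteSparse:GraphBLAS >= 8; the constructor signature is taken from the code above):

    from graphblas.core.ss.context import Context

    with Context(nthreads=4, chunk=4096):
        ...  # GraphBLAS calls on this thread use 4 threads
    # on exit, the previous context (or the global context) is re-engaged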
diff --git a/graphblas/core/ss/descriptor.py b/graphblas/core/ss/descriptor.py
index dffc4dec1..781661b7b 100644
--- a/graphblas/core/ss/descriptor.py
+++ b/graphblas/core/ss/descriptor.py
@@ -1,6 +1,7 @@
from ...exceptions import check_status, check_status_carg
from .. import ffi, lib
from ..descriptor import Descriptor
+from . import _IS_SSGB7
from .config import BaseConfig
ffi_new = ffi.new
@@ -18,6 +19,8 @@
class _DescriptorConfig(BaseConfig):
_get_function = "GxB_Desc_get"
_set_function = "GxB_Desc_set"
+ if not _IS_SSGB7:
+ _context_keys = {"chunk", "gpu_id", "nthreads"}
_options = {
# GrB
"output_replace": (lib.GrB_OUTP, "GrB_Desc_Value"),
@@ -26,13 +29,25 @@ class _DescriptorConfig(BaseConfig):
"transpose_first": (lib.GrB_INP0, "GrB_Desc_Value"),
"transpose_second": (lib.GrB_INP1, "GrB_Desc_Value"),
# GxB
- "nthreads": (lib.GxB_DESCRIPTOR_NTHREADS, "int"),
- "chunk": (lib.GxB_DESCRIPTOR_CHUNK, "double"),
"axb_method": (lib.GxB_AxB_METHOD, "GrB_Desc_Value"),
"sort": (lib.GxB_SORT, "int"),
"secure_import": (lib.GxB_IMPORT, "int"),
- # "gpu_control": (GxB_DESCRIPTOR_GPU_CONTROL, "GrB_Desc_Value"), # Coming soon...
}
+ if _IS_SSGB7:
+ _options.update(
+ {
+ "nthreads": (lib.GxB_DESCRIPTOR_NTHREADS, "int"),
+ "chunk": (lib.GxB_DESCRIPTOR_CHUNK, "double"),
+ }
+ )
+ else:
+ _options.update(
+ {
+ "chunk": (lib.GxB_CONTEXT_CHUNK, "double"),
+ "gpu_id": (lib.GxB_CONTEXT_GPU_ID, "int"),
+ "nthreads": (lib.GxB_CONTEXT_NTHREADS, "int"),
+ }
+ )
_enumerations = {
# GrB
"output_replace": {
@@ -71,10 +86,6 @@ class _DescriptorConfig(BaseConfig):
False: False,
True: lib.GxB_SORT,
},
- # "gpu_control": { # Coming soon...
- # "always": lib.GxB_GPU_ALWAYS,
- # "never": lib.GxB_GPU_NEVER,
- # },
}
_defaults = {
# GrB
@@ -90,7 +101,8 @@ class _DescriptorConfig(BaseConfig):
"sort": False,
"secure_import": False,
}
- _count = 0
+ if not _IS_SSGB7:
+ _defaults["gpu_id"] = -1
def __init__(self):
gb_obj = ffi_new("GrB_Descriptor*")
@@ -132,7 +144,7 @@ def get_descriptor(**opts):
sort : bool, default False
A hint for whether methods may return a "jumbled" matrix
secure_import : bool, default False
- Whether to trust the data for `import` and `pack` functions.
+ Whether to validate the data for ``import`` and ``pack`` functions.
When True, checks are performed to ensure input data is valid.
compression : str, {"none", "default", "lz4", "lz4hc", "zstd"}
Whether and how to compress the data for serialization.
@@ -145,6 +157,7 @@ def get_descriptor(**opts):
Returns
-------
Descriptor or None
+
"""
if not opts or all(val is False or val is None for val in opts.values()):
return
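Editor's note: on SuiteSparse:GraphBLAS 8+, the per-call ``nthreads``/``chunk``/``gpu_id`` options are applied through a Context attached to the descriptor rather than descriptor fields. A sketch, assuming opts are forwarded to ``get_descriptor`` as in the expression methods:

    import graphblas as gb
    from graphblas import Matrix

    A = Matrix.from_coo([0, 1], [1, 0], [1.0, 2.0])
    C = A.mxm(A).new(nthreads=4)  # a Context on SSGB 8+, descriptor fields on 7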
diff --git a/graphblas/core/ss/dtypes.py b/graphblas/core/ss/dtypes.py
new file mode 100644
index 000000000..d2eb5b416
--- /dev/null
+++ b/graphblas/core/ss/dtypes.py
@@ -0,0 +1,88 @@
+import numpy as np
+
+from ... import backend, core, dtypes
+from ...exceptions import check_status_carg
+from .. import _has_numba, ffi, lib
+from . import _IS_SSGB7
+
+ffi_new = ffi.new
+if _has_numba:
+ import numba
+ from cffi import FFI
+ from numba.core.typing import cffi_utils
+
+ jit_ffi = FFI()
+
+
+def register_new(name, jit_c_definition, *, np_type=None):
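+ """Register a new dtype with the SuiteSparse:GraphBLAS JIT compiler.
+
+ ``jit_c_definition`` must be a C struct typedef whose name matches ``name``;
+ see the validation below. Requires SuiteSparse:GraphBLAS 8 or newer.
+ """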
+ if backend != "suitesparse": # pragma: no cover (safety)
+ raise RuntimeError(
+ "`gb.dtypes.ss.register_new` invalid when not using 'suitesparse' backend"
+ )
+ if _IS_SSGB7:
+ # JIT was introduced in SuiteSparse:GraphBLAS 8.0
+ import suitesparse_graphblas as ssgb
+
+ raise RuntimeError(
+ "JIT was added to SuiteSparse:GraphBLAS in version 8; "
+ f"current version is {ssgb.__version__}"
+ )
+ if not name.isidentifier():
+ raise ValueError(f"`name` argument must be a valid Python identifier; got: {name!r}")
+ if name in core.dtypes._registry or hasattr(dtypes.ss, name):
+ raise ValueError(f"{name!r} name for dtype is unavailable")
+ if len(name) > lib.GxB_MAX_NAME_LEN:
+ raise ValueError(
+ f"`name` argument is too large. Max size is {lib.GxB_MAX_NAME_LEN}; got {len(name)}"
+ )
+ if name not in jit_c_definition:
+ raise ValueError("`name` argument must be same name as the typedef in `jit_c_definition`")
+ if "struct" not in jit_c_definition:
+ raise ValueError("Only struct typedefs are currently allowed for JIT dtypes")
+
+ gb_obj = ffi.new("GrB_Type*")
+ status = lib.GxB_Type_new(
+ gb_obj, 0, ffi_new("char[]", name.encode()), ffi_new("char[]", jit_c_definition.encode())
+ )
+ check_status_carg(status, "Type", gb_obj[0])
+
+ # Let SuiteSparse:GraphBLAS determine the size (we gave 0 as size above)
+ size_ptr = ffi_new("size_t*")
+ check_status_carg(lib.GxB_Type_size(size_ptr, gb_obj[0]), "Type", gb_obj[0])
+ size = size_ptr[0]
+
+ save_np_type = True
+ if np_type is None and _has_numba and numba.__version__[:5] > "0.56.":  # numba >= 0.57 (string compare)
+ jit_ffi.cdef(jit_c_definition)
+ numba_type = cffi_utils.map_type(jit_ffi.typeof(name), use_record_dtype=True)
+ np_type = numba_type.dtype
+ if np_type.itemsize != size: # pragma: no cover
+ raise RuntimeError(
+ "Size of compiled user-defined type does not match size of inferred numpy type: "
+ f"{size} != {np_type.itemsize} != {size}.\n\n"
+ f"UDT C definition: {jit_c_definition}\n"
+ f"numpy dtype: {np_type}\n\n"
+ "To get around this, you may pass `np_type=` keyword argument."
+ )
+ else:
+ if np_type is not None:
+ np_type = np.dtype(np_type)
+ else:
+ # Not an ideal numpy type, but minimally useful
+ np_type = np.dtype((np.uint8, size))
+ save_np_type = False
+ if _has_numba:
+ numba_type = numba.typeof(np_type).dtype
+ else:
+ numba_type = None
+
+ # For now, let's use "opaque" unsigned bytes for the c type.
+ rv = core.dtypes.DataType(name, gb_obj, None, f"uint8_t[{size}]", numba_type, np_type)
+ core.dtypes._registry[gb_obj] = rv
+ if save_np_type or np_type not in core.dtypes._registry:
+ core.dtypes._registry[np_type] = rv
+ if numba_type is not None and (save_np_type or numba_type not in core.dtypes._registry):
+ core.dtypes._registry[numba_type] = rv
+ core.dtypes._registry[numba_type.name] = rv
+ setattr(dtypes.ss, name, rv)
+ return rv
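Editor's note: a hedged sketch of registering a JIT struct dtype (requires SuiteSparse:GraphBLAS >= 8 and a configured C compiler; the ``point2`` name is illustrative):

    import graphblas as gb
    from graphblas import Vector

    point2 = gb.dtypes.ss.register_new(
        "point2", "typedef struct { double x; double y; } point2;"
    )
    v = Vector(point2, size=4)  # vector of the new struct dtype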
diff --git a/graphblas/core/ss/indexunary.py b/graphblas/core/ss/indexunary.py
new file mode 100644
index 000000000..b60837acf
--- /dev/null
+++ b/graphblas/core/ss/indexunary.py
@@ -0,0 +1,153 @@
+from ... import backend
+from ...dtypes import BOOL, lookup_dtype
+from ...exceptions import check_status_carg
+from .. import NULL, ffi, lib
+from ..operator.base import TypedOpBase
+from ..operator.indexunary import IndexUnaryOp, TypedUserIndexUnaryOp
+from . import _IS_SSGB7
+
+ffi_new = ffi.new
+
+
+class TypedJitIndexUnaryOp(TypedOpBase):
+ __slots__ = "_jit_c_definition"
+ opclass = "IndexUnaryOp"
+
+ def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition, dtype2=None):
+ super().__init__(parent, name, type_, return_type, gb_obj, name, dtype2=dtype2)
+ self._jit_c_definition = jit_c_definition
+
+ @property
+ def jit_c_definition(self):
+ return self._jit_c_definition
+
+ thunk_type = TypedUserIndexUnaryOp.thunk_type
+ __call__ = TypedUserIndexUnaryOp.__call__
+
+
+def register_new(name, jit_c_definition, input_type, thunk_type, ret_type):
+ """Register a new IndexUnaryOp using the SuiteSparse:GraphBLAS JIT compiler.
+
+ This creates an IndexUnaryOp by compiling the C string definition of the function.
+ It requires a shell call to a C compiler. The resulting operator will be as
+ fast as if it were built into SuiteSparse:GraphBLAS and does not have the
+ overhead of additional function calls as when using ``gb.indexunary.register_new``.
+
+ This is an advanced feature that requires a C compiler and proper configuration.
+ Configuration is handled by ``gb.ss.config``; see its docstring for details.
+ By default, the JIT caches results in ``~/.SuiteSparse/``. For more information,
+ see the SuiteSparse:GraphBLAS user guide.
+
+ Only one type signature may be registered at a time, but repeated calls using
+ the same name with different input types are allowed.
+
+ This will also create a SelectOp under ``gb.select.ss`` if the return
+ type is boolean.
+
+ Parameters
+ ----------
+ name : str
+ The name of the operator. This will show up as ``gb.indexunary.ss.{name}``.
+ The name may contain periods, ".", which will result in nested objects
+ such as ``gb.indexunary.ss.x.y.z`` for name ``"x.y.z"``.
+ jit_c_definition : str
+ The C definition as a string of the user-defined function. For example:
+ ``"void diffy (double *z, double *x, GrB_Index i, GrB_Index j, double *y) "``
+ ``"{ (*z) = (i + j) * fabs ((*x) - (*y)) ; }"``
+ input_type : dtype
+ The dtype of the operand of the indexunary operator.
+ thunk_type : dtype
+ The dtype of the thunk of the indexunary operator.
+ ret_type : dtype
+ The dtype of the result of the indexunary operator.
+
+ Returns
+ -------
+ IndexUnaryOp
+
+ See Also
+ --------
+ gb.indexunary.register_new
+ gb.indexunary.register_anonymous
+ gb.select.ss.register_new
+
+ """
+ if backend != "suitesparse": # pragma: no cover (safety)
+ raise RuntimeError(
+ "`gb.indexunary.ss.register_new` invalid when not using 'suitesparse' backend"
+ )
+ if _IS_SSGB7:
+ # JIT was introduced in SuiteSparse:GraphBLAS 8.0
+ import suitesparse_graphblas as ssgb
+
+ raise RuntimeError(
+ "JIT was added to SuiteSparse:GraphBLAS in version 8; "
+ f"current version is {ssgb.__version__}"
+ )
+ input_type = lookup_dtype(input_type)
+ thunk_type = lookup_dtype(thunk_type)
+ ret_type = lookup_dtype(ret_type)
+ name = name if name.startswith("ss.") else f"ss.{name}"
+ module, funcname = IndexUnaryOp._remove_nesting(name, strict=False)
+ if hasattr(module, funcname):
+ rv = getattr(module, funcname)
+ if not isinstance(rv, IndexUnaryOp):
+ IndexUnaryOp._remove_nesting(name)
+ if (
+ (input_type, thunk_type) in rv.types
+ or rv._udt_types is not None
+ and (input_type, thunk_type) in rv._udt_types
+ ):
+ raise TypeError(
+ f"IndexUnaryOp gb.indexunary.{name} already defined for "
+ f"({input_type}, {thunk_type}) input types"
+ )
+ else:
+ # We use `is_udt=True` to make dtype handling flexible and explicit.
+ rv = IndexUnaryOp(name, is_udt=True)
+ gb_obj = ffi_new("GrB_IndexUnaryOp*")
+ check_status_carg(
+ lib.GxB_IndexUnaryOp_new(
+ gb_obj,
+ NULL,
+ ret_type._carg,
+ input_type._carg,
+ thunk_type._carg,
+ ffi_new("char[]", funcname.encode()),
+ ffi_new("char[]", jit_c_definition.encode()),
+ ),
+ "IndexUnaryOp",
+ gb_obj[0],
+ )
+ op = TypedJitIndexUnaryOp(
+ rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition, dtype2=thunk_type
+ )
+ rv._add(op, is_jit=True)
+ if ret_type == BOOL:
+ from ..operator.select import SelectOp
+ from .select import TypedJitSelectOp
+
+ select_module, funcname = SelectOp._remove_nesting(name, strict=False)
+ if hasattr(select_module, funcname):
+ selectop = getattr(select_module, funcname)
+ if not isinstance(selectop, SelectOp):
+ SelectOp._remove_nesting(name)
+ if (
+ (input_type, thunk_type) in selectop.types
+ or selectop._udt_types is not None
+ and (input_type, thunk_type) in selectop._udt_types
+ ):
+ raise TypeError(
+ f"SelectOp gb.select.{name} already defined for "
+ f"({input_type}, {thunk_type}) input types"
+ )
+ else:
+ # We use `is_udt=True` to make dtype handling flexible and explicit.
+ selectop = SelectOp(name, is_udt=True)
+ op2 = TypedJitSelectOp(
+ selectop, funcname, input_type, ret_type, gb_obj[0], jit_c_definition, dtype2=thunk_type
+ )
+ selectop._add(op2, is_jit=True)
+ setattr(select_module, funcname, selectop)
+ setattr(module, funcname, rv)
+ return rv
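Editor's note: a hedged sketch using the docstring's own C definition (requires SuiteSparse:GraphBLAS >= 8 and a configured C compiler):

    import graphblas as gb

    diffy = gb.indexunary.ss.register_new(
        "diffy",
        "void diffy (double *z, double *x, GrB_Index i, GrB_Index j, double *y) "
        "{ (*z) = (i + j) * fabs ((*x) - (*y)) ; }",
        "FP64", "FP64", "FP64",
    )
    # ret_type is FP64, so no gb.select.ss.diffy is created (a BOOL return would add one)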
diff --git a/graphblas/core/ss/matrix.py b/graphblas/core/ss/matrix.py
index cac0296c7..509c56113 100644
--- a/graphblas/core/ss/matrix.py
+++ b/graphblas/core/ss/matrix.py
@@ -1,5 +1,4 @@
import itertools
-import warnings
import numpy as np
from suitesparse_graphblas.utils import claim_buffer, claim_buffer_2d, unclaim_buffer
@@ -7,10 +6,11 @@
import graphblas as gb
from ... import binary, monoid
-from ...dtypes import _INDEX, BOOL, INT64, UINT64, _string_to_dtype, lookup_dtype
+from ...dtypes import _INDEX, BOOL, INT64, UINT64, lookup_dtype
from ...exceptions import _error_code_lookup, check_status, check_status_carg
from .. import NULL, _has_numba, ffi, lib
from ..base import call
+from ..dtypes import _string_to_dtype
from ..operator import get_typed_op
from ..scalar import Scalar, _as_scalar, _scalar_index
from ..utils import (
@@ -58,12 +58,12 @@ def head(matrix, n=10, dtype=None, *, sort=False):
dtype = matrix.dtype
else:
dtype = lookup_dtype(dtype)
- rows, cols, vals = zip(*itertools.islice(matrix.ss.iteritems(), n))
+ rows, cols, vals = zip(*itertools.islice(matrix.ss.iteritems(), n), strict=True)
return np.array(rows, np.uint64), np.array(cols, np.uint64), np.array(vals, dtype.np_type)
def _concat_mn(tiles, *, is_matrix=None):
- """Argument checking for `Matrix.ss.concat` and returns number of tiles in each dimension."""
+ """Argument checking for ``Matrix.ss.concat`` and returns number of tiles in each dimension."""
from ..matrix import Matrix, TransposedMatrix
from ..vector import Vector
@@ -250,8 +250,7 @@ def orientation(self):
return "rowwise"
def build_diag(self, vector, k=0, **opts):
- """
- GxB_Matrix_diag.
+ """GxB_Matrix_diag.
Construct a diagonal Matrix from the given vector.
Existing entries in the Matrix are discarded.
@@ -261,8 +260,8 @@ def build_diag(self, vector, k=0, **opts):
vector : Vector
Create a diagonal from this Vector.
k : int, default 0
- Diagonal in question. Use `k>0` for diagonals above the main diagonal,
- and `k<0` for diagonals below the main diagonal.
+ Diagonal in question. Use ``k>0`` for diagonals above the main diagonal,
+ and ``k<0`` for diagonals below the main diagonal.
See Also
--------
@@ -279,15 +278,14 @@ def build_diag(self, vector, k=0, **opts):
)
def split(self, chunks, *, name=None, **opts):
- """
- GxB_Matrix_split.
+ """GxB_Matrix_split.
- Split a Matrix into a 2D array of sub-matrices according to `chunks`.
+ Split a Matrix into a 2D array of sub-matrices according to ``chunks``.
This performs the opposite operation as ``concat``.
- `chunks` is short for "chunksizes" and indicates the chunk sizes for each dimension.
- `chunks` may be a single integer, or a length 2 tuple or list. Example chunks:
+ ``chunks`` is short for "chunksizes" and indicates the chunk sizes for each dimension.
+ ``chunks`` may be a single integer, or a length 2 tuple or list. Example chunks:
- ``chunks=10``
- Split each dimension into chunks of size 10 (the last chunk may be smaller).
@@ -295,13 +293,14 @@ def split(self, chunks, *, name=None, **opts):
- Split rows into chunks of size 10 and columns into chunks of size 20.
- ``chunks=(None, [5, 10])``
- Don't split rows into chunks, and split columns into two chunks of size 5 and 10.
- ` ``chunks=(10, [20, None])``
+ - ``chunks=(10, [20, None])``
- Split rows into chunks of size 10, and split columns into two chunks of size 20 and ``ncols - 20``.
See Also
--------
Matrix.ss.concat
graphblas.ss.concat
+
"""
from ..matrix import Matrix
@@ -361,14 +360,13 @@ def _concat(self, tiles, m, n, opts):
)
def concat(self, tiles, **opts):
- """
- GxB_Matrix_concat.
+ """GxB_Matrix_concat.
Concatenate a 2D list of Matrix objects into the current Matrix.
Any existing values in the current Matrix will be discarded.
- To concatenate into a new Matrix, use `graphblas.ss.concat`.
+ To concatenate into a new Matrix, use ``graphblas.ss.concat``.
- Vectors may be used as `Nx1` Matrix objects.
+ Vectors may be used as ``Nx1`` Matrix objects.
This performs the opposite operation as ``split``.
@@ -376,13 +374,13 @@ def concat(self, tiles, **opts):
--------
Matrix.ss.split
graphblas.ss.concat
+
"""
tiles, m, n, is_matrix = _concat_mn(tiles, is_matrix=True)
self._concat(tiles, m, n, opts)
def build_scalar(self, rows, columns, value):
- """
- GxB_Matrix_build_Scalar.
+ """GxB_Matrix_build_Scalar.
Like ``build``, but uses a scalar for all the values.
@@ -390,6 +388,7 @@ def build_scalar(self, rows, columns, value):
--------
Matrix.build
Matrix.from_coo
+
"""
rows = ints_to_numpy_buffer(rows, np.uint64, name="row indices")
columns = ints_to_numpy_buffer(columns, np.uint64, name="column indices")
@@ -536,14 +535,13 @@ def iteritems(self, seek=0):
lib.GxB_Iterator_free(it_ptr)
def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **opts):
- """
- GxB_Matrix_export_xxx.
+ """GxB_Matrix_export_xxx.
Parameters
----------
format : str, optional
- If `format` is not specified, this method exports in the currently stored format.
- To control the export format, set `format` to one of:
+ If ``format`` is not specified, this method exports in the currently stored format.
+ To control the export format, set ``format`` to one of:
- "csr"
- "csc"
- "hypercsr"
@@ -578,7 +576,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **
Returns
-------
- dict; keys depend on `format` and `raw` arguments (see below).
+ dict; keys depend on ``format`` and ``raw`` arguments (see below).
See Also
--------
@@ -718,6 +716,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **
>>> pieces = A.ss.export()
>>> A2 = Matrix.ss.import_any(**pieces)
+
"""
return self._export(
format,
@@ -729,13 +728,12 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **
)
def unpack(self, format=None, *, sort=False, raw=False, **opts):
- """
- GxB_Matrix_unpack_xxx.
+ """GxB_Matrix_unpack_xxx.
- `unpack` is like `export`, except that the Matrix remains valid but empty.
- `pack_*` methods are the opposite of `unpack`.
+ ``unpack`` is like ``export``, except that the Matrix remains valid but empty.
+ ``pack_*`` methods are the opposite of ``unpack``.
- See `Matrix.ss.export` documentation for more details.
+ See ``Matrix.ss.export`` documentation for more details.
"""
return self._export(
format, sort=sort, raw=raw, give_ownership=True, method="unpack", opts=opts
@@ -1179,8 +1177,7 @@ def import_csr(
name=None,
**opts,
):
- """
- GxB_Matrix_import_CSR.
+ """GxB_Matrix_import_CSR.
Create a new Matrix from standard CSR format.
@@ -1193,7 +1190,7 @@ def import_csr(
col_indices : array-like
is_iso : bool, default False
Is the Matrix iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
sorted_cols : bool, default False
Indicate whether the values in "col_indices" are sorted.
take_ownership : bool, default False
@@ -1210,7 +1207,7 @@ def import_csr(
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Matrix.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "csr" or None. This is included to be compatible with
the dict returned from exporting.
@@ -1220,6 +1217,7 @@ def import_csr(
Returns
-------
Matrix
+
"""
return cls._import_csr(
nrows=nrows,
@@ -1256,13 +1254,12 @@ def pack_csr(
name=None,
**opts,
):
- """
- GxB_Matrix_pack_CSR.
+ """GxB_Matrix_pack_CSR.
- `pack_csr` is like `import_csr` except it "packs" data into an
+ ``pack_csr`` is like ``import_csr`` except it "packs" data into an
existing Matrix. This is the opposite of ``unpack("csr")``
- See `Matrix.ss.import_csr` documentation for more details.
+ See ``Matrix.ss.import_csr`` documentation for more details.
"""
return self._import_csr(
indptr=indptr,
@@ -1369,8 +1366,7 @@ def import_csc(
name=None,
**opts,
):
- """
- GxB_Matrix_import_CSC.
+ """GxB_Matrix_import_CSC.
Create a new Matrix from standard CSC format.
@@ -1383,7 +1379,7 @@ def import_csc(
row_indices : array-like
is_iso : bool, default False
Is the Matrix iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
sorted_rows : bool, default False
Indicate whether the values in "row_indices" are sorted.
take_ownership : bool, default False
@@ -1400,7 +1396,7 @@ def import_csc(
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Matrix.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "csc" or None. This is included to be compatible with
the dict returned from exporting.
@@ -1410,6 +1406,7 @@ def import_csc(
Returns
-------
Matrix
+
"""
return cls._import_csc(
nrows=nrows,
@@ -1446,13 +1443,12 @@ def pack_csc(
name=None,
**opts,
):
- """
- GxB_Matrix_pack_CSC.
+ """GxB_Matrix_pack_CSC.
- `pack_csc` is like `import_csc` except it "packs" data into an
+ ``pack_csc`` is like ``import_csc`` except it "packs" data into an
existing Matrix. This is the opposite of ``unpack("csc")``
- See `Matrix.ss.import_csc` documentation for more details.
+ See ``Matrix.ss.import_csc`` documentation for more details.
"""
return self._import_csc(
indptr=indptr,
@@ -1561,8 +1557,7 @@ def import_hypercsr(
name=None,
**opts,
):
- """
- GxB_Matrix_import_HyperCSR.
+ """GxB_Matrix_import_HyperCSR.
Create a new Matrix from standard HyperCSR format.
@@ -1579,7 +1574,7 @@ def import_hypercsr(
If not specified, will be set to ``len(rows)``.
is_iso : bool, default False
Is the Matrix iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
sorted_cols : bool, default False
Indicate whether the values in "col_indices" are sorted.
take_ownership : bool, default False
@@ -1596,7 +1591,7 @@ def import_hypercsr(
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Matrix.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "hypercsr" or None. This is included to be compatible with
the dict returned from exporting.
@@ -1606,6 +1601,7 @@ def import_hypercsr(
Returns
-------
Matrix
+
"""
return cls._import_hypercsr(
nrows=nrows,
@@ -1646,13 +1642,12 @@ def pack_hypercsr(
name=None,
**opts,
):
- """
- GxB_Matrix_pack_HyperCSR.
+ """GxB_Matrix_pack_HyperCSR.
- `pack_hypercsr` is like `import_hypercsr` except it "packs" data into an
+ ``pack_hypercsr`` is like ``import_hypercsr`` except it "packs" data into an
existing Matrix. This is the opposite of ``unpack("hypercsr")``
- See `Matrix.ss.import_hypercsr` documentation for more details.
+ See ``Matrix.ss.import_hypercsr`` documentation for more details.
"""
return self._import_hypercsr(
rows=rows,
@@ -1785,8 +1780,7 @@ def import_hypercsc(
name=None,
**opts,
):
- """
- GxB_Matrix_import_HyperCSC.
+ """GxB_Matrix_import_HyperCSC.
Create a new Matrix from standard HyperCSC format.
@@ -1803,7 +1797,7 @@ def import_hypercsc(
If not specified, will be set to ``len(cols)``.
is_iso : bool, default False
Is the Matrix iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
sorted_rows : bool, default False
Indicate whether the values in "row_indices" are sorted.
take_ownership : bool, default False
@@ -1820,7 +1814,7 @@ def import_hypercsc(
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Matrix.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "hypercsc" or None. This is included to be compatible with
the dict returned from exporting.
@@ -1830,6 +1824,7 @@ def import_hypercsc(
Returns
-------
Matrix
+
"""
return cls._import_hypercsc(
nrows=nrows,
@@ -1870,13 +1865,12 @@ def pack_hypercsc(
name=None,
**opts,
):
- """
- GxB_Matrix_pack_HyperCSC.
+ """GxB_Matrix_pack_HyperCSC.
- `pack_hypercsc` is like `import_hypercsc` except it "packs" data into an
+ ``pack_hypercsc`` is like ``import_hypercsc`` except it "packs" data into an
existing Matrix. This is the opposite of ``unpack("hypercsc")``.
- See `Matrix.ss.import_hypercsc` documentation for more details.
+ See ``Matrix.ss.import_hypercsc`` documentation for more details.
"""
return self._import_hypercsc(
cols=cols,
@@ -2006,8 +2000,7 @@ def import_bitmapr(
name=None,
**opts,
):
- """
- GxB_Matrix_import_BitmapR.
+ """GxB_Matrix_import_BitmapR.
Create a new Matrix from values and bitmap (as mask) arrays.
@@ -2028,7 +2021,7 @@ def import_bitmapr(
If not provided, will be inferred from values or bitmap if either is 2d.
is_iso : bool, default False
Is the Matrix iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
take_ownership : bool, default False
If True, perform a zero-copy data transfer from input numpy arrays
to GraphBLAS if possible. To give ownership of the underlying
@@ -2043,7 +2036,7 @@ def import_bitmapr(
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Matrix.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "bitmapr" or None. This is included to be compatible with
the dict returned from exporting.
@@ -2053,6 +2046,7 @@ def import_bitmapr(
Returns
-------
Matrix
+
"""
return cls._import_bitmapr(
bitmap=bitmap,
@@ -2087,13 +2081,12 @@ def pack_bitmapr(
name=None,
**opts,
):
- """
- GxB_Matrix_pack_BitmapR.
+ """GxB_Matrix_pack_BitmapR.
- `pack_bitmapr` is like `import_bitmapr` except it "packs" data into an
+ ``pack_bitmapr`` is like ``import_bitmapr`` except it "packs" data into an
existing Matrix. This is the opposite of ``unpack("bitmapr")``.
- See `Matrix.ss.import_bitmapr` documentation for more details.
+ See ``Matrix.ss.import_bitmapr`` documentation for more details.
"""
return self._import_bitmapr(
bitmap=bitmap,
@@ -2199,8 +2192,7 @@ def import_bitmapc(
name=None,
**opts,
):
- """
- GxB_Matrix_import_BitmapC.
+ """GxB_Matrix_import_BitmapC.
Create a new Matrix from values and bitmap (as mask) arrays.
@@ -2221,7 +2213,7 @@ def import_bitmapc(
If not provided, will be inferred from values or bitmap if either is 2d.
is_iso : bool, default False
Is the Matrix iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
take_ownership : bool, default False
If True, perform a zero-copy data transfer from input numpy arrays
to GraphBLAS if possible. To give ownership of the underlying
@@ -2236,7 +2228,7 @@ def import_bitmapc(
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Matrix.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "bitmapc" or None. This is included to be compatible with
the dict returned from exporting.
@@ -2246,6 +2238,7 @@ def import_bitmapc(
Returns
-------
Matrix
+
"""
return cls._import_bitmapc(
bitmap=bitmap,
@@ -2280,13 +2273,12 @@ def pack_bitmapc(
name=None,
**opts,
):
- """
- GxB_Matrix_pack_BitmapC.
+ """GxB_Matrix_pack_BitmapC.
- `pack_bitmapc` is like `import_bitmapc` except it "packs" data into an
+ ``pack_bitmapc`` is like ``import_bitmapc`` except it "packs" data into an
existing Matrix. This is the opposite of ``unpack("bitmapc")``.
- See `Matrix.ss.import_bitmapc` documentation for more details.
+ See ``Matrix.ss.import_bitmapc`` documentation for more details.
"""
return self._import_bitmapc(
bitmap=bitmap,
@@ -2390,8 +2382,7 @@ def import_fullr(
name=None,
**opts,
):
- """
- GxB_Matrix_import_FullR.
+ """GxB_Matrix_import_FullR.
Create a new Matrix from values.
@@ -2407,7 +2398,7 @@ def import_fullr(
If not provided, will be inferred from values if it is 2d.
is_iso : bool, default False
Is the Matrix iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
take_ownership : bool, default False
If True, perform a zero-copy data transfer from input numpy arrays
to GraphBLAS if possible. To give ownership of the underlying
@@ -2422,7 +2413,7 @@ def import_fullr(
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Matrix.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "fullr" or None. This is included to be compatible with
the dict returned from exporting.
@@ -2432,6 +2423,7 @@ def import_fullr(
Returns
-------
Matrix
+
"""
return cls._import_fullr(
values=values,
@@ -2462,13 +2454,12 @@ def pack_fullr(
name=None,
**opts,
):
- """
- GxB_Matrix_pack_FullR.
+ """GxB_Matrix_pack_FullR.
- `pack_fullr` is like `import_fullr` except it "packs" data into an
+ ``pack_fullr`` is like ``import_fullr`` except it "packs" data into an
existing Matrix. This is the opposite of ``unpack("fullr")``.
- See `Matrix.ss.import_fullr` documentation for more details.
+ See ``Matrix.ss.import_fullr`` documentation for more details.
"""
return self._import_fullr(
values=values,
@@ -2549,8 +2540,7 @@ def import_fullc(
name=None,
**opts,
):
- """
- GxB_Matrix_import_FullC.
+ """GxB_Matrix_import_FullC.
Create a new Matrix from values.
@@ -2566,7 +2556,7 @@ def import_fullc(
If not provided, will be inferred from values if it is 2d.
is_iso : bool, default False
Is the Matrix iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
take_ownership : bool, default False
If True, perform a zero-copy data transfer from input numpy arrays
to GraphBLAS if possible. To give ownership of the underlying
@@ -2581,7 +2571,7 @@ def import_fullc(
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Matrix.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "fullc" or None. This is included to be compatible with
the dict returned from exporting.
@@ -2591,6 +2581,7 @@ def import_fullc(
Returns
-------
Matrix
+
"""
return cls._import_fullc(
values=values,
@@ -2621,13 +2612,12 @@ def pack_fullc(
name=None,
**opts,
):
- """
- GxB_Matrix_pack_FullC.
+ """GxB_Matrix_pack_FullC.
- `pack_fullc` is like `import_fullc` except it "packs" data into an
+ ``pack_fullc`` is like ``import_fullc`` except it "packs" data into an
existing Matrix. This is the opposite of ``unpack("fullc")``.
- See `Matrix.ss.import_fullc` documentation for more details.
+ See ``Matrix.ss.import_fullc`` documentation for more details.
"""
return self._import_fullc(
values=values,
@@ -2711,8 +2701,7 @@ def import_coo(
name=None,
**opts,
):
- """
- GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar.
+ """GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar.
Create a new Matrix from indices and values in coordinate format.
@@ -2727,7 +2716,7 @@ def import_coo(
The number of columns for the Matrix.
is_iso : bool, default False
Is the Matrix iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
sorted_rows : bool, default False
True if rows are sorted or when (cols, rows) are sorted lexicographically
sorted_cols : bool, default False
@@ -2736,7 +2725,7 @@ def import_coo(
Ignored. Zero-copy is not possible for "coo" format.
dtype : dtype, optional
dtype of the new Matrix.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "coo" or None. This is included to be compatible with
the dict returned from exporting.
@@ -2746,6 +2735,7 @@ def import_coo(
Returns
-------
Matrix
+
"""
return cls._import_coo(
rows=rows,
@@ -2784,13 +2774,12 @@ def pack_coo(
name=None,
**opts,
):
- """
- GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar.
+ """GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar.
- `pack_coo` is like `import_coo` except it "packs" data into an
+ ``pack_coo`` is like ``import_coo`` except it "packs" data into an
existing Matrix. This is the opposite of ``unpack("coo")``.
- See `Matrix.ss.import_coo` documentation for more details.
+ See ``Matrix.ss.import_coo`` documentation for more details.
"""
return self._import_coo(
nrows=self._parent._nrows,
@@ -2897,8 +2886,7 @@ def import_coor(
name=None,
**opts,
):
- """
- GxB_Matrix_import_CSR.
+ """GxB_Matrix_import_CSR.
Create a new Matrix from indices and values in coordinate format.
Rows must be sorted.
@@ -2914,7 +2902,7 @@ def import_coor(
The number of columns for the Matrix.
is_iso : bool, default False
Is the Matrix iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
sorted_cols : bool, default False
True indicates indices are sorted by row, then column.
take_ownership : bool, default False
@@ -2932,7 +2920,7 @@ def import_coor(
For "coor", ownership of "rows" will never change.
dtype : dtype, optional
dtype of the new Matrix.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "coor" or None. This is included to be compatible with
the dict returned from exporting.
@@ -2942,6 +2930,7 @@ def import_coor(
Returns
-------
Matrix
+
"""
return cls._import_coor(
rows=rows,
@@ -2980,13 +2969,12 @@ def pack_coor(
name=None,
**opts,
):
- """
- GxB_Matrix_pack_CSR.
+ """GxB_Matrix_pack_CSR.
- `pack_coor` is like `import_coor` except it "packs" data into an
+ ``pack_coor`` is like ``import_coor`` except it "packs" data into an
existing Matrix. This is the opposite of ``unpack("coor")``.
- See `Matrix.ss.import_coor` documentation for more details.
+ See ``Matrix.ss.import_coor`` documentation for more details.
"""
return self._import_coor(
rows=rows,
@@ -3066,8 +3054,7 @@ def import_cooc(
name=None,
**opts,
):
- """
- GxB_Matrix_import_CSC.
+ """GxB_Matrix_import_CSC.
Create a new Matrix from indices and values in coordinate format.
Cols must be sorted.
@@ -3083,7 +3070,7 @@ def import_cooc(
The number of columns for the Matrix.
is_iso : bool, default False
Is the Matrix iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
sorted_rows : bool, default False
True indicates indices are sorted by column, then row.
take_ownership : bool, default False
@@ -3101,7 +3088,7 @@ def import_cooc(
For "cooc", ownership of "cols" will never change.
dtype : dtype, optional
dtype of the new Matrix.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "cooc" or None. This is included to be compatible with
the dict returned from exporting.
@@ -3111,6 +3098,7 @@ def import_cooc(
Returns
-------
Matrix
+
"""
return cls._import_cooc(
rows=rows,
@@ -3149,13 +3137,12 @@ def pack_cooc(
name=None,
**opts,
):
- """
- GxB_Matrix_pack_CSC.
+ """GxB_Matrix_pack_CSC.
- `pack_cooc` is like `import_cooc` except it "packs" data into an
+ ``pack_cooc`` is like ``import_cooc`` except it "packs" data into an
existing Matrix. This is the opposite of ``unpack("cooc")``.
- See `Matrix.ss.import_cooc` documentation for more details.
+ See ``Matrix.ss.import_cooc`` documentation for more details.
"""
return self._import_cooc(
ncols=self._parent._ncols,
@@ -3251,11 +3238,10 @@ def import_any(
nvals=None, # optional
**opts,
):
- """
- GxB_Matrix_import_xxx.
+ """GxB_Matrix_import_xxx.
Dispatch to appropriate import method inferred from inputs.
- See the other import functions and `Matrix.ss.export`` for details.
+ See the other import functions and ``Matrix.ss.export`` for details.
Returns
-------
@@ -3280,6 +3266,7 @@ def import_any(
>>> pieces = A.ss.export()
>>> A2 = Matrix.ss.import_any(**pieces)
+
"""
return cls._import_any(
values=values,
@@ -3349,13 +3336,12 @@ def pack_any(
name=None,
**opts,
):
- """
- GxB_Matrix_pack_xxx.
+ """GxB_Matrix_pack_xxx.
- `pack_any` is like `import_any` except it "packs" data into an
+ ``pack_any`` is like ``import_any`` except it "packs" data into an
existing Matrix. This is the opposite of ``unpack()``.
- See `Matrix.ss.import_any` documentation for more details.
+ See ``Matrix.ss.import_any`` documentation for more details.
"""
return self._import_any(
values=values,
@@ -3664,8 +3650,10 @@ def _import_any(
def unpack_hyperhash(self, *, compute=False, name=None, **opts):
"""Unpacks the hyper_hash of a hypersparse matrix if possible.
- Will return None if the matrix is not hypersparse or if the hash is not computed.
- Use ``compute=True`` to compute the hyper_hash if the input is hypersparse.
+ Will return None if the matrix is not hypersparse, if the hash is not computed,
+ or if the hash is not needed. Use ``compute=True`` to try to compute the hyper_hash
+ if the input is hypersparse. The hyper_hash is optional in SuiteSparse:GraphBLAS,
+ so it may not be computed even with ``compute=True``.
Use ``pack_hyperhash`` to move a hyper_hash matrix that was previously unpacked
back into a matrix.
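For illustration, the round trip described above might look like this (a minimal sketch; ``A`` is assumed to be a hypersparse ``Matrix``):

```python
# Sketch only: the hyper_hash is optional in SuiteSparse:GraphBLAS,
# so unpack may return None even with compute=True.
Y = A.ss.unpack_hyperhash(compute=True)
if Y is not None:
    A.ss.pack_hyperhash(Y)  # move the hyper_hash matrix back into A
```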
@@ -3701,12 +3689,13 @@ def head(self, n=10, dtype=None, *, sort=False):
def scan(self, op=monoid.plus, order="rowwise", *, name=None, **opts):
"""Perform a prefix scan across rows (default) or columns with the given monoid.
- For example, use `monoid.plus` (the default) to perform a cumulative sum,
- and `monoid.times` for cumulative product. Works with any monoid.
+ For example, use ``monoid.plus`` (the default) to perform a cumulative sum,
+ and ``monoid.times`` for cumulative product. Works with any monoid.
Returns
-------
Matrix
+
"""
order = get_order(order)
parent = self._parent
@@ -3714,51 +3703,6 @@ def scan(self, op=monoid.plus, order="rowwise", *, name=None, **opts):
parent = parent.T
return prefix_scan(parent, op, name=name, within="scan", **opts)
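A small sketch of the prefix scan (the example data is hypothetical):

```python
import graphblas as gb
from graphblas import monoid

A = gb.Matrix.from_coo([0, 0, 0], [0, 1, 2], [1, 2, 3])
A.ss.scan()                                  # rowwise cumulative sum: 1, 3, 6
A.ss.scan(monoid.times, order="columnwise")  # columnwise cumulative product
```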
- def scan_columnwise(self, op=monoid.plus, *, name=None, **opts):
- """Perform a prefix scan across columns with the given monoid.
-
- .. deprecated:: 2022.11.1
- `Matrix.ss.scan_columnwise` will be removed in a future release.
- Use `Matrix.ss.scan(order="columnwise")` instead.
- Will be removed in version 2023.7.0 or later
-
- For example, use `monoid.plus` (the default) to perform a cumulative sum,
- and `monoid.times` for cumulative product. Works with any monoid.
-
- Returns
- -------
- Matrix
- """
- warnings.warn(
- "`Matrix.ss.scan_columnwise` is deprecated; "
- 'please use `Matrix.ss.scan(order="columnwise")` instead.',
- DeprecationWarning,
- stacklevel=2,
- )
- return prefix_scan(self._parent.T, op, name=name, within="scan_columnwise", **opts)
-
- def scan_rowwise(self, op=monoid.plus, *, name=None, **opts):
- """Perform a prefix scan across rows with the given monoid.
-
- .. deprecated:: 2022.11.1
- `Matrix.ss.scan_rowwise` will be removed in a future release.
- Use `Matrix.ss.scan` instead.
- Will be removed in version 2023.7.0 or later
-
- For example, use `monoid.plus` (the default) to perform a cumulative sum,
- and `monoid.times` for cumulative product. Works with any monoid.
-
- Returns
- -------
- Matrix
- """
- warnings.warn(
- "`Matrix.ss.scan_rowwise` is deprecated; please use `Matrix.ss.scan` instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return prefix_scan(self._parent, op, name=name, within="scan_rowwise", **opts)
-
def flatten(self, order="rowwise", *, name=None, **opts):
"""Return a copy of the Matrix collapsed into a Vector.
@@ -3780,6 +3724,7 @@ def flatten(self, order="rowwise", *, name=None, **opts):
See Also
--------
Vector.ss.reshape : copy a Vector to a Matrix.
+
"""
rv = self.reshape(-1, 1, order=order, name=name, **opts)
return rv._as_vector()
@@ -3816,6 +3761,7 @@ def reshape(self, nrows, ncols=None, order="rowwise", *, inplace=False, name=Non
--------
Matrix.ss.flatten : flatten a Matrix into a Vector.
Vector.ss.reshape : copy a Vector to a Matrix.
+
"""
from ..matrix import Matrix
@@ -3870,6 +3816,7 @@ def selectk(self, how, k, order="rowwise", *, name=None):
The number of elements to choose from each row
**THIS API IS EXPERIMENTAL AND MAY CHANGE**
+
"""
# TODO: largest, smallest, random_weighted
order = get_order(order)
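For example (a sketch; as noted, this API is experimental and ``A`` is a hypothetical ``Matrix``):

```python
sample = A.ss.selectk("random", 2)                     # up to 2 random entries per row
firsts = A.ss.selectk("first", 1, order="columnwise")  # first entry of each column
```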
@@ -3900,99 +3847,6 @@ def selectk(self, how, k, order="rowwise", *, name=None):
k, fmt, indices, sort_axis, choose_func, is_random, do_sort, name
)
- def selectk_rowwise(self, how, k, *, name=None): # pragma: no cover (deprecated)
- """Select (up to) k elements from each row.
-
- .. deprecated:: 2022.11.1
- `Matrix.ss.selectk_rowwise` will be removed in a future release.
- Use `Matrix.ss.selectk` instead.
- Will be removed in version 2023.7.0 or later
-
- Parameters
- ----------
- how : str
- "random": choose k elements with equal probability
- "first": choose the first k elements
- "last": choose the last k elements
- k : int
- The number of elements to choose from each row
-
- **THIS API IS EXPERIMENTAL AND MAY CHANGE**
- """
- warnings.warn(
- "`Matrix.ss.selectk_rowwise` is deprecated; please use `Matrix.ss.selectk` instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- how = how.lower()
- fmt = "hypercsr"
- indices = "col_indices"
- sort_axis = "sorted_cols"
- if how == "random":
- choose_func = choose_random
- is_random = True
- do_sort = False
- elif how == "first":
- choose_func = choose_first
- is_random = False
- do_sort = True
- elif how == "last":
- choose_func = choose_last
- is_random = False
- do_sort = True
- else:
- raise ValueError('`how` argument must be one of: "random", "first", "last"')
- return self._select_random(
- k, fmt, indices, sort_axis, choose_func, is_random, do_sort, name
- )
-
- def selectk_columnwise(self, how, k, *, name=None): # pragma: no cover (deprecated)
- """Select (up to) k elements from each column.
-
- .. deprecated:: 2022.11.1
- `Matrix.ss.selectk_columnwise` will be removed in a future release.
- Use `Matrix.ss.selectk(order="columnwise")` instead.
- Will be removed in version 2023.7.0 or later
-
- Parameters
- ----------
- how : str
- - "random": choose elements with equal probability
- - "first": choose the first k elements
- - "last": choose the last k elements
- k : int
- The number of elements to choose from each column
-
- **THIS API IS EXPERIMENTAL AND MAY CHANGE**
- """
- warnings.warn(
- "`Matrix.ss.selectk_columnwise` is deprecated; "
- 'please use `Matrix.ss.selectk(order="columnwise")` instead.',
- DeprecationWarning,
- stacklevel=2,
- )
- how = how.lower()
- fmt = "hypercsc"
- indices = "row_indices"
- sort_axis = "sorted_rows"
- if how == "random":
- choose_func = choose_random
- is_random = True
- do_sort = False
- elif how == "first":
- choose_func = choose_first
- is_random = False
- do_sort = True
- elif how == "last":
- choose_func = choose_last
- is_random = False
- do_sort = True
- else:
- raise ValueError('`how` argument must be one of: "random", "first", "last"')
- return self._select_random(
- k, fmt, indices, sort_axis, choose_func, is_random, do_sort, name
- )
-
def _select_random(self, k, fmt, indices, sort_axis, choose_func, is_random, do_sort, name):
if k < 0:
raise ValueError("negative k is not allowed")
@@ -4057,92 +3911,6 @@ def compactify(
indices = "row_indices"
return self._compactify(how, reverse, asindex, dimname, k, fmt, indices, name)
- def compactify_rowwise(
- self, how="first", ncols=None, *, reverse=False, asindex=False, name=None
- ):
- """Shift all values to the left so all values in a row are contiguous.
-
- This returns a new Matrix.
-
- Parameters
- ----------
- how : {"first", "last", "smallest", "largest", "random"}, optional
- How to compress the values:
- - first : take the values furthest to the left
- - last : take the values furthest to the right
- - smallest : take the smallest values (if tied, may take any)
- - largest : take the largest values (if tied, may take any)
- - random : take values randomly with equal probability and without replacement
- Chosen values may not be ordered randomly
- reverse : bool, default False
- Reverse the values in each row when True
- asindex : bool, default False
- Return the column index of the value when True. If there are ties for
- "smallest" and "largest", then any valid index may be returned.
- ncols : int, optional
- The number of columns of the returned Matrix. If not specified, then
- the Matrix will be "compacted" to the smallest ncols that doesn't lose
- values.
-
- **THIS API IS EXPERIMENTAL AND MAY CHANGE**
-
- See Also
- --------
- Matrix.ss.sort
- """
- warnings.warn(
- "`Matrix.ss.compactify_rowwise` is deprecated; "
- "please use `Matrix.ss.compactify` instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return self._compactify(
- how, reverse, asindex, "ncols", ncols, "hypercsr", "col_indices", name
- )
-
- def compactify_columnwise(
- self, how="first", nrows=None, *, reverse=False, asindex=False, name=None
- ):
- """Shift all values to the top so all values in a column are contiguous.
-
- This returns a new Matrix.
-
- Parameters
- ----------
- how : {"first", "last", "smallest", "largest", "random"}, optional
- How to compress the values:
- - first : take the values furthest to the top
- - last : take the values furthest to the bottom
- - smallest : take the smallest values (if tied, may take any)
- - largest : take the largest values (if tied, may take any)
- - random : take values randomly with equal probability and without replacement
- Chosen values may not be ordered randomly
- reverse : bool, default False
- Reverse the values in each column when True
- asindex : bool, default False
- Return the row index of the value when True. If there are ties for
- "smallest" and "largest", then any valid index may be returned.
- nrows : int, optional
- The number of rows of the returned Matrix. If not specified, then
- the Matrix will be "compacted" to the smallest nrows that doesn't lose
- values.
-
- **THIS API IS EXPERIMENTAL AND MAY CHANGE**
-
- See Also
- --------
- Matrix.ss.sort
- """
- warnings.warn(
- "`Matrix.ss.compactify_columnwise` is deprecated; "
- 'please use `Matrix.ss.compactify(order="columnwise")` instead.',
- DeprecationWarning,
- stacklevel=2,
- )
- return self._compactify(
- how, reverse, asindex, "nrows", nrows, "hypercsc", "row_indices", name
- )
-
def _compactify(self, how, reverse, asindex, nkey, nval, fmt, indices_name, name):
how = how.lower()
if how not in {"first", "last", "smallest", "largest", "random"}:
@@ -4216,23 +3984,23 @@ def sort(self, op=binary.lt, order="rowwise", *, values=True, permutation=True,
"""GxB_Matrix_sort to sort values along the rows (default) or columns of the Matrix.
Sorting moves all the elements to the left (if rowwise) or top (if columnwise) just
- like `compactify`. The returned matrices will be the same shape as the input Matrix.
+ like ``compactify``. The returned matrices will be the same shape as the input Matrix.
Parameters
----------
op : :class:`~graphblas.core.operator.BinaryOp`, optional
Binary operator with a bool return type used to sort the values.
- For example, `binary.lt` (the default) sorts the smallest elements first.
+ For example, ``binary.lt`` (the default) sorts the smallest elements first.
Ties are broken according to indices (smaller first).
order : {"rowwise", "columnwise"}, optional
Whether to sort rowwise or columnwise. Rowwise shifts all values to the left,
and columnwise shifts all values to the top. The default is "rowwise".
values : bool, default=True
- Whether to return values; will return `None` for values if `False`.
+ Whether to return values; will return ``None`` for values if ``False``.
permutation : bool, default=True
Whether to compute the permutation Matrix that has the original column
indices (if rowwise) or row indices (if columnwise) of the sorted values.
- Will return None if `False`.
+ Will return None if ``False``.
nthreads : int, optional
The maximum number of threads to use for this operation.
None, 0 or negative nthreads means to use the default number of threads.
@@ -4245,6 +4013,7 @@ def sort(self, op=binary.lt, order="rowwise", *, values=True, permutation=True,
See Also
--------
Matrix.ss.compactify
+
"""
from ..matrix import Matrix
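A sketch of typical usage (hypothetical data; the exact values returned for ties may vary):

```python
import graphblas as gb
from graphblas import binary

A = gb.Matrix.from_coo([0, 0, 1], [0, 2, 1], [3, 1, 2])
values, perm = A.ss.sort()                           # each row ascending (binary.lt)
values, _ = A.ss.sort(binary.gt, permutation=False)  # descending; permutation is None
```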
@@ -4301,16 +4070,32 @@ def serialize(self, compression="default", level=None, **opts):
None, 0 or negative nthreads means to use the default number of threads.
For best performance, this function returns a numpy array with uint8 dtype.
- Use `Matrix.ss.deserialize(blob)` to create a Matrix from the result of serialization
+ Use ``Matrix.ss.deserialize(blob)`` to create a Matrix from the result of serialization.
This method is intended to support all serialization options from SuiteSparse:GraphBLAS.
*Warning*: Behavior of serializing UDTs is experimental and may change in a future release.
+
"""
desc = get_descriptor(compression=compression, compression_level=level, **opts)
blob_handle = ffi_new("void**")
blob_size_handle = ffi_new("GrB_Index*")
parent = self._parent
+ if parent.dtype._is_udt and hasattr(lib, "GrB_Type_get_String"):
+ # Get the name from the dtype and set it to the name of the matrix so we can
+ # recreate the UDT. This is a bit hacky and we should restore the original name.
+ # First get the size of name.
+ dtype_size = ffi_new("size_t*")
+ status = lib.GrB_Type_get_SIZE(parent.dtype.gb_obj[0], dtype_size, lib.GrB_NAME)
+ check_status_carg(status, "Type", parent.dtype.gb_obj[0])
+ # Then get the name
+ dtype_char = ffi_new(f"char[{dtype_size[0]}]")
+ status = lib.GrB_Type_get_String(parent.dtype.gb_obj[0], dtype_char, lib.GrB_NAME)
+ check_status_carg(status, "Type", parent.dtype.gb_obj[0])
+ # Then set the name
+ status = lib.GrB_Matrix_set_String(parent._carg, dtype_char, lib.GrB_NAME)
+ check_status_carg(status, "Matrix", parent._carg)
+
check_status(
lib.GxB_Matrix_serialize(
blob_handle,
@@ -4327,7 +4112,7 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts):
"""Deserialize a Matrix from bytes, buffer, or numpy array using GxB_Matrix_deserialize.
The data should have been previously serialized with a compatible version of
- SuiteSparse:GraphBLAS. For example, from the result of `data = matrix.ss.serialize()`.
+ SuiteSparse:GraphBLAS. For example, from the result of ``data = matrix.ss.serialize()``.
Examples
--------
@@ -4345,14 +4130,15 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts):
nthreads : int, optional
The maximum number of threads to use when deserializing.
None, 0 or negative nthreads means to use the default number of threads.
+
"""
if isinstance(data, np.ndarray):
data = ints_to_numpy_buffer(data, np.uint8)
else:
data = np.frombuffer(data, np.uint8)
data_obj = ffi.from_buffer("void*", data)
- # Get the dtype name first
if dtype is None:
+ # Get the dtype name first (for non-UDTs)
cname = ffi_new(f"char[{lib.GxB_MAX_NAME_LEN}]")
info = lib.GxB_deserialize_type_name(
cname,
@@ -4362,6 +4148,22 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts):
if info != lib.GrB_SUCCESS:
raise _error_code_lookup[info]("Matrix deserialize failed to get the dtype name")
dtype_name = b"".join(itertools.takewhile(b"\x00".__ne__, cname)).decode()
+ if not dtype_name and hasattr(lib, "GxB_Serialized_get_String"):
+ # Handle UDTs. First get the size of name
+ dtype_size = ffi_new("size_t*")
+ info = lib.GxB_Serialized_get_SIZE(data_obj, dtype_size, lib.GrB_NAME, data.nbytes)
+ if info != lib.GrB_SUCCESS:
+ raise _error_code_lookup[info](
+ "Matrix deserialize failed to get the size of name"
+ )
+ # Then get the name
+ dtype_char = ffi_new(f"char[{dtype_size[0]}]")
+ info = lib.GxB_Serialized_get_String(
+ data_obj, dtype_char, lib.GrB_NAME, data.nbytes
+ )
+ if info != lib.GrB_SUCCESS:
+ raise _error_code_lookup[info]("Matrix deserialize failed to get the name")
+ dtype_name = ffi.string(dtype_char).decode()
dtype = _string_to_dtype(dtype_name)
else:
dtype = lookup_dtype(dtype)
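Taken together, the serialize/deserialize pair above is intended to round-trip (a minimal sketch with hypothetical data):

```python
import graphblas as gb

A = gb.Matrix.from_coo([0, 1], [1, 0], [1.5, 2.5])
blob = A.ss.serialize()              # uint8 numpy array, "default" compression
A2 = gb.Matrix.ss.deserialize(blob)  # dtype is recovered from the blob
assert A.isequal(A2, check_dtype=True)
```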
diff --git a/graphblas/core/ss/select.py b/graphblas/core/ss/select.py
new file mode 100644
index 000000000..3ba135eee
--- /dev/null
+++ b/graphblas/core/ss/select.py
@@ -0,0 +1,89 @@
+from ... import backend, indexunary
+from ...dtypes import BOOL, lookup_dtype
+from .. import ffi
+from ..operator.base import TypedOpBase
+from ..operator.select import SelectOp, TypedUserSelectOp
+from . import _IS_SSGB7
+
+ffi_new = ffi.new
+
+
+class TypedJitSelectOp(TypedOpBase):
+ __slots__ = "_jit_c_definition"
+ opclass = "SelectOp"
+
+ def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition, dtype2=None):
+ super().__init__(parent, name, type_, return_type, gb_obj, name, dtype2=dtype2)
+ self._jit_c_definition = jit_c_definition
+
+ @property
+ def jit_c_definition(self):
+ return self._jit_c_definition
+
+ thunk_type = TypedUserSelectOp.thunk_type
+ __call__ = TypedUserSelectOp.__call__
+
+
+def register_new(name, jit_c_definition, input_type, thunk_type):
+ """Register a new SelectOp using the SuiteSparse:GraphBLAS JIT compiler.
+
+ This creates a SelectOp by compiling the C string definition of the function.
+ It requires a shell call to a C compiler. The resulting operator will be as
+ fast as if it were built into SuiteSparse:GraphBLAS and avoids the overhead
+ of the additional function calls incurred when using ``gb.select.register_new``.
+
+ This is an advanced feature that requires a C compiler and proper configuration.
+ Configuration is handled by ``gb.ss.config``; see its docstring for details.
+ By default, the JIT caches results in ``~/.SuiteSparse/``. For more information,
+ see the SuiteSparse:GraphBLAS user guide.
+
+ Only one type signature may be registered at a time, but repeated calls using
+ the same name with different input types are allowed.
+
+ This will also create an IndexUnary operator under ``gb.indexunary.ss``.
+
+ Parameters
+ ----------
+ name : str
+ The name of the operator. This will show up as ``gb.select.ss.{name}``.
+ The name may contain periods, ".", which will result in nested objects
+ such as ``gb.select.ss.x.y.z`` for name ``"x.y.z"``.
+ jit_c_definition : str
+ The C definition as a string of the user-defined function. For example:
+ ``"void woot (bool *z, const int32_t *x, GrB_Index i, GrB_Index j, int32_t *y) "``
+ ``"{ (*z) = ((*x) + i + j == (*y)) ; }"``
+ input_type : dtype
+ The dtype of the operand of the select operator.
+ thunk_type : dtype
+ The dtype of the thunk of the select operator.
+
+ Returns
+ -------
+ SelectOp
+
+ See Also
+ --------
+ gb.select.register_new
+ gb.select.register_anonymous
+ gb.indexunary.ss.register_new
+
+ """
+ if backend != "suitesparse": # pragma: no cover (safety)
+ raise RuntimeError(
+ "`gb.select.ss.register_new` invalid when not using 'suitesparse' backend"
+ )
+ if _IS_SSGB7:
+ # JIT was introduced in SuiteSparse:GraphBLAS 8.0
+ import suitesparse_graphblas as ssgb
+
+ raise RuntimeError(
+ "JIT was added to SuiteSparse:GraphBLAS in version 8; "
+ f"current version is {ssgb.__version__}"
+ )
+ input_type = lookup_dtype(input_type)
+ thunk_type = lookup_dtype(thunk_type)
+ name = name if name.startswith("ss.") else f"ss.{name}"
+ # Register to both `gb.indexunary.ss` and `gb.select.ss.`
+ indexunary.ss.register_new(name, jit_c_definition, input_type, thunk_type, BOOL)
+ module, funcname = SelectOp._remove_nesting(name, strict=False)
+ return getattr(module, funcname)
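A usage sketch of ``gb.select.ss.register_new``, reusing the C definition from the docstring above (assumes SuiteSparse:GraphBLAS 8+ and a configured C compiler; the data is hypothetical):

```python
import graphblas as gb

woot = gb.select.ss.register_new(
    "woot",
    "void woot (bool *z, const int32_t *x, GrB_Index i, GrB_Index j, int32_t *y) "
    "{ (*z) = ((*x) + i + j == (*y)) ; }",
    "INT32",  # input_type
    "INT32",  # thunk_type
)
A = gb.Matrix.from_coo([0, 1], [0, 1], [2, 0], dtype="INT32")
result = A.select(woot, 2).new()  # keep entries where x + i + j == thunk
```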
diff --git a/graphblas/core/ss/unary.py b/graphblas/core/ss/unary.py
new file mode 100644
index 000000000..0b7ced3c8
--- /dev/null
+++ b/graphblas/core/ss/unary.py
@@ -0,0 +1,109 @@
+from ... import backend
+from ...dtypes import lookup_dtype
+from ...exceptions import check_status_carg
+from .. import NULL, ffi, lib
+from ..operator.base import TypedOpBase
+from ..operator.unary import TypedUserUnaryOp, UnaryOp
+from . import _IS_SSGB7
+
+ffi_new = ffi.new
+
+
+class TypedJitUnaryOp(TypedOpBase):
+ __slots__ = "_jit_c_definition"
+ opclass = "UnaryOp"
+
+ def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition):
+ super().__init__(parent, name, type_, return_type, gb_obj, name)
+ self._jit_c_definition = jit_c_definition
+
+ @property
+ def jit_c_definition(self):
+ return self._jit_c_definition
+
+ __call__ = TypedUserUnaryOp.__call__
+
+
+def register_new(name, jit_c_definition, input_type, ret_type):
+ """Register a new UnaryOp using the SuiteSparse:GraphBLAS JIT compiler.
+
+ This creates a UnaryOp by compiling the C string definition of the function.
+ It requires a shell call to a C compiler. The resulting operator will be as
+ fast as if it were built into SuiteSparse:GraphBLAS and avoids the overhead
+ of the additional function calls incurred when using ``gb.unary.register_new``.
+
+ This is an advanced feature that requires a C compiler and proper configuration.
+ Configuration is handled by ``gb.ss.config``; see its docstring for details.
+ By default, the JIT caches results in ``~/.SuiteSparse/``. For more information,
+ see the SuiteSparse:GraphBLAS user guide.
+
+ Only one type signature may be registered at a time, but repeated calls using
+ the same name with different input types are allowed.
+
+ Parameters
+ ----------
+ name : str
+ The name of the operator. This will show up as ``gb.unary.ss.{name}``.
+ The name may contain periods, ".", which will result in nested objects
+ such as ``gb.unary.ss.x.y.z`` for name ``"x.y.z"``.
+ jit_c_definition : str
+ The C definition as a string of the user-defined function. For example:
+ ``"void square (float *z, float *x) { (*z) = (*x) * (*x) ; } ;"``
+ input_type : dtype
+ The dtype of the operand of the unary operator.
+ ret_type : dtype
+ The dtype of the result of the unary operator.
+
+ Returns
+ -------
+ UnaryOp
+
+ See Also
+ --------
+ gb.unary.register_new
+ gb.unary.register_anonymous
+ gb.binary.ss.register_new
+
+ """
+ if backend != "suitesparse": # pragma: no cover (safety)
+ raise RuntimeError(
+ "`gb.unary.ss.register_new` invalid when not using 'suitesparse' backend"
+ )
+ if _IS_SSGB7:
+ # JIT was introduced in SuiteSparse:GraphBLAS 8.0
+ import suitesparse_graphblas as ssgb
+
+ raise RuntimeError(
+ "JIT was added to SuiteSparse:GraphBLAS in version 8; "
+ f"current version is {ssgb.__version__}"
+ )
+ input_type = lookup_dtype(input_type)
+ ret_type = lookup_dtype(ret_type)
+ name = name if name.startswith("ss.") else f"ss.{name}"
+ module, funcname = UnaryOp._remove_nesting(name, strict=False)
+ if hasattr(module, funcname):
+ rv = getattr(module, funcname)
+ if not isinstance(rv, UnaryOp):
+ UnaryOp._remove_nesting(name)
+ if input_type in rv.types or rv._udt_types is not None and input_type in rv._udt_types:
+ raise TypeError(f"UnaryOp gb.unary.{name} already defined for {input_type} input type")
+ else:
+ # We use `is_udt=True` to make dtype handling flexible and explicit.
+ rv = UnaryOp(name, is_udt=True)
+ gb_obj = ffi_new("GrB_UnaryOp*")
+ check_status_carg(
+ lib.GxB_UnaryOp_new(
+ gb_obj,
+ NULL,
+ ret_type._carg,
+ input_type._carg,
+ ffi_new("char[]", funcname.encode()),
+ ffi_new("char[]", jit_c_definition.encode()),
+ ),
+ "UnaryOp",
+ gb_obj[0],
+ )
+ op = TypedJitUnaryOp(rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition)
+ rv._add(op, is_jit=True)
+ setattr(module, funcname, rv)
+ return rv
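And a usage sketch of ``gb.unary.ss.register_new`` with the ``square`` definition from its docstring (same JIT requirements as above; the vector data is hypothetical):

```python
import graphblas as gb

square = gb.unary.ss.register_new(
    "square",
    "void square (float *z, float *x) { (*z) = (*x) * (*x) ; } ;",
    "FP32",  # input_type
    "FP32",  # ret_type
)
v = gb.Vector.from_coo([0, 1, 2], [1.5, 2.0, 3.0], dtype="FP32")
w = v.apply(square).new()  # 2.25, 4.0, 9.0
```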
diff --git a/graphblas/core/ss/vector.py b/graphblas/core/ss/vector.py
index 2b1e8bf05..fdde7eb92 100644
--- a/graphblas/core/ss/vector.py
+++ b/graphblas/core/ss/vector.py
@@ -6,10 +6,11 @@
import graphblas as gb
from ... import binary, monoid
-from ...dtypes import _INDEX, INT64, UINT64, _string_to_dtype, lookup_dtype
+from ...dtypes import _INDEX, INT64, UINT64, lookup_dtype
from ...exceptions import _error_code_lookup, check_status, check_status_carg
from .. import NULL, ffi, lib
from ..base import call
+from ..dtypes import _string_to_dtype
from ..operator import get_typed_op
from ..scalar import Scalar, _as_scalar
from ..utils import (
@@ -42,7 +43,7 @@ def head(vector, n=10, dtype=None, *, sort=False):
dtype = vector.dtype
else:
dtype = lookup_dtype(dtype)
- indices, vals = zip(*itertools.islice(vector.ss.iteritems(), n))
+ indices, vals = zip(*itertools.islice(vector.ss.iteritems(), n), strict=True)
return np.array(indices, np.uint64), np.array(vals, dtype.np_type)
@@ -144,8 +145,7 @@ def format(self):
return format
def build_diag(self, matrix, k=0, **opts):
- """
- GxB_Vector_diag.
+ """GxB_Vector_diag.
Extract a diagonal from a Matrix or TransposedMatrix into a Vector.
Existing entries in the Vector are discarded.
@@ -155,8 +155,8 @@ def build_diag(self, matrix, k=0, **opts):
matrix : Matrix or TransposedMatrix
Extract a diagonal from this matrix.
k : int, default 0
- Diagonal in question. Use `k>0` for diagonals above the main diagonal,
- and `k<0` for diagonals below the main diagonal.
+ Diagonal in question. Use ``k>0`` for diagonals above the main diagonal,
+ and ``k<0`` for diagonals below the main diagonal.
See Also
--------
@@ -182,15 +182,14 @@ def build_diag(self, matrix, k=0, **opts):
)
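For example (a sketch; it assumes the Vector is sized to the length of the requested diagonal):

```python
import graphblas as gb

A = gb.Matrix.from_coo([0, 1, 2], [0, 1, 2], [1, 2, 3])
v = gb.Vector(A.dtype, 3)
v.ss.build_diag(A)       # main diagonal: 1, 2, 3
u = gb.Vector(A.dtype, 2)
u.ss.build_diag(A, k=1)  # superdiagonal (length 2); prior entries are discarded
```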
def split(self, chunks, *, name=None, **opts):
- """
- GxB_Matrix_split.
+ """GxB_Matrix_split.
- Split a Vector into a 1D array of sub-vectors according to `chunks`.
+ Split a Vector into a 1D array of sub-vectors according to ``chunks``.
This performs the opposite operation of ``concat``.
- `chunks` is short for "chunksizes" and indicates the chunk sizes.
- `chunks` may be a single integer, or a tuple or list. Example chunks:
+ ``chunks`` is short for "chunksizes" and indicates the chunk sizes.
+ ``chunks`` may be a single integer, or a tuple or list. Example chunks:
- ``chunks=10``
- Split vector into chunks of size 10 (the last chunk may be smaller).
@@ -201,6 +200,7 @@ def split(self, chunks, *, name=None, **opts):
--------
Vector.ss.concat
graphblas.ss.concat
+
"""
from ..vector import Vector
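A sketch of the split/concat round trip (hypothetical data):

```python
import graphblas as gb

v = gb.Vector.from_coo(range(20), range(20))
chunks = v.ss.split(10)  # two sub-vectors of size 10 (the last may be smaller)
w = gb.Vector(v.dtype, 20)
w.ss.concat(chunks)      # the opposite operation: stitch the pieces back together
assert v.isequal(w)
```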
@@ -248,12 +248,11 @@ def _concat(self, tiles, m, opts):
)
def concat(self, tiles, **opts):
- """
- GxB_Matrix_concat.
+ """GxB_Matrix_concat.
Concatenate a 1D list of Vector objects into the current Vector.
Any existing values in the current Vector will be discarded.
- To concatenate into a new Vector, use `graphblas.ss.concat`.
+ To concatenate into a new Vector, use ``graphblas.ss.concat``.
This performs the opposite operation of ``split``.
@@ -261,13 +260,13 @@ def concat(self, tiles, **opts):
--------
Vector.ss.split
graphblas.ss.concat
+
"""
tiles, m, n, is_matrix = _concat_mn(tiles, is_matrix=False)
self._concat(tiles, m, opts)
def build_scalar(self, indices, value):
- """
- GxB_Vector_build_Scalar.
+ """GxB_Vector_build_Scalar.
Like ``build``, but uses a scalar for all the values.
@@ -275,6 +274,7 @@ def build_scalar(self, indices, value):
--------
Vector.build
Vector.from_coo
+
"""
indices = ints_to_numpy_buffer(indices, np.uint64, name="indices")
scalar = _as_scalar(value, self._parent.dtype, is_cscalar=False) # pragma: is_grbscalar
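For example (a sketch):

```python
import graphblas as gb

v = gb.Vector("INT64", 10)
v.ss.build_scalar([0, 3, 7], 1)  # iso-valued: 1 at indices 0, 3, and 7
```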
@@ -409,14 +409,13 @@ def iteritems(self, seek=0):
lib.GxB_Iterator_free(it_ptr)
def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **opts):
- """
- GxB_Vextor_export_xxx.
+ """GxB_Vextor_export_xxx.
Parameters
----------
format : str or None, default None
- If `format` is not specified, this method exports in the currently stored format.
- To control the export format, set `format` to one of:
+ If ``format`` is not specified, this method exports in the currently stored format.
+ To control the export format, set ``format`` to one of:
- "sparse"
- "bitmap"
- "full"
@@ -434,7 +433,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **
Returns
-------
- dict; keys depend on `format` and `raw` arguments (see below).
+ dict; keys depend on ``format`` and ``raw`` arguments (see below).
See Also
--------
@@ -442,7 +441,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **
Vector.ss.import_any
Return values
- - Note: for `raw=True`, arrays may be larger than specified.
+ - Note: for ``raw=True``, arrays may be larger than specified.
- "sparse" format
- indices : ndarray(dtype=uint64, size=nvals)
- values : ndarray(size=nvals)
@@ -467,6 +466,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **
>>> pieces = v.ss.export()
>>> v2 = Vector.ss.import_any(**pieces)
+
"""
return self._export(
format=format,
@@ -478,13 +478,12 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **
)
def unpack(self, format=None, *, sort=False, raw=False, **opts):
- """
- GxB_Vector_unpack_xxx.
+ """GxB_Vector_unpack_xxx.
- `unpack` is like `export`, except that the Vector remains valid but empty.
- `pack_*` methods are the opposite of `unpack`.
+ ``unpack`` is like ``export``, except that the Vector remains valid but empty.
+ ``pack_*`` methods are the opposite of ``unpack``.
- See `Vector.ss.export` documentation for more details.
+ See ``Vector.ss.export`` documentation for more details.
"""
return self._export(
format=format, sort=sort, give_ownership=True, raw=raw, method="unpack", opts=opts
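A sketch of the unpack/pack round trip (hypothetical data; assumes the dict from ``unpack`` can be passed straight to ``pack_any``, mirroring the ``import_any(**pieces)`` pattern shown elsewhere):

```python
import graphblas as gb

v = gb.Vector.from_coo([0, 2], [10, 20])
d = v.ss.unpack("sparse")  # v is now valid but empty; d holds "indices", "values", ...
v.ss.pack_any(**d)         # the opposite of unpack: move the arrays back into v
```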
@@ -654,11 +653,10 @@ def import_any(
nvals=None, # optional
**opts,
):
- """
- GxB_Vector_import_xxx.
+ """GxB_Vector_import_xxx.
Dispatch to appropriate import method inferred from inputs.
- See the other import functions and `Vector.ss.export`` for details.
+ See the other import functions and ``Vector.ss.export`` for details.
Returns
-------
@@ -678,6 +676,7 @@ def import_any(
>>> pieces = v.ss.export()
>>> v2 = Vector.ss.import_any(**pieces)
+
"""
return cls._import_any(
values=values,
@@ -721,13 +720,12 @@ def pack_any(
name=None,
**opts,
):
- """
- GxB_Vector_pack_xxx.
+ """GxB_Vector_pack_xxx.
- `pack_any` is like `import_any` except it "packs" data into an
+ ``pack_any`` is like ``import_any`` except it "packs" data into an
existing Vector. This is the opposite of ``unpack()``.
- See `Vector.ss.import_any` documentation for more details.
+ See ``Vector.ss.import_any`` documentation for more details.
"""
return self._import_any(
values=values,
@@ -843,8 +841,7 @@ def import_sparse(
name=None,
**opts,
):
- """
- GxB_Vector_import_CSC.
+ """GxB_Vector_import_CSC.
Create a new Vector from sparse input.
@@ -858,7 +855,7 @@ def import_sparse(
If not specified, will be set to ``len(values)``.
is_iso : bool, default False
Is the Vector iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
sorted_index : bool, default False
Indicate whether the values in "indices" are sorted.
take_ownership : bool, default False
@@ -875,7 +872,7 @@ def import_sparse(
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Vector.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "sparse" or None. This is included to be compatible with
the dict returned from exporting.
@@ -885,6 +882,7 @@ def import_sparse(
Returns
-------
Vector
+
"""
return cls._import_sparse(
size=size,
@@ -919,13 +917,12 @@ def pack_sparse(
name=None,
**opts,
):
- """
- GxB_Vector_pack_CSC.
+ """GxB_Vector_pack_CSC.
- `pack_sparse` is like `import_sparse` except it "packs" data into an
+ ``pack_sparse`` is like ``import_sparse`` except it "packs" data into an
existing Vector. This is the opposite of ``unpack("sparse")``.
- See `Vector.ss.import_sparse` documentation for more details.
+ See ``Vector.ss.import_sparse`` documentation for more details.
"""
return self._import_sparse(
indices=indices,
@@ -1028,8 +1025,7 @@ def import_bitmap(
name=None,
**opts,
):
- """
- GxB_Vector_import_Bitmap.
+ """GxB_Vector_import_Bitmap.
Create a new Vector from values and bitmap (as mask) arrays.
@@ -1045,7 +1041,7 @@ def import_bitmap(
If not specified, it will be set to the size of values.
is_iso : bool, default False
Is the Vector iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
take_ownership : bool, default False
If True, perform a zero-copy data transfer from input numpy arrays
to GraphBLAS if possible. To give ownership of the underlying
@@ -1060,7 +1056,7 @@ def import_bitmap(
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Vector.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "bitmap" or None. This is included to be compatible with
the dict returned from exporting.
@@ -1070,6 +1066,7 @@ def import_bitmap(
Returns
-------
Vector
+
"""
return cls._import_bitmap(
bitmap=bitmap,
@@ -1102,13 +1099,12 @@ def pack_bitmap(
name=None,
**opts,
):
- """
- GxB_Vector_pack_Bitmap.
+ """GxB_Vector_pack_Bitmap.
- `pack_bitmap` is like `import_bitmap` except it "packs" data into an
+ ``pack_bitmap`` is like ``import_bitmap`` except it "packs" data into an
existing Vector. This is the opposite of ``unpack("bitmap")``.
- See `Vector.ss.import_bitmap` documentation for more details.
+ See ``Vector.ss.import_bitmap`` documentation for more details.
"""
return self._import_bitmap(
bitmap=bitmap,
@@ -1213,8 +1209,7 @@ def import_full(
name=None,
**opts,
):
- """
- GxB_Vector_import_Full.
+ """GxB_Vector_import_Full.
Create a new Vector from values.
@@ -1226,7 +1221,7 @@ def import_full(
If not specified, it will be set to the size of values.
is_iso : bool, default False
Is the Vector iso-valued (meaning all the same value)?
- If true, then `values` should be a length 1 array.
+ If true, then ``values`` should be a length 1 array.
take_ownership : bool, default False
If True, perform a zero-copy data transfer from input numpy arrays
to GraphBLAS if possible. To give ownership of the underlying
@@ -1241,7 +1236,7 @@ def import_full(
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Vector.
- If not specified, this will be inferred from `values`.
+ If not specified, this will be inferred from ``values``.
format : str, optional
Must be "full" or None. This is included to be compatible with
the dict returned from exporting.
@@ -1251,6 +1246,7 @@ def import_full(
Returns
-------
Vector
+
"""
return cls._import_full(
values=values,
@@ -1279,13 +1275,12 @@ def pack_full(
name=None,
**opts,
):
- """
- GxB_Vector_pack_Full.
+ """GxB_Vector_pack_Full.
- `pack_full` is like `import_full` except it "packs" data into an
+ ``pack_full`` is like ``import_full`` except it "packs" data into an
existing Vector. This is the opposite of ``unpack("full")``.
- See `Vector.ss.import_full` documentation for more details.
+ See ``Vector.ss.import_full`` documentation for more details.
"""
return self._import_full(
values=values,
@@ -1364,12 +1359,13 @@ def head(self, n=10, dtype=None, *, sort=False):
def scan(self, op=monoid.plus, *, name=None, **opts):
"""Perform a prefix scan with the given monoid.
- For example, use `monoid.plus` (the default) to perform a cumulative sum,
- and `monoid.times` for cumulative product. Works with any monoid.
+ For example, use ``monoid.plus`` (the default) to perform a cumulative sum,
+ and ``monoid.times`` for cumulative product. Works with any monoid.
Returns
-------
Vector
+
"""
return prefix_scan(self._parent, op, name=name, within="scan", **opts)
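For example (hypothetical data):

```python
import graphblas as gb

v = gb.Vector.from_coo([0, 1, 2], [1, 2, 3])
v.ss.scan()                 # cumulative sum: 1, 3, 6
v.ss.scan(gb.monoid.times)  # cumulative product: 1, 2, 6
```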
@@ -1400,6 +1396,7 @@ def reshape(self, nrows, ncols=None, order="rowwise", *, name=None, **opts):
See Also
--------
Matrix.ss.flatten : flatten a Matrix into a Vector.
+
"""
return self._parent._as_matrix().ss.reshape(nrows, ncols, order, name=name, **opts)
@@ -1419,6 +1416,7 @@ def selectk(self, how, k, *, name=None):
The number of elements to choose
**THIS API IS EXPERIMENTAL AND MAY CHANGE**
+
"""
how = how.lower()
if k < 0:
@@ -1561,20 +1559,20 @@ def compactify(self, how="first", size=None, *, reverse=False, asindex=False, na
def sort(self, op=binary.lt, *, values=True, permutation=True, **opts):
"""GxB_Vector_sort to sort values of the Vector.
- Sorting moves all the elements to the left just like `compactify`.
+ Sorting moves all the elements to the left just like ``compactify``.
The returned vectors will be the same size as the input Vector.
Parameters
----------
op : :class:`~graphblas.core.operator.BinaryOp`, optional
Binary operator with a bool return type used to sort the values.
- For example, `binary.lt` (the default) sorts the smallest elements first.
+ For example, ``binary.lt`` (the default) sorts the smallest elements first.
Ties are broken according to indices (smaller first).
values : bool, default=True
- Whether to return values; will return `None` for values if `False`.
+ Whether to return values; will return ``None`` for values if ``False``.
permutation : bool, default=True
Whether to compute the permutation Vector that has the original indices of the
- sorted values. Will return None if `False`.
+ sorted values. Will return None if ``False``.
nthreads : int, optional
The maximum number of threads to use for this operation.
None, 0 or negative nthreads means to use the default number of threads.
@@ -1587,6 +1585,7 @@ def sort(self, op=binary.lt, *, values=True, permutation=True, **opts):
See Also
--------
Vector.ss.compactify
+
"""
from ..vector import Vector
@@ -1642,16 +1641,32 @@ def serialize(self, compression="default", level=None, **opts):
None, 0 or negative nthreads means to use the default number of threads.
For best performance, this function returns a numpy array with uint8 dtype.
- Use `Vector.ss.deserialize(blob)` to create a Vector from the result of serialization·
+ Use ``Vector.ss.deserialize(blob)`` to create a Vector from the result of serialization.
This method is intended to support all serialization options from SuiteSparse:GraphBLAS.
*Warning*: Behavior of serializing UDTs is experimental and may change in a future release.
+
"""
desc = get_descriptor(compression=compression, compression_level=level, **opts)
blob_handle = ffi_new("void**")
blob_size_handle = ffi_new("GrB_Index*")
parent = self._parent
+ if parent.dtype._is_udt and hasattr(lib, "GrB_Type_get_String"):
+ # Get the name from the dtype and set it to the name of the vector so we can
+ # recreate the UDT. This is a bit hacky and we should restore the original name.
+ # First get the size of name.
+ dtype_size = ffi_new("size_t*")
+ status = lib.GrB_Type_get_SIZE(parent.dtype.gb_obj[0], dtype_size, lib.GrB_NAME)
+ check_status_carg(status, "Type", parent.dtype.gb_obj[0])
+ # Then get the name
+ dtype_char = ffi_new(f"char[{dtype_size[0]}]")
+ status = lib.GrB_Type_get_String(parent.dtype.gb_obj[0], dtype_char, lib.GrB_NAME)
+ check_status_carg(status, "Type", parent.dtype.gb_obj[0])
+ # Then set the name
+ status = lib.GrB_Vector_set_String(parent._carg, dtype_char, lib.GrB_NAME)
+ check_status_carg(status, "Vector", parent._carg)
+
check_status(
lib.GxB_Vector_serialize(
blob_handle,
@@ -1668,7 +1683,7 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts):
"""Deserialize a Vector from bytes, buffer, or numpy array using GxB_Vector_deserialize.
The data should have been previously serialized with a compatible version of
- SuiteSparse:GraphBLAS. For example, from the result of `data = vector.ss.serialize()`.
+ SuiteSparse:GraphBLAS. For example, from the result of ``data = vector.ss.serialize()``.
Examples
--------
@@ -1686,6 +1701,7 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts):
nthreads : int, optional
The maximum number of threads to use when deserializing.
None, 0 or negative nthreads means to use the default number of threads.
+
"""
if isinstance(data, np.ndarray):
data = ints_to_numpy_buffer(data, np.uint8)
@@ -1693,7 +1709,7 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts):
data = np.frombuffer(data, np.uint8)
data_obj = ffi.from_buffer("void*", data)
if dtype is None:
- # Get the dtype name first
+ # Get the dtype name first (for non-UDTs)
cname = ffi_new(f"char[{lib.GxB_MAX_NAME_LEN}]")
info = lib.GxB_deserialize_type_name(
cname,
@@ -1703,6 +1719,22 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts):
if info != lib.GrB_SUCCESS:
raise _error_code_lookup[info]("Vector deserialize failed to get the dtype name")
dtype_name = b"".join(itertools.takewhile(b"\x00".__ne__, cname)).decode()
+ if not dtype_name and hasattr(lib, "GxB_Serialized_get_String"):
+ # Handle UDTs. First get the size of name
+ dtype_size = ffi_new("size_t*")
+ info = lib.GxB_Serialized_get_SIZE(data_obj, dtype_size, lib.GrB_NAME, data.nbytes)
+ if info != lib.GrB_SUCCESS:
+ raise _error_code_lookup[info](
+ "Vector deserialize failed to get the size of name"
+ )
+ # Then get the name
+ dtype_char = ffi_new(f"char[{dtype_size[0]}]")
+ info = lib.GxB_Serialized_get_String(
+ data_obj, dtype_char, lib.GrB_NAME, data.nbytes
+ )
+ if info != lib.GrB_SUCCESS:
+ raise _error_code_lookup[info]("Vector deserialize failed to get the name")
+ dtype_name = ffi.string(dtype_char).decode()
dtype = _string_to_dtype(dtype_name)
else:
dtype = lookup_dtype(dtype)
diff --git a/graphblas/core/utils.py b/graphblas/core/utils.py
index 77c64a7ac..e9a29b3a9 100644
--- a/graphblas/core/utils.py
+++ b/graphblas/core/utils.py
@@ -1,17 +1,19 @@
-from numbers import Integral, Number
+from operator import index
import numpy as np
from ..dtypes import _INDEX, lookup_dtype
from . import ffi, lib
+_NP2 = np.__version__.startswith("2.")
+
def libget(name):
"""Helper to get items from GraphBLAS which might be GrB or GxB."""
try:
return getattr(lib, name)
except AttributeError:
- if name[-4:] not in {"FC32", "FC64", "error"}:
+ if name[-4:] not in {"FC32", "FC64", "rror"}:
raise
ext_name = f"GxB_{name[4:]}"
try:
@@ -22,7 +24,7 @@ def libget(name):
def wrapdoc(func_with_doc):
- """Decorator to copy `__doc__` from a function onto the wrapped function."""
+ """Decorator to copy ``__doc__`` from a function onto the wrapped function."""
def inner(func_wo_doc):
func_wo_doc.__doc__ = func_with_doc.__doc__
@@ -43,7 +45,7 @@ def inner(func_wo_doc):
object: object,
type: type,
}
-_output_types.update((k, k) for k in np.cast)
+_output_types.update((k, k) for k in set(np.sctypeDict.values()))
def output_type(val):
@@ -60,7 +62,8 @@ def ints_to_numpy_buffer(array, dtype, *, name="array", copy=False, ownable=Fals
and not np.issubdtype(array.dtype, np.bool_)
):
raise ValueError(f"{name} must be integers, not {array.dtype.name}")
- array = np.array(array, dtype, copy=copy, order=order)
+ # https://numpy.org/doc/stable/release/2.0.0-notes.html#new-copy-keyword-meaning-for-array-and-asarray-constructors
+ array = np.array(array, dtype, copy=copy or _NP2 and None, order=order)
if ownable and (not array.flags.owndata or not array.flags.writeable):
array = array.copy(order)
return array
@@ -86,13 +89,18 @@ def values_to_numpy_buffer(
-------
np.ndarray
dtype
+
"""
if dtype is not None:
dtype = lookup_dtype(dtype)
- array = np.array(array, _get_subdtype(dtype.np_type), copy=copy, order=order)
+ # https://numpy.org/doc/stable/release/2.0.0-notes.html#new-copy-keyword-meaning-for-array-and-asarray-constructors
+ array = np.array(
+ array, _get_subdtype(dtype.np_type), copy=copy or _NP2 and None, order=order
+ )
else:
is_input_np = isinstance(array, np.ndarray)
- array = np.array(array, copy=copy, order=order)
+ # https://numpy.org/doc/stable/release/2.0.0-notes.html#new-copy-keyword-meaning-for-array-and-asarray-constructors
+ array = np.array(array, copy=copy or _NP2 and None, order=order)
if array.dtype.hasobject:
raise ValueError("object dtype for values is not allowed")
if not is_input_np and array.dtype == np.int32: # pragma: no cover
@@ -158,8 +166,19 @@ def get_order(order):
)
+def maybe_integral(val):
+ """Ensure ``val`` is an integer or return None if it's not."""
+ try:
+ return index(val)
+ except TypeError:
+ pass
+ if isinstance(val, float) and val.is_integer():
+ return int(val)
+ return None
+
+
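A behavior sketch for ``maybe_integral`` (an internal utility in this module):

```python
import numpy as np

from graphblas.core.utils import maybe_integral

maybe_integral(7)            # 7
maybe_integral(np.int64(7))  # 7 (anything supporting __index__)
maybe_integral(7.0)          # 7 (an integral float)
maybe_integral(7.5)          # None
```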
def normalize_chunks(chunks, shape):
- """Normalize chunks argument for use by `Matrix.ss.split`.
+ """Normalize chunks argument for use by ``Matrix.ss.split``.
Examples
--------
@@ -172,11 +191,12 @@ def normalize_chunks(chunks, shape):
[(10,), (5, 15)]
>>> normalize_chunks((5, (5, None)), shape)
[(5, 5), (5, 15)]
+
"""
if isinstance(chunks, (list, tuple)):
pass
- elif isinstance(chunks, Number):
- chunks = (chunks,) * len(shape)
+ elif (chunk := maybe_integral(chunks)) is not None:
+ chunks = (chunk,) * len(shape)
elif isinstance(chunks, np.ndarray):
chunks = chunks.tolist()
else:
@@ -189,25 +209,24 @@ def normalize_chunks(chunks, shape):
f"chunks argument must be of length {len(shape)} (one for each dimension of a {typ})"
)
chunksizes = []
- for size, chunk in zip(shape, chunks):
+ for size, chunk in zip(shape, chunks, strict=True):
if chunk is None:
cur_chunks = [size]
- elif isinstance(chunk, Integral) or isinstance(chunk, float) and chunk.is_integer():
- chunk = int(chunk)
- if chunk < 0:
- raise ValueError(f"Chunksize must be greater than 0; got: {chunk}")
- div, mod = divmod(size, chunk)
- cur_chunks = [chunk] * div
+ elif (c := maybe_integral(chunk)) is not None:
+ if c <= 0:
+ raise ValueError(f"Chunksize must be greater than 0; got: {c}")
+ div, mod = divmod(size, c)
+ cur_chunks = [c] * div
if mod:
cur_chunks.append(mod)
elif isinstance(chunk, (list, tuple)):
cur_chunks = []
none_index = None
for c in chunk:
- if isinstance(c, Integral) or isinstance(c, float) and c.is_integer():
- c = int(c)
- if c < 0:
- raise ValueError(f"Chunksize must be greater than 0; got: {c}")
+ if (val := maybe_integral(c)) is not None:
+ if val <= 0:
+ raise ValueError(f"Chunksize must be greater than 0; got: {val}")
+ c = val
elif c is None:
if none_index is not None:
raise TypeError(
@@ -249,17 +268,17 @@ def normalize_chunks(chunks, shape):
def ensure_type(x, types):
- """Try to ensure `x` is one of the given types, computing if necessary.
+ """Try to ensure ``x`` is one of the given types, computing if necessary.
- `types` must be a type or a tuple of types as used in `isinstance`.
+ ``types`` must be a type or a tuple of types as used in ``isinstance``.
- For example, if `types` is a Vector, then a Vector input will be returned,
- and a `VectorExpression` input will be computed and returned as a Vector.
+ For example, if ``types`` is a Vector, then a Vector input will be returned,
+ and a ``VectorExpression`` input will be computed and returned as a Vector.
TypeError will be raised if the input is not or can't be converted to types.
- This function ignores `graphblas.config["autocompute"]`; it always computes
- if the return type will match `types`.
+ This function ignores ``graphblas.config["autocompute"]``; it always computes
+ if the return type will match ``types``.
"""
if isinstance(x, types):
return x
@@ -300,7 +319,10 @@ def __init__(self, array=None, dtype=_INDEX, *, size=None, name=None):
if size is not None:
self.array = np.empty(size, dtype=dtype.np_type)
else:
- self.array = np.array(array, dtype=_get_subdtype(dtype.np_type), copy=False, order="C")
+ # https://numpy.org/doc/stable/release/2.0.0-notes.html#new-copy-keyword-meaning-for-array-and-asarray-constructors
+ self.array = np.array(
+ array, dtype=_get_subdtype(dtype.np_type), copy=_NP2 and None, order="C"
+ )
c_type = dtype.c_type if dtype._is_udt else f"{dtype.c_type}*"
self._carg = ffi.cast(c_type, ffi.from_buffer(self.array))
self.dtype = dtype
@@ -358,6 +380,7 @@ def _autogenerate_code(
specializer=None,
begin="# Begin auto-generated code",
end="# End auto-generated code",
+ callblack=True,
):
"""Super low-tech auto-code generation used by automethods.py and infixmethods.py."""
with filepath.open() as f: # pragma: no branch (flaky)
@@ -384,7 +407,8 @@ def _autogenerate_code(
f.write(new_text)
import subprocess
- try:
- subprocess.check_call(["black", filepath])
- except FileNotFoundError: # pragma: no cover (safety)
- pass # It's okay if `black` isn't installed; pre-commit hooks will do linting
+ if callblack:
+ try:
+ subprocess.check_call(["black", filepath])
+ except FileNotFoundError: # pragma: no cover (safety)
+ pass # It's okay if `black` isn't installed; pre-commit hooks will do linting
diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py
index 57851420d..8bac4198e 100644
--- a/graphblas/core/vector.py
+++ b/graphblas/core/vector.py
@@ -1,5 +1,4 @@
import itertools
-import warnings
import numpy as np
@@ -9,9 +8,16 @@
from . import _supports_udfs, automethods, ffi, lib, utils
from .base import BaseExpression, BaseType, _check_mask, call
from .descriptor import lookup as descriptor_lookup
-from .expr import _ALL_INDICES, AmbiguousAssignOrExtract, IndexerResolver, Updater
+from .expr import _ALL_INDICES, AmbiguousAssignOrExtract, IndexerResolver, InfixExprBase, Updater
from .mask import Mask, StructuralMask, ValueMask
-from .operator import UNKNOWN_OPCLASS, find_opclass, get_semiring, get_typed_op, op_from_string
+from .operator import (
+ UNKNOWN_OPCLASS,
+ _get_typed_op_from_exprs,
+ find_opclass,
+ get_semiring,
+ get_typed_op,
+ op_from_string,
+)
from .scalar import (
_COMPLETE,
_MATERIALIZE,
@@ -61,13 +67,13 @@ def _v_union_m(updater, left, right, left_default, right_default, op):
updater << temp.ewise_union(right, op, left_default=left_default, right_default=right_default)
-def _v_union_v(updater, left, right, left_default, right_default, op, dtype):
+def _v_union_v(updater, left, right, left_default, right_default, op):
mask = updater.kwargs.get("mask")
opts = updater.opts
- new_left = left.dup(dtype, clear=True)
+ new_left = left.dup(op.type, clear=True)
new_left(mask=mask, **opts) << binary.second(right, left_default)
new_left(mask=mask, **opts) << binary.first(left | new_left)
- new_right = right.dup(dtype, clear=True)
+ new_right = right.dup(op.type2, clear=True)
new_right(mask=mask, **opts) << binary.second(left, right_default)
new_right(mask=mask, **opts) << binary.first(right | new_right)
updater << op(new_left & new_right)
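`_v_union_v` now derives the two intermediate dtypes from the typed op (`op.type` and `op.type2`) instead of a single precomputed `dtype`. For context, a sketch of what the union computes at the public API level:

```python
import graphblas as gb

u = gb.Vector.from_coo([0, 1], [10, 20], size=3)
v = gb.Vector.from_coo([1, 2], [1, 2], size=3)

# Union keeps indices present in either input; the missing side is filled
# with the corresponding default before the binary op is applied.
w = u.ewise_union(v, gb.binary.minus, left_default=0, right_default=0).new()
# w -> {0: 10, 1: 19, 2: -2}
```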
@@ -143,6 +149,7 @@ class Vector(BaseType):
Size of the Vector.
name : str, optional
Name to give the Vector. This will be displayed in the ``__repr__``.
+
"""
__slots__ = "_size", "_parent", "ss"
@@ -259,6 +266,7 @@ def __delitem__(self, keys, **opts):
Examples
--------
>>> del v[1:-1]
+
"""
del Updater(self, opts=opts)[keys]
@@ -273,6 +281,7 @@ def __getitem__(self, keys):
.. code-block:: python
sub_v = v[[1, 3, 5]].new()
+
"""
resolved_indexes = IndexerResolver(self, keys)
shape = resolved_indexes.shape
@@ -292,6 +301,7 @@ def __setitem__(self, keys, expr, **opts):
# This makes a dense iso-value vector
v[:] = 1
+
"""
Updater(self, opts=opts)[keys] = expr
@@ -304,6 +314,7 @@ def __contains__(self, index):
# Check if v[15] is non-empty
15 in v
+
"""
extractor = self[index]
if not extractor._is_scalar:
@@ -343,6 +354,7 @@ def isequal(self, other, *, check_dtype=False, **opts):
See Also
--------
:meth:`isclose` : For equality check of floating point dtypes
+
"""
other = self._expect_type(other, Vector, within="isequal", argname="other")
if check_dtype and self.dtype != other.dtype:
@@ -385,6 +397,7 @@ def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False, **opts
Returns
-------
bool
+
"""
other = self._expect_type(other, Vector, within="isclose", argname="other")
if check_dtype and self.dtype != other.dtype:
@@ -449,36 +462,6 @@ def resize(self, size):
call("GrB_Vector_resize", [self, size])
self._size = size.value
- def to_values(self, dtype=None, *, indices=True, values=True, sort=True):
- """Extract the indices and values as a 2-tuple of numpy arrays.
-
- .. deprecated:: 2022.11.0
- `Vector.to_values` will be removed in a future release.
- Use `Vector.to_coo` instead. Will be removed in version 2023.9.0 or later
-
- Parameters
- ----------
- dtype :
- Requested dtype for the output values array.
- indices :bool, default=True
- Whether to return indices; will return `None` for indices if `False`
- values : bool, default=True
- Whether to return values; will return `None` for values if `False`
- sort : bool, default=True
- Whether to require sorted indices.
-
- Returns
- -------
- np.ndarray[dtype=uint64] : Indices
- np.ndarray : Values
- """
- warnings.warn(
- "`Vector.to_values(...)` is deprecated; please use `Vector.to_coo(...)` instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return self.to_coo(dtype, indices=indices, values=values, sort=sort)
-
def to_coo(self, dtype=None, *, indices=True, values=True, sort=True):
"""Extract the indices and values as a 2-tuple of numpy arrays.
@@ -487,9 +470,9 @@ def to_coo(self, dtype=None, *, indices=True, values=True, sort=True):
dtype :
Requested dtype for the output values array.
indices : bool, default=True
- Whether to return indices; will return `None` for indices if `False`
+ Whether to return indices; will return ``None`` for indices if ``False``
values : bool, default=True
- Whether to return values; will return `None` for values if `False`
+ Whether to return values; will return ``None`` for values if ``False``
sort : bool, default=True
Whether to require sorted indices.
@@ -503,6 +486,7 @@ def to_coo(self, dtype=None, *, indices=True, values=True, sort=True):
-------
np.ndarray[dtype=uint64] : Indices
np.ndarray : Values
+
"""
if sort and backend == "suitesparse":
self.wait() # sort in SS
@@ -539,7 +523,7 @@ def build(self, indices, values, *, dup_op=None, clear=False, size=None):
"""Rarely used method to insert values into an existing Vector. The typical use case
is to create a new Vector and insert values at the same time using :meth:`from_coo`.
- All the arguments are used identically in :meth:`from_coo`, except for `clear`, which
+ All the arguments are used identically in :meth:`from_coo`, except for ``clear``, which
indicates whether to clear the Vector prior to adding the new values.
"""
# TODO: accept `dtype` keyword to match the dtype of `values`?
@@ -602,6 +586,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
Returns
-------
Vector
+
"""
if dtype is not None or mask is not None or clear:
if dtype is None:
@@ -612,7 +597,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
else:
if opts:
# Ignore opts for now
- descriptor_lookup(**opts)
+ desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context)
rv = Vector._from_obj(ffi_new("GrB_Vector*"), self.dtype, self._size, name=name)
call("GrB_Vector_dup", [_Pointer(rv), self])
return rv
@@ -632,6 +617,7 @@ def diag(self, k=0, *, name=None):
Returns
-------
:class:`~graphblas.Matrix`
+
"""
from .matrix import Matrix
@@ -656,6 +642,7 @@ def wait(self, how="materialize"):
Use wait to force completion of the Vector.
Has no effect in `blocking mode <../user_guide/init.html#graphblas-modes>`__.
+
"""
how = how.lower()
if how == "materialize":
@@ -680,6 +667,7 @@ def get(self, index, default=None):
Returns
-------
Python scalar
+
"""
expr = self[index]
if expr._is_scalar:
@@ -690,43 +678,6 @@ def get(self, index, default=None):
"A single index should be given, and the result will be a Python scalar."
)
- @classmethod
- def from_values(cls, indices, values, dtype=None, *, size=None, dup_op=None, name=None):
- """Create a new Vector from indices and values.
-
- .. deprecated:: 2022.11.0
- `Vector.from_values` will be removed in a future release.
- Use `Vector.from_coo` instead. Will be removed in version 2023.9.0 or later
-
- Parameters
- ----------
- indices : list or np.ndarray
- Vector indices.
- values : list or np.ndarray or scalar
- List of values. If a scalar is provided, all values will be set to this single value.
- dtype :
- Data type of the Vector. If not provided, the values will be inspected
- to choose an appropriate dtype.
- size : int, optional
- Size of the Vector. If not provided, ``size`` is computed from
- the maximum index found in ``indices``.
- dup_op : BinaryOp, optional
- Function used to combine values if duplicate indices are found.
- Leaving ``dup_op=None`` will raise an error if duplicates are found.
- name : str, optional
- Name to give the Vector.
-
- Returns
- -------
- Vector
- """
- warnings.warn(
- "`Vector.from_values(...)` is deprecated; please use `Vector.from_coo(...)` instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return cls.from_coo(indices, values, dtype, size=size, dup_op=dup_op, name=name)
-
@classmethod
def from_coo(cls, indices, values=1.0, dtype=None, *, size=None, dup_op=None, name=None):
"""Create a new Vector from indices and values.
@@ -759,6 +710,7 @@ def from_coo(cls, indices, values=1.0, dtype=None, *, size=None, dup_op=None, na
Returns
-------
Vector
+
"""
indices = ints_to_numpy_buffer(indices, np.uint64, name="indices")
values, dtype = values_to_numpy_buffer(values, dtype, subarray_after=1)
@@ -816,10 +768,11 @@ def from_pairs(cls, pairs, dtype=None, *, size=None, dup_op=None, name=None):
Returns
-------
Vector
+
"""
if isinstance(pairs, np.ndarray):
raise TypeError("pairs as NumPy array is not supported; use `Vector.from_coo` instead")
- unzipped = list(zip(*pairs))
+ unzipped = list(zip(*pairs, strict=True))
if len(unzipped) == 2:
indices, values = unzipped
elif not unzipped:
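With `strict=True`, ragged input now raises `ValueError` during the unzip instead of silently truncating. Typical usage for context:

```python
import graphblas as gb

v = gb.Vector.from_pairs([(0, 1.5), (3, 2.5)], size=5)
# Equivalent to: gb.Vector.from_coo([0, 3], [1.5, 2.5], size=5)

# A ragged pair such as (4,) now raises ValueError from the strict zip.
```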
@@ -867,6 +820,7 @@ def from_scalar(cls, value, size, dtype=None, *, name=None, **opts):
Returns
-------
Vector
+
"""
if type(value) is not Scalar:
try:
@@ -919,6 +873,7 @@ def from_dense(cls, values, missing_value=None, *, dtype=None, name=None, **opts
Returns
-------
Vector
+
"""
values, dtype = values_to_numpy_buffer(values, dtype, subarray_after=1)
if values.ndim == 0:
@@ -967,6 +922,7 @@ def to_dense(self, fill_value=None, dtype=None, **opts):
Returns
-------
np.ndarray
+
"""
if fill_value is None or self._nvals == self._size:
if self._nvals != self._size:
@@ -1037,16 +993,43 @@ def ewise_add(self, other, op=monoid.plus):
# Functional syntax
w << monoid.max(u | v)
+
"""
+ return self._ewise_add(other, op)
+
+ def _ewise_add(self, other, op=monoid.plus, is_infix=False):
from .matrix import Matrix, MatrixExpression, TransposedMatrix
method_name = "ewise_add"
- other = self._expect_type(
- other, (Vector, Matrix, TransposedMatrix), within=method_name, argname="other", op=op
- )
- op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
- # Per the spec, op may be a semiring, but this is weird, so don't.
- self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
+ if is_infix:
+ from .infix import MatrixEwiseAddExpr, VectorEwiseAddExpr
+
+ other = self._expect_type(
+ other,
+ (Vector, Matrix, TransposedMatrix, MatrixEwiseAddExpr, VectorEwiseAddExpr),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ op = _get_typed_op_from_exprs(op, self, other, kind="binary")
+ # Per the spec, op may be a semiring, but this is weird, so don't.
+ self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
+ if isinstance(self, VectorEwiseAddExpr):
+ self = op(self).new()
+ if isinstance(other, InfixExprBase):
+ other = op(other).new()
+ else:
+ other = self._expect_type(
+ other,
+ (Vector, Matrix, TransposedMatrix),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
+ # Per the spec, op may be a semiring, but this is weird, so don't.
+ self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
+
if other.ndim == 2:
# Broadcast columnwise from the left
if other._nrows != self._size:
@@ -1102,16 +1085,42 @@ def ewise_mult(self, other, op=binary.times):
# Functional syntax
w << binary.gt(u & v)
+
"""
+ return self._ewise_mult(other, op)
+
+ def _ewise_mult(self, other, op=binary.times, is_infix=False):
from .matrix import Matrix, MatrixExpression, TransposedMatrix
method_name = "ewise_mult"
- other = self._expect_type(
- other, (Vector, Matrix, TransposedMatrix), within=method_name, argname="other", op=op
- )
- op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
- # Per the spec, op may be a semiring, but this is weird, so don't.
- self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
+ if is_infix:
+ from .infix import MatrixEwiseMultExpr, VectorEwiseMultExpr
+
+ other = self._expect_type(
+ other,
+ (Vector, Matrix, TransposedMatrix, MatrixEwiseMultExpr, VectorEwiseMultExpr),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ op = _get_typed_op_from_exprs(op, self, other, kind="binary")
+ # Per the spec, op may be a semiring, but this is weird, so don't.
+ self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
+ if isinstance(self, VectorEwiseMultExpr):
+ self = op(self).new()
+ if isinstance(other, InfixExprBase):
+ other = op(other).new()
+ else:
+ other = self._expect_type(
+ other,
+ (Vector, Matrix, TransposedMatrix),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
+ # Per the spec, op may be a semiring, but this is weird, so don't.
+ self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
if other.ndim == 2:
# Broadcast columnwise from the left
if other._nrows != self._size:
@@ -1170,14 +1179,37 @@ def ewise_union(self, other, op, left_default, right_default):
# Functional syntax
w << binary.div(u | v, left_default=1, right_default=1)
+
"""
+ return self._ewise_union(other, op, left_default, right_default)
+
+ def _ewise_union(self, other, op, left_default, right_default, is_infix=False):
from .matrix import Matrix, MatrixExpression, TransposedMatrix
method_name = "ewise_union"
- other = self._expect_type(
- other, (Vector, Matrix, TransposedMatrix), within=method_name, argname="other", op=op
- )
- dtype = self.dtype if self.dtype._is_udt else None
+ if is_infix:
+ from .infix import MatrixEwiseAddExpr, VectorEwiseAddExpr
+
+ other = self._expect_type(
+ other,
+ (Vector, Matrix, TransposedMatrix, MatrixEwiseAddExpr, VectorEwiseAddExpr),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ temp_op = _get_typed_op_from_exprs(op, self, other, kind="binary")
+ else:
+ other = self._expect_type(
+ other,
+ (Vector, Matrix, TransposedMatrix),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ temp_op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
+
+ left_dtype = temp_op.type
+ dtype = left_dtype if left_dtype._is_udt else None
if type(left_default) is not Scalar:
try:
left = Scalar.from_value(
@@ -1194,6 +1226,8 @@ def ewise_union(self, other, op, left_default, right_default):
)
else:
left = _as_scalar(left_default, dtype, is_cscalar=False) # pragma: is_grbscalar
+ right_dtype = temp_op.type2
+ dtype = right_dtype if right_dtype._is_udt else None
if type(right_default) is not Scalar:
try:
right = Scalar.from_value(
@@ -1210,12 +1244,29 @@ def ewise_union(self, other, op, left_default, right_default):
)
else:
right = _as_scalar(right_default, dtype, is_cscalar=False) # pragma: is_grbscalar
- scalar_dtype = unify(left.dtype, right.dtype)
- nonscalar_dtype = unify(self.dtype, other.dtype)
- op = get_typed_op(op, scalar_dtype, nonscalar_dtype, is_left_scalar=True, kind="binary")
+
+ if is_infix:
+ op1 = _get_typed_op_from_exprs(op, self, right, kind="binary")
+ op2 = _get_typed_op_from_exprs(op, left, other, kind="binary")
+ else:
+ op1 = get_typed_op(op, self.dtype, right.dtype, kind="binary")
+ op2 = get_typed_op(op, left.dtype, other.dtype, kind="binary")
+ if op1 is not op2:
+ left_dtype = unify(op1.type, op2.type, is_right_scalar=True)
+ right_dtype = unify(op1.type2, op2.type2, is_left_scalar=True)
+ op = get_typed_op(op, left_dtype, right_dtype, kind="binary")
+ else:
+ op = op1
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
if op.opclass == "Monoid":
op = op.binaryop
+
+ if is_infix:
+ if isinstance(self, VectorEwiseAddExpr):
+ self = op(self, left_default=left, right_default=right).new()
+ if isinstance(other, InfixExprBase):
+ other = op(other, left_default=left, right_default=right).new()
+
expr_repr = "{0.name}.{method_name}({2.name}, {op}, {1._expr_name}, {3._expr_name})"
if other.ndim == 2:
# Broadcast columnwise from the left
@@ -1243,11 +1294,10 @@ def ewise_union(self, other, op, left_default, right_default):
expr_repr=expr_repr,
)
else:
- dtype = unify(scalar_dtype, nonscalar_dtype, is_left_scalar=True)
expr = VectorExpression(
method_name,
None,
- [self, left, other, right, _v_union_v, (self, other, left, right, op, dtype)],
+ [self, left, other, right, _v_union_v, (self, other, left, right, op)],
expr_repr=expr_repr,
size=self._size,
op=op,
@@ -1284,15 +1334,37 @@ def vxm(self, other, op=semiring.plus_times):
# Functional syntax
C << semiring.min_plus(v @ A)
+
"""
+ return self._vxm(other, op)
+
+ def _vxm(self, other, op=semiring.plus_times, is_infix=False):
from .matrix import Matrix, TransposedMatrix
method_name = "vxm"
- other = self._expect_type(
- other, (Matrix, TransposedMatrix), within=method_name, argname="other", op=op
- )
- op = get_typed_op(op, self.dtype, other.dtype, kind="semiring")
- self._expect_op(op, "Semiring", within=method_name, argname="op")
+ if is_infix:
+ from .infix import MatrixMatMulExpr, VectorMatMulExpr
+
+ other = self._expect_type(
+ other,
+ (Matrix, TransposedMatrix, MatrixMatMulExpr),
+ within=method_name,
+ argname="other",
+ op=op,
+ )
+ op = _get_typed_op_from_exprs(op, self, other, kind="semiring")
+ self._expect_op(op, "Semiring", within=method_name, argname="op")
+ if isinstance(self, VectorMatMulExpr):
+ self = op(self).new()
+ if isinstance(other, MatrixMatMulExpr):
+ other = op(other).new()
+ else:
+ other = self._expect_type(
+ other, (Matrix, TransposedMatrix), within=method_name, argname="other", op=op
+ )
+ op = get_typed_op(op, self.dtype, other.dtype, kind="semiring")
+ self._expect_op(op, "Semiring", within=method_name, argname="op")
+
expr = VectorExpression(
method_name,
"GrB_vxm",
@@ -1342,6 +1414,7 @@ def apply(self, op, right=None, *, left=None):
# Functional syntax
w << op.abs(v)
+
"""
method_name = "apply"
extra_message = (
@@ -1487,6 +1560,7 @@ def select(self, op, thunk=None):
# Functional syntax
w << select.value(v >= 1)
+
"""
method_name = "select"
if isinstance(op, str):
@@ -1581,6 +1655,7 @@ def reduce(self, op=monoid.plus, *, allow_empty=True):
.. code-block:: python
total << v.reduce(monoid.plus)
+
"""
method_name = "reduce"
op = get_typed_op(op, self.dtype, kind="binary|aggregator")
@@ -1633,11 +1708,29 @@ def inner(self, other, op=semiring.plus_times):
*Note*: This is not a standard GraphBLAS function, but fits with other functions in the
`Matrix Multiplication <../user_guide/operations.html#matrix-multiply>`__
family of functions.
+
"""
+ return self._inner(other, op)
+
+ def _inner(self, other, op=semiring.plus_times, is_infix=False):
method_name = "inner"
- other = self._expect_type(other, Vector, within=method_name, argname="other", op=op)
- op = get_typed_op(op, self.dtype, other.dtype, kind="semiring")
- self._expect_op(op, "Semiring", within=method_name, argname="op")
+ if is_infix:
+ from .infix import VectorMatMulExpr
+
+ other = self._expect_type(
+ other, (Vector, VectorMatMulExpr), within=method_name, argname="other", op=op
+ )
+ op = _get_typed_op_from_exprs(op, self, other, kind="semiring")
+ self._expect_op(op, "Semiring", within=method_name, argname="op")
+ if isinstance(self, VectorMatMulExpr):
+ self = op(self).new()
+ if isinstance(other, VectorMatMulExpr):
+ other = op(other).new()
+ else:
+ other = self._expect_type(other, Vector, within=method_name, argname="other", op=op)
+ op = get_typed_op(op, self.dtype, other.dtype, kind="semiring")
+ self._expect_op(op, "Semiring", within=method_name, argname="op")
+
expr = ScalarExpression(
method_name,
"GrB_vxm",
@@ -1671,6 +1764,7 @@ def outer(self, other, op=binary.times):
C << v.outer(w, op=binary.times)
*Note*: This is not a standard GraphBLAS function.
+
"""
from .matrix import MatrixExpression
@@ -1719,6 +1813,7 @@ def reposition(self, offset, *, size=None):
.. code-block:: python
w = v.reposition(20).new()
+
"""
if size is None:
size = self._size
@@ -1757,7 +1852,7 @@ def _extract_element(
result = Scalar(dtype, is_cscalar=is_cscalar, name=name)
if opts:
# Ignore opts for now
- descriptor_lookup(**opts)
+ desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context)
if is_cscalar:
dtype_name = "UDT" if dtype._is_udt else dtype.name
if (
@@ -1979,6 +2074,7 @@ def from_dict(cls, d, dtype=None, *, size=None, name=None):
Returns
-------
Vector
+
"""
indices = np.fromiter(d.keys(), np.uint64)
if dtype is None:
@@ -2006,9 +2102,10 @@ def to_dict(self):
Returns
-------
dict
+
"""
indices, values = self.to_coo(sort=False)
- return dict(zip(indices.tolist(), values.tolist()))
+ return dict(zip(indices.tolist(), values.tolist(), strict=True))
if backend == "suitesparse":
@@ -2135,7 +2232,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
to_coo = wrapdoc(Vector.to_coo)(property(automethods.to_coo))
to_dense = wrapdoc(Vector.to_dense)(property(automethods.to_dense))
to_dict = wrapdoc(Vector.to_dict)(property(automethods.to_dict))
- to_values = wrapdoc(Vector.to_values)(property(automethods.to_values))
vxm = wrapdoc(Vector.vxm)(property(automethods.vxm))
wait = wrapdoc(Vector.wait)(property(automethods.wait))
# These raise exceptions
@@ -2177,6 +2273,9 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
if clear:
if dtype is None:
dtype = self.dtype
+ if opts:
+ # Ignore opts for now
+ desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context)
return self.output_type(dtype, *self.shape, name=name)
return self.new(dtype, mask=mask, name=name, **opts)
@@ -2220,7 +2319,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
to_coo = wrapdoc(Vector.to_coo)(property(automethods.to_coo))
to_dense = wrapdoc(Vector.to_dense)(property(automethods.to_dense))
to_dict = wrapdoc(Vector.to_dict)(property(automethods.to_dict))
- to_values = wrapdoc(Vector.to_values)(property(automethods.to_values))
vxm = wrapdoc(Vector.vxm)(property(automethods.vxm))
wait = wrapdoc(Vector.wait)(property(automethods.wait))
# These raise exceptions
diff --git a/graphblas/dtypes/__init__.py b/graphblas/dtypes/__init__.py
new file mode 100644
index 000000000..f9c144f13
--- /dev/null
+++ b/graphblas/dtypes/__init__.py
@@ -0,0 +1,46 @@
+from ..core.dtypes import (
+ _INDEX,
+ BOOL,
+ FP32,
+ FP64,
+ INT8,
+ INT16,
+ INT32,
+ INT64,
+ UINT8,
+ UINT16,
+ UINT32,
+ UINT64,
+ DataType,
+ _supports_complex,
+ lookup_dtype,
+ register_anonymous,
+ register_new,
+ unify,
+)
+
+if _supports_complex:
+ from ..core.dtypes import FC32, FC64
+
+
+def __dir__():
+ return globals().keys() | {"ss"}
+
+
+def __getattr__(key):
+ if key == "ss":
+ from .. import backend
+
+ if backend != "suitesparse":
+ raise AttributeError(
+ f'module {__name__!r} only has attribute "ss" when backend is "suitesparse"'
+ )
+ from importlib import import_module
+
+ ss = import_module(".ss", __name__)
+ globals()["ss"] = ss
+ return ss
+ raise AttributeError(f"module {__name__!r} has no attribute {key!r}")
+
+
+_index_dtypes = {BOOL, INT8, UINT8, INT16, UINT16, INT32, UINT32, INT64, UINT64, _INDEX}
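The `__getattr__`/`__dir__` pair above is the lazy, backend-gated `ss` namespace pattern repeated across the package (also in `indexunary`, `monoid`, `select`, ...). A sketch of the resulting behavior, assuming the `suitesparse` backend is active:

```python
import graphblas as gb

assert "ss" in dir(gb.dtypes)  # advertised before it is ever imported
gb.dtypes.ss                   # first access triggers the lazy import
gb.dtypes.ss.register_new      # SuiteSparse-only dtype registration

# Under any other backend, accessing gb.dtypes.ss raises AttributeError.
```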
diff --git a/graphblas/dtypes/ss.py b/graphblas/dtypes/ss.py
new file mode 100644
index 000000000..9f6083e01
--- /dev/null
+++ b/graphblas/dtypes/ss.py
@@ -0,0 +1 @@
+from ..core.ss.dtypes import register_new # noqa: F401
diff --git a/graphblas/exceptions.py b/graphblas/exceptions.py
index 0acc9ed0b..05cac988a 100644
--- a/graphblas/exceptions.py
+++ b/graphblas/exceptions.py
@@ -1,4 +1,3 @@
-from . import backend as _backend
from .core import ffi as _ffi
from .core import lib as _lib
from .core.utils import _Pointer
@@ -85,9 +84,14 @@ class NotImplementedException(GraphblasException):
"""
+# SuiteSparse errors
+class JitError(GraphblasException):
+ """SuiteSparse:GraphBLAS error using JIT."""
+
+
# Our errors
class UdfParseError(GraphblasException):
- """Unable to parse the user-defined function."""
+ """SuiteSparse:GraphBLAS unable to parse the user-defined function."""
_error_code_lookup = {
@@ -112,8 +116,12 @@ class UdfParseError(GraphblasException):
}
GrB_SUCCESS = _lib.GrB_SUCCESS
GrB_NO_VALUE = _lib.GrB_NO_VALUE
-if _backend == "suitesparse":
+
+# SuiteSparse-specific errors
+if hasattr(_lib, "GxB_EXHAUSTED"):
_error_code_lookup[_lib.GxB_EXHAUSTED] = StopIteration
+if hasattr(_lib, "GxB_JIT_ERROR"): # Added in 9.4
+ _error_code_lookup[_lib.GxB_JIT_ERROR] = JitError
def check_status(response_code, args):
@@ -121,7 +129,7 @@ def check_status(response_code, args):
return
if response_code == GrB_NO_VALUE:
return NoValue
- if type(args) is list:
+ if isinstance(args, list):
arg = args[0]
else:
arg = args
diff --git a/graphblas/indexunary/__init__.py b/graphblas/indexunary/__init__.py
index 472231597..a3cb06608 100644
--- a/graphblas/indexunary/__init__.py
+++ b/graphblas/indexunary/__init__.py
@@ -4,7 +4,7 @@
def __dir__():
- return globals().keys() | _delayed.keys()
+ return globals().keys() | _delayed.keys() | {"ss"}
def __getattr__(key):
@@ -13,6 +13,18 @@ def __getattr__(key):
rv = func(**kwargs)
globals()[key] = rv
return rv
+ if key == "ss":
+ from .. import backend
+
+ if backend != "suitesparse":
+ raise AttributeError(
+ f'module {__name__!r} only has attribute "ss" when backend is "suitesparse"'
+ )
+ from importlib import import_module
+
+ ss = import_module(".ss", __name__)
+ globals()["ss"] = ss
+ return ss
raise AttributeError(f"module {__name__!r} has no attribute {key!r}")
diff --git a/graphblas/indexunary/ss.py b/graphblas/indexunary/ss.py
new file mode 100644
index 000000000..58218df6f
--- /dev/null
+++ b/graphblas/indexunary/ss.py
@@ -0,0 +1,6 @@
+from ..core import operator
+from ..core.ss.indexunary import register_new # noqa: F401
+
+_delayed = {}
+
+del operator
diff --git a/graphblas/io/__init__.py b/graphblas/io/__init__.py
index 0eafd45c8..a1b71db40 100644
--- a/graphblas/io/__init__.py
+++ b/graphblas/io/__init__.py
@@ -1,7 +1,5 @@
from ._awkward import from_awkward, to_awkward
from ._matrixmarket import mmread, mmwrite
from ._networkx import from_networkx, to_networkx
-from ._numpy import from_numpy, to_numpy # deprecated
from ._scipy import from_scipy_sparse, to_scipy_sparse
from ._sparse import from_pydata_sparse, to_pydata_sparse
-from ._viz import draw # deprecated
diff --git a/graphblas/io/_awkward.py b/graphblas/io/_awkward.py
index 3119bdf3b..b30984251 100644
--- a/graphblas/io/_awkward.py
+++ b/graphblas/io/_awkward.py
@@ -7,58 +7,6 @@
_AwkwardDoublyCompressedMatrix = None
-def from_awkward(A, *, name=None):
- """Create a Matrix or Vector from an Awkward Array.
-
- The Awkward Array must have top-level parameters: format, shape
-
- The Awkward Array must have top-level attributes based on format:
- - vec/csr/csc: values, indices
- - hypercsr/hypercsc: values, indices, offset_labels
-
- Parameters
- ----------
- A : awkward.Array
- Awkward Array with values and indices
- name : str, optional
- Name of resulting Matrix or Vector
-
- Returns
- -------
- Vector or Matrix
- """
- params = A.layout.parameters
- if missing := {"format", "shape"} - params.keys():
- raise ValueError(f"Missing parameters: {missing}")
- format = params["format"]
- shape = params["shape"]
-
- if len(shape) == 1:
- if format != "vec":
- raise ValueError(f"Invalid format for Vector: {format}")
- return Vector.from_coo(
- A.indices.layout.data, A.values.layout.data, size=shape[0], name=name
- )
- nrows, ncols = shape
- values = A.values.layout.content.data
- indptr = A.values.layout.offsets.data
- if format == "csr":
- cols = A.indices.layout.content.data
- return Matrix.from_csr(indptr, cols, values, ncols=ncols, name=name)
- if format == "csc":
- rows = A.indices.layout.content.data
- return Matrix.from_csc(indptr, rows, values, nrows=nrows, name=name)
- if format == "hypercsr":
- rows = A.offset_labels.layout.data
- cols = A.indices.layout.content.data
- return Matrix.from_dcsr(rows, indptr, cols, values, nrows=nrows, ncols=ncols, name=name)
- if format == "hypercsc":
- cols = A.offset_labels.layout.data
- rows = A.indices.layout.content.data
- return Matrix.from_dcsc(cols, indptr, rows, values, nrows=nrows, ncols=ncols, name=name)
- raise ValueError(f"Invalid format for Matrix: {format}")
-
-
def to_awkward(A, format=None):
"""Create an Awkward Array from a GraphBLAS Matrix.
@@ -179,3 +127,62 @@ def indices(self): # pragma: no branch (???)
if classname:
ret = ak.with_name(ret, classname)
return ret
+
+
+def from_awkward(A, *, name=None):
+ """Create a Matrix or Vector from an Awkward Array.
+
+ The Awkward Array must have top-level parameters: format, shape
+
+ The Awkward Array must have top-level attributes based on format:
+ - vec/csr/csc: values, indices
+ - hypercsr/hypercsc: values, indices, offset_labels
+
+ Parameters
+ ----------
+ A : awkward.Array
+ Awkward Array with values and indices
+ name : str, optional
+ Name of resulting Matrix or Vector
+
+ Returns
+ -------
+ Vector or Matrix
+
+ Note: the intended purpose of this function is to facilitate
+ conversion of an ``awkward-array`` that was created via the
+ ``to_awkward`` function. If attempting to convert an arbitrary
+ ``awkward-array``, make sure that the top-level attributes and
+ parameters contain the expected values.
+
+ """
+ params = A.layout.parameters
+ if missing := {"format", "shape"} - params.keys():
+ raise ValueError(f"Missing parameters: {missing}")
+ format = params["format"]
+ shape = params["shape"]
+
+ if len(shape) == 1:
+ if format != "vec":
+ raise ValueError(f"Invalid format for Vector: {format}")
+ return Vector.from_coo(
+ A.indices.layout.data, A.values.layout.data, size=shape[0], name=name
+ )
+ nrows, ncols = shape
+ values = A.values.layout.content.data
+ indptr = A.values.layout.offsets.data
+ if format == "csr":
+ cols = A.indices.layout.content.data
+ return Matrix.from_csr(indptr, cols, values, ncols=ncols, name=name)
+ if format == "csc":
+ rows = A.indices.layout.content.data
+ return Matrix.from_csc(indptr, rows, values, nrows=nrows, name=name)
+ if format == "hypercsr":
+ rows = A.offset_labels.layout.data
+ cols = A.indices.layout.content.data
+ return Matrix.from_dcsr(rows, indptr, cols, values, nrows=nrows, ncols=ncols, name=name)
+ if format == "hypercsc":
+ cols = A.offset_labels.layout.data
+ rows = A.indices.layout.content.data
+ return Matrix.from_dcsc(cols, indptr, rows, values, nrows=nrows, ncols=ncols, name=name)
+ raise ValueError(f"Invalid format for Matrix: {format}")
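Per the new docstring note, `from_awkward` is primarily the inverse of `to_awkward`. A round-trip sketch (assumes `awkward` is installed):

```python
import graphblas as gb

v = gb.Vector.from_coo([0, 2], [1.0, 2.0], size=4)
a = gb.io.to_awkward(v)    # tags the array with the required format/shape
w = gb.io.from_awkward(a)  # rebuilds the Vector from those attributes
assert w.isequal(v)
```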
diff --git a/graphblas/io/_matrixmarket.py b/graphblas/io/_matrixmarket.py
index 294bcfa1e..8cf8738a3 100644
--- a/graphblas/io/_matrixmarket.py
+++ b/graphblas/io/_matrixmarket.py
@@ -32,11 +32,11 @@ def mmread(source, engine="auto", *, dup_op=None, name=None, **kwargs):
Returns
-------
:class:`~graphblas.Matrix`
+
"""
try:
# scipy is currently needed for *all* engines
from scipy.io import mmread
- from scipy.sparse import isspmatrix_coo
except ImportError: # pragma: no cover (import)
raise ImportError("scipy is required to read Matrix Market files") from None
engine = engine.lower()
@@ -54,7 +54,7 @@ def mmread(source, engine="auto", *, dup_op=None, name=None, **kwargs):
f'Bad engine value: {engine!r}. Must be "auto", "scipy", "fmm", or "fast_matrix_market"'
)
array = mmread(source, **kwargs)
- if isspmatrix_coo(array):
+ if getattr(array, "format", None) == "coo":
nrows, ncols = array.shape
return Matrix.from_coo(
array.row, array.col, array.data, nrows=nrows, ncols=ncols, dup_op=dup_op, name=name
@@ -96,6 +96,7 @@ def mmwrite(
Number of digits to write for real or complex values
symmetry : str, optional
{"general", "symmetric", "skew-symmetric", "hermetian"}
+
"""
try:
# scipy is currently needed for *all* engines
@@ -105,13 +106,17 @@ def mmwrite(
engine = engine.lower()
if engine in {"auto", "fmm", "fast_matrix_market"}:
try:
- from fast_matrix_market import mmwrite # noqa: F811
+ from fast_matrix_market import __version__, mmwrite # noqa: F811
except ImportError: # pragma: no cover (import)
if engine != "auto":
raise ImportError(
"fast_matrix_market is required to write Matrix Market files "
f'using the "{engine}" engine'
) from None
+ else:
+ import scipy as sp
+
+ engine = "fast_matrix_market"
elif engine != "scipy":
raise ValueError(
f'Bad engine value: {engine!r}. Must be "auto", "scipy", "fmm", or "fast_matrix_market"'
@@ -120,6 +125,12 @@ def mmwrite(
array = matrix.ss.export()["values"]
else:
array = to_scipy_sparse(matrix, format="coo")
+ if engine == "fast_matrix_market" and __version__ < "1.7." and sp.__version__ > "1.11.":
+ # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. `ss.isspmatrix_coo`.
+ # fast_matrix_market updated to handle this in version 1.7.0
+ # Also, it looks like fast_matrix_market has special writers for csr and csc;
+ # should we see if using those are faster?
+ array = sp.sparse.coo_matrix(array) # FLAKY COVERAGE
mmwrite(
target,
array,
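The version-gated branch above works around `fast_matrix_market` < 1.7 not recognizing the `sparray` containers that scipy >= 1.11 returns; converting back to the legacy `coo_matrix` type sidesteps the issue. The conversion in isolation:

```python
import scipy.sparse as sp

A = sp.coo_array([[1, 0], [0, 2]])  # scipy >= 1.11 "sparray" container
legacy = sp.coo_matrix(A)           # legacy spmatrix, accepted by older writers
```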
diff --git a/graphblas/io/_networkx.py b/graphblas/io/_networkx.py
index 2324a11c2..8cf84e576 100644
--- a/graphblas/io/_networkx.py
+++ b/graphblas/io/_networkx.py
@@ -21,6 +21,7 @@ def from_networkx(G, nodelist=None, dtype=None, weight="weight", name=None):
Returns
-------
:class:`~graphblas.Matrix`
+
"""
import networkx as nx
@@ -45,6 +46,7 @@ def to_networkx(m, edge_attribute="weight"):
Returns
-------
nx.DiGraph
+
"""
import networkx as nx
@@ -53,7 +55,9 @@ def to_networkx(m, edge_attribute="weight"):
cols = cols.tolist()
G = nx.DiGraph()
if edge_attribute is None:
- G.add_edges_from(zip(rows, cols))
+ G.add_edges_from(zip(rows, cols, strict=True))
else:
- G.add_weighted_edges_from(zip(rows, cols, vals.tolist()), weight=edge_attribute)
+ G.add_weighted_edges_from(
+ zip(rows, cols, vals.tolist(), strict=True), weight=edge_attribute
+ )
return G
diff --git a/graphblas/io/_numpy.py b/graphblas/io/_numpy.py
deleted file mode 100644
index 1c40e1633..000000000
--- a/graphblas/io/_numpy.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from warnings import warn
-
-from ..core.utils import output_type
-from ..core.vector import Vector
-from ..dtypes import lookup_dtype
-from ..exceptions import GraphblasException
-from ._scipy import from_scipy_sparse, to_scipy_sparse
-
-
-def from_numpy(m): # pragma: no cover (deprecated)
- """Create a sparse Vector or Matrix from a dense numpy array.
-
- .. deprecated:: 2023.2.0
- `from_numpy` will be removed in a future release.
- Use `Vector.from_dense` or `Matrix.from_dense` instead.
- Will be removed in version 2023.10.0 or later
-
- A value of 0 is considered as "missing".
-
- - m.ndim == 1 returns a `Vector`
- - m.ndim == 2 returns a `Matrix`
- - m.ndim > 2 raises an error
-
- dtype is inferred from m.dtype
-
- Parameters
- ----------
- m : np.ndarray
- Input array
-
- See Also
- --------
- Matrix.from_dense
- Vector.from_dense
- from_scipy_sparse
-
- Returns
- -------
- Vector or Matrix
- """
- warn(
- "`graphblas.io.from_numpy` is deprecated; "
- "use `Matrix.from_dense` and `Vector.from_dense` instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- if m.ndim > 2:
- raise GraphblasException("m.ndim must be <= 2")
-
- try:
- from scipy.sparse import coo_array, csr_array
- except ImportError: # pragma: no cover (import)
- raise ImportError("scipy is required to import from numpy") from None
-
- if m.ndim == 1:
- A = csr_array(m)
- _, size = A.shape
- dtype = lookup_dtype(m.dtype)
- return Vector.from_coo(A.indices, A.data, size=size, dtype=dtype)
- A = coo_array(m)
- return from_scipy_sparse(A)
-
-
-def to_numpy(m): # pragma: no cover (deprecated)
- """Create a dense numpy array from a sparse Vector or Matrix.
-
- .. deprecated:: 2023.2.0
- `to_numpy` will be removed in a future release.
- Use `Vector.to_dense` or `Matrix.to_dense` instead.
- Will be removed in version 2023.10.0 or later
-
- Missing values will become 0 in the output.
-
- numpy dtype will match the GraphBLAS dtype
-
- Parameters
- ----------
- m : Vector or Matrix
- GraphBLAS Vector or Matrix
-
- See Also
- --------
- to_scipy_sparse
- Matrix.to_dense
- Vector.to_dense
-
- Returns
- -------
- np.ndarray
- """
- warn(
- "`graphblas.io.to_numpy` is deprecated; "
- "use `Matrix.to_dense` and `Vector.to_dense` instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- try:
- import scipy # noqa: F401
- except ImportError: # pragma: no cover (import)
- raise ImportError("scipy is required to export to numpy") from None
- if output_type(m) is Vector:
- return to_scipy_sparse(m).toarray()[0]
- sparse = to_scipy_sparse(m, "coo")
- return sparse.toarray()
diff --git a/graphblas/io/_scipy.py b/graphblas/io/_scipy.py
index 1eaa691dd..228432eed 100644
--- a/graphblas/io/_scipy.py
+++ b/graphblas/io/_scipy.py
@@ -22,6 +22,7 @@ def from_scipy_sparse(A, *, dup_op=None, name=None):
Returns
-------
:class:`~graphblas.Matrix`
+
"""
nrows, ncols = A.shape
dtype = lookup_dtype(A.dtype)
diff --git a/graphblas/io/_sparse.py b/graphblas/io/_sparse.py
index 2bbdad2e6..c0d4beabb 100644
--- a/graphblas/io/_sparse.py
+++ b/graphblas/io/_sparse.py
@@ -23,6 +23,7 @@ def from_pydata_sparse(s, *, dup_op=None, name=None):
-------
:class:`~graphblas.Vector`
:class:`~graphblas.Matrix`
+
"""
try:
import sparse
diff --git a/graphblas/io/_viz.py b/graphblas/io/_viz.py
deleted file mode 100644
index 19211573f..000000000
--- a/graphblas/io/_viz.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from warnings import warn
-
-
-def draw(m): # pragma: no cover (deprecated)
- """Draw a square adjacency Matrix as a graph.
-
- Requires `networkx `_ and
- `matplotlib `_ to be installed.
-
- Example output:
-
- .. image:: /_static/img/draw-example.png
- """
- from .. import viz
-
- warn(
- "`graphblas.io.draw` is deprecated; it has been moved to `graphblas.viz.draw`",
- DeprecationWarning,
- stacklevel=2,
- )
- viz.draw(m)
diff --git a/graphblas/monoid/__init__.py b/graphblas/monoid/__init__.py
index 007aba416..027fc0afe 100644
--- a/graphblas/monoid/__init__.py
+++ b/graphblas/monoid/__init__.py
@@ -4,19 +4,31 @@
def __dir__():
- return globals().keys() | _delayed.keys()
+ return globals().keys() | _delayed.keys() | {"ss"}
def __getattr__(key):
if key in _delayed:
func, kwargs = _delayed.pop(key)
- if type(kwargs["binaryop"]) is str:
+ if isinstance(kwargs["binaryop"], str):
from ..binary import from_string
kwargs["binaryop"] = from_string(kwargs["binaryop"])
rv = func(**kwargs)
globals()[key] = rv
return rv
+ if key == "ss":
+ from .. import backend
+
+ if backend != "suitesparse":
+ raise AttributeError(
+ f'module {__name__!r} only has attribute "ss" when backend is "suitesparse"'
+ )
+ from importlib import import_module
+
+ ss = import_module(".ss", __name__)
+ globals()["ss"] = ss
+ return ss
raise AttributeError(f"module {__name__!r} has no attribute {key!r}")
diff --git a/graphblas/monoid/numpy.py b/graphblas/monoid/numpy.py
index f46d57143..b9ff2b502 100644
--- a/graphblas/monoid/numpy.py
+++ b/graphblas/monoid/numpy.py
@@ -5,6 +5,7 @@
https://numba.readthedocs.io/en/stable/reference/numpysupported.html#math-operations
"""
+
import numpy as _np
from .. import _STANDARD_OPERATOR_NAMES
@@ -90,8 +91,7 @@
if (
_config.get("mapnumpy")
or _has_numba
- and type(_numba.njit(lambda x, y: _np.fmax(x, y))(1, 2)) # pragma: no branch (numba)
- is not float
+ and type(_numba.njit(lambda x, y: _np.fmax(x, y))(1, 2)) is not float  # pragma: no branch (numba)
):
# Incorrect behavior was introduced in numba 0.56.2 and numpy 1.23
# See: https://github.com/numba/numba/issues/8478
@@ -170,7 +170,7 @@ def __dir__():
def __getattr__(name):
if name in _delayed:
func, kwargs = _delayed.pop(name)
- if type(kwargs["binaryop"]) is str:
+ if isinstance(kwargs["binaryop"], str):
from ..binary import from_string
kwargs["binaryop"] = from_string(kwargs["binaryop"])
diff --git a/graphblas/monoid/ss.py b/graphblas/monoid/ss.py
new file mode 100644
index 000000000..97852fc12
--- /dev/null
+++ b/graphblas/monoid/ss.py
@@ -0,0 +1,5 @@
+from ..core import operator
+
+_delayed = {}
+
+del operator
diff --git a/graphblas/op/ss.py b/graphblas/op/ss.py
index e45cbcda0..97852fc12 100644
--- a/graphblas/op/ss.py
+++ b/graphblas/op/ss.py
@@ -1,3 +1,5 @@
from ..core import operator
+_delayed = {}
+
del operator
diff --git a/graphblas/select/__init__.py b/graphblas/select/__init__.py
index c7a1897f5..b55766ff8 100644
--- a/graphblas/select/__init__.py
+++ b/graphblas/select/__init__.py
@@ -8,7 +8,7 @@
def __dir__():
- return globals().keys() | _delayed.keys()
+ return globals().keys() | _delayed.keys() | {"ss"}
def __getattr__(key):
@@ -17,6 +17,18 @@ def __getattr__(key):
rv = func(**kwargs)
globals()[key] = rv
return rv
+ if key == "ss":
+ from .. import backend
+
+ if backend != "suitesparse":
+ raise AttributeError(
+ f'module {__name__!r} only has attribute "ss" when backend is "suitesparse"'
+ )
+ from importlib import import_module
+
+ ss = import_module(".ss", __name__)
+ globals()["ss"] = ss
+ return ss
raise AttributeError(f"module {__name__!r} has no attribute {key!r}")
@@ -57,9 +69,9 @@ def _resolve_expr(expr, callname, opname):
def _match_expr(parent, expr):
- """Match expressions to rewrite `A.select(A < 5)` into select expression.
+ """Match expressions to rewrite ``A.select(A < 5)`` into select expression.
- The argument must match the parent, so this _won't_ be rewritten: `A.select(B < 5)`
+ The argument must match the parent, so this _won't_ be rewritten: ``A.select(B < 5)``
"""
args = expr.args
op = expr.op
@@ -76,56 +88,49 @@ def _match_expr(parent, expr):
def value(expr):
- """
- An advanced select method which allows for easily expressing
- value comparison logic.
+ """An advanced select method for easily expressing value comparison logic.
Example usage:
>>> gb.select.value(A > 0)
- The example will dispatch to `gb.select.valuegt(A, 0)`
+ The example will dispatch to ``gb.select.valuegt(A, 0)``
while being nicer to read.
"""
return _resolve_expr(expr, "value", "value")
def row(expr):
- """
- An advanced select method which allows for easily expressing
- Matrix row index comparison logic.
+ """An advanced select method for easily expressing Matrix row index comparison logic.
Example usage:
>>> gb.select.row(A <= 5)
- The example will dispatch to `gb.select.rowle(A, 5)`
+ The example will dispatch to ``gb.select.rowle(A, 5)``
while being potentially nicer to read.
"""
return _resolve_expr(expr, "row", "row")
def column(expr):
- """
- An advanced select method which allows for easily expressing
- Matrix column index comparison logic.
+ """An advanced select method for easily expressing Matrix column index comparison logic.
Example usage:
>>> gb.select.column(A <= 5)
- The example will dispatch to `gb.select.colle(A, 5)`
+ The example will dispatch to ``gb.select.colle(A, 5)``
while being potentially nicer to read.
"""
return _resolve_expr(expr, "column", "col")
def index(expr):
- """
- An advanced select method which allows for easily expressing
+ """An advanced select method which allows for easily expressing
Vector index comparison logic.
Example usage:
>>> gb.select.index(v <= 5)
- The example will dispatch to `gb.select.indexle(v, 5)`
+ The example will dispatch to ``gb.select.indexle(v, 5)``
while being potentially nicer to read.
"""
return _resolve_expr(expr, "index", "index")
diff --git a/graphblas/select/ss.py b/graphblas/select/ss.py
new file mode 100644
index 000000000..173067382
--- /dev/null
+++ b/graphblas/select/ss.py
@@ -0,0 +1,6 @@
+from ..core import operator
+from ..core.ss.select import register_new # noqa: F401
+
+_delayed = {}
+
+del operator
diff --git a/graphblas/semiring/__init__.py b/graphblas/semiring/__init__.py
index 538136406..95a44261a 100644
--- a/graphblas/semiring/__init__.py
+++ b/graphblas/semiring/__init__.py
@@ -46,11 +46,11 @@ def __getattr__(key):
return rv
if key in _delayed:
func, kwargs = _delayed.pop(key)
- if type(kwargs["binaryop"]) is str:
+ if isinstance(kwargs["binaryop"], str):
from ..binary import from_string
kwargs["binaryop"] = from_string(kwargs["binaryop"])
- if type(kwargs["monoid"]) is str:
+ if isinstance(kwargs["monoid"], str):
from ..monoid import from_string
kwargs["monoid"] = from_string(kwargs["monoid"])
diff --git a/graphblas/semiring/numpy.py b/graphblas/semiring/numpy.py
index 3a59090cc..10a680ea0 100644
--- a/graphblas/semiring/numpy.py
+++ b/graphblas/semiring/numpy.py
@@ -5,6 +5,7 @@
https://numba.readthedocs.io/en/stable/reference/numpysupported.html#math-operations
"""
+
import itertools as _itertools
from .. import _STANDARD_OPERATOR_NAMES
@@ -151,11 +152,11 @@ def __getattr__(name):
if name in _delayed:
func, kwargs = _delayed.pop(name)
- if type(kwargs["binaryop"]) is str:
+ if isinstance(kwargs["binaryop"], str):
from ..binary import from_string
kwargs["binaryop"] = from_string(kwargs["binaryop"])
- if type(kwargs["monoid"]) is str:
+ if isinstance(kwargs["monoid"], str):
from ..monoid import from_string
kwargs["monoid"] = from_string(kwargs["monoid"])
diff --git a/graphblas/semiring/ss.py b/graphblas/semiring/ss.py
index e45cbcda0..97852fc12 100644
--- a/graphblas/semiring/ss.py
+++ b/graphblas/semiring/ss.py
@@ -1,3 +1,5 @@
from ..core import operator
+_delayed = {}
+
del operator
diff --git a/graphblas/ss/__init__.py b/graphblas/ss/__init__.py
index b36bc1bdc..1f059771b 100644
--- a/graphblas/ss/__init__.py
+++ b/graphblas/ss/__init__.py
@@ -1 +1,7 @@
-from ._core import about, concat, config, diag
+from suitesparse_graphblas import burble
+
+from ._core import _IS_SSGB7, about, concat, config, diag
+
+if not _IS_SSGB7:
+ # Context was introduced in SuiteSparse:GraphBLAS 8.0
+ from ..core.ss.context import Context, global_context
diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py
index 441458a42..b42ea72b4 100644
--- a/graphblas/ss/_core.py
+++ b/graphblas/ss/_core.py
@@ -2,8 +2,10 @@
from ..core import ffi, lib
from ..core.base import _expect_type
+from ..core.descriptor import lookup as descriptor_lookup
from ..core.matrix import Matrix, TransposedMatrix
from ..core.scalar import _as_scalar
+from ..core.ss import _IS_SSGB7
from ..core.ss.config import BaseConfig
from ..core.ss.matrix import _concat_mn
from ..core.vector import Vector
@@ -12,7 +14,7 @@
class _graphblas_ss:
- """Used in `_expect_type`."""
+ """Used in ``_expect_type``."""
_graphblas_ss.__name__ = "graphblas.ss"
@@ -20,8 +22,7 @@ class _graphblas_ss:
def diag(x, k=0, dtype=None, *, name=None, **opts):
- """
- GxB_Matrix_diag, GxB_Vector_diag.
+ """GxB_Matrix_diag, GxB_Vector_diag.
Extract a diagonal Vector from a Matrix, or construct a diagonal Matrix
from a Vector. Unlike ``Matrix.diag`` and ``Vector.diag``, this function
@@ -33,8 +34,8 @@ def diag(x, k=0, dtype=None, *, name=None, **opts):
The Vector to assign to the diagonal, or the Matrix from which to
extract the diagonal.
k : int, default 0
- Diagonal in question. Use `k>0` for diagonals above the main diagonal,
- and `k<0` for diagonals below the main diagonal.
+ Diagonal in question. Use ``k>0`` for diagonals above the main diagonal,
+ and ``k<0`` for diagonals below the main diagonal.
See Also
--------
@@ -52,6 +53,9 @@ def diag(x, k=0, dtype=None, *, name=None, **opts):
dtype = x.dtype
typ = type(x)
if typ is Vector:
+ if opts:
+ # Ignore opts for now
+ desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context)
size = x._size + abs(k.value)
rv = Matrix(dtype, nrows=size, ncols=size, name=name)
rv.ss.build_diag(x, k)
@@ -66,14 +70,13 @@ def diag(x, k=0, dtype=None, *, name=None, **opts):
def concat(tiles, dtype=None, *, name=None, **opts):
- """
- GxB_Matrix_concat.
+ """GxB_Matrix_concat.
Concatenate a 2D list of Matrix objects into a new Matrix, or a 1D list of
Vector objects into a new Vector. To concatenate into existing objects,
- use ``Matrix.ss.concat`` or `Vector.ss.concat`.
+ use ``Matrix.ss.concat`` or ``Vector.ss.concat``.
- Vectors may be used as `Nx1` Matrix objects when creating a new Matrix.
+ Vectors may be used as ``Nx1`` Matrix objects when creating a new Matrix.
This performs the opposite operation as ``split``.
@@ -117,18 +120,65 @@ class GlobalConfig(BaseConfig):
Threshold that determines when to switch to bitmap format
nthreads : int
Maximum number of OpenMP threads to use
- memory_pool : List[int]
+ chunk : double
+ Control the number of threads used for small problems.
+ For example, ``nthreads = floor(work / chunk)``.
burble : bool
Enable diagnostic printing from SuiteSparse:GraphBLAS
- print_1based: bool
+ print_1based : bool
gpu_control : str, {"always", "never"}
+ Only available for SuiteSparse:GraphBLAS 7
+ **GPU support is a work in progress--not recommended for use**
gpu_chunk : double
+ Only available for SuiteSparse:GraphBLAS 7
+ **GPU support is a work in progress--not recommended for use**
+ gpu_id : int
+ Which GPU to use; default is -1, which means do not run on the GPU.
+ Only available for SuiteSparse:GraphBLAS >=8
+ **GPU support is a work in progress--not recommended for use**
+ jit_c_control : {"off", "pause", "run", "load", "on"}
+ Control the CPU JIT:
+ "off" : do not use the JIT and free all JIT kernels if loaded
+ "pause" : do not run JIT kernels, but keep any loaded
+ "run" : run JIT kernels if already loaded, but don't load or compile
+ "load" : able to load and run JIT kernels; may not compile
+ "on" : full JIT: able to compile, load, and run
+ Only available for SuiteSparse:GraphBLAS >=8
+ jit_use_cmake : bool
+ Whether to use cmake to compile the JIT kernels.
+ Only available for SuiteSparse:GraphBLAS >=8
+ jit_c_compiler_name : str
+ C compiler for JIT kernels.
+ Only available for SuiteSparse:GraphBLAS >=8
+ jit_c_compiler_flags : str
+ Flags for the C compiler.
+ Only available for SuiteSparse:GraphBLAS >=8
+ jit_c_linker_flags : str
+ Link flags for the C compiler.
+ Only available for SuiteSparse:GraphBLAS >=8
+ jit_c_libraries : str
+ Libraries to link against.
+ Only available for SuiteSparse:GraphBLAS >=8
+ jit_c_cmake_libs : str
+ Libraries to link against when cmake is used.
+ Only available for SuiteSparse:GraphBLAS >=8
+ jit_c_preface : str
+ C code as preface to JIT kernels.
+ Only available for SuiteSparse:GraphBLAS >=8
+ jit_error_log : str
+ Error log file.
+ Only available for SuiteSparse:GraphBLAS >=8
+ jit_cache_path : str
+ The folder with the compiled kernels.
+ Only available for SuiteSparse:GraphBLAS >=8
Setting values to None restores the default value for most configurations.
"""
_get_function = "GxB_Global_Option_get"
_set_function = "GxB_Global_Option_set"
+ if not _IS_SSGB7:
+ _context_keys = {"chunk", "gpu_id", "nthreads"}
_null_valid = {"bitmap_switch"}
_options = {
# Matrix/Vector format
@@ -139,14 +189,36 @@ class GlobalConfig(BaseConfig):
"nthreads": (lib.GxB_GLOBAL_NTHREADS, "int"),
"chunk": (lib.GxB_GLOBAL_CHUNK, "double"),
# Memory pool control
- "memory_pool": (lib.GxB_MEMORY_POOL, "int64_t[64]"),
+ # "memory_pool": (lib.GxB_MEMORY_POOL, "int64_t[64]"), # No longer used
# Diagnostics (skipping "printf" and "flush" for now)
"burble": (lib.GxB_BURBLE, "bool"),
"print_1based": (lib.GxB_PRINT_1BASED, "bool"),
- # CUDA GPU control
- "gpu_control": (lib.GxB_GLOBAL_GPU_CONTROL, "GrB_Desc_Value"),
- "gpu_chunk": (lib.GxB_GLOBAL_GPU_CHUNK, "double"),
}
+ if _IS_SSGB7:
+ _options.update(
+ {
+ "gpu_control": (lib.GxB_GLOBAL_GPU_CONTROL, "GrB_Desc_Value"),
+ "gpu_chunk": (lib.GxB_GLOBAL_GPU_CHUNK, "double"),
+ }
+ )
+ else:
+ _options.update(
+ {
+ # JIT control
+ "jit_c_control": (lib.GxB_JIT_C_CONTROL, "int"),
+ "jit_use_cmake": (lib.GxB_JIT_USE_CMAKE, "bool"),
+ "jit_c_compiler_name": (lib.GxB_JIT_C_COMPILER_NAME, "char*"),
+ "jit_c_compiler_flags": (lib.GxB_JIT_C_COMPILER_FLAGS, "char*"),
+ "jit_c_linker_flags": (lib.GxB_JIT_C_LINKER_FLAGS, "char*"),
+ "jit_c_libraries": (lib.GxB_JIT_C_LIBRARIES, "char*"),
+ "jit_c_cmake_libs": (lib.GxB_JIT_C_CMAKE_LIBS, "char*"),
+ "jit_c_preface": (lib.GxB_JIT_C_PREFACE, "char*"),
+ "jit_error_log": (lib.GxB_JIT_ERROR_LOG, "char*"),
+ "jit_cache_path": (lib.GxB_JIT_CACHE_PATH, "char*"),
+ # CUDA GPU control
+ "gpu_id": (lib.GxB_GLOBAL_GPU_ID, "int"),
+ }
+ )
# Values to restore defaults
_defaults = {
"hyper_switch": lib.GxB_HYPER_DEFAULT,
@@ -157,17 +229,28 @@ class GlobalConfig(BaseConfig):
"burble": 0,
"print_1based": 0,
}
+ if not _IS_SSGB7:
+ _defaults["gpu_id"] = -1 # -1 means no GPU
_enumerations = {
"format": {
"by_row": lib.GxB_BY_ROW,
"by_col": lib.GxB_BY_COL,
# "no_format": lib.GxB_NO_FORMAT, # Used by iterators; not valid here
},
- "gpu_control": {
+ }
+ if _IS_SSGB7:
+ _enumerations["gpu_control"] = {
"always": lib.GxB_GPU_ALWAYS,
"never": lib.GxB_GPU_NEVER,
- },
- }
+ }
+ else:
+ _enumerations["jit_c_control"] = {
+ "off": lib.GxB_JIT_OFF,
+ "pause": lib.GxB_JIT_PAUSE,
+ "run": lib.GxB_JIT_RUN,
+ "load": lib.GxB_JIT_LOAD,
+ "on": lib.GxB_JIT_ON,
+ }
class About(Mapping):
@@ -254,4 +337,10 @@ def __len__(self):
about = About()
-config = GlobalConfig()
+if _IS_SSGB7:
+ config = GlobalConfig()
+else:
+ # Context was introduced in SuiteSparse:GraphBLAS 8.0
+ from ..core.ss.context import global_context
+
+ config = GlobalConfig(context=global_context)
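With the new options registered, the JIT is controlled through the same mapping interface as the other global settings. A hedged sketch (requires SuiteSparse:GraphBLAS >= 8; assumes the config round-trips enumeration names on read):

```python
import graphblas as gb

gb.ss.config["jit_c_control"] = "pause"  # keep loaded kernels; compile nothing
assert gb.ss.config["jit_c_control"] == "pause"
gb.ss.config["jit_c_control"] = "on"     # full JIT: compile, load, and run
```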
diff --git a/graphblas/tests/conftest.py b/graphblas/tests/conftest.py
index a4df5d336..964325e0d 100644
--- a/graphblas/tests/conftest.py
+++ b/graphblas/tests/conftest.py
@@ -1,7 +1,9 @@
import atexit
+import contextlib
import functools
import itertools
import platform
+import sys
from pathlib import Path
import numpy as np
@@ -18,26 +20,31 @@
def pytest_configure(config):
rng = np.random.default_rng()
- randomly = config.getoption("--randomly", False)
+ randomly = config.getoption("--randomly", None)
+ if randomly is None: # pragma: no cover
+ options_unavailable = True
+ randomly = True
+ config.addinivalue_line("markers", "slow: Skipped unless --runslow passed")
+ else:
+ options_unavailable = False
backend = config.getoption("--backend", None)
if backend is None:
if randomly:
backend = "suitesparse" if rng.random() < 0.5 else "suitesparse-vanilla"
else:
backend = "suitesparse"
- blocking = config.getoption("--blocking", True)
+ blocking = config.getoption("--blocking", None)
if blocking is None: # pragma: no branch
blocking = rng.random() < 0.5 if randomly else True
record = config.getoption("--record", False)
if record is None: # pragma: no branch
record = rng.random() < 0.5 if randomly else False
- mapnumpy = config.getoption("--mapnumpy", False)
+ mapnumpy = config.getoption("--mapnumpy", None)
if mapnumpy is None:
mapnumpy = rng.random() < 0.5 if randomly else False
- runslow = config.getoption("--runslow", False)
+ runslow = config.getoption("--runslow", None)
if runslow is None:
- # Add a small amount of randomization to be safer
- runslow = rng.random() < 0.05 if randomly else False
+ runslow = options_unavailable
config.runslow = runslow
gb.config.set(autocompute=False, mapnumpy=mapnumpy)
@@ -62,9 +69,11 @@ def save_records():
for key in dir(gb.semiring)
if key != "ss"
and isinstance(
- getattr(gb.semiring, key)
- if key not in gb.semiring._deprecated
- else gb.semiring._deprecated[key],
+ (
+ getattr(gb.semiring, key)
+ if key not in gb.semiring._deprecated
+ else gb.semiring._deprecated[key]
+ ),
(gb.core.operator.Semiring, gb.core.operator.ParameterizedSemiring),
)
)
@@ -73,9 +82,11 @@ def save_records():
for key in dir(gb.binary)
if key != "ss"
and isinstance(
- getattr(gb.binary, key)
- if key not in gb.binary._deprecated
- else gb.binary._deprecated[key],
+ (
+ getattr(gb.binary, key)
+ if key not in gb.binary._deprecated
+ else gb.binary._deprecated[key]
+ ),
(gb.core.operator.BinaryOp, gb.core.operator.ParameterizedBinaryOp),
)
)
@@ -109,6 +120,27 @@ def ic(): # pragma: no cover (debug)
return icecream.ic
+@contextlib.contextmanager
+def burble(): # pragma: no cover (debug)
+ """Show the burble diagnostics within a context."""
+ if gb.backend != "suitesparse":
+ yield
+ return
+ prev = gb.ss.config["burble"]
+ gb.ss.config["burble"] = True
+ try:
+ yield
+ finally:
+ gb.ss.config["burble"] = prev
+
+
+@pytest.fixture(scope="session")
+def burble_all(): # pragma: no cover (debug)
+ """Show the burble diagnostics for the entire test."""
+ with burble():
+ yield burble
+
+
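Usage sketch for the `burble` helper above (`v1` and `v2` are hypothetical vectors):

    with burble():  # prints SuiteSparse burble diagnostics while active
        result = gb.binary.plus(v1 | v2).new()
    # the previous "burble" config value is restored afterward, even on error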
def autocompute(func):
@functools.wraps(func)
def inner(*args, **kwargs):
@@ -125,3 +157,10 @@ def compute(x):
def shouldhave(module, opname):
"""Whether an "operator" module should have the given operator."""
return supports_udfs or hasattr(module, opname)
+
+
+def dprint(*args, **kwargs): # pragma: no cover (debug)
+ """Print to stderr for debugging purposes."""
+ kwargs["file"] = sys.stderr
+ kwargs["flush"] = True
+ print(*args, **kwargs)
diff --git a/graphblas/tests/test_core.py b/graphblas/tests/test_core.py
index ae2051145..3586eb4a8 100644
--- a/graphblas/tests/test_core.py
+++ b/graphblas/tests/test_core.py
@@ -83,7 +83,14 @@ def test_packages():
if not pyproject.exists(): # pragma: no cover (safety)
pytest.skip("Did not find pyproject.toml")
with pyproject.open("rb") as f:
- pkgs2 = sorted(tomli.load(f)["tool"]["setuptools"]["packages"])
+ cfg = tomli.load(f)
+ if cfg.get("project", {}).get("name") != "python-graphblas": # pragma: no cover (safety)
+ pytest.skip("Did not find correct pyproject.toml")
+ pkgs2 = sorted(cfg["tool"]["setuptools"]["packages"])
assert (
pkgs == pkgs2
), "If there are extra items on the left, add them to pyproject.toml:tool.setuptools.packages"
+
+
+def test_index_max():
+ assert gb.MAX_SIZE == 2**60 # True for all current backends
diff --git a/graphblas/tests/test_descriptor.py b/graphblas/tests/test_descriptor.py
index 9209a8055..6ec9df36a 100644
--- a/graphblas/tests/test_descriptor.py
+++ b/graphblas/tests/test_descriptor.py
@@ -2,8 +2,7 @@
def test_caching():
- """
- Test that building a descriptor is actually caching rather than building
+ """Test that building a descriptor is actually caching rather than building
a new object for each call.
"""
tocr = descriptor.lookup(
diff --git a/graphblas/tests/test_dtype.py b/graphblas/tests/test_dtype.py
index 66c19cce5..ecbca707f 100644
--- a/graphblas/tests/test_dtype.py
+++ b/graphblas/tests/test_dtype.py
@@ -7,8 +7,9 @@
import pytest
import graphblas as gb
-from graphblas import dtypes
+from graphblas import core, dtypes
from graphblas.core import lib
+from graphblas.core.utils import _NP2
from graphblas.dtypes import lookup_dtype
suitesparse = gb.backend == "suitesparse"
@@ -123,7 +124,7 @@ def test_dtype_bad_comparison():
def test_dtypes_match_numpy():
- for key, val in dtypes._registry.items():
+ for key, val in core.dtypes._registry.items():
try:
if key is int or (isinstance(key, str) and key == "int"):
# For win64, numpy treats int as int32, not int64
@@ -137,7 +138,7 @@ def test_dtypes_match_numpy():
def test_pickle():
- for val in dtypes._registry.values():
+ for val in core.dtypes._registry.values():
s = pickle.dumps(val)
val2 = pickle.loads(s)
if val._is_udt: # pragma: no cover
@@ -205,7 +206,7 @@ def test_auto_register():
def test_default_names():
- from graphblas.dtypes import _default_name
+ from graphblas.core.dtypes import _default_name
assert _default_name(np.dtype([("x", np.int32), ("y", np.float64)], align=True)) == (
"{'x': INT32, 'y': FP64}"
@@ -224,15 +225,22 @@ def test_record_dtype_from_dict():
def test_dtype_to_from_string():
types = [dtypes.BOOL, dtypes.FP64]
for c in string.ascii_letters:
+ if c == "T":
+ # See NEP 55 about StringDtype "T". Notably, this doesn't work:
+ # >>> np.dtype(np.dtype("T").str)
+ continue
+ if _NP2 and c == "a":
+ # Data type alias 'a' was deprecated in NumPy 2.0. Use the 'S' alias instead.
+ continue
try:
dtype = np.dtype(c)
types.append(dtype)
except Exception:
pass
for dtype in types:
- s = dtypes._dtype_to_string(dtype)
+ s = core.dtypes._dtype_to_string(dtype)
try:
- dtype2 = dtypes._string_to_dtype(s)
+ dtype2 = core.dtypes._string_to_dtype(s)
except Exception:
with pytest.raises(ValueError, match="Unknown dtype"):
lookup_dtype(dtype)
@@ -241,7 +249,7 @@ def test_dtype_to_from_string():
def test_has_complex():
- """Only SuiteSparse has complex (with Windows support in Python after v7.4.3.1)"""
+ """Only SuiteSparse has complex (with Windows support in Python after v7.4.3.1)."""
if not suitesparse:
assert not dtypes._supports_complex
return
@@ -253,3 +261,20 @@ def test_has_complex():
from packaging.version import parse
assert dtypes._supports_complex == (parse(ssgb.__version__) >= parse("7.4.3.1"))
+
+
+def test_has_ss_attribute():
+ if suitesparse:
+ assert dtypes.ss is not None
+ else:
+ with pytest.raises(AttributeError):
+ dtypes.ss
+
+
+def test_dir():
+ must_have = {"DataType", "lookup_dtype", "register_anonymous", "register_new", "ss", "unify"}
+ must_have.update({"FP32", "FP64", "INT8", "INT16", "INT32", "INT64"})
+ must_have.update({"BOOL", "UINT8", "UINT16", "UINT32", "UINT64"})
+ if dtypes._supports_complex:
+ must_have.update({"FC32", "FC64"})
+ assert set(dir(dtypes)) & must_have == must_have
diff --git a/graphblas/tests/test_infix.py b/graphblas/tests/test_infix.py
index 72e1c8a42..601f282a7 100644
--- a/graphblas/tests/test_infix.py
+++ b/graphblas/tests/test_infix.py
@@ -1,6 +1,6 @@
import pytest
-from graphblas import monoid, op
+from graphblas import binary, monoid, op
from graphblas.exceptions import DimensionMismatch
from .conftest import autocompute
@@ -346,7 +346,7 @@ def test_inplace_infix(s1, v1, v2, A1, A2):
@autocompute
def test_infix_expr_value_types():
- """Test bug where `infix_expr._value` was used as MatrixExpression or Matrix"""
+ """Test bug where `infix_expr._value` was used as MatrixExpression or Matrix."""
from graphblas.core.matrix import MatrixExpression
A = Matrix(int, 3, 3)
@@ -367,3 +367,415 @@ def test_infix_expr_value_types():
expr._value = None
assert expr._value is None
assert expr._expr._value is None
+
+
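The tests below exercise chained infix expressions: `|` maps to ewise_add (union), `&` to ewise_mult (intersection), and `@` to matrix multiplication. A small sketch of the semantics being verified:

    v1 = Vector.from_coo([0, 1], [1, 2], size=3)  # 1 2 .
    v2 = Vector.from_coo([1, 2], [1, 2], size=3)  # . 1 2
    binary.plus(v1 | v2).new()  # union:        1 3 2
    binary.plus(v1 & v2).new()  # intersection: . 3 .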
+def test_multi_infix_vector():
+ D0 = Vector.from_scalar(0, 3).diag()
+ v1 = Vector.from_coo([0, 1], [1, 2], size=3) # 1 2 .
+ v2 = Vector.from_coo([1, 2], [1, 2], size=3) # . 1 2
+ v3 = Vector.from_coo([2, 0], [1, 2], size=3) # 2 . 1
+ # ewise_add
+ result = binary.plus((v1 | v2) | v3).new()
+ expected = Vector.from_scalar(3, size=3)
+ assert result.isequal(expected)
+ result = binary.plus(v1 | (v2 | v3)).new()
+ assert result.isequal(expected)
+ result = monoid.min(v1 | v2 | v3).new()
+ expected = Vector.from_scalar(1, size=3)
+ assert result.isequal(expected)
+ # ewise_mult
+ result = monoid.max((v1 & v2) & v3).new()
+ expected = Vector(int, size=3)
+ assert result.isequal(expected)
+ result = monoid.max(v1 & (v2 & v3)).new()
+ assert result.isequal(expected)
+ result = monoid.min((v1 & v2) & v1).new()
+ expected = Vector.from_coo([1], [1], size=3)
+ assert result.isequal(expected)
+ # ewise_union
+ result = binary.plus((v1 | v2) | v3, left_default=10, right_default=10).new()
+ expected = Vector.from_scalar(13, size=3)
+ assert result.isequal(expected)
+ result = binary.plus((v1 | v2) | v3, left_default=10, right_default=10.0).new()
+ expected = Vector.from_scalar(13.0, size=3)
+ assert result.isequal(expected)
+ result = binary.plus(v1 | (v2 | v3), left_default=10, right_default=10).new()
+ assert result.isequal(expected)
+ # inner
+ assert op.plus_plus(v1 @ v1).new().value == 6
+ assert op.plus_plus(v1 @ (v1 @ D0)).new().value == 6
+ assert op.plus_plus((D0 @ v1) @ v1).new().value == 6
+ # matrix-vector ewise_add
+ result = binary.plus((D0 | v1) | v2).new()
+ expected = binary.plus(binary.plus(D0 | v1).new() | v2).new()
+ assert result.isequal(expected)
+ result = binary.plus(D0 | (v1 | v2)).new()
+ assert result.isequal(expected)
+ result = binary.plus((v1 | v2) | D0).new()
+ assert result.isequal(expected.T)
+ result = binary.plus(v1 | (v2 | D0)).new()
+ assert result.isequal(expected.T)
+ # matrix-vector ewise_mult
+ result = binary.plus((D0 & v1) & v2).new()
+ expected = binary.plus(binary.plus(D0 & v1).new() & v2).new()
+ assert result.isequal(expected)
+ assert result.nvals > 0
+ result = binary.plus(D0 & (v1 & v2)).new()
+ assert result.isequal(expected)
+ result = binary.plus((v1 & v2) & D0).new()
+ assert result.isequal(expected.T)
+ result = binary.plus(v1 & (v2 & D0)).new()
+ assert result.isequal(expected.T)
+ # matrix-vector ewise_union
+ kwargs = {"left_default": 10, "right_default": 20}
+ result = binary.plus((D0 | v1) | v2, **kwargs).new()
+ expected = binary.plus(binary.plus(D0 | v1, **kwargs).new() | v2, **kwargs).new()
+ assert result.isequal(expected)
+ result = binary.plus(D0 | (v1 | v2), **kwargs).new()
+ expected = binary.plus(D0 | binary.plus(v1 | v2, **kwargs).new(), **kwargs).new()
+ assert result.isequal(expected)
+ result = binary.plus((v1 | v2) | D0, **kwargs).new()
+ expected = binary.plus(binary.plus(v1 | v2, **kwargs).new() | D0, **kwargs).new()
+ assert result.isequal(expected)
+ result = binary.plus(v1 | (v2 | D0), **kwargs).new()
+ expected = binary.plus(v1 | binary.plus(v2 | D0, **kwargs).new(), **kwargs).new()
+ assert result.isequal(expected)
+ # vxm, mxv
+ result = op.plus_plus((D0 @ v1) @ D0).new()
+ assert result.isequal(v1)
+ result = op.plus_plus(D0 @ (v1 @ D0)).new()
+ assert result.isequal(v1)
+ result = op.plus_plus(v1 @ (D0 @ D0)).new()
+ assert result.isequal(v1)
+ result = op.plus_plus((D0 @ D0) @ v1).new()
+ assert result.isequal(v1)
+ result = op.plus_plus((v1 @ D0) @ D0).new()
+ assert result.isequal(v1)
+ result = op.plus_plus(D0 @ (D0 @ v1)).new()
+ assert result.isequal(v1)
+
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2) | v3
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2).__ror__(v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2) | (v2 & v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2) | (v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ v1 | (v2 & v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ v1.__ror__(v2 & v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2) | (v2 & v3)
+
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ v1 & (v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ v1.__rand__(v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2) & (v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2) & (v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2) & v3
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2).__rand__(v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2) & (v2 & v3)
+
+ # We differentiate between infix and methods
+ with pytest.raises(TypeError, match="to automatically compute"):
+ v1.ewise_add(v2 & v3)
+ with pytest.raises(TypeError, match="Automatic computation"):
+ (v1 & v2).ewise_add(v3)
+ with pytest.raises(TypeError, match="to automatically compute"):
+ v1.ewise_union(v2 & v3, binary.plus, left_default=1, right_default=1)
+ with pytest.raises(TypeError, match="Automatic computation"):
+ (v1 & v2).ewise_union(v3, binary.plus, left_default=1, right_default=1)
+ with pytest.raises(TypeError, match="to automatically compute"):
+ v1.ewise_mult(v2 | v3)
+ with pytest.raises(TypeError, match="Automatic computation"):
+ (v1 | v2).ewise_mult(v3)
+
+
+@autocompute
+def test_multi_infix_vector_auto():
+ v1 = Vector.from_coo([0, 1], [1, 2], size=3) # 1 2 .
+ v2 = Vector.from_coo([1, 2], [1, 2], size=3) # . 1 2
+ v3 = Vector.from_coo([2, 0], [1, 2], size=3) # 2 . 1
+ # We differentiate between infix and methods
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ v1.ewise_add(v2 & v3)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ (v1 & v2).ewise_add(v3)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ v1.ewise_union(v2 & v3, binary.plus, left_default=1, right_default=1)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ (v1 & v2).ewise_union(v3, binary.plus, left_default=1, right_default=1)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ v1.ewise_mult(v2 | v3)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ (v1 | v2).ewise_mult(v3)
+
+
+def test_multi_infix_matrix():
+ # Adapted from test_multi_infix_vector
+ D0 = Vector.from_scalar(0, 3).diag()
+ v1 = Matrix.from_coo([0, 1], [0, 0], [1, 2], nrows=3) # 1 2 .
+ v2 = Matrix.from_coo([1, 2], [0, 0], [1, 2], nrows=3) # . 1 2
+ v3 = Matrix.from_coo([2, 0], [0, 0], [1, 2], nrows=3) # 2 . 1
+ # ewise_add
+ result = binary.plus((v1 | v2) | v3).new()
+ expected = Matrix.from_scalar(3, 3, 1)
+ assert result.isequal(expected)
+ result = binary.plus(v1 | (v2 | v3)).new()
+ assert result.isequal(expected)
+ result = monoid.min(v1 | v2 | v3).new()
+ expected = Matrix.from_scalar(1, 3, 1)
+ assert result.isequal(expected)
+ result = binary.plus(v1 | v1 | v1 | v1 | v1).new()
+ expected = (5 * v1).new()
+ assert result.isequal(expected)
+ # ewise_mult
+ result = monoid.max((v1 & v2) & v3).new()
+ expected = Matrix(int, 3, 1)
+ assert result.isequal(expected)
+ result = monoid.max(v1 & (v2 & v3)).new()
+ assert result.isequal(expected)
+ result = monoid.min((v1 & v2) & v1).new()
+ expected = Matrix.from_coo([1], [0], [1], nrows=3)
+ assert result.isequal(expected)
+ result = binary.plus(v1 & v1 & v1 & v1 & v1).new()
+ expected = (5 * v1).new()
+ assert result.isequal(expected)
+ # ewise_union
+ result = binary.plus((v1 | v2) | v3, left_default=10, right_default=10).new()
+ expected = Matrix.from_scalar(13, 3, 1)
+ assert result.isequal(expected)
+ result = binary.plus((v1 | v2) | v3, left_default=10, right_default=10.0).new()
+ expected = Matrix.from_scalar(13.0, 3, 1)
+ assert result.isequal(expected)
+ result = binary.plus(v1 | (v2 | v3), left_default=10, right_default=10).new()
+ assert result.isequal(expected)
+ # mxm
+ assert op.plus_plus(v1.T @ v1).new()[0, 0].new().value == 6
+ assert op.plus_plus(v1 @ (v1.T @ D0)).new()[0, 0].new().value == 2
+ assert op.plus_plus((v1.T @ D0) @ v1).new()[0, 0].new().value == 6
+ assert op.plus_plus(D0 @ D0 @ D0 @ D0 @ D0).new().isequal(D0)
+
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2) | v3
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2).__ror__(v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2) | (v2 & v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2) | (v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ v1 | (v2 & v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ v1.__ror__(v2 & v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2) | (v2 & v3)
+
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ v1 & (v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ v1.__rand__(v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2) & (v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2) & (v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2) & v3
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2).__rand__(v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2) & (v2 & v3)
+
+ # We differentiate between infix and methods
+ with pytest.raises(TypeError, match="to automatically compute"):
+ v1.ewise_add(v2 & v3)
+ with pytest.raises(TypeError, match="Automatic computation"):
+ (v1 & v2).ewise_add(v3)
+ with pytest.raises(TypeError, match="to automatically compute"):
+ v1.ewise_union(v2 & v3, binary.plus, left_default=1, right_default=1)
+ with pytest.raises(TypeError, match="Automatic computation"):
+ (v1 & v2).ewise_union(v3, binary.plus, left_default=1, right_default=1)
+ with pytest.raises(TypeError, match="to automatically compute"):
+ v1.ewise_mult(v2 | v3)
+ with pytest.raises(TypeError, match="Automatic computation"):
+ (v1 | v2).ewise_mult(v3)
+
+
+@autocompute
+def test_multi_infix_matrix_auto():
+ v1 = Matrix.from_coo([0, 1], [0, 0], [1, 2], nrows=3) # 1 2 .
+ v2 = Matrix.from_coo([1, 2], [0, 0], [1, 2], nrows=3) # . 1 2
+ v3 = Matrix.from_coo([2, 0], [0, 0], [1, 2], nrows=3) # 2 . 1
+ # We differentiate between infix and methods
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ v1.ewise_add(v2 & v3)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ (v1 & v2).ewise_add(v3)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ v1.ewise_union(v2 & v3, binary.plus, left_default=1, right_default=1)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ (v1 & v2).ewise_union(v3, binary.plus, left_default=1, right_default=1)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ v1.ewise_mult(v2 | v3)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ (v1 | v2).ewise_mult(v3)
+
+
+def test_multi_infix_scalar():
+ # Adapted from test_multi_infix_vector
+ v1 = Scalar.from_value(1)
+ v2 = Scalar.from_value(2)
+ v3 = Scalar(int)
+ # ewise_add
+ result = binary.plus((v1 | v2) | v3).new()
+ expected = 3
+ assert result.isequal(expected)
+ result = binary.plus((1 | v2) | v3).new()
+ assert result.isequal(expected)
+ result = binary.plus((1 | v2) | 0).new()
+ assert result.isequal(expected)
+ result = binary.plus((v1 | 2) | v3).new()
+ assert result.isequal(expected)
+ result = binary.plus((v1 | 2) | 0).new()
+ assert result.isequal(expected)
+ result = binary.plus((v1 | v2) | 0).new()
+ assert result.isequal(expected)
+
+ result = binary.plus(v1 | (v2 | v3)).new()
+ assert result.isequal(expected)
+ result = binary.plus(1 | (v2 | v3)).new()
+ assert result.isequal(expected)
+ result = binary.plus(1 | (2 | v3)).new()
+ assert result.isequal(expected)
+ result = binary.plus(1 | (v2 | 0)).new()
+ assert result.isequal(expected)
+ result = binary.plus(v1 | (2 | v3)).new()
+ assert result.isequal(expected)
+ result = binary.plus(v1 | (v2 | 0)).new()
+ assert result.isequal(expected)
+
+ result = monoid.min(v1 | v2 | v3).new()
+ expected = 1
+ assert result.isequal(expected)
+ # ewise_mult
+ result = monoid.max((v1 & v2) & v3).new()
+ expected = None
+ assert result.isequal(expected)
+ result = monoid.max(v1 & (v2 & v3)).new()
+ assert result.isequal(expected)
+ result = monoid.min((v1 & v2) & v1).new()
+ expected = 1
+ assert result.isequal(expected)
+
+ result = monoid.min((1 & v2) & v1).new()
+ assert result.isequal(expected)
+ result = monoid.min((1 & v2) & 1).new()
+ assert result.isequal(expected)
+ result = monoid.min((v1 & 2) & v1).new()
+ assert result.isequal(expected)
+ result = monoid.min((v1 & 2) & 1).new()
+ assert result.isequal(expected)
+ result = monoid.min((v1 & v2) & 1).new()
+ assert result.isequal(expected)
+
+ result = monoid.min(1 & (v2 & v1)).new()
+ assert result.isequal(expected)
+ result = monoid.min(1 & (2 & v1)).new()
+ assert result.isequal(expected)
+ result = monoid.min(1 & (v2 & 1)).new()
+ assert result.isequal(expected)
+ result = monoid.min(v1 & (2 & v1)).new()
+ assert result.isequal(expected)
+ result = monoid.min(v1 & (v2 & 1)).new()
+ assert result.isequal(expected)
+
+ # ewise_union
+ result = binary.plus((v1 | v2) | v3, left_default=10, right_default=10).new()
+ expected = 13
+ assert result.isequal(expected)
+ result = binary.plus((1 | v2) | v3, left_default=10, right_default=10).new()
+ assert result.isequal(expected)
+ result = binary.plus((v1 | 2) | v3, left_default=10, right_default=10).new()
+ assert result.isequal(expected)
+ result = binary.plus((v1 | v2) | v3, left_default=10, right_default=10.0).new()
+ assert result.isequal(expected)
+ result = binary.plus(v1 | (v2 | v3), left_default=10, right_default=10).new()
+ assert result.isequal(expected)
+ result = binary.plus(1 | (v2 | v3), left_default=10, right_default=10).new()
+ assert result.isequal(expected)
+ result = binary.plus(1 | (2 | v3), left_default=10, right_default=10).new()
+ assert result.isequal(expected)
+ result = binary.plus(v1 | (2 | v3), left_default=10, right_default=10).new()
+ assert result.isequal(expected)
+
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2) | v3
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2).__ror__(v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2) | (v2 & v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2) | (v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ v1 | (v2 & v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ v1.__ror__(v2 & v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2) | (v2 & v3)
+
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ v1 & (v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ v1.__rand__(v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2) & (v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 & v2) & (v2 | v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2) & v3
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2).__rand__(v3)
+ with pytest.raises(TypeError, match="XXX"): # TODO
+ (v1 | v2) & (v2 & v3)
+
+ # We differentiate between infix and methods
+ with pytest.raises(TypeError, match="to automatically compute"):
+ v1.ewise_add(v2 & v3)
+ with pytest.raises(TypeError, match="Automatic computation"):
+ (v1 & v2).ewise_add(v3)
+ with pytest.raises(TypeError, match="to automatically compute"):
+ v1.ewise_union(v2 & v3, binary.plus, left_default=1, right_default=1)
+ with pytest.raises(TypeError, match="Automatic computation"):
+ (v1 & v2).ewise_union(v3, binary.plus, left_default=1, right_default=1)
+ with pytest.raises(TypeError, match="to automatically compute"):
+ v1.ewise_mult(v2 | v3)
+ with pytest.raises(TypeError, match="Automatic computation"):
+ (v1 | v2).ewise_mult(v3)
+
+
+@autocompute
+def test_multi_infix_scalar_auto():
+ v1 = Scalar.from_value(1)
+ v2 = Scalar.from_value(2)
+ v3 = Scalar(int)
+ # We differentiate between infix and methods
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ v1.ewise_add(v2 & v3)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ (v1 & v2).ewise_add(v3)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ v1.ewise_union(v2 & v3, binary.plus, left_default=1, right_default=1)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ (v1 & v2).ewise_union(v3, binary.plus, left_default=1, right_default=1)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ v1.ewise_mult(v2 | v3)
+ with pytest.raises(TypeError, match="only valid for BOOL"):
+ (v1 | v2).ewise_mult(v3)
diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py
index 24df55e9d..7e786f0da 100644
--- a/graphblas/tests/test_io.py
+++ b/graphblas/tests/test_io.py
@@ -38,17 +38,6 @@
suitesparse = gb.backend == "suitesparse"
-@pytest.mark.skipif("not ss")
-def test_deprecated():
- a = np.array([0.0, 2.0, 4.1])
- with pytest.warns(DeprecationWarning):
- v = gb.io.from_numpy(a)
- assert v.isequal(gb.Vector.from_coo([1, 2], [2.0, 4.1]), check_dtype=True)
- with pytest.warns(DeprecationWarning):
- a2 = gb.io.to_numpy(v)
- np.testing.assert_array_equal(a, a2)
-
-
@pytest.mark.skipif("not ss")
def test_vector_to_from_numpy():
a = np.array([0.0, 2.0, 4.1])
@@ -59,18 +48,24 @@ def test_vector_to_from_numpy():
csr = gb.io.to_scipy_sparse(v, "csr")
assert csr.nnz == 2
- assert ss.isspmatrix_csr(csr)
+ # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. `ss.isspmatrix_csr`
+ assert isinstance(csr, getattr(ss, "sparray", ss.spmatrix))
+ assert csr.format == "csr"
np.testing.assert_array_equal(csr.toarray(), np.array([[0.0, 2.0, 4.1]]))
csc = gb.io.to_scipy_sparse(v, "csc")
assert csc.nnz == 2
- assert ss.isspmatrix_csc(csc)
+ # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. `ss.isspmatrix_csc`
+ assert isinstance(csc, getattr(ss, "sparray", ss.spmatrix))
+ assert csc.format == "csc"
np.testing.assert_array_equal(csc.toarray(), np.array([[0.0, 2.0, 4.1]]).T)
# default to csr-like
coo = gb.io.to_scipy_sparse(v, "coo")
assert coo.shape == csr.shape
- assert ss.isspmatrix_coo(coo)
+ # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. `ss.isspmatrix_coo`
+ assert isinstance(coo, getattr(ss, "sparray", ss.spmatrix))
+ assert coo.format == "coo"
assert coo.nnz == 2
np.testing.assert_array_equal(coo.toarray(), np.array([[0.0, 2.0, 4.1]]))
@@ -99,7 +94,9 @@ def test_matrix_to_from_numpy():
for format in ["csr", "csc", "coo"]:
sparse = gb.io.to_scipy_sparse(M, format)
- assert getattr(ss, f"isspmatrix_{format}")(sparse)
+ # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. `ss.isspmatrix_csr`
+ assert isinstance(sparse, getattr(ss, "sparray", ss.spmatrix))
+ assert sparse.format == format
assert sparse.nnz == 3
np.testing.assert_array_equal(sparse.toarray(), a)
M2 = gb.io.from_scipy_sparse(sparse)
@@ -149,7 +146,7 @@ def test_matrix_to_from_networkx():
M = gb.io.from_networkx(G, nodelist=range(7))
if suitesparse:
assert M.ss.is_iso
- rows, cols = zip(*edges)
+ rows, cols = zip(*edges, strict=True)
expected = gb.Matrix.from_coo(rows, cols, 1)
assert expected.isequal(M)
# Test empty
@@ -167,7 +164,11 @@ def test_matrix_to_from_networkx():
def test_mmread_mmwrite(engine):
if engine == "fmm" and fmm is None: # pragma: no cover (import)
pytest.skip("needs fast_matrix_market")
- from scipy.io.tests import test_mmio
+ try:
+ from scipy.io.tests import test_mmio
+ except ImportError:
+ # Test files are mysteriously missing from some conda-forge builds
+ pytest.skip("scipy.io.tests.test_mmio unavailable :(")
p31 = 2**31
p63 = 2**63
@@ -365,6 +366,7 @@ def test_scipy_sparse():
@pytest.mark.skipif("not ak")
+@pytest.mark.xfail(np.__version__[:5] in {"1.25.", "1.26."}, reason="awkward bug with numpy >=1.25")
def test_awkward_roundtrip():
# Vector
v = gb.Vector.from_coo([1, 3, 5], [20, 21, -5], size=22)
@@ -386,6 +388,7 @@ def test_awkward_roundtrip():
@pytest.mark.skipif("not ak")
+@pytest.mark.xfail(np.__version__[:5] in {"1.25.", "1.26."}, reason="awkward bug with numpy >=1.25")
def test_awkward_iso_roundtrip():
# Vector
v = gb.Vector.from_coo([1, 3, 5], [20, 20, 20], size=22)
@@ -429,6 +432,7 @@ def test_awkward_errors():
@pytest.mark.skipif("not sparse")
+@pytest.mark.slow
def test_vector_to_from_pydata_sparse():
coords = np.array([0, 1, 2, 3, 4], dtype="int64")
data = np.array([10, 20, 30, 40, 50], dtype="int64")
@@ -442,6 +446,7 @@ def test_vector_to_from_pydata_sparse():
@pytest.mark.skipif("not sparse")
+@pytest.mark.slow
def test_matrix_to_from_pydata_sparse():
coords = np.array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], dtype="int64")
data = np.array([10, 20, 30, 40, 50], dtype="int64")
diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py
index 26017f364..24f0e73d7 100644
--- a/graphblas/tests/test_matrix.py
+++ b/graphblas/tests/test_matrix.py
@@ -2603,12 +2603,14 @@ def test_iter(A):
zip(
[3, 0, 3, 5, 6, 0, 6, 1, 6, 2, 4, 1],
[0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6],
+ strict=True,
)
)
assert set(A.T) == set(
zip(
[0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6],
[3, 0, 3, 5, 6, 0, 6, 1, 6, 2, 4, 1],
+ strict=True,
)
)
@@ -2731,8 +2733,8 @@ def test_ss_split(A):
for results in [A.ss.split([4, 3]), A.ss.split([[4, None], 3], name="split")]:
row_boundaries = [0, 4, 7]
col_boundaries = [0, 3, 6, 7]
- for i, (i1, i2) in enumerate(zip(row_boundaries[:-1], row_boundaries[1:])):
- for j, (j1, j2) in enumerate(zip(col_boundaries[:-1], col_boundaries[1:])):
+ for i, (i1, i2) in enumerate(itertools.pairwise(row_boundaries)):
+ for j, (j1, j2) in enumerate(itertools.pairwise(col_boundaries)):
expected = A[i1:i2, j1:j2].new()
assert expected.isequal(results[i][j])
with pytest.raises(DimensionMismatch):
@@ -2805,6 +2807,8 @@ def test_ss_nbytes(A):
@autocompute
def test_auto(A, v):
+ from graphblas.core.infix import MatrixEwiseMultExpr
+
expected = binary.land[bool](A & A).new()
B = A.dup(dtype=bool)
for expr in [(B & B), binary.land[bool](A & A)]:
@@ -2827,14 +2831,26 @@ def test_auto(A, v):
"__and__",
"__or__",
# "kronecker",
+ "__rand__",
+ "__ror__",
]:
+ # print(type(expr).__name__, method)
val1 = getattr(expected, method)(expected).new()
- val2 = getattr(expected, method)(expr)
- val3 = getattr(expr, method)(expected)
- val4 = getattr(expr, method)(expr)
- assert val1.isequal(val2)
- assert val1.isequal(val3)
- assert val1.isequal(val4)
+ if method in {"__or__", "__ror__"} and type(expr) is MatrixEwiseMultExpr:
+ # Doing e.g. `plus(A & B | C)` isn't allowed--make user be explicit
+ with pytest.raises(TypeError):
+ val2 = getattr(expected, method)(expr)
+ with pytest.raises(TypeError):
+ val3 = getattr(expr, method)(expected)
+ with pytest.raises(TypeError):
+ val4 = getattr(expr, method)(expr)
+ else:
+ val2 = getattr(expected, method)(expr)
+ assert val1.isequal(val2)
+ val3 = getattr(expr, method)(expected)
+ assert val1.isequal(val3)
+ val4 = getattr(expr, method)(expr)
+ assert val1.isequal(val4)
for method in ["reduce_rowwise", "reduce_columnwise", "reduce_scalar"]:
s1 = getattr(expected, method)(monoid.lor).new()
s2 = getattr(expr, method)(monoid.lor)
@@ -2938,11 +2954,11 @@ def test_expr_is_like_matrix(A):
"from_dicts",
"from_edgelist",
"from_scalar",
- "from_values",
"resize",
+ "setdiag",
"update",
}
- ignore = {"__sizeof__"}
+ ignore = {"__sizeof__", "_ewise_add", "_ewise_mult", "_ewise_union", "_mxm", "_mxv"}
assert attrs - expr_attrs - ignore == expected, (
"If you see this message, you probably added a method to Matrix. You may need to "
"add an entry to `matrix` or `matrix_vector` set in `graphblas.core.automethods` "
@@ -3002,11 +3018,11 @@ def test_index_expr_is_like_matrix(A):
"from_dense",
"from_dicts",
"from_edgelist",
- "from_values",
"from_scalar",
"resize",
+ "setdiag",
}
- ignore = {"__sizeof__"}
+ ignore = {"__sizeof__", "_ewise_add", "_ewise_mult", "_ewise_union", "_mxm", "_mxv"}
assert attrs - expr_attrs - ignore == expected, (
"If you see this message, you probably added a method to Matrix. You may need to "
"add an entry to `matrix` or `matrix_vector` set in `graphblas.core.automethods` "
@@ -3054,7 +3070,7 @@ def test_ss_flatten(A):
[3, 2, 3, 1, 5, 3, 7, 8, 3, 1, 7, 4],
]
# row-wise
- indices = [row * A.ncols + col for row, col in zip(data[0], data[1])]
+ indices = [row * A.ncols + col for row, col in zip(data[0], data[1], strict=True)]
expected = Vector.from_coo(indices, data[2], size=A.nrows * A.ncols)
for fmt in ["csr", "hypercsr", "bitmapr"]:
B = Matrix.ss.import_any(**A.ss.export(format=fmt))
@@ -3073,7 +3089,7 @@ def test_ss_flatten(A):
assert C.isequal(B)
# column-wise
- indices = [col * A.nrows + row for row, col in zip(data[0], data[1])]
+ indices = [col * A.nrows + row for row, col in zip(data[0], data[1], strict=True)]
expected = Vector.from_coo(indices, data[2], size=A.nrows * A.ncols)
for fmt in ["csc", "hypercsc", "bitmapc"]:
B = Matrix.ss.import_any(**A.ss.export(format=fmt))
@@ -3136,6 +3152,10 @@ def test_ss_reshape(A):
def test_autocompute_argument_messages(A, v):
with pytest.raises(TypeError, match="autocompute"):
A.ewise_mult(A & A)
+ with pytest.raises(TypeError, match="autocompute"):
+ A.ewise_mult(binary.plus(A & A))
+ with pytest.raises(TypeError, match="autocompute"):
+ A.ewise_mult(A + A)
with pytest.raises(TypeError, match="autocompute"):
A.mxv(A @ v)
@@ -3537,28 +3557,6 @@ def compare(A, expected, isequal=True, **kwargs):
A.ss.compactify("bad_how")
-def test_deprecated(A):
- if suitesparse:
- with pytest.warns(DeprecationWarning):
- A.ss.compactify_rowwise()
- with pytest.warns(DeprecationWarning):
- A.ss.compactify_columnwise()
- with pytest.warns(DeprecationWarning):
- A.ss.scan_rowwise()
- with pytest.warns(DeprecationWarning):
- A.ss.scan_columnwise()
- with pytest.warns(DeprecationWarning):
- A.ss.selectk_rowwise("first", 3)
- with pytest.warns(DeprecationWarning):
- A.ss.selectk_columnwise("first", 3)
- with pytest.warns(DeprecationWarning):
- A.to_values()
- with pytest.warns(DeprecationWarning):
- A.T.to_values()
- with pytest.warns(DeprecationWarning):
- A.from_values([1], [2], [3])
-
-
def test_ndim(A):
assert A.ndim == 2
assert A.ewise_mult(A).ndim == 2
@@ -3630,9 +3628,9 @@ def test_ss_iteration(A):
assert not list(B.ss.itervalues())
assert not list(B.ss.iteritems())
rows, columns, values = A.to_coo()
- assert sorted(zip(rows, columns)) == sorted(A.ss.iterkeys())
+ assert sorted(zip(rows, columns, strict=True)) == sorted(A.ss.iterkeys())
assert sorted(values) == sorted(A.ss.itervalues())
- assert sorted(zip(rows, columns, values)) == sorted(A.ss.iteritems())
+ assert sorted(zip(rows, columns, values, strict=True)) == sorted(A.ss.iteritems())
N = rows.size
A = Matrix.ss.import_bitmapr(**A.ss.export("bitmapr"))
@@ -3891,7 +3889,7 @@ def test_get(A):
assert compute(A.T.get(0, 1)) is None
assert A.T.get(1, 0) == 2
assert A.get(0, 1, "mittens") == 2
- assert type(compute(A.get(0, 1))) is int
+ assert isinstance(compute(A.get(0, 1)), int)
with pytest.raises(ValueError, match="Bad row, col"):
# Not yet supported
A.get(0, [0, 1])
@@ -4076,10 +4074,11 @@ def test_ss_pack_hyperhash(A):
Y = C.ss.unpack_hyperhash()
Y = C.ss.unpack_hyperhash(compute=True)
assert C.ss.unpack_hyperhash() is None
- assert Y.nrows == C.nrows
- C.ss.pack_hyperhash(Y)
- assert Y.gb_obj[0] == gb.core.NULL
- assert C.ss.unpack_hyperhash() is not None
+ if Y is not None: # hyperhash may or may not be computed
+ assert Y.nrows == C.nrows
+ C.ss.pack_hyperhash(Y)
+ assert Y.gb_obj[0] == gb.core.NULL
+ assert C.ss.unpack_hyperhash() is not None # May or may not be computed
def test_to_dicts_from_dicts(A):
@@ -4298,7 +4297,7 @@ def test_ss_descriptors(A):
A(nthreads=4, axb_method="dot", sort=True) << A @ A
assert A.isequal(C2)
# Bad option should show list of valid options
- with pytest.raises(ValueError, match="nthreads"):
+ with pytest.raises(ValueError, match="axb_method"):
C1(bad_opt=True) << A
with pytest.raises(ValueError, match="Duplicate descriptor"):
(A @ A).new(nthreads=4, Nthreads=5)
@@ -4375,3 +4374,174 @@ def test_subarray_dtypes():
if suitesparse:
Full2 = Matrix.ss.import_fullr(b2)
assert Full1.isequal(Full2, check_dtype=True)
+
+
+def test_power(A):
+ expected = A.dup()
+ for i in range(1, 50):
+ result = A.power(i).new()
+ assert result.isequal(expected)
+ expected << A @ expected
+ # Test transpose
+ expected = A.T.new()
+ for i in range(1, 10):
+ result = A.T.power(i).new()
+ assert result.isequal(expected)
+ expected << A.T @ expected
+ # Test other semiring
+ expected = A.dup()
+ for i in range(1, 10):
+ result = A.power(i, semiring.min_plus).new()
+ assert result.isequal(expected)
+ expected << semiring.min_plus(A @ expected)
+ # n == 0
+ result = A.power(0).new()
+ expected = Vector.from_scalar(1, A.nrows, A.dtype).diag()
+ assert result.isequal(expected)
+ result = A.power(0, semiring.plus_min).new()
+ identity = semiring.plus_min[A.dtype].binaryop.monoid.identity
+ assert identity != 1
+ expected = Vector.from_scalar(identity, A.nrows, A.dtype).diag()
+ assert result.isequal(expected)
+ # Exceptional
+ with pytest.raises(TypeError, match="must be a nonnegative integer"):
+ A.power(1.5)
+ with pytest.raises(ValueError, match="must be a nonnegative integer"):
+ A.power(-1)
+ with pytest.raises(ValueError, match="binaryop must be associated with a monoid"):
+ A.power(0, semiring.min_first)
+ B = A[:2, :3].new()
+ with pytest.raises(DimensionMismatch):
+ B.power(2)
+
+
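In brief, the semantics checked above: `A.power(n)` multiplies A by itself n times under the given semiring (plus_times by default), and `A.power(0)` yields a diagonal matrix filled with the identity of the semiring's monoid. A sketch mirroring the test loop (`n` is a hypothetical positive integer):

    expected = A.dup()                # A to the first power
    for _ in range(n - 1):
        expected << A @ expected      # default plus_times semiring, as with mxm
    assert A.power(n).new().isequal(expected)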
+def test_setdiag():
+ A = Matrix(int, 2, 3)
+ A.setdiag(1)
+ expected = Matrix(int, 2, 3)
+ expected[0, 0] = 1
+ expected[1, 1] = 1
+ assert A.isequal(expected)
+ A.setdiag(Scalar.from_value(2), 2)
+ expected[0, 2] = 2
+ assert A.isequal(expected)
+ A.setdiag(3, k=-1)
+ expected[1, 0] = 3
+ assert A.isequal(expected)
+ # List (or array) is treated as dense
+ A.setdiag([10, 20], 1)
+ expected[0, 1] = 10
+ expected[1, 2] = 20
+ assert A.isequal(expected)
+ # Size 0 diagonals do not set anything.
+ # This could be valid (esp. given a size 0 vector), but let's raise for now.
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag(-1, 3)
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag(-1, -2)
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag([], 3)
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag(Vector(int, 0), -2)
+ # Now we're definitely out of bounds
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag(-1, 4)
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag(-1, -3)
+ with pytest.raises(TypeError, match="k must be an integer"):
+ A.setdiag(-1, 0.5)
+ with pytest.raises(TypeError, match="Bad type for argument `values` in Matrix.setdiag"):
+ A.setdiag(object())
+ with pytest.raises(DimensionMismatch, match="Dimensions not compatible"):
+ A.setdiag([10, 20, 30], 1)
+ with pytest.raises(DimensionMismatch, match="Dimensions not compatible"):
+ A.setdiag([10], 1)
+
+ # Special care for dimensions of length 0
+ A = Matrix(int, 0, 2, name="A")
+ A.setdiag(0, 0)
+ A.setdiag(0, 1)
+ A.setdiag([], 0)
+ A.setdiag([], 1)
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag(0, -1)
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag([], -1)
+ A = Matrix(int, 2, 0, name="A")
+ A.setdiag(0, 0)
+ A.setdiag(0, -1)
+ A.setdiag([], 0)
+ A.setdiag([], -1)
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag(0, 1)
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag([], 1)
+ A = Matrix(int, 0, 0, name="A")
+ A.setdiag(0, 0)
+ A.setdiag([], 0)
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag(0, 1)
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag([], 1)
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag(0, -1)
+ with pytest.raises(IndexError, match="diagonal is out of range"):
+ A.setdiag([], -1)
+
+ A = Matrix(int, 2, 2, name="A")
+ expected = Matrix(int, 2, 2, name="expected")
+ v = Vector(int, 2, name="v")
+ v[0] = 1
+ A.setdiag(v)
+ expected[0, 0] = 1
+ assert A.isequal(expected)
+ A.setdiag(v, accum=binary.plus)
+ expected[0, 0] = 2
+ assert A.isequal(expected)
+ A.setdiag(10, mask=v.S)
+ expected[0, 0] = 10
+ assert A.isequal(expected)
+ A.setdiag(10, mask=v.S, accum="+")
+ expected[0, 0] = 20
+ assert A.isequal(expected)
+ # Allow mask to be a matrix
+ A.setdiag(10, mask=A.S, accum="+")
+ expected[0, 0] = 30
+ assert A.isequal(expected)
+ # Test how to clear or not clear missing elements
+ A.clear()
+ A.setdiag(99)
+ A.setdiag(v)
+ expected[0, 0] = 1
+ assert A.isequal(expected)
+ A.setdiag(99)
+ A.setdiag(v, accum="second")
+ expected[1, 1] = 99
+ assert A.isequal(expected)
+ A.setdiag(99)
+ A.setdiag(v, mask=v.S)
+ assert A.isequal(expected)
+
+ # We handle complemented masks!
+ A.clear()
+ expected.clear()
+ A.setdiag(42, mask=~v.S)
+ expected[1, 1] = 42
+ assert A.isequal(expected)
+ A.setdiag(7, mask=~A.V)
+ expected[0, 0] = 7
+ assert A.isequal(expected)
+
+ with pytest.raises(DimensionMismatch, match="Matrix mask in setdiag is the wrong "):
+ A.setdiag(9, mask=Matrix(int, 3, 3).S)
+ with pytest.raises(DimensionMismatch, match="Vector mask in setdiag is the wrong "):
+ A.setdiag(10, mask=Vector(int, 3).S)
+
+ A.clear()
+ A.resize(2, 3)
+ expected.clear()
+ expected.resize(2, 3)
+ A.setdiag(30, mask=v.S)
+ expected[0, 0] = 30
+ assert A.isequal(expected)
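In short, `setdiag` writes a scalar, a dense list/array, or a Vector onto the k-th diagonal, honoring optional `mask` and `accum` like other assignments. A condensed sketch of the behavior tested above:

    A = Matrix(int, 2, 3)
    A.setdiag(1)             # main diagonal: A[0, 0] = A[1, 1] = 1
    A.setdiag([10, 20], 1)   # dense values along superdiagonal k=1
    A.setdiag(3, k=-1)       # scalar along subdiagonal k=-1
    A.setdiag(5, accum="+")  # accumulate into existing diagonal entries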
diff --git a/graphblas/tests/test_numpyops.py b/graphblas/tests/test_numpyops.py
index 25c52d7fd..999c6d5e0 100644
--- a/graphblas/tests/test_numpyops.py
+++ b/graphblas/tests/test_numpyops.py
@@ -5,6 +5,7 @@
import numpy as np
import pytest
+from packaging.version import parse
import graphblas as gb
import graphblas.binary.numpy as npbinary
@@ -112,6 +113,15 @@ def test_npunary():
match(accum=gb.binary.lor) << gb_result.apply(npunary.isnan)
compare = match.reduce(gb.monoid.land).new()
if not compare: # pragma: no cover (debug)
+ import numba
+
+ if (
+ unary_name in {"sign"}
+ and np.__version__.startswith("2.")
+ and parse(numba.__version__) < parse("0.61.0")
+ ):
+ # numba <0.61.0 does not match numpy 2.0
+ continue
print(unary_name, gb_input.dtype)
print(compute(gb_result))
print(np_result)
diff --git a/graphblas/tests/test_op.py b/graphblas/tests/test_op.py
index c9a176afd..41fae80ae 100644
--- a/graphblas/tests/test_op.py
+++ b/graphblas/tests/test_op.py
@@ -19,7 +19,15 @@
)
from graphblas.core import _supports_udfs as supports_udfs
from graphblas.core import lib, operator
-from graphblas.core.operator import BinaryOp, IndexUnaryOp, Monoid, Semiring, UnaryOp, get_semiring
+from graphblas.core.operator import (
+ BinaryOp,
+ IndexUnaryOp,
+ Monoid,
+ SelectOp,
+ Semiring,
+ UnaryOp,
+ get_semiring,
+)
from graphblas.dtypes import (
BOOL,
FP32,
@@ -225,7 +233,7 @@ def plus_one(x):
UnaryOp.register_new("bad", object())
assert not hasattr(unary, "bad")
with pytest.raises(UdfParseError, match="Unable to parse function using Numba"):
- UnaryOp.register_new("bad", lambda x: v)
+ UnaryOp.register_new("bad", lambda x: v) # pragma: no branch (numba)
@pytest.mark.skipif("not supports_udfs")
@@ -1006,7 +1014,7 @@ def myplus(x, y):
def test_create_semiring():
# stress test / sanity check
- monoid_names = {x for x in dir(monoid) if not x.startswith("_")}
+ monoid_names = {x for x in dir(monoid) if not x.startswith("_") and x != "ss"}
binary_names = {x for x in dir(binary) if not x.startswith("_") and x != "ss"}
for monoid_name, binary_name in itertools.product(monoid_names, binary_names):
cur_monoid = getattr(monoid, monoid_name)
@@ -1336,6 +1344,19 @@ def badfunc2(x, y): # pragma: no cover (numba)
assert binary.first[udt, dtypes.INT8].type2 is dtypes.INT8
assert monoid.any[udt].type2 is udt
+ def _this_or_that(val, idx, _, thunk): # pragma: no cover (numba)
+ return val["x"]
+
+ sel = SelectOp.register_anonymous(_this_or_that, is_udt=True)
+ sel[udt]
+ assert udt in sel
+ result = v.select(sel, 0).new()
+ assert result.nvals == 0
+ assert result.dtype == v.dtype
+ result = w.select(sel, 0).new()
+ assert result.nvals == 3
+ assert result.isequal(w)
+
def test_dir():
for mod in [unary, binary, monoid, semiring, op]:
@@ -1429,10 +1450,9 @@ def test_deprecated():
gb.op.secondj
with pytest.warns(DeprecationWarning, match="please use"):
gb.agg.argmin
- with pytest.warns(DeprecationWarning, match="please use"):
- import graphblas.core.agg # noqa: F401
+@pytest.mark.slow
def test_is_idempotent():
assert monoid.min.is_idempotent
assert monoid.max[int].is_idempotent
@@ -1446,3 +1466,14 @@ def test_is_idempotent():
assert not monoid.numpy.equal.is_idempotent
with pytest.raises(AttributeError):
binary.min.is_idempotent
+
+
+def test_ops_have_ss():
+ modules = [unary, binary, monoid, semiring, indexunary, select, op]
+ if suitesparse:
+ for mod in modules:
+ assert mod.ss is not None
+ else:
+ for mod in modules:
+ with pytest.raises(AttributeError):
+ mod.ss
diff --git a/graphblas/tests/test_scalar.py b/graphblas/tests/test_scalar.py
index 7b7c77177..e93511914 100644
--- a/graphblas/tests/test_scalar.py
+++ b/graphblas/tests/test_scalar.py
@@ -50,7 +50,7 @@ def test_dup(s):
s_empty = Scalar(dtypes.FP64)
s_unempty = Scalar.from_value(0.0)
if s_empty.is_cscalar:
- # NumPy wraps around
+ # NumPy <2 wraps around; >=2 raises OverflowError
uint_data = [
("UINT8", 2**8 - 2),
("UINT16", 2**16 - 2),
@@ -73,6 +73,10 @@ def test_dup(s):
("FP32", -2.5),
*uint_data,
]:
+ if dtype.startswith("UINT") and s_empty.is_cscalar and not np.__version__.startswith("1."):
+ with pytest.raises(OverflowError, match="out of bounds for uint"):
+ s4.dup(dtype=dtype, name="s5")
+ continue
s5 = s4.dup(dtype=dtype, name="s5")
assert s5.dtype == dtype
assert s5.value == val
@@ -128,12 +132,14 @@ def test_equal(s):
def test_casting(s):
assert int(s) == 5
- assert type(int(s)) is int
+ assert isinstance(int(s), int)
assert float(s) == 5.0
- assert type(float(s)) is float
+ assert isinstance(float(s), float)
assert range(s) == range(5)
+ with pytest.raises(AttributeError, match="Scalar .* only .*__index__.*integral"):
+ range(s.dup(float))
assert complex(s) == complex(5)
- assert type(complex(s)) is complex
+ assert isinstance(complex(s), complex)
def test_truthy(s):
@@ -248,7 +254,7 @@ def test_update(s):
def test_not_hashable(s):
with pytest.raises(TypeError, match="unhashable type"):
- {s}
+ _ = {s}
with pytest.raises(TypeError, match="unhashable type"):
hash(s)
@@ -358,7 +364,7 @@ def test_expr_is_like_scalar(s):
}
if s.is_cscalar:
expected.add("_empty")
- ignore = {"__sizeof__"}
+ ignore = {"__sizeof__", "_ewise_add", "_ewise_mult", "_ewise_union"}
assert attrs - expr_attrs - ignore == expected, (
"If you see this message, you probably added a method to Scalar. You may need to "
"add an entry to `scalar` set in `graphblas.core.automethods` "
@@ -400,7 +406,7 @@ def test_index_expr_is_like_scalar(s):
}
if s.is_cscalar:
expected.add("_empty")
- ignore = {"__sizeof__"}
+ ignore = {"__sizeof__", "_ewise_add", "_ewise_mult", "_ewise_union"}
assert attrs - expr_attrs - ignore == expected, (
"If you see this message, you probably added a method to Scalar. You may need to "
"add an entry to `scalar` set in `graphblas.core.automethods` "
@@ -578,7 +584,7 @@ def test_record_from_dict():
def test_get(s):
assert s.get() == 5
assert s.get("mittens") == 5
- assert type(compute(s.get())) is int
+ assert isinstance(compute(s.get()), int)
s.clear()
assert compute(s.get()) is None
assert s.get("mittens") == "mittens"
diff --git a/graphblas/tests/test_ss_utils.py b/graphblas/tests/test_ss_utils.py
index 12c8c6329..2df7ab939 100644
--- a/graphblas/tests/test_ss_utils.py
+++ b/graphblas/tests/test_ss_utils.py
@@ -4,6 +4,7 @@
import graphblas as gb
from graphblas import Matrix, Vector, backend
+from graphblas.exceptions import InvalidValue
if backend != "suitesparse":
pytest.skip("gb.ss and A.ss only available with suitesparse backend", allow_module_level=True)
@@ -231,6 +232,65 @@ def test_global_config():
else:
with pytest.raises(ValueError, match="Unable to set default value for"):
config[k] = None
- with pytest.raises(ValueError, match="Wrong number"):
- config["memory_pool"] = [1, 2]
+ # with pytest.raises(ValueError, match="Wrong number"):
+ # config["memory_pool"] = [1, 2] # No longer used
assert "format" in repr(config)
+
+
+@pytest.mark.skipif("gb.core.ss._IS_SSGB7")
+def test_context():
+ context = gb.ss.Context()
+ prev = dict(context)
+ context["chunk"] += 1
+ context["nthreads"] += 1
+ assert context["chunk"] == prev["chunk"] + 1
+ assert context["nthreads"] == prev["nthreads"] + 1
+ context2 = gb.ss.Context(stack=True)
+ assert context2 == context
+ context3 = gb.ss.Context(stack=False)
+ assert context3 == prev
+ context4 = gb.ss.Context(
+ chunk=context["chunk"] + 1, nthreads=context["nthreads"] + 1, stack=False
+ )
+ assert context4["chunk"] == context["chunk"] + 1
+ assert context4["nthreads"] == context["nthreads"] + 1
+ assert context == context.dup()
+ assert context4 == context.dup(chunk=context["chunk"] + 1, nthreads=context["nthreads"] + 1)
+ assert context.dup(gpu_id=-1)["gpu_id"] == -1
+
+ context.engage()
+ assert gb.core.ss.context.threadlocal.context is context
+ with gb.ss.Context(nthreads=1) as ctx:
+ assert gb.core.ss.context.threadlocal.context is ctx
+ v = Vector(int, 5)
+ v(nthreads=2) << v + v
+ assert gb.core.ss.context.threadlocal.context is ctx
+ assert gb.core.ss.context.threadlocal.context is context
+ with pytest.raises(InvalidValue):
+ # Wait, why does this raise?!
+ ctx.disengage()
+ assert gb.core.ss.context.threadlocal.context is context
+ context.disengage()
+ assert gb.core.ss.context.threadlocal.context is gb.core.ss.context.global_context
+ assert context._prev_context is None
+
+ # hackery
+ gb.core.ss.context.threadlocal.context = context
+ context.disengage()
+ context.disengage()
+ context.disengage()
+ assert gb.core.ss.context.threadlocal.context is gb.core.ss.context.global_context
+
+ # Actually engaged, but not set in threadlocal
+ context._engage()
+ assert gb.core.ss.context.threadlocal.context is gb.core.ss.context.global_context
+ context.disengage()
+
+ context.engage()
+ context._engage()
+ assert gb.core.ss.context.threadlocal.context is context
+ context.disengage()
+
+ context._context = context # This is allowed to work with config
+ with pytest.raises(AttributeError, match="_context"):
+ context._context = ctx # This is not
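A compact sketch of the Context behavior exercised above: a Context carries per-thread settings such as "nthreads" and "chunk", engage/disengage manage a thread-local stack, and the with-statement form restores the previous context on exit:

    with gb.ss.Context(nthreads=1) as ctx:
        ...  # operations in this block run with nthreads=1
    ctx2 = ctx.dup(chunk=ctx["chunk"] + 1)  # copy with one setting overridden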
diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py
new file mode 100644
index 000000000..4cea0b563
--- /dev/null
+++ b/graphblas/tests/test_ssjit.py
@@ -0,0 +1,438 @@
+import os
+import pathlib
+import platform
+import sys
+import sysconfig
+
+import numpy as np
+import pytest
+from numpy.testing import assert_array_equal
+
+import graphblas as gb
+from graphblas import backend, binary, dtypes, indexunary, select, unary
+from graphblas.core import _supports_udfs as supports_udfs
+from graphblas.core.ss import _IS_SSGB7
+
+from .conftest import autocompute, burble
+
+from graphblas import Vector # isort:skip (for dask-graphblas)
+
+try:
+ import numba
+except ImportError:
+ numba = None
+
+if backend != "suitesparse":
+ pytest.skip("not suitesparse backend", allow_module_level=True)
+
+
+@pytest.fixture(scope="module", autouse=True)
+def _setup_jit():
+ """Set up the SuiteSparse:GraphBLAS JIT."""
+ if _IS_SSGB7:
+ # SuiteSparse JIT was added in SSGB 8
+ yield
+ return
+
+ if not os.environ.get("GITHUB_ACTIONS"):
+ # Try to run the tests with defaults from sysconfig if not running in CI
+ prev = gb.ss.config["jit_c_control"]
+ cc = sysconfig.get_config_var("CC")
+ cflags = sysconfig.get_config_var("CFLAGS")
+ include = sysconfig.get_path("include")
+ libs = sysconfig.get_config_var("LIBS")
+ if not (cc is None or cflags is None or include is None or libs is None):
+ gb.ss.config["jit_c_control"] = "on"
+ gb.ss.config["jit_c_compiler_name"] = cc
+ gb.ss.config["jit_c_compiler_flags"] = f"{cflags} -I{include}"
+ gb.ss.config["jit_c_libraries"] = libs
+ else:
+ # Should we skip or try to run if sysconfig vars aren't set?
+ gb.ss.config["jit_c_control"] = "on" # "off"
+ try:
+ yield
+ finally:
+ gb.ss.config["jit_c_control"] = prev
+ return
+
+ if (
+ sys.platform == "darwin"
+ or sys.platform == "linux"
+ and "conda" not in gb.ss.config["jit_c_compiler_name"]
+ ):
+ # XXX TODO: tests for SuiteSparse JIT are not passing on linux when using wheels or on osx
+ # This should be understood and fixed!
+ gb.ss.config["jit_c_control"] = "off"
+ yield
+ return
+
+ # Configuration values below were obtained from the output of the JIT config
+ # in CI, but with paths changed to use `{conda_prefix}` where appropriate.
+ conda_prefix = os.environ["CONDA_PREFIX"]
+ prev = gb.ss.config["jit_c_control"]
+ gb.ss.config["jit_c_control"] = "on"
+ if sys.platform == "linux":
+ gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/x86_64-conda-linux-gnu-cc"
+ gb.ss.config["jit_c_compiler_flags"] = (
+ "-march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong "
+ f"-fno-plt -O2 -ffunction-sections -pipe -isystem {conda_prefix}/include -Wundef "
+ "-std=c11 -lm -Wno-pragmas -fexcess-precision=fast -fcx-limited-range "
+ "-fno-math-errno -fwrapv -O3 -DNDEBUG -fopenmp -fPIC"
+ )
+ gb.ss.config["jit_c_linker_flags"] = (
+ "-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now "
+ "-Wl,--disable-new-dtags -Wl,--gc-sections -Wl,--allow-shlib-undefined "
+ f"-Wl,-rpath,{conda_prefix}/lib -Wl,-rpath-link,{conda_prefix}/lib "
+ f"-L{conda_prefix}/lib -shared"
+ )
+ gb.ss.config["jit_c_libraries"] = (
+ f"-lm -ldl {conda_prefix}/lib/libgomp.so "
+ f"{conda_prefix}/x86_64-conda-linux-gnu/sysroot/usr/lib/libpthread.so"
+ )
+ gb.ss.config["jit_c_cmake_libs"] = (
+ f"m;dl;{conda_prefix}/lib/libgomp.so;"
+ f"{conda_prefix}/x86_64-conda-linux-gnu/sysroot/usr/lib/libpthread.so"
+ )
+ elif sys.platform == "darwin":
+ gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/clang"
+ gb.ss.config["jit_c_compiler_flags"] = (
+ "-march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE "
+ f"-fstack-protector-strong -O2 -pipe -isystem {conda_prefix}/include -DGBNCPUFEAT "
+ f"-Wno-pointer-sign -O3 -DNDEBUG -fopenmp=libomp -fPIC -arch {platform.machine()}"
+ )
+ gb.ss.config["jit_c_linker_flags"] = (
+ "-Wl,-pie -Wl,-headerpad_max_install_names -Wl,-dead_strip_dylibs "
+ f"-Wl,-rpath,{conda_prefix}/lib -L{conda_prefix}/lib -dynamiclib"
+ )
+ gb.ss.config["jit_c_libraries"] = f"-lm -ldl {conda_prefix}/lib/libomp.dylib"
+ gb.ss.config["jit_c_cmake_libs"] = f"m;dl;{conda_prefix}/lib/libomp.dylib"
+ elif sys.platform == "win32": # pragma: no branch (sanity)
+ if "mingw" in gb.ss.config["jit_c_libraries"]:
+ # This probably means we're testing a `python-suitesparse-graphblas` wheel
+ # in a conda environment. This is not yet working.
+ gb.ss.config["jit_c_control"] = "off"
+ yield
+ return
+
+ gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc"
+ gb.ss.config["jit_c_compiler_flags"] = (
+ '/DWIN32 /D_WINDOWS -DGBNCPUFEAT /O2 -wd"4244" -wd"4146" -wd"4018" '
+ '-wd"4996" -wd"4047" -wd"4554" /O2 /Ob2 /DNDEBUG -openmp'
+ )
+ gb.ss.config["jit_c_linker_flags"] = "/machine:x64"
+ gb.ss.config["jit_c_libraries"] = ""
+ gb.ss.config["jit_c_cmake_libs"] = ""
+
+ if not pathlib.Path(gb.ss.config["jit_c_compiler_name"]).exists():
+ # Can't use the JIT if we don't have a compiler!
+ gb.ss.config["jit_c_control"] = "off"
+ yield
+ return
+ try:
+ yield
+ finally:
+ gb.ss.config["jit_c_control"] = prev
+
+
+@pytest.fixture
+def v():
+ return Vector.from_coo([1, 3, 4, 6], [1, 1, 2, 0])
+
+
+@autocompute
+def test_jit_udt():
+ if _IS_SSGB7:
+ with pytest.raises(RuntimeError, match="JIT was added"):
+ dtypes.ss.register_new(
+ "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;"
+ )
+ return
+ if gb.ss.config["jit_c_control"] == "off":
+ return
+ with burble():
+ dtype = dtypes.ss.register_new(
+ "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;"
+ )
+ assert not hasattr(dtypes, "myquaternion")
+ assert dtypes.ss.myquaternion is dtype
+ assert dtype.name == "myquaternion"
+ assert str(dtype) == "myquaternion"
+ assert dtype.gb_name is None
+ v = Vector(dtype, 2)
+ np_type = np.dtype([("x", "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "Primary\n",
+ "\n",
+ "#409DC1\n",
+ "\n",
+ "#FF8552\n",
+ "\n",
+ "#39393A\n",
+ "\n",
+ "#C3C3C7\n",
+ "\n",
+ "#848487\n",
+ "Secondary\n",
+ "\n",
+ "#81B7CC\n",
+ "\n",
+ "#FFBB9E\n",
+ "\n",
+ "#6D213C\n",
+ "\n",
+ "#BA708A\n",
+ "\n",
+ "#85FFC7\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "d = draw.Drawing(750, 500, origin=\"center\")\n",
+ "d.append(\n",
+ " draw.Rectangle(-375, -250, 750, 500, fill=\"white\")\n",
+ ") # Add `stroke=\"black\"` border to see boundaries for testing\n",
+ "\n",
+ "dy = 25\n",
+ "dx = 0\n",
+ "w = h = 150\n",
+ "b = 25\n",
+ "x = -400 + 62.5 + dx\n",
+ "y = -200 + dy\n",
+ "\n",
+ "d.draw(\n",
+ " draw.Text(\n",
+ " \"Primary\",\n",
+ " x=x + 1.5 * (b + w) + w / 2,\n",
+ " y=y - b,\n",
+ " font_size=1.5 * b,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Arial\",\n",
+ " fill=\"black\",\n",
+ " )\n",
+ ")\n",
+ "d.draw(draw.Rectangle(x, y, w, h, fill=blue))\n",
+ "d.draw(\n",
+ " draw.Text(\n",
+ " blue.upper(),\n",
+ " x=x + w / 2,\n",
+ " y=y + h - b,\n",
+ " font_size=b,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Arial\",\n",
+ " fill=\"black\",\n",
+ " )\n",
+ ")\n",
+ "d.draw(draw.Rectangle(x + b + w, y, w, h, fill=orange))\n",
+ "d.draw(\n",
+ " draw.Text(\n",
+ " orange.upper(),\n",
+ " x=x + (b + w) + w / 2,\n",
+ " y=y + h - b,\n",
+ " font_size=b,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Arial\",\n",
+ " fill=\"black\",\n",
+ " )\n",
+ ")\n",
+ "d.draw(draw.Rectangle(x + 2 * (b + w), y, w, h, fill=dark_gray))\n",
+ "d.draw(\n",
+ " draw.Text(\n",
+ " dark_gray.upper(),\n",
+ " x=x + 2 * (b + w) + w / 2,\n",
+ " y=y + h - b,\n",
+ " font_size=b,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Arial\",\n",
+ " fill=\"white\",\n",
+ " )\n",
+ ")\n",
+ "d.draw(draw.Rectangle(x + 3 * (b + w), y, w, h, fill=light_gray))\n",
+ "d.draw(\n",
+ " draw.Text(\n",
+ " light_gray.upper(),\n",
+ " x=x + 3 * (b + w) + w / 2,\n",
+ " y=y + h - b,\n",
+ " font_size=b,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Arial\",\n",
+ " fill=\"black\",\n",
+ " )\n",
+ ")\n",
+ "\n",
+ "d.draw(draw.Rectangle(x, -25 + dy, 675, 45, fill=medium_gray))\n",
+ "d.draw(\n",
+ " draw.Text(\n",
+ " medium_gray.upper(),\n",
+ " x=x + 675 / 2,\n",
+ " y=-25 + 30 + dy,\n",
+ " font_size=22.5,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Arial\",\n",
+ " fill=\"black\",\n",
+ " )\n",
+ ")\n",
+ "\n",
+ "y = 40 + dy\n",
+ "w = h = 119\n",
+ "b = 20\n",
+ "d.draw(\n",
+ " draw.Text(\n",
+ " \"Secondary\",\n",
+ " x=x + 2 * (b + w) + w / 2,\n",
+ " y=y + h + 2 * b,\n",
+ " font_size=1.5 * b,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Arial\",\n",
+ " fill=\"black\",\n",
+ " )\n",
+ ")\n",
+ "d.draw(draw.Rectangle(x, y, w, h, fill=light_blue))\n",
+ "d.draw(\n",
+ " draw.Text(\n",
+ " light_blue.upper(),\n",
+ " x=x + w / 2,\n",
+ " y=y + h - b,\n",
+ " font_size=b,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Arial\",\n",
+ " fill=\"black\",\n",
+ " )\n",
+ ")\n",
+ "d.draw(draw.Rectangle(x + b + w, y, w, h, fill=light_orange))\n",
+ "d.draw(\n",
+ " draw.Text(\n",
+ " light_orange.upper(),\n",
+ " x=x + (b + w) + w / 2,\n",
+ " y=y + h - b,\n",
+ " font_size=b,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Arial\",\n",
+ " fill=\"black\",\n",
+ " )\n",
+ ")\n",
+ "d.draw(draw.Rectangle(x + 2 * (b + w), y, w, h, fill=red))\n",
+ "d.draw(\n",
+ " draw.Text(\n",
+ " red.upper(),\n",
+ " x=x + 2 * (b + w) + w / 2,\n",
+ " y=y + h - b,\n",
+ " font_size=b,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Arial\",\n",
+ " fill=\"white\",\n",
+ " )\n",
+ ")\n",
+ "d.draw(draw.Rectangle(x + 3 * (b + w), y, w, h, fill=light_red))\n",
+ "d.draw(\n",
+ " draw.Text(\n",
+ " light_red.upper(),\n",
+ " x=x + 3 * (b + w) + w / 2,\n",
+ " y=y + h - b,\n",
+ " font_size=b,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Arial\",\n",
+ " fill=\"black\",\n",
+ " )\n",
+ ")\n",
+ "d.draw(draw.Rectangle(x + 4 * (b + w), y, w, h, fill=green))\n",
+ "d.draw(\n",
+ " draw.Text(\n",
+ " green.upper(),\n",
+ " x=x + 4 * (b + w) + w / 2,\n",
+ " y=y + h - b,\n",
+ " font_size=b,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Arial\",\n",
+ " fill=\"black\",\n",
+ " )\n",
+ ")\n",
+ "\n",
+ "color_palette = d\n",
+ "d"
+ ]
+ },
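The palette cell lays its swatches out on a fixed grid: each box's left edge is offset by a multiple of `b + w` (gap plus width), and each hex-code label is centered at `+ w / 2`. A small sketch of that arithmetic, with hypothetical helper names:

```python
def swatch_left_edges(x0, width, gap, count):
    """Left edge of each of `count` swatches of `width` separated by `gap`."""
    return [x0 + i * (gap + width) for i in range(count)]


def label_centers(x0, width, gap, count):
    """Horizontal centers where the labels are text-anchored."""
    return [edge + width / 2 for edge in swatch_left_edges(x0, width, gap, count)]
```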
+ {
+ "cell_type": "markdown",
+ "id": "e59c3941-c73b-455e-88f2-4b3aae228421",
+ "metadata": {},
+ "source": [
+ "## Display color wheel"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "c27e8ef2-04f2-4752-9c3b-cf297a0c87a5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def create_color_wheel(color_wheel):\n",
+ " d = draw.Drawing(300, 300, origin=\"center\")\n",
+ " theta = np.pi / 3\n",
+ "\n",
+ " angle = 0\n",
+ " for i, color in enumerate(color_wheel):\n",
+ " angle = i * np.pi / 3\n",
+ " clip = draw.ClipPath()\n",
+ " if i == 5:\n",
+ " angle_offset = theta\n",
+ " else:\n",
+ " angle_offset = theta * 1.05\n",
+ " clip.append(\n",
+ " draw.Lines(\n",
+ " 0,\n",
+ " 0,\n",
+ " 300 * np.sin(angle),\n",
+ " 300 * np.cos(angle),\n",
+ " 300 * np.sin(angle + angle_offset),\n",
+ " 300 * np.cos(angle + angle_offset),\n",
+ " close=True,\n",
+ " )\n",
+ " )\n",
+ " if i == 0:\n",
+ " clip = None\n",
+ " d.append(draw.Circle(0, 0, 145, fill=color, clip_path=clip))\n",
+ "\n",
+ " angle = 3 * theta\n",
+ " for i, color in enumerate(color_wheel):\n",
+ " angle = ((i + 3) % 6) * np.pi / 3\n",
+ " clip = draw.ClipPath()\n",
+ " if i == 5:\n",
+ " angle_offset = theta\n",
+ " else:\n",
+ " angle_offset = theta * 1.05\n",
+ " clip.append(\n",
+ " draw.Lines(\n",
+ " 0,\n",
+ " 0,\n",
+ " 300 * np.sin(angle),\n",
+ " 300 * np.cos(angle),\n",
+ " 300 * np.sin(angle + angle_offset),\n",
+ " 300 * np.cos(angle + angle_offset),\n",
+ " close=True,\n",
+ " )\n",
+ " )\n",
+ " if i == 0:\n",
+ " clip = None\n",
+ " d.append(draw.Circle(0, 0, 105, fill=color, clip_path=clip))\n",
+ "\n",
+ " angle = theta\n",
+ " for i, color in enumerate(color_wheel):\n",
+ " angle = ((i + 1) % 6) * np.pi / 3\n",
+ " clip = draw.ClipPath()\n",
+ " if i == 5:\n",
+ " angle_offset = theta\n",
+ " else:\n",
+ " angle_offset = theta * 1.05\n",
+ " clip.append(\n",
+ " draw.Lines(\n",
+ " 0,\n",
+ " 0,\n",
+ " 300 * np.sin(angle),\n",
+ " 300 * np.cos(angle),\n",
+ " 300 * np.sin(angle + angle_offset),\n",
+ " 300 * np.cos(angle + angle_offset),\n",
+ " close=True,\n",
+ " )\n",
+ " )\n",
+ " if i == 0:\n",
+ " clip = None\n",
+ " d.append(draw.Circle(0, 0, 65, fill=color, clip_path=clip))\n",
+ "\n",
+ " d.append(draw.Circle(0, 0, 25, fill=medium_gray))\n",
+ " return d"
+ ]
+ },
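Each ring in `create_color_wheel` is built by clipping a full circle against a triangular sector: one vertex at the origin plus two points on a radius-300 arc, with a 5% angular overlap on all but the last wedge so hairline antialiasing seams don't show. A sketch of that geometry, mirroring the cell above:

```python
import numpy as np


def wedge_points(i, theta=np.pi / 3, radius=300, overlap=1.05):
    """Vertices of the triangular clip path for sector `i` of six."""
    angle = i * theta
    span = theta if i == 5 else theta * overlap  # slight overlap hides seams
    return [
        (0, 0),
        (radius * np.sin(angle), radius * np.cos(angle)),
        (radius * np.sin(angle + span), radius * np.cos(angle + span)),
    ]
```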
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "2564bf63-8293-4828-8e38-d00a3b96b067",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Standard\n",
+ "standard_wheel = create_color_wheel(\n",
+ " [\n",
+ " blue,\n",
+ " light_gray,\n",
+ " light_blue,\n",
+ " dark_gray,\n",
+ " orange,\n",
+ " light_orange,\n",
+ " ]\n",
+ ")\n",
+ "standard_wheel"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "7a500a39-4114-49bb-aa19-912c6a8a8d95",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# High contrast\n",
+ "high_wheel = create_color_wheel(\n",
+ " [\n",
+ " light_gray,\n",
+ " blue,\n",
+ " green,\n",
+ " dark_gray,\n",
+ " orange,\n",
+ " red,\n",
+ " ]\n",
+ ")\n",
+ "high_wheel"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "8f404efe-2b88-4bdf-9102-2e6ad9389ca3",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Low contrast\n",
+ "low_wheel = create_color_wheel(\n",
+ " [\n",
+ " green,\n",
+ " light_red,\n",
+ " orange,\n",
+ " light_blue,\n",
+ " light_orange,\n",
+ " blue,\n",
+ " ]\n",
+ ")\n",
+ "low_wheel"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "fd913698-ea45-4219-8003-0fd30124d091",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Warm :)\n",
+ "warm_wheel = create_color_wheel(\n",
+ " [\n",
+ " light_gray, # or dark_gray\n",
+ " light_red,\n",
+ " french_rose, # ;)\n",
+ " red,\n",
+ " orange,\n",
+ " light_orange,\n",
+ " ]\n",
+ ")\n",
+ "warm_wheel"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "c7a3a5e6-4be4-4def-9687-00d1e3f80375",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Cool\n",
+ "cool_wheel = create_color_wheel(\n",
+ " [\n",
+ " light_blue,\n",
+ " light_gray,\n",
+ " blue,\n",
+ " light_red,\n",
+ " green,\n",
+ " dark_gray,\n",
+ " ]\n",
+ ")\n",
+ "cool_wheel"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "343256c8-35a7-4c89-aa60-c6bf60930c09",
+ "metadata": {},
+ "source": [
+ "## Create logos"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "7855cd3f-8155-4d11-9730-b6041578e112",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "default_angles = [\n",
+ " 180, # Don't modify this\n",
+ " 30, # How much of the \"left face\" to see\n",
+ " 22.5, # How much of the \"top face\" to see\n",
+ "]\n",
+ "R = Rotation.from_euler(\"ZYX\", default_angles, degrees=True).as_matrix()\n",
+ "\n",
+ "gcube = np.array(\n",
+ " [\n",
+ " [-1, 1, -1],\n",
+ " [-1, 1, 1],\n",
+ " [1, 1, 1],\n",
+ " [-1, -1, 1],\n",
+ " [1, -1, 1],\n",
+ " [1, 0, 1],\n",
+ " [0, 0, 1],\n",
+ " ]\n",
+ ")\n",
+ "gcube_major = gcube[:5] # Big circles\n",
+ "gcube_minor = gcube[5:] # Small circles\n",
+ "lines = np.array(\n",
+ " [\n",
+ " [gcube[1], gcube[0]],\n",
+ " ]\n",
+ ")\n",
+ "Gpath = np.array(\n",
+ " [\n",
+ " gcube[2],\n",
+ " gcube[1],\n",
+ " gcube[3],\n",
+ " gcube[4],\n",
+ " gcube[5],\n",
+ " gcube[6],\n",
+ " ]\n",
+ ")\n",
+ "\n",
+ "\n",
+ "def create_logo(\n",
+ " *,\n",
+ " bracket_color=None,\n",
+ " bg_color=None,\n",
+ " edge_color=None,\n",
+ " edge_width=8,\n",
+ " edge_border_color=\"white\",\n",
+ " edge_border_width=16,\n",
+ " node_color=None,\n",
+ " large_node_width=16,\n",
+ " small_node_width=8,\n",
+ " node_border_color=\"white\",\n",
+ " node_stroke_width=4,\n",
+ " large_border=True,\n",
+ " g_color=None,\n",
+ " angles=None,\n",
+ "):\n",
+ " if angles is None:\n",
+ " angles = default_angles\n",
+ " if edge_color is None:\n",
+ " edge_color = blue\n",
+ " if bracket_color is None:\n",
+ " bracket_color = edge_color\n",
+ " if node_color is None:\n",
+ " node_color = orange\n",
+ " if g_color is None:\n",
+ " g_color = edge_color\n",
+ "\n",
+ " d = draw.Drawing(190, 190, origin=\"center\")\n",
+ " if bg_color:\n",
+ " d.append(\n",
+ " draw.Rectangle(-95, -95, 190, 190, fill=bg_color)\n",
+ " ) # Add `stroke=\"black\"` border to see boundaries for testing\n",
+ "\n",
+ " scale = 40\n",
+ " dx = 0\n",
+ " dy = -2\n",
+ "\n",
+ " if edge_border_width:\n",
+ " # Add white border around lines\n",
+ " d.append(\n",
+ " draw.Lines(\n",
+ " *(((Gpath @ R) * scale)[:, :2] * [-1, 1]).ravel().tolist(),\n",
+ " fill=\"none\",\n",
+ " stroke=edge_border_color,\n",
+ " stroke_width=edge_border_width,\n",
+ " )\n",
+ " )\n",
+ " for (x0, y0, z0), (x1, y1, z1) in ((lines @ R) * scale).tolist():\n",
+ " x0 = -x0\n",
+ " x1 = -x1 # Just live with this\n",
+ " d.append(\n",
+ " draw.Line(\n",
+ " x0 + dx,\n",
+ " y0 + dy,\n",
+ " x1 + dx,\n",
+ " y1 + dy,\n",
+ " stroke=edge_border_color,\n",
+ " stroke_width=edge_border_width,\n",
+ " )\n",
+ " )\n",
+ "\n",
+ " # Add edges\n",
+ " d.append(\n",
+ " draw.Lines(\n",
+ " *(((Gpath @ R) * scale)[:, :2] * [-1, 1]).ravel().tolist(),\n",
+ " fill=\"none\",\n",
+ " stroke=g_color,\n",
+ " stroke_width=edge_width,\n",
+ " )\n",
+ " )\n",
+ " for (x0, y0, z0), (x1, y1, z1) in ((lines @ R) * scale).tolist():\n",
+ " x0 = -x0\n",
+ " x1 = -x1\n",
+ " d.append(\n",
+ " draw.Line(\n",
+ " x0 + dx, y0 + dy, x1 + dx, y1 + dy, stroke=edge_color, stroke_width=edge_width\n",
+ " )\n",
+ " )\n",
+ "\n",
+ " # Add vertices\n",
+ " for x, y, z in ((gcube_major @ R) * scale).tolist():\n",
+ " x = -x\n",
+ " d.append(\n",
+ " draw.Circle(\n",
+ " x + dx,\n",
+ " y + dy,\n",
+ " large_node_width,\n",
+ " fill=node_color,\n",
+ " stroke=node_border_color,\n",
+ " stroke_width=node_stroke_width if large_border else 0,\n",
+ " )\n",
+ " )\n",
+ " for x, y, z in ((gcube_minor @ R) * scale).tolist():\n",
+ " x = -x\n",
+ " d.append(\n",
+ " draw.Circle(\n",
+ " x + dx,\n",
+ " y + dy,\n",
+ " small_node_width,\n",
+ " fill=node_color,\n",
+ " stroke=node_border_color,\n",
+ " stroke_width=node_stroke_width,\n",
+ " )\n",
+ " )\n",
+ "\n",
+ " # Add brackets\n",
+ " d.append(\n",
+ " draw.Text(\n",
+ " \"[\",\n",
+ " x=-85,\n",
+ " y=52,\n",
+ " font_size=214,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Courier New\",\n",
+ " fill=bracket_color,\n",
+ " )\n",
+ " )\n",
+ " d.append(\n",
+ " draw.Text(\n",
+ " \"]\",\n",
+ " x=85,\n",
+ " y=52,\n",
+ " font_size=214,\n",
+ " text_anchor=\"middle\",\n",
+ " font_family=\"Courier New\",\n",
+ " fill=bracket_color,\n",
+ " )\n",
+ " )\n",
+ "\n",
+ " return d"
+ ]
+ },
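`create_logo` renders the 3-D "G cube" with a fixed camera: points are rotated by the ZYX Euler matrix `R`, scaled, projected by simply dropping `z`, and mirrored in `x` (the `* [-1, 1]` and `x0 = -x0` steps). A compact sketch of that projection, under the same assumptions as the cell above:

```python
import numpy as np
from scipy.spatial.transform import Rotation

R = Rotation.from_euler("ZYX", [180, 30, 22.5], degrees=True).as_matrix()


def project(points, scale=40):
    """Rotate 3-D points, drop z, and mirror x, as create_logo does."""
    rotated = (np.asarray(points) @ R) * scale
    return rotated[:, :2] * [-1, 1]  # orthographic projection plus x flip
```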
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "4325e0b8-dbbb-4219-a2b3-4d9cdee2bdc8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "logo_defaults = dict(\n",
+ " bracket_color=blue,\n",
+ " edge_color=blue,\n",
+ " node_color=orange,\n",
+ " edge_border_width=0,\n",
+ " edge_width=12,\n",
+ " small_node_width=11,\n",
+ " large_node_width=17,\n",
+ " node_border_color=\"none\",\n",
+ " node_stroke_width=0,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "f886df89-b3b5-4671-bcc0-98e8705feb5a",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "[\n",
+ "]\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "create_logo(bg_color=\"white\", **logo_defaults)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "68e01137-55e3-4973-bf97-4fcd36c8c662",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "[\n",
+ "]\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "create_logo(bg_color=\"black\", **logo_defaults)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "b1d5e928-16c5-4377-aee1-1489ab45efc8",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "[\n",
+ "]\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Transparent background\n",
+ "logo = create_logo(**logo_defaults)\n",
+ "logo"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b187c131-d337-4a7b-ab54-80ebe0f48ab4",
+ "metadata": {},
+ "source": [
+ "## Alternatives with gray brackets\n",
+ "### Background-agnostic (works with light and dark mode)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "acca9b2e-2f54-4b86-9a33-2c57502f6160",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "[\n",
+ "]\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "medium_logo = create_logo(**{**logo_defaults, \"bracket_color\": medium_gray})\n",
+ "create_logo(bg_color=\"white\", **{**logo_defaults, \"bracket_color\": medium_gray})"
+ ]
+ },
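These variants reuse `logo_defaults` through the dict-unpacking idiom `{**logo_defaults, "bracket_color": medium_gray}`, where the rightmost key wins. A hypothetical helper that names the pattern:

```python
def make_logo(**overrides):
    """Hypothetical convenience wrapper: merge overrides into logo_defaults."""
    return create_logo(**{**logo_defaults, **overrides})


# e.g. make_logo(bracket_color=medium_gray, bg_color="white")
```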
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "id": "f5d0086d-b50e-49eb-9aae-b0953cdc0045",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "[\n",
+ "]\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "create_logo(bg_color=\"black\", **{**logo_defaults, \"bracket_color\": medium_gray})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c4dce89d-e34c-4190-a068-7e78cdeea745",
+ "metadata": {},
+ "source": [
+ "### For light mode"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "27137343-141a-422e-abd6-123af3416ea4",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "[\n",
+ "]\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "light_logo = create_logo(**{**logo_defaults, \"bracket_color\": dark_gray})\n",
+ "create_logo(bg_color=\"white\", **{**logo_defaults, \"bracket_color\": dark_gray})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8a70b0f7-c3c4-44ae-af09-8992400f362e",
+ "metadata": {},
+ "source": [
+ "### For dark mode"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "3ab9bb40-d7a8-4788-9971-54a5779d284d",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "[\n",
+ "]\n",
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "dark_logo = create_logo(**{**logo_defaults, \"bracket_color\": light_gray})\n",
+ "create_logo(bg_color=\"black\", **{**logo_defaults, \"bracket_color\": light_gray})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "d53046c1-8cbb-47fa-a88b-4d98958df26b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if False:\n",
+ " logo.save_svg(\"python-graphblas-logo.svg\")\n",
+ " light_logo.save_svg(\"python-graphblas-logo-light.svg\")\n",
+ " medium_logo.save_svg(\"python-graphblas-logo-medium.svg\")\n",
+ " dark_logo.save_svg(\"python-graphblas-logo-dark.svg\")\n",
+ " color_palette.save_svg(\"color-palette.svg\")\n",
+ " standard_wheel.save_svg(\"color-wheel.svg\")\n",
+ " high_wheel.save_svg(\"color-wheel-high.svg\")\n",
+ " low_wheel.save_svg(\"color-wheel-low.svg\")\n",
+ " warm_wheel.save_svg(\"color-wheel-warm.svg\")\n",
+ " cool_wheel.save_svg(\"color-wheel-cool.svg\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "51093fab-600b-47d7-9809-fa0f16e7246f",
+ "metadata": {},
+ "source": [
+ "### *NOTE: The font in the SVG files should be converted to paths, because not all systems have Courier New*\n",
+ "Also, SVG files can be minified here: https://vecta.io/nano"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/pyproject.toml b/pyproject.toml
index 245dc35bd..1bad95118 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,70 +1,68 @@
[build-system]
build-backend = "setuptools.build_meta"
-requires = [
- "setuptools >=64",
- "setuptools-git-versioning",
-]
+requires = ["setuptools >=64", "setuptools-git-versioning"]
[project]
name = "python-graphblas"
dynamic = ["version"]
description = "Python library for GraphBLAS: high-performance sparse linear algebra for scalable graph analytics"
readme = "README.md"
-requires-python = ">=3.8"
-license = {file = "LICENSE"}
+requires-python = ">=3.10"
+license = { file = "LICENSE" }
authors = [
- {name = "Erik Welch", email = "erik.n.welch@gmail.com"},
- {name = "Jim Kitchen"},
- {name = "Python-graphblas contributors"},
+ { name = "Erik Welch", email = "erik.n.welch@gmail.com" },
+ { name = "Jim Kitchen" },
+ { name = "Python-graphblas contributors" },
]
maintainers = [
- {name = "Erik Welch", email = "erik.n.welch@gmail.com"},
- {name = "Jim Kitchen", email = "jim22k@gmail.com"},
- {name = "Sultan Orazbayev", email = "contact@econpoint.com"},
+ { name = "Erik Welch", email = "erik.n.welch@gmail.com" },
+ { name = "Jim Kitchen", email = "jim22k@gmail.com" },
+ { name = "Sultan Orazbayev", email = "contact@econpoint.com" },
]
keywords = [
- "graphblas",
- "graph",
- "sparse",
- "matrix",
- "lagraph",
- "suitesparse",
- "Networks",
- "Graph Theory",
- "Mathematics",
- "network",
- "discrete mathematics",
- "math",
+ "graphblas",
+ "graph",
+ "sparse",
+ "matrix",
+ "lagraph",
+ "suitesparse",
+ "Networks",
+ "Graph Theory",
+ "Mathematics",
+ "network",
+ "discrete mathematics",
+ "math",
]
classifiers = [
- "Development Status :: 5 - Production/Stable",
- "License :: OSI Approved :: Apache Software License",
- "Operating System :: MacOS :: MacOS X",
- "Operating System :: POSIX :: Linux",
- "Operating System :: Microsoft :: Windows",
- "Programming Language :: Python",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.8",
- "Programming Language :: Python :: 3.9",
- "Programming Language :: Python :: 3.10",
- "Programming Language :: Python :: 3.11",
- "Programming Language :: Python :: 3 :: Only",
- "Intended Audience :: Developers",
- "Intended Audience :: Other Audience",
- "Intended Audience :: Science/Research",
- "Topic :: Scientific/Engineering",
- "Topic :: Scientific/Engineering :: Information Analysis",
- "Topic :: Scientific/Engineering :: Mathematics",
- "Topic :: Software Development :: Libraries :: Python Modules",
+ "Development Status :: 5 - Production/Stable",
+ "License :: OSI Approved :: Apache Software License",
+ "Operating System :: MacOS :: MacOS X",
+ "Operating System :: POSIX :: Linux",
+ "Operating System :: Microsoft :: Windows",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3 :: Only",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Other Audience",
+ "Intended Audience :: Science/Research",
+ "Topic :: Scientific/Engineering",
+ "Topic :: Scientific/Engineering :: Information Analysis",
+ "Topic :: Scientific/Engineering :: Mathematics",
+ "Topic :: Software Development :: Libraries :: Python Modules",
]
dependencies = [
- "numpy >=1.21",
- "donfig >=0.6",
- "pyyaml >=5.4",
- # These won't be installed by default after 2024.3.0
- # Use e.g. "python-graphblas[suitesparse]" or "python-graphblas[default]" instead
- "suitesparse-graphblas >=7.4.0.0, <7.5",
- "numba >=0.55; python_version<'3.12'", # make optional where numba is not supported
+ "numpy >=1.23",
+ "donfig >=0.6",
+ "pyyaml >=5.4",
+ # These won't be installed by default after 2024.3.0
+ # once pep-771 is supported: https://peps.python.org/pep-0771/
+ # Use e.g. "python-graphblas[suitesparse]" or "python-graphblas[default]" instead
+ "suitesparse-graphblas >=7.4.0.0, <10",
+ "numba >=0.55; python_version<'3.14'", # make optional where numba is not supported
]
[project.urls]
@@ -74,56 +72,41 @@ repository = "https://github.com/python-graphblas/python-graphblas"
changelog = "https://github.com/python-graphblas/python-graphblas/releases"
[project.optional-dependencies]
-suitesparse = [
- "suitesparse-graphblas >=7.4.0.0, <7.5",
-]
-networkx = [
- "networkx >=2.8",
-]
-numba = [
- "numba >=0.55",
-]
-pandas = [
- "pandas >=1.2",
-]
-scipy = [
- "scipy >=1.8",
-]
-suitesparse-udf = [ # udf requires numba
- "python-graphblas[suitesparse,numba]",
-]
-repr = [
- "python-graphblas[pandas]",
+suitesparse = ["suitesparse-graphblas >=7.4.0.0, <10"]
+networkx = ["networkx >=2.8"]
+numba = ["numba >=0.55"]
+pandas = ["pandas >=1.5"]
+scipy = ["scipy >=1.9"]
+suitesparse-udf = [ # udf requires numba
+ "python-graphblas[suitesparse,numba]",
]
+repr = ["python-graphblas[pandas]"]
io = [
- "python-graphblas[networkx,scipy]",
- "python-graphblas[numba]; python_version<'3.12'",
- "awkward >=1.9",
- "sparse >=0.13; python_version<'3.12'", # make optional, b/c sparse needs numba
- "fast-matrix-market >=1.4.5",
-]
-viz = [
- "python-graphblas[networkx,scipy]",
- "matplotlib >=3.5",
+ "python-graphblas[networkx,scipy]",
+ "python-graphblas[numba]; python_version<'3.14'",
+ "awkward >=2.0",
+ "sparse >=0.14; python_version<'3.13'", # make optional, b/c sparse needs numba
+ "fast-matrix-market >=1.4.5; python_version<'3.13'", # py3.13 not supported yet
]
-datashade = [ # datashade requires numba
- "python-graphblas[numba,pandas,scipy]",
- "datashader >=0.12",
- "hvplot >=0.7",
+viz = ["python-graphblas[networkx,scipy]", "matplotlib >=3.6"]
+datashade = [ # datashade requires numba
+ "python-graphblas[numba,pandas,scipy]",
+ "datashader >=0.14",
+ "hvplot >=0.8",
]
test = [
- "python-graphblas[suitesparse,pandas,scipy]",
- "packaging >=21",
- "pytest >=6.2",
- "tomli >=1",
+ "python-graphblas[suitesparse,pandas,scipy]",
+ "packaging >=21",
+ "pytest >=6.2",
+ "tomli >=1",
]
default = [
- "python-graphblas[suitesparse,pandas,scipy]",
- "python-graphblas[numba]; python_version<'3.12'", # make optional where numba is not supported
+ "python-graphblas[suitesparse,pandas,scipy]",
+ "python-graphblas[numba]; python_version<'3.14'", # make optional where numba is not supported
]
all = [
- "python-graphblas[default,io,viz,test]",
- "python-graphblas[datashade]; python_version<'3.12'", # make optional, b/c datashade needs numba
+ "python-graphblas[default,io,viz,test]",
+ "python-graphblas[datashade]; python_version<'3.14'", # make optional, b/c datashade needs numba
]
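The environment markers on these extras (e.g. `python_version<'3.14'`) are evaluated by the installer at install time, so the numba-dependent extras simply drop out on unsupported interpreters. They can be checked directly with `packaging`, which is already a test dependency above:

```python
from packaging.markers import Marker

marker = Marker("python_version<'3.14'")
print(marker.evaluate())  # True on interpreters where the numba extras apply
```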
[tool.setuptools]
@@ -132,21 +115,22 @@ all = [
# $ find graphblas/ -name __init__.py -print | sort | sed -e 's/\/__init__.py//g' -e 's/\//./g'
# $ python -c 'import tomli ; [print(x) for x in sorted(tomli.load(open("pyproject.toml", "rb"))["tool"]["setuptools"]["packages"])]'
packages = [
- "graphblas",
- "graphblas.agg",
- "graphblas.binary",
- "graphblas.core",
- "graphblas.core.operator",
- "graphblas.core.ss",
- "graphblas.indexunary",
- "graphblas.io",
- "graphblas.monoid",
- "graphblas.op",
- "graphblas.semiring",
- "graphblas.select",
- "graphblas.ss",
- "graphblas.tests",
- "graphblas.unary",
+ "graphblas",
+ "graphblas.agg",
+ "graphblas.binary",
+ "graphblas.core",
+ "graphblas.core.operator",
+ "graphblas.core.ss",
+ "graphblas.dtypes",
+ "graphblas.indexunary",
+ "graphblas.io",
+ "graphblas.monoid",
+ "graphblas.op",
+ "graphblas.semiring",
+ "graphblas.select",
+ "graphblas.ss",
+ "graphblas.tests",
+ "graphblas.unary",
]
[tool.setuptools-git-versioning]
@@ -156,7 +140,7 @@ dirty_template = "{tag}+{ccount}.g{sha}.dirty"
[tool.black]
line-length = 100
-target-version = ["py38", "py39", "py310", "py311"]
+target-version = ["py310", "py311", "py312", "py313"]
[tool.isort]
sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"]
@@ -168,39 +152,56 @@ known_first_party = "graphblas"
line_length = 100
[tool.pytest.ini_options]
+minversion = "6.0"
testpaths = "graphblas/tests"
-xfail_strict = true
-markers = [
- "slow: Skipped unless --runslow passed",
+xfail_strict = false # 2023-07-23: awkward and numpy 1.25 sometimes conflict
+addopts = [
+ "--strict-config", # Force error if config is mispelled
+ "--strict-markers", # Force error if marker is mispelled (must be defined in config)
+ "-ra", # Print summary of all fails/errors
]
+markers = ["slow: Skipped unless --runslow passed"]
+log_cli_level = "info"
filterwarnings = [
- # See: https://docs.python.org/3/library/warnings.html#describing-warning-filters
- # and: https://docs.pytest.org/en/7.2.x/how-to/capture-warnings.html#controlling-warnings
- "error",
- # sparse 0.14.0 (2022-02-24) began raising this warning; it has been reported and fixed upstream.
- "ignore:coords should be an ndarray. This will raise a ValueError:DeprecationWarning:sparse._coo.core",
-
- # setuptools v67.3.0 deprecated `pkg_resources.declare_namespace` on 13 Feb 2023. See:
- # https://setuptools.pypa.io/en/latest/history.html#v67-3-0
- # MAINT: check if this is still necessary in 2025
- "ignore:Deprecated call to `pkg_resources.declare_namespace:DeprecationWarning:pkg_resources",
- # And this deprecation warning was added in setuptools v67.5.0 (8 Mar 2023). See:
- # https://setuptools.pypa.io/en/latest/history.html#v67-5-0
- "ignore:pkg_resources is deprecated as an API:DeprecationWarning:pkg_resources",
-
- # sre_parse deprecated in 3.11; this is triggered by awkward 0.10
- "ignore:module 'sre_parse' is deprecated:DeprecationWarning:",
- "ignore:module 'sre_constants' is deprecated:DeprecationWarning:",
-
- # pypy gives this warning
- "ignore:can't resolve package from __spec__ or __package__:ImportWarning:",
+ # See: https://docs.python.org/3/library/warnings.html#describing-warning-filters
+ # and: https://docs.pytest.org/en/7.2.x/how-to/capture-warnings.html#controlling-warnings
+ "error",
+
+ # sparse 0.14.0 (2022-02-24) began raising this warning; it has been reported and fixed upstream.
+ "ignore:coords should be an ndarray. This will raise a ValueError:DeprecationWarning:sparse._coo.core",
+
+ # setuptools v67.3.0 deprecated `pkg_resources.declare_namespace` on 13 Feb 2023. See:
+ # https://setuptools.pypa.io/en/latest/history.html#v67-3-0
+ # MAINT: check if this is still necessary in 2025
+ "ignore:Deprecated call to `pkg_resources.declare_namespace:DeprecationWarning:pkg_resources",
+
+ # This deprecation warning was added in setuptools v67.5.0 (8 Mar 2023). See:
+ # https://setuptools.pypa.io/en/latest/history.html#v67-5-0
+ "ignore:pkg_resources is deprecated as an API:DeprecationWarning:",
+
+ # sre_parse deprecated in 3.11; this is triggered by awkward 0.10
+ "ignore:module 'sre_parse' is deprecated:DeprecationWarning:",
+ "ignore:module 'sre_constants' is deprecated:DeprecationWarning:",
+
+ # numpy 1.25.0 (2023-06-17) deprecated `np.find_common_type`; many other dependencies use it.
+ # See if we can remove this filter in 2025.
+ "ignore:np.find_common_type is deprecated:DeprecationWarning:",
+
+ # pypy gives this warning
+ "ignore:can't resolve package from __spec__ or __package__:ImportWarning:",
+
+ # Python 3.12 introduced this deprecation, which is triggered by pandas 2.1.1
+ "ignore:datetime.datetime.utcfromtimestamp:DeprecationWarning:dateutil",
+
+ # Pandas 2.2 warns that pyarrow will become a required dependency in pandas 3.0
+ "ignore:\\nPyarrow will become a required dependency of pandas:DeprecationWarning:",
]
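Each entry uses pytest's `action:message:category:module` filter syntax, which maps directly onto `warnings.filterwarnings`. For example, the first two entries above are equivalent to:

```python
import warnings

# "error" escalates every warning to an exception...
warnings.filterwarnings("error")
# ...and this exempts one known upstream warning by message prefix,
# category, and emitting module, just like the TOML entry.
warnings.filterwarnings(
    "ignore",
    message="coords should be an ndarray. This will raise a ValueError",
    category=DeprecationWarning,
    module="sparse._coo.core",
)
```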
[tool.coverage.run]
branch = true
source = ["graphblas"]
omit = [
- "graphblas/viz.py", # TODO: test and get coverage for viz.py
+ "graphblas/viz.py", # TODO: test and get coverage for viz.py
]
[tool.coverage.report]
@@ -210,9 +211,9 @@ fail_under = 0
skip_covered = true
skip_empty = true
exclude_lines = [
- "pragma: no cover",
- "raise AssertionError",
- "raise NotImplementedError",
+ "pragma: no cover",
+ "raise AssertionError",
+ "raise NotImplementedError",
]
[tool.codespell]
@@ -221,231 +222,278 @@ ignore-words-list = "coo,ba"
[tool.ruff]
# https://github.com/charliermarsh/ruff/
line-length = 100
-target-version = "py38"
+target-version = "py310"
+
+[tool.ruff.format]
+exclude = ["*.ipynb"] # Consider enabling auto-formatting of notebooks
+
+[tool.ruff.lint]
+exclude = ["*.ipynb"] # Consider enabling auto-formatting of notebooks
+unfixable = [
+ "F841", # unused-variable (Note: can leave useless expression)
+ "B905", # zip-without-explicit-strict (Note: prefer `zip(x, y, strict=True)`)
+]
select = [
- # Have we enabled too many checks that they'll become a nuisance? We'll see...
- "F", # pyflakes
- "E", # pycodestyle Error
- "W", # pycodestyle Warning
- # "C90", # mccabe (Too strict, but maybe we should make things less complex)
- # "I", # isort (Should we replace `isort` with this?)
- "N", # pep8-naming
- "D", # pydocstyle
- "UP", # pyupgrade
- "YTT", # flake8-2020
- # "ANN", # flake8-annotations (We don't use annotations yet)
- "S", # bandit
- # "BLE", # flake8-blind-except (Maybe consider)
- # "FBT", # flake8-boolean-trap (Why?)
- "B", # flake8-bugbear
- "A", # flake8-builtins
- "COM", # flake8-commas
- "C4", # flake8-comprehensions
- "DTZ", # flake8-datetimez
- "T10", # flake8-debugger
- # "DJ", # flake8-django (We don't use django)
- # "EM", # flake8-errmsg (Perhaps nicer, but too much work)
- "EXE", # flake8-executable
- "ISC", # flake8-implicit-str-concat
- # "ICN", # flake8-import-conventions (Doesn't allow "_" prefix such as `_np`)
- "G", # flake8-logging-format
- "INP", # flake8-no-pep420
- "PIE", # flake8-pie
- "T20", # flake8-print
- # "PYI", # flake8-pyi (We don't have stub files yet)
- "PT", # flake8-pytest-style
- "Q", # flake8-quotes
- "RSE", # flake8-raise
- "RET", # flake8-return
- # "SLF", # flake8-self (We can use our own private variables--sheesh!)
- "SIM", # flake8-simplify
- # "TID", # flake8-tidy-imports (Rely on isort and our own judgement)
- # "TCH", # flake8-type-checking (Note: figure out type checking later)
- # "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict)
- "PTH", # flake8-use-pathlib (Often better, but not always)
- # "ERA", # eradicate (We like code in comments!)
- # "PD", # pandas-vet (Intended for scripts that use pandas, not libraries)
- "PGH", # pygrep-hooks
- "PL", # pylint
- "PLC", # pylint Convention
- "PLE", # pylint Error
- "PLR", # pylint Refactor
- "PLW", # pylint Warning
- "TRY", # tryceratops
- "NPY", # NumPy-specific rules
- "RUF", # ruff-specific rules
- "ALL", # Try new categories by default (making the above list unnecessary)
+    # Have we enabled so many checks that they'll become a nuisance? We'll see...
+ "F", # pyflakes
+ "E", # pycodestyle Error
+ "W", # pycodestyle Warning
+ # "C90", # mccabe (Too strict, but maybe we should make things less complex)
+ # "I", # isort (Should we replace `isort` with this?)
+ "N", # pep8-naming
+ "D", # pydocstyle
+ "UP", # pyupgrade
+ "YTT", # flake8-2020
+ # "ANN", # flake8-annotations (We don't use annotations yet)
+ "S", # bandit
+ # "BLE", # flake8-blind-except (Maybe consider)
+ # "FBT", # flake8-boolean-trap (Why?)
+ "B", # flake8-bugbear
+ "A", # flake8-builtins
+ "COM", # flake8-commas
+ "C4", # flake8-comprehensions
+ "DTZ", # flake8-datetimez
+ "T10", # flake8-debugger
+ # "DJ", # flake8-django (We don't use django)
+ # "EM", # flake8-errmsg (Perhaps nicer, but too much work)
+ "EXE", # flake8-executable
+ "ISC", # flake8-implicit-str-concat
+ # "ICN", # flake8-import-conventions (Doesn't allow "_" prefix such as `_np`)
+ "G", # flake8-logging-format
+ "INP", # flake8-no-pep420
+ "PIE", # flake8-pie
+ "T20", # flake8-print
+ # "PYI", # flake8-pyi (We don't have stub files yet)
+ "PT", # flake8-pytest-style
+ "Q", # flake8-quotes
+ "RSE", # flake8-raise
+ "RET", # flake8-return
+ # "SLF", # flake8-self (We can use our own private variables--sheesh!)
+ "SIM", # flake8-simplify
+ # "TID", # flake8-tidy-imports (Rely on isort and our own judgement)
+ # "TCH", # flake8-type-checking (Note: figure out type checking later)
+ # "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict)
+ "PTH", # flake8-use-pathlib (Often better, but not always)
+ # "ERA", # eradicate (We like code in comments!)
+ # "PD", # pandas-vet (Intended for scripts that use pandas, not libraries)
+ "PGH", # pygrep-hooks
+ "PL", # pylint
+ "PLC", # pylint Convention
+ "PLE", # pylint Error
+ "PLR", # pylint Refactor
+ "PLW", # pylint Warning
+ "TRY", # tryceratops
+ "NPY", # NumPy-specific rules
+ "RUF", # ruff-specific rules
+ "ALL", # Try new categories by default (making the above list unnecessary)
]
external = [
- # noqa codes that ruff doesn't know about: https://github.com/charliermarsh/ruff#external
+ # noqa codes that ruff doesn't know about: https://github.com/charliermarsh/ruff#external
+ "F811",
]
ignore = [
- # Would be nice to fix these
- "D100", # Missing docstring in public module
- "D101", # Missing docstring in public class
- "D102", # Missing docstring in public method
- "D103", # Missing docstring in public function
- "D104", # Missing docstring in public package
- "D105", # Missing docstring in magic method
- # "D107", # Missing docstring in `__init__`
- "D205", # 1 blank line required between summary line and description
- "D401", # First line of docstring should be in imperative mood:
- # "D417", # Missing argument description in the docstring:
- "PLE0605", # Invalid format for `__all__`, must be `tuple` or `list` (Note: broken in v0.0.237)
-
- # Maybe consider
- # "SIM300", # Yoda conditions are discouraged, use ... instead (Note: we're not this picky)
- # "SIM401", # Use dict.get ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer)
- "TRY004", # Prefer `TypeError` exception for invalid type (Note: good advice, but not worth the nuisance)
- "TRY200", # Use `raise from` to specify exception cause (Note: sometimes okay to raise original exception)
-
- # Intentionally ignored
- "COM812", # Trailing comma missing
- "D203", # 1 blank line required before class docstring (Note: conflicts with D211, which is preferred)
- "D400", # First line should end with a period (Note: prefer D415, which also allows "?" and "!")
- "N801", # Class name ... should use CapWords convention (Note:we have a few exceptions to this)
- "N802", # Function name ... should be lowercase
- "N803", # Argument name ... should be lowercase (Maybe okay--except in tests)
- "N806", # Variable ... in function should be lowercase
- "N807", # Function name should not start and end with `__`
- "N818", # Exception name ... should be named with an Error suffix (Note: good advice)
- "PLR0911", # Too many return statements
- "PLR0912", # Too many branches
- "PLR0913", # Too many arguments to function call
- "PLR0915", # Too many statements
- "PLR2004", # Magic number used in comparison, consider replacing magic with a constant variable
- "PLW0603", # Using the global statement to update ... is discouraged (Note: yeah, discouraged, but too strict)
- "PLW2901", # Outer for loop variable ... overwritten by inner assignment target (Note: good advice, but too strict)
- "RET502", # Do not implicitly `return None` in function able to return non-`None` value
- "RET503", # Missing explicit `return` at the end of function able to return non-`None` value
- "RET504", # Unnecessary variable assignment before `return` statement
- "S110", # `try`-`except`-`pass` detected, consider logging the exception (Note: good advice, but we don't log)
- "S112", # `try`-`except`-`continue` detected, consider logging the exception (Note: good advice, but we don't log)
- "S603", # `subprocess` call: check for execution of untrusted input (Note: not important for us)
- "S607", # Starting a process with a partial executable path (Note: not important for us)
- "SIM102", # Use a single `if` statement instead of nested `if` statements (Note: often necessary)
- "SIM105", # Use contextlib.suppress(...) instead of try-except-pass (Note: try-except-pass is much faster)
- "SIM108", # Use ternary operator ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer)
- "TRY003", # Avoid specifying long messages outside the exception class (Note: why?)
-
- # Ignored categories
- "C90", # mccabe (Too strict, but maybe we should make things less complex)
- "I", # isort (Should we replace `isort` with this?)
- "ANN", # flake8-annotations (We don't use annotations yet)
- "BLE", # flake8-blind-except (Maybe consider)
- "FBT", # flake8-boolean-trap (Why?)
- "DJ", # flake8-django (We don't use django)
- "EM", # flake8-errmsg (Perhaps nicer, but too much work)
- "ICN", # flake8-import-conventions (Doesn't allow "_" prefix such as `_np`)
- "PYI", # flake8-pyi (We don't have stub files yet)
- "SLF", # flake8-self (We can use our own private variables--sheesh!)
- "TID", # flake8-tidy-imports (Rely on isort and our own judgement)
- "TCH", # flake8-type-checking (Note: figure out type checking later)
- "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict)
- "ERA", # eradicate (We like code in comments!)
- "PD", # pandas-vet (Intended for scripts that use pandas, not libraries)
+ # Would be nice to fix these
+ "D100", # Missing docstring in public module
+ "D101", # Missing docstring in public class
+ "D102", # Missing docstring in public method
+ "D103", # Missing docstring in public function
+ "D104", # Missing docstring in public package
+ "D105", # Missing docstring in magic method
+ "D107", # Missing docstring in `__init__`
+ "D205", # 1 blank line required between summary line and description
+ "D401", # First line of docstring should be in imperative mood:
+ "D417", # D417 Missing argument description in the docstring for ...: ...
+ "PLE0605", # Invalid format for `__all__`, must be `tuple` or `list` (Note: broken in v0.0.237)
+
+ # Maybe consider
+ # "SIM300", # Yoda conditions are discouraged, use ... instead (Note: we're not this picky)
+ # "SIM401", # Use dict.get ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer)
+ "B904", # Use `raise from` to specify exception cause (Note: sometimes okay to raise original exception)
+ "TRY004", # Prefer `TypeError` exception for invalid type (Note: good advice, but not worth the nuisance)
+ "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` (Note: no annotations yet)
+ "RUF021", # parenthesize-chained-operators (Note: results don't look good yet)
+ "RUF023", # unsorted-dunder-slots (Note: maybe fine, but noisy changes)
+ "PERF401", # Use a list comprehension to create a transformed list (Note: poorly implemented atm)
+
+ # Intentionally ignored
+ "COM812", # Trailing comma missing
+ "D203", # 1 blank line required before class docstring (Note: conflicts with D211, which is preferred)
+ "D213", # (Note: conflicts with D212, which is preferred)
+ "D400", # First line should end with a period (Note: prefer D415, which also allows "?" and "!")
+ "N801", # Class name ... should use CapWords convention (Note:we have a few exceptions to this)
+ "N802", # Function name ... should be lowercase
+ "N803", # Argument name ... should be lowercase (Maybe okay--except in tests)
+ "N806", # Variable ... in function should be lowercase
+ "N807", # Function name should not start and end with `__`
+ "N818", # Exception name ... should be named with an Error suffix (Note: good advice)
+ "PERF203", # `try`-`except` within a loop incurs performance overhead (Note: too strict)
+ "PLC0205", # Class `__slots__` should be a non-string iterable (Note: string is fine)
+ "PLR0124", # Name compared with itself, consider replacing `x == x` (Note: too strict)
+ "PLR0911", # Too many return statements
+ "PLR0912", # Too many branches
+ "PLR0913", # Too many arguments to function call
+ "PLR0915", # Too many statements
+ "PLR2004", # Magic number used in comparison, consider replacing magic with a constant variable
+ "PLW0603", # Using the global statement to update ... is discouraged (Note: yeah, discouraged, but too strict)
+ "PLW0642", # Reassigned `self` variable in instance method (Note: too strict for us)
+ "PLW2901", # Outer for loop variable ... overwritten by inner assignment target (Note: good advice, but too strict)
+ "RET502", # Do not implicitly `return None` in function able to return non-`None` value
+ "RET503", # Missing explicit `return` at the end of function able to return non-`None` value
+ "RET504", # Unnecessary variable assignment before `return` statement
+ "S110", # `try`-`except`-`pass` detected, consider logging the exception (Note: good advice, but we don't log)
+ "S112", # `try`-`except`-`continue` detected, consider logging the exception (Note: good advice, but we don't log)
+ "S603", # `subprocess` call: check for execution of untrusted input (Note: not important for us)
+ "S607", # Starting a process with a partial executable path (Note: not important for us)
+ "SIM102", # Use a single `if` statement instead of nested `if` statements (Note: often necessary)
+ "SIM105", # Use contextlib.suppress(...) instead of try-except-pass (Note: try-except-pass is much faster)
+ "SIM108", # Use ternary operator ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer)
+ "TRY003", # Avoid specifying long messages outside the exception class (Note: why?)
+ "UP038", # Use `X | Y` in `isinstance` call instead of `(X, Y)` (Note: using `|` is slower atm)
+
+ # Ignored categories
+ "C90", # mccabe (Too strict, but maybe we should make things less complex)
+ "I", # isort (Should we replace `isort` with this?)
+ "ANN", # flake8-annotations (We don't use annotations yet)
+ "BLE", # flake8-blind-except (Maybe consider)
+ "FBT", # flake8-boolean-trap (Why?)
+ "DJ", # flake8-django (We don't use django)
+ "EM", # flake8-errmsg (Perhaps nicer, but too much work)
+ "ICN", # flake8-import-conventions (Doesn't allow "_" prefix such as `_np`)
+ "PYI", # flake8-pyi (We don't have stub files yet)
+ "SLF", # flake8-self (We can use our own private variables--sheesh!)
+ "TID", # flake8-tidy-imports (Rely on isort and our own judgement)
+ "TCH", # flake8-type-checking (Note: figure out type checking later)
+ "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict)
+ "TD", # flake8-todos (Maybe okay to add some of these)
+ "FIX", # flake8-fixme (like flake8-todos)
+ "ERA", # eradicate (We like code in comments!)
+ "PD", # pandas-vet (Intended for scripts that use pandas, not libraries)
]
-[tool.ruff.per-file-ignores]
-"graphblas/core/agg.py" = ["F401", "F403"] # Deprecated
-"graphblas/core/operator/base.py" = ["S102"] # exec is used for UDF
-"graphblas/core/ss/matrix.py" = ["NPY002"] # numba doesn't support rng generator yet
-"graphblas/core/ss/vector.py" = ["NPY002"] # numba doesn't support rng generator yet
-"graphblas/core/utils.py" = ["PLE0302"] # `__set__` is used as a property
-"graphblas/ss/_core.py" = ["N999"] # We want _core.py to be underscopre
+[tool.ruff.lint.per-file-ignores]
+"graphblas/core/operator/__init__.py" = ["A005"]
+"graphblas/io/__init__.py" = ["A005"] # shadows a standard-library module
+"graphblas/core/operator/base.py" = ["S102"] # exec is used for UDF
+"graphblas/core/ss/matrix.py" = [
+ "NPY002", # numba doesn't support rng generator yet
+ "PLR1730",
+]
+"graphblas/core/ss/vector.py" = [
+ "NPY002", # numba doesn't support rng generator yet
+]
+"graphblas/core/utils.py" = ["PLE0302"] # `__set__` is used as a property
+"graphblas/ss/_core.py" = ["N999"] # We want _core.py to be underscopre
# Allow useless expressions, assert, pickle, RNG, print, no docstring, and yoda in tests
-"graphblas/tests/*py" = ["B018", "S101", "S301", "S311", "T201", "D103", "D100", "SIM300"]
-"graphblas/tests/test_formatting.py" = ["E501"] # Allow long lines
-"graphblas/**/__init__.py" = ["F401"] # Allow unused imports (w/o defining `__all__`)
-"scripts/*.py" = ["INP001"] # Not a package
-"scripts/create_pickle.py" = ["F403", "F405"] # Allow `from foo import *`
-"docs/*.py" = ["INP001"] # Not a package
+"graphblas/tests/*py" = [
+ "B018",
+ "S101",
+ "S301",
+ "S311",
+ "T201",
+ "D103",
+ "D100",
+ "SIM300",
+]
+"graphblas/tests/test_formatting.py" = ["E501"] # Allow long lines
+"graphblas/**/__init__.py" = [
+ "F401", # Allow unused imports (w/o defining `__all__`)
+]
+"scripts/*.py" = ["INP001"] # Not a package
+"scripts/create_pickle.py" = ["F403", "F405"] # Allow `from foo import *`
+"docs/*.py" = ["INP001"] # Not a package
-[tool.ruff.flake8-builtins]
+[tool.ruff.lint.flake8-builtins]
builtins-ignorelist = ["copyright", "format", "min", "max"]
+builtins-allowed-modules = ["select"]
-[tool.ruff.flake8-pytest-style]
+[tool.ruff.lint.flake8-pytest-style]
fixture-parentheses = false
mark-parentheses = false
-[tool.ruff.pydocstyle]
+[tool.ruff.lint.pydocstyle]
convention = "numpy"
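With `convention = "numpy"`, the pydocstyle checks expect numpydoc-style sections. A minimal docstring in that shape:

```python
def clamp(x, lo, hi):
    """Clamp a value to the inclusive range [lo, hi].

    Parameters
    ----------
    x : float
        Value to clamp.
    lo, hi : float
        Lower and upper bounds.

    Returns
    -------
    float
        ``min(max(x, lo), hi)``.
    """
    return min(max(x, lo), hi)
```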
+[tool.bandit]
+exclude_dirs = ["graphblas/tests", "scripts"]
+skips = [
+ "B110", # Try, Except, Pass detected. (Note: it would be nice to not have this pattern)
+]
+
[tool.pylint.messages_control]
# To run a single check, do: pylint graphblas --disable E,W,R,C,I --enable assignment-from-no-return
max-line-length = 100
-py-version = "3.8"
+py-version = "3.10"
enable = ["I"]
disable = [
- # Error
- "assignment-from-no-return",
-
- # Warning
- "arguments-differ",
- "arguments-out-of-order",
- "expression-not-assigned",
- "fixme",
- "global-statement",
- "non-parent-init-called",
- "redefined-builtin",
- "redefined-outer-name",
- "super-init-not-called",
- "unbalanced-tuple-unpacking",
- "unnecessary-lambda",
- "unspecified-encoding",
- "unused-argument",
- "unused-variable",
-
- # Refactor
- "cyclic-import",
- "duplicate-code",
- "inconsistent-return-statements",
- "too-few-public-methods",
-
- # Convention
- "missing-class-docstring",
- "missing-function-docstring",
- "missing-module-docstring",
- "too-many-lines",
-
- # Intentionally turned off
- # error
- "class-variable-slots-conflict",
- "invalid-unary-operand-type",
- "no-member",
- "no-name-in-module",
- "not-an-iterable",
- "too-many-function-args",
- "unexpected-keyword-arg",
- # warning
- "broad-except",
- "pointless-statement",
- "protected-access",
- "undefined-loop-variable",
- "unused-import",
- # refactor
- "comparison-with-itself",
- "too-many-arguments",
- "too-many-boolean-expressions",
- "too-many-branches",
- "too-many-instance-attributes",
- "too-many-locals",
- "too-many-nested-blocks",
- "too-many-public-methods",
- "too-many-return-statements",
- "too-many-statements",
- # convention
- "import-outside-toplevel",
- "invalid-name",
- "line-too-long",
- "singleton-comparison",
- "single-string-used-for-slots",
- "unidiomatic-typecheck",
- "unnecessary-dunder-call",
- "wrong-import-order",
- "wrong-import-position",
- # informative
- "locally-disabled",
- "suppressed-message",
+ # Error
+ "assignment-from-no-return",
+
+ # Warning
+ "arguments-differ",
+ "arguments-out-of-order",
+ "expression-not-assigned",
+ "fixme",
+ "global-statement",
+ "non-parent-init-called",
+ "redefined-builtin",
+ "redefined-outer-name",
+ "super-init-not-called",
+ "unbalanced-tuple-unpacking",
+ "unnecessary-lambda",
+ "unspecified-encoding",
+ "unused-argument",
+ "unused-variable",
+
+ # Refactor
+ "cyclic-import",
+ "duplicate-code",
+ "inconsistent-return-statements",
+ "too-few-public-methods",
+
+ # Convention
+ "missing-class-docstring",
+ "missing-function-docstring",
+ "missing-module-docstring",
+ "too-many-lines",
+
+ # Intentionally turned off
+ # error
+ "class-variable-slots-conflict",
+ "invalid-unary-operand-type",
+ "no-member",
+ "no-name-in-module",
+ "not-an-iterable",
+ "too-many-function-args",
+ "unexpected-keyword-arg",
+ # warning
+ "broad-except",
+ "pointless-statement",
+ "protected-access",
+ "undefined-loop-variable",
+ "unused-import",
+ # refactor
+ "comparison-with-itself",
+ "too-many-arguments",
+ "too-many-boolean-expressions",
+ "too-many-branches",
+ "too-many-instance-attributes",
+ "too-many-locals",
+ "too-many-nested-blocks",
+ "too-many-public-methods",
+ "too-many-return-statements",
+ "too-many-statements",
+ # convention
+ "import-outside-toplevel",
+ "invalid-name",
+ "line-too-long",
+ "singleton-comparison",
+ "single-string-used-for-slots",
+ "unidiomatic-typecheck",
+ "unnecessary-dunder-call",
+ "wrong-import-order",
+ "wrong-import-position",
+ # informative
+ "locally-disabled",
+ "suppressed-message",
]
diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh
index 3809eb805..5aa88e045 100755
--- a/scripts/check_versions.sh
+++ b/scripts/check_versions.sh
@@ -3,15 +3,15 @@
# Use, adjust, copy/paste, etc. as necessary to answer your questions.
# This may be helpful when updating dependency versions in CI.
# Tip: add `--json` for more information.
-conda search 'numpy[channel=conda-forge]>=1.24.3'
-conda search 'pandas[channel=conda-forge]>=2.0.1'
-conda search 'scipy[channel=conda-forge]>=1.10.1'
-conda search 'networkx[channel=conda-forge]>=3.1'
-conda search 'awkward[channel=conda-forge]>=2.1.4'
-conda search 'sparse[channel=conda-forge]>=0.14.0'
-conda search 'fast_matrix_market[channel=conda-forge]>=1.5.1'
-conda search 'numba[channel=conda-forge]>=0.56.4'
-conda search 'pyyaml[channel=conda-forge]>=6.0'
-conda search 'flake8-bugbear[channel=conda-forge]>=23.3.23'
-conda search 'flake8-simplify[channel=conda-forge]>=0.20.0'
-# conda search 'python[channel=conda-forge]>=3.8 *pypy*'
+conda search 'flake8-bugbear[channel=conda-forge]>=24.12.12'
+conda search 'flake8-simplify[channel=conda-forge]>=0.21.0'
+conda search 'numpy[channel=conda-forge]>=2.2.3'
+conda search 'pandas[channel=conda-forge]>=2.2.3'
+conda search 'scipy[channel=conda-forge]>=1.15.2'
+conda search 'networkx[channel=conda-forge]>=3.4.2'
+conda search 'awkward[channel=conda-forge]>=2.7.4'
+conda search 'sparse[channel=conda-forge]>=0.15.5'
+conda search 'fast_matrix_market[channel=conda-forge]>=1.7.6'
+conda search 'numba[channel=conda-forge]>=0.61.0'
+conda search 'pyyaml[channel=conda-forge]>=6.0.2'
+# conda search 'python[channel=conda-forge]>=3.10 *pypy*'
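The same floor-version questions can be asked of the local environment from Python; a sketch using `importlib.metadata` and `packaging` (the minimums below mirror the conda searches above and are illustrative):

```python
from importlib.metadata import version

from packaging.version import Version

# Compare installed versions against the floors probed by check_versions.sh
minimums = {"numpy": "2.2.3", "pandas": "2.2.3", "scipy": "1.15.2", "PyYAML": "6.0.2"}
for name, floor in minimums.items():
    installed = Version(version(name))
    status = "ok" if installed >= Version(floor) else "outdated"
    print(f"{name}: {installed} ({status})")
```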
diff --git a/scripts/test_imports.sh b/scripts/test_imports.sh
index cc989ef06..6ce88c83e 100755
--- a/scripts/test_imports.sh
+++ b/scripts/test_imports.sh
@@ -13,7 +13,7 @@ if ! python -c "from graphblas.select import tril" ; then exit 1 ; fi
if ! python -c "from graphblas.semiring import plus_times" ; then exit 1 ; fi
if ! python -c "from graphblas.unary import exp" ; then exit 1 ; fi
if ! (for attr in Matrix Scalar Vector Recorder agg binary dtypes exceptions \
- init io monoid op select semiring tests unary ss viz
+ init io monoid op select semiring tests unary ss viz MAX_SIZE
do echo python -c \"from graphblas import $attr\"
if ! python -c "from graphblas import $attr"
then exit 1