From 0eeebfcb3bdfad446899e8f01edeaef5b92c27a1 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 3 May 2023 13:03:45 -0500 Subject: [PATCH 01/66] Use external link for images in README (to show up on PyPI) (#450) --- MANIFEST.in | 3 --- README.md | 6 +++--- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index e2ff9c410..bdba30a31 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -7,6 +7,3 @@ include LICENSE include MANIFEST.in include graphblas/graphblas.yaml include graphblas/tests/pickle*.pkl -include docs/_static/img/logo-name-medium.svg -include docs/_static/img/draw-example.png -include docs/_static/img/repr-matrix.png diff --git a/README.md b/README.md index 083483fe2..570a82de5 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -![Python-graphblas](docs/_static/img/logo-name-medium.svg) +![Python-graphblas](https://raw.githubusercontent.com/python-graphblas/python-graphblas/main/docs/_static/img/logo-name-medium.svg) [![conda-forge](https://img.shields.io/conda/vn/conda-forge/python-graphblas.svg)](https://anaconda.org/conda-forge/python-graphblas) [![pypi](https://img.shields.io/pypi/v/python-graphblas.svg)](https://pypi.python.org/pypi/python-graphblas/) @@ -28,8 +28,8 @@ For algorithms, see - **Chat via Discord:** [https://discord.com/invite/vur45CbwMz](https://discord.com/invite/vur45CbwMz) in the [#graphblas channel](https://discord.com/channels/786703927705862175/1024732940233605190)

-<img src="docs/_static/img/draw-example.png" alt="Directed graph"/>
-<img src="docs/_static/img/repr-matrix.png" alt="Adjacency matrix"/>
+<img src="https://raw.githubusercontent.com/python-graphblas/python-graphblas/main/docs/_static/img/draw-example.png" alt="Directed graph"/>
+<img src="https://raw.githubusercontent.com/python-graphblas/python-graphblas/main/docs/_static/img/repr-matrix.png" alt="Adjacency matrix"/>

## Install From f140f9c39dc0ddacd099cd0f5029b9c623795fa9 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sat, 6 May 2023 09:00:02 -0500 Subject: [PATCH 02/66] include conftest.py in MANIFEST.in (#451) * include conftest.py in MANIFEST.in * Run slow tests when running via `pytest --pyargs graphblas` * bump ruff (also, test pre-commit.ci) * pre-commit.ci works fine, let's only use it for linting --- .github/workflows/lint.yml | 10 ++++++---- .pre-commit-config.yaml | 4 ++-- MANIFEST.in | 1 + graphblas/tests/conftest.py | 17 +++++++++++------ graphblas/tests/test_core.py | 5 ++++- graphblas/tests/test_op.py | 2 +- graphblas/tests/test_vector.py | 4 ++-- 7 files changed, 27 insertions(+), 16 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 5ef2b1033..81d9415ad 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,10 +1,12 @@ +# Rely on pre-commit.ci instead name: Lint via pre-commit on: - pull_request: - push: - branches-ignore: - - main + workflow_dispatch: + # pull_request: + # push: + # branches-ignore: + # - main permissions: contents: read diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8f4fac317..d995f4253 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -58,7 +58,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.264 + rev: v0.0.265 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -86,7 +86,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.264 + rev: v0.0.265 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/MANIFEST.in b/MANIFEST.in index bdba30a31..27cd3f0c4 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,6 +2,7 @@ recursive-include graphblas *.py prune docs prune scripts include setup.py +include conftest.py include README.md include LICENSE include MANIFEST.in diff --git a/graphblas/tests/conftest.py b/graphblas/tests/conftest.py index a4df5d336..0d1f4008a 100644 --- a/graphblas/tests/conftest.py +++ b/graphblas/tests/conftest.py @@ -18,26 +18,31 @@ def pytest_configure(config): rng = np.random.default_rng() - randomly = config.getoption("--randomly", False) + randomly = config.getoption("--randomly", None) + if randomly is None: # pragma: no cover + options_unavailable = True + randomly = True + config.addinivalue_line("markers", "slow: Skipped unless --runslow passed") + else: + options_unavailable = False backend = config.getoption("--backend", None) if backend is None: if randomly: backend = "suitesparse" if rng.random() < 0.5 else "suitesparse-vanilla" else: backend = "suitesparse" - blocking = config.getoption("--blocking", True) + blocking = config.getoption("--blocking", None) if blocking is None: # pragma: no branch blocking = rng.random() < 0.5 if randomly else True record = config.getoption("--record", False) if record is None: # pragma: no branch record = rng.random() < 0.5 if randomly else False - mapnumpy = config.getoption("--mapnumpy", False) + mapnumpy = config.getoption("--mapnumpy", None) if mapnumpy is None: mapnumpy = rng.random() < 0.5 if randomly else False - runslow = config.getoption("--runslow", False) + runslow = config.getoption("--runslow", None) if runslow is None: - # Add a small amount of randomization to be safer - runslow = rng.random() < 0.05 if randomly else False + runslow = options_unavailable config.runslow = runslow gb.config.set(autocompute=False, 
mapnumpy=mapnumpy) diff --git a/graphblas/tests/test_core.py b/graphblas/tests/test_core.py index ae2051145..003affc6c 100644 --- a/graphblas/tests/test_core.py +++ b/graphblas/tests/test_core.py @@ -83,7 +83,10 @@ def test_packages(): if not pyproject.exists(): # pragma: no cover (safety) pytest.skip("Did not find pyproject.toml") with pyproject.open("rb") as f: - pkgs2 = sorted(tomli.load(f)["tool"]["setuptools"]["packages"]) + cfg = tomli.load(f) + if cfg.get("project", {}).get("name") != "python-graphblas": # pragma: no cover (safety) + pytest.skip("Did not find correct pyproject.toml") + pkgs2 = sorted(cfg["tool"]["setuptools"]["packages"]) assert ( pkgs == pkgs2 ), "If there are extra items on the left, add them to pyproject.toml:tool.setuptools.packages" diff --git a/graphblas/tests/test_op.py b/graphblas/tests/test_op.py index c9a176afd..a80012ab7 100644 --- a/graphblas/tests/test_op.py +++ b/graphblas/tests/test_op.py @@ -225,7 +225,7 @@ def plus_one(x): UnaryOp.register_new("bad", object()) assert not hasattr(unary, "bad") with pytest.raises(UdfParseError, match="Unable to parse function using Numba"): - UnaryOp.register_new("bad", lambda x: v) + UnaryOp.register_new("bad", lambda x: v) # pragma: no branch (numba) @pytest.mark.skipif("not supports_udfs") diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index ab019b734..bd2083fd1 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -858,7 +858,7 @@ def inner(x, idx, _, thunk): # pragma: no cover (numba) delattr(indexunary, "iin") delattr(select, "iin") with pytest.raises(UdfParseError, match="Unable to parse function using Numba"): - indexunary.register_new("bad", lambda x, row, col, thunk: result) + indexunary.register_new("bad", lambda x, row, col, thunk: result) # pragma: no branch def test_reduce(v): @@ -2425,7 +2425,7 @@ def test_lambda_udfs(v): # with pytest.raises(TypeError): v.ewise_add(v, lambda x, y: x + y) # pragma: no branch (numba) with pytest.raises(TypeError): - v.inner(v, lambda x, y: x + y) + v.inner(v, lambda x, y: x + y) # pragma: no branch (numba) def test_get(v): From b19836200d60727fafc0e257ae14079ef80f8549 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 May 2023 16:46:29 -0500 Subject: [PATCH 03/66] Bump pypa/gh-action-pypi-publish from 1.8.5 to 1.8.6 (#452) Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.8.5 to 1.8.6. - [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases) - [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/v1.8.5...v1.8.6) --- .github/workflows/publish_pypi.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index abf3fefa6..eca456c28 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -35,7 +35,7 @@ jobs: - name: Check with twine run: python -m twine check --strict dist/* - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@v1.8.5 + uses: pypa/gh-action-pypi-publish@v1.8.6 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} From 4c166531f8e34c25f59205e1c8aca66c93e4c5be Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 14 May 2023 11:24:10 -0500 Subject: [PATCH 04/66] Use double (not single) backtick in docstrings (#454) * Use double (not single) backtick in docstrings * That's annoying; why are scipy tests missing?! 
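In reStructuredText, which Sphinx reads from these docstrings, double backticks produce an inline code literal, while single backticks mark "interpreted text" whose rendering depends on the configured default role (a cross-reference lookup under ``default_role = "any"``, a title reference by default), hence the sweeping replacement below. A minimal docstring sketch (the function itself is hypothetical):

```python
def clamp(x, lo, hi):
    """Clamp ``x`` to the closed interval from ``lo`` to ``hi``.

    Sphinx renders ``x`` (double backticks) as an inline literal.
    With single backticks, `x` is interpreted text: under the common
    ``default_role = "any"`` setting it triggers a cross-reference
    lookup, which is rarely what a docstring author intends.
    """
    return max(lo, min(hi, x))
```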
--- .github/workflows/test_and_build.yml | 8 +- .pre-commit-config.yaml | 8 +- environment.yml | 2 + graphblas/agg/__init__.py | 8 +- graphblas/core/agg.py | 4 +- graphblas/core/automethods.py | 2 +- graphblas/core/mask.py | 10 +- graphblas/core/matrix.py | 30 ++--- graphblas/core/operator/base.py | 2 +- graphblas/core/recorder.py | 2 +- graphblas/core/scalar.py | 2 +- graphblas/core/ss/descriptor.py | 2 +- graphblas/core/ss/matrix.py | 164 +++++++++++++-------------- graphblas/core/ss/vector.py | 72 ++++++------ graphblas/core/utils.py | 26 +++-- graphblas/core/vector.py | 18 +-- graphblas/io/_numpy.py | 12 +- graphblas/select/__init__.py | 12 +- graphblas/ss/_core.py | 10 +- graphblas/tests/test_io.py | 6 +- graphblas/viz.py | 6 +- scripts/check_versions.sh | 4 +- 22 files changed, 209 insertions(+), 201 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index c20530fbe..064dd93d8 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -171,22 +171,22 @@ jobs: npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') else # Python 3.11 npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", ""]))') fi if [[ ${{ steps.sourcetype.outputs.selected }} == "source" || ${{ steps.sourcetype.outputs.selected }} == "upstream" ]]; then # TODO: there are currently issues with some numpy versions when diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d995f4253..10fcca649 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -43,7 +43,7 @@ repos: - id: 
isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.3.2 + rev: v3.4.0 hooks: - id: pyupgrade args: [--py38-plus] @@ -58,7 +58,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.265 + rev: v0.0.267 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -71,7 +71,7 @@ repos: additional_dependencies: &flake8_dependencies # These versions need updated manually - flake8==6.0.0 - - flake8-bugbear==23.3.23 + - flake8-bugbear==23.5.9 - flake8-simplify==0.20.0 - repo: https://github.com/asottile/yesqa rev: v1.4.0 @@ -86,7 +86,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.265 + rev: v0.0.267 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/environment.yml b/environment.yml index 875ec5cbd..1a7fb6fa8 100644 --- a/environment.yml +++ b/environment.yml @@ -94,8 +94,10 @@ dependencies: # - python-igraph # - python-louvain # - pyupgrade + # - rich # - ruff # - scalene + # - scikit-network # - setuptools-git-versioning # - snakeviz # - sphinx-lint diff --git a/graphblas/agg/__init__.py b/graphblas/agg/__init__.py index c1319facb..9f6ead0b5 100644 --- a/graphblas/agg/__init__.py +++ b/graphblas/agg/__init__.py @@ -1,4 +1,4 @@ -"""`graphblas.agg` is an experimental module for exploring Aggregators. +"""``graphblas.agg`` is an experimental module for exploring Aggregators. Aggregators may be used in reduce methods: - Matrix.reduce_rowwise @@ -59,9 +59,9 @@ - ss.argmax .. deprecated:: 2023.1.0 - Aggregators `first`, `last`, `first_index`, `last_index`, `argmin`, and `argmax` are - deprecated in the `agg` namespace such as `agg.first`. Use them from `agg.ss` namespace - instead such as `agg.ss.first`. Will be removed in version 2023.9.0 or later. + Aggregators ``first``, ``last``, ``first_index``, ``last_index``, ``argmin``, and ``argmax`` + are deprecated in the ``agg`` namespace such as ``agg.first``. Use them from ``agg.ss`` + namespace instead such as ``agg.ss.first``. Will be removed in version 2023.9.0 or later. # Possible aggregators: # - absolute_deviation, sum(abs(x - mean(x))), sum_absminus(x, mean(x)) diff --git a/graphblas/core/agg.py b/graphblas/core/agg.py index b9f1977ab..23848d3b9 100644 --- a/graphblas/core/agg.py +++ b/graphblas/core/agg.py @@ -1,8 +1,8 @@ """graphblas.core.agg namespace is deprecated; please use graphblas.core.operator.agg instead. .. deprecated:: 2023.3.0 -`graphblas.core.agg` will be removed in a future release. -Use `graphblas.core.operator.agg` instead. +``graphblas.core.agg`` will be removed in a future release. +Use ``graphblas.core.operator.agg`` instead. Will be removed in version 2023.11.0 or later. """ diff --git a/graphblas/core/automethods.py b/graphblas/core/automethods.py index 98dc61137..937e331fd 100644 --- a/graphblas/core/automethods.py +++ b/graphblas/core/automethods.py @@ -1,6 +1,6 @@ """Define functions to use as property methods on expressions. -These will automatically compute the value and avoid the need for `.new()`. +These will automatically compute the value and avoid the need for ``.new()``. 
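To make the ``.new()`` remark concrete: with autocompute enabled, an expression materializes itself when a value is demanded, which is what these generated property methods implement. A rough sketch of the two styles (assumptions: a SuiteSparse-backed install; ``autocompute`` is the same ``gb.config`` flag that conftest.py above sets to False for the test suite):

```python
import graphblas as gb
from graphblas import Vector, monoid

v = Vector.from_coo([0, 1, 2], [10, 20, 30])
w = v.ewise_add(v, monoid.plus).new()  # explicit: materialize the expression

gb.config.set(autocompute=True)        # same flag conftest.py sets to False
s = v.reduce(monoid.plus).value        # .value computes the expression for us
```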
To automatically create the functions, run: diff --git a/graphblas/core/mask.py b/graphblas/core/mask.py index 9ad209095..3bda2188a 100644 --- a/graphblas/core/mask.py +++ b/graphblas/core/mask.py @@ -35,7 +35,7 @@ def new(self, dtype=None, *, complement=False, mask=None, name=None, **opts): """Return a new object with True values determined by the mask(s). By default, the result is True wherever the mask(s) would have been applied, - and empty otherwise. If `complement` is True, then these are switched: + and empty otherwise. If ``complement`` is True, then these are switched: the result is empty where the mask(s) would have been applied, and True otherwise. In other words, these are equivalent if complement is False (and mask keyword is None): @@ -48,14 +48,14 @@ def new(self, dtype=None, *, complement=False, mask=None, name=None, **opts): >>> C(self) << expr >>> C(~result.S) << expr # equivalent when complement is True - This can also efficiently merge two masks by using the `mask=` argument. + This can also efficiently merge two masks by using the ``mask=`` argument. This is equivalent to the following (but uses more efficient recipes): >>> val = Matrix(...) >>> val(self) << True >>> val(mask, replace=True) << val - If `complement=` argument is True, then the *complement* will be returned. + If ``complement=`` argument is True, then the *complement* will be returned. This is equivalent to the following (but uses more efficient recipes): >>> val = Matrix(...) @@ -83,7 +83,7 @@ def new(self, dtype=None, *, complement=False, mask=None, name=None, **opts): def __and__(self, other, **opts): """Return the intersection of two masks as a new mask. - `new_mask = mask1 & mask2` is equivalent to the following: + ``new_mask = mask1 & mask2`` is equivalent to the following: >>> val = Matrix(bool, nrows, ncols) >>> val(mask1) << True @@ -109,7 +109,7 @@ def __and__(self, other, **opts): def __or__(self, other, **opts): """Return the union of two masks as a new mask. - `new_mask = mask1 | mask2` is equivalent to the following: + ``new_mask = mask1 | mask2`` is equivalent to the following: >>> val = Matrix(bool, nrows, ncols) >>> val(mask1) << True diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 0183893fd..b74ca347a 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -355,7 +355,7 @@ def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False, **opts Returns ------- bool - Whether all values of the Matrix are close to the values in `other`. + Whether all values of the Matrix are close to the values in ``other``. """ other = self._expect_type( other, (Matrix, TransposedMatrix), within="isclose", argname="other" @@ -448,19 +448,19 @@ def to_values(self, dtype=None, *, rows=True, columns=True, values=True, sort=Tr corresponding to the COO format of the Matrix. .. deprecated:: 2022.11.0 - `Matrix.to_values` will be removed in a future release. - Use `Matrix.to_coo` instead. Will be removed in version 2023.9.0 or later + ``Matrix.to_values`` will be removed in a future release. + Use ``Matrix.to_coo`` instead. Will be removed in version 2023.9.0 or later Parameters ---------- dtype : Requested dtype for the output values array. 
rows : bool, default=True - Whether to return rows; will return `None` for rows if `False` + Whether to return rows; will return ``None`` for rows if ``False`` columns :bool, default=True - Whether to return columns; will return `None` for columns if `False` + Whether to return columns; will return ``None`` for columns if ``False`` values : bool, default=True - Whether to return values; will return `None` for values if `False` + Whether to return values; will return ``None`` for values if ``False`` sort : bool, default=True Whether to require sorted indices. If internally stored rowwise, the sorting will be first by rows, then by column. @@ -488,11 +488,11 @@ def to_coo(self, dtype=None, *, rows=True, columns=True, values=True, sort=True) dtype : Requested dtype for the output values array. rows : bool, default=True - Whether to return rows; will return `None` for rows if `False` + Whether to return rows; will return ``None`` for rows if ``False`` columns :bool, default=True - Whether to return columns; will return `None` for columns if `False` + Whether to return columns; will return ``None`` for columns if ``False`` values : bool, default=True - Whether to return values; will return `None` for values if `False` + Whether to return values; will return ``None`` for values if ``False`` sort : bool, default=True Whether to require sorted indices. If internally stored rowwise, the sorting will be first by rows, then by column. @@ -559,7 +559,7 @@ def to_edgelist(self, dtype=None, *, values=True, sort=True): dtype : Requested dtype for the output values array. values : bool, default=True - Whether to return values; will return `None` for values if `False` + Whether to return values; will return ``None`` for values if ``False`` sort : bool, default=True Whether to require sorted indices. If internally stored rowwise, the sorting will be first by rows, then by column. @@ -585,7 +585,7 @@ def build(self, rows, columns, values, *, dup_op=None, clear=False, nrows=None, The typical use case is to create a new Matrix and insert values at the same time using :meth:`from_coo`. - All the arguments are used identically in :meth:`from_coo`, except for `clear`, which + All the arguments are used identically in :meth:`from_coo`, except for ``clear``, which indicates whether to clear the Matrix prior to adding the new values. """ # TODO: accept `dtype` keyword to match the dtype of `values`? @@ -781,8 +781,8 @@ def from_values( """Create a new Matrix from row and column indices and values. .. deprecated:: 2022.11.0 - `Matrix.from_values` will be removed in a future release. - Use `Matrix.from_coo` instead. Will be removed in version 2023.9.0 or later + ``Matrix.from_values`` will be removed in a future release. + Use ``Matrix.from_coo`` instead. Will be removed in version 2023.9.0 or later Parameters ---------- @@ -1086,7 +1086,7 @@ def from_csr( Parameters ---------- indptr : list or np.ndarray - Pointers for each row into col_indices and values; `indptr.size == nrows + 1`. + Pointers for each row into col_indices and values; ``indptr.size == nrows + 1``. col_indices : list or np.ndarray Column indices. values : list or np.ndarray or scalar, default 1.0 @@ -1133,7 +1133,7 @@ def from_csc( Parameters ---------- indptr : list or np.ndarray - Pointers for each column into row_indices and values; `indptr.size == ncols + 1`. + Pointers for each column into row_indices and values; ``indptr.size == ncols + 1``. col_indices : list or np.ndarray Column indices. 
values : list or np.ndarray or scalar, default 1.0 diff --git a/graphblas/core/operator/base.py b/graphblas/core/operator/base.py index a40438f14..cddee6a33 100644 --- a/graphblas/core/operator/base.py +++ b/graphblas/core/operator/base.py @@ -398,7 +398,7 @@ def _find(cls, funcname): def _initialize(cls, include_in_ops=True): """ include_in_ops determines whether the operators are included in the - `gb.ops` namespace in addition to the defined module. + ``gb.ops`` namespace in addition to the defined module. """ if cls._initialized: # pragma: no cover (safety) return diff --git a/graphblas/core/recorder.py b/graphblas/core/recorder.py index 2268c31eb..ca776f697 100644 --- a/graphblas/core/recorder.py +++ b/graphblas/core/recorder.py @@ -34,7 +34,7 @@ def gbstr(arg): class Recorder: """Record GraphBLAS C calls. - The recorder can use `.start()` and `.stop()` to enable/disable recording, + The recorder can use ``.start()`` and ``.stop()`` to enable/disable recording, or it can be used as a context manager. For example, diff --git a/graphblas/core/scalar.py b/graphblas/core/scalar.py index a7a251a1d..b55d601af 100644 --- a/graphblas/core/scalar.py +++ b/graphblas/core/scalar.py @@ -1056,7 +1056,7 @@ def _as_scalar(scalar, dtype=None, *, is_cscalar): def _dict_to_record(np_type, d): - """Converts e.g. `{"x": 1, "y": 2.3}` to `(1, 2.3)`.""" + """Converts e.g. ``{"x": 1, "y": 2.3}`` to ``(1, 2.3)``.""" rv = [] for name, (dtype, _) in np_type.fields.items(): val = d[name] diff --git a/graphblas/core/ss/descriptor.py b/graphblas/core/ss/descriptor.py index dffc4dec1..2f7d11ffa 100644 --- a/graphblas/core/ss/descriptor.py +++ b/graphblas/core/ss/descriptor.py @@ -132,7 +132,7 @@ def get_descriptor(**opts): sort : bool, default False A hint for whether methods may return a "jumbled" matrix secure_import : bool, default False - Whether to trust the data for `import` and `pack` functions. + Whether to trust the data for ``import`` and ``pack`` functions. When True, checks are performed to ensure input data is valid. compression : str, {"none", "default", "lz4", "lz4hc", "zstd"} Whether and how to compress the data for serialization. diff --git a/graphblas/core/ss/matrix.py b/graphblas/core/ss/matrix.py index cac0296c7..64aa43a96 100644 --- a/graphblas/core/ss/matrix.py +++ b/graphblas/core/ss/matrix.py @@ -63,7 +63,7 @@ def head(matrix, n=10, dtype=None, *, sort=False): def _concat_mn(tiles, *, is_matrix=None): - """Argument checking for `Matrix.ss.concat` and returns number of tiles in each dimension.""" + """Argument checking for ``Matrix.ss.concat`` and returns number of tiles in each dimension.""" from ..matrix import Matrix, TransposedMatrix from ..vector import Vector @@ -261,8 +261,8 @@ def build_diag(self, vector, k=0, **opts): vector : Vector Create a diagonal from this Vector. k : int, default 0 - Diagonal in question. Use `k>0` for diagonals above the main diagonal, - and `k<0` for diagonals below the main diagonal. + Diagonal in question. Use ``k>0`` for diagonals above the main diagonal, + and ``k<0`` for diagonals below the main diagonal. See Also -------- @@ -282,12 +282,12 @@ def split(self, chunks, *, name=None, **opts): """ GxB_Matrix_split. - Split a Matrix into a 2D array of sub-matrices according to `chunks`. + Split a Matrix into a 2D array of sub-matrices according to ``chunks``. This performs the opposite operation as ``concat``. - `chunks` is short for "chunksizes" and indicates the chunk sizes for each dimension. 
- `chunks` may be a single integer, or a length 2 tuple or list. Example chunks: + ``chunks`` is short for "chunksizes" and indicates the chunk sizes for each dimension. + ``chunks`` may be a single integer, or a length 2 tuple or list. Example chunks: - ``chunks=10`` - Split each dimension into chunks of size 10 (the last chunk may be smaller). @@ -295,7 +295,7 @@ def split(self, chunks, *, name=None, **opts): - Split rows into chunks of size 10 and columns into chunks of size 20. - ``chunks=(None, [5, 10])`` - Don't split rows into chunks, and split columns into two chunks of size 5 and 10. - ` ``chunks=(10, [20, None])`` + - ``chunks=(10, [20, None])`` - Split columns into two chunks of size 20 and ``ncols - 20`` See Also @@ -366,9 +366,9 @@ def concat(self, tiles, **opts): Concatenate a 2D list of Matrix objects into the current Matrix. Any existing values in the current Matrix will be discarded. - To concatenate into a new Matrix, use `graphblas.ss.concat`. + To concatenate into a new Matrix, use ``graphblas.ss.concat``. - Vectors may be used as `Nx1` Matrix objects. + Vectors may be used as ``Nx1`` Matrix objects. This performs the opposite operation as ``split``. @@ -542,8 +542,8 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** Parameters ---------- format : str, optional - If `format` is not specified, this method exports in the currently stored format. - To control the export format, set `format` to one of: + If ``format`` is not specified, this method exports in the currently stored format. + To control the export format, set ``format`` to one of: - "csr" - "csc" - "hypercsr" @@ -578,7 +578,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** Returns ------- - dict; keys depend on `format` and `raw` arguments (see below). + dict; keys depend on ``format`` and ``raw`` arguments (see below). See Also -------- @@ -732,10 +732,10 @@ def unpack(self, format=None, *, sort=False, raw=False, **opts): """ GxB_Matrix_unpack_xxx. - `unpack` is like `export`, except that the Matrix remains valid but empty. - `pack_*` methods are the opposite of `unpack`. + ``unpack`` is like ``export``, except that the Matrix remains valid but empty. + ``pack_*`` methods are the opposite of ``unpack``. - See `Matrix.ss.export` documentation for more details. + See ``Matrix.ss.export`` documentation for more details. """ return self._export( format, sort=sort, raw=raw, give_ownership=True, method="unpack", opts=opts @@ -1193,7 +1193,7 @@ def import_csr( col_indices : array-like is_iso : bool, default False Is the Matrix iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. sorted_cols : bool, default False Indicate whether the values in "col_indices" are sorted. take_ownership : bool, default False @@ -1210,7 +1210,7 @@ def import_csr( read-only and will no longer own the data. dtype : dtype, optional dtype of the new Matrix. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "csr" or None. This is included to be compatible with the dict returned from exporting. @@ -1259,10 +1259,10 @@ def pack_csr( """ GxB_Matrix_pack_CSR. - `pack_csr` is like `import_csr` except it "packs" data into an + ``pack_csr`` is like ``import_csr`` except it "packs" data into an existing Matrix. 
This is the opposite of ``unpack("csr")`` - See `Matrix.ss.import_csr` documentation for more details. + See ``Matrix.ss.import_csr`` documentation for more details. """ return self._import_csr( indptr=indptr, @@ -1383,7 +1383,7 @@ def import_csc( row_indices : array-like is_iso : bool, default False Is the Matrix iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. sorted_rows : bool, default False Indicate whether the values in "row_indices" are sorted. take_ownership : bool, default False @@ -1400,7 +1400,7 @@ def import_csc( read-only and will no longer own the data. dtype : dtype, optional dtype of the new Matrix. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "csc" or None. This is included to be compatible with the dict returned from exporting. @@ -1449,10 +1449,10 @@ def pack_csc( """ GxB_Matrix_pack_CSC. - `pack_csc` is like `import_csc` except it "packs" data into an + ``pack_csc`` is like ``import_csc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("csc")`` - See `Matrix.ss.import_csc` documentation for more details. + See ``Matrix.ss.import_csc`` documentation for more details. """ return self._import_csc( indptr=indptr, @@ -1579,7 +1579,7 @@ def import_hypercsr( If not specified, will be set to ``len(rows)``. is_iso : bool, default False Is the Matrix iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. sorted_cols : bool, default False Indicate whether the values in "col_indices" are sorted. take_ownership : bool, default False @@ -1596,7 +1596,7 @@ def import_hypercsr( read-only and will no longer own the data. dtype : dtype, optional dtype of the new Matrix. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "hypercsr" or None. This is included to be compatible with the dict returned from exporting. @@ -1649,10 +1649,10 @@ def pack_hypercsr( """ GxB_Matrix_pack_HyperCSR. - `pack_hypercsr` is like `import_hypercsr` except it "packs" data into an + ``pack_hypercsr`` is like ``import_hypercsr`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("hypercsr")`` - See `Matrix.ss.import_hypercsr` documentation for more details. + See ``Matrix.ss.import_hypercsr`` documentation for more details. """ return self._import_hypercsr( rows=rows, @@ -1803,7 +1803,7 @@ def import_hypercsc( If not specified, will be set to ``len(cols)``. is_iso : bool, default False Is the Matrix iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. sorted_rows : bool, default False Indicate whether the values in "row_indices" are sorted. take_ownership : bool, default False @@ -1820,7 +1820,7 @@ def import_hypercsc( read-only and will no longer own the data. dtype : dtype, optional dtype of the new Matrix. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "hypercsc" or None. This is included to be compatible with the dict returned from exporting. @@ -1873,10 +1873,10 @@ def pack_hypercsc( """ GxB_Matrix_pack_HyperCSC. 
- `pack_hypercsc` is like `import_hypercsc` except it "packs" data into an + ``pack_hypercsc`` is like ``import_hypercsc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("hypercsc")`` - See `Matrix.ss.import_hypercsc` documentation for more details. + See ``Matrix.ss.import_hypercsc`` documentation for more details. """ return self._import_hypercsc( cols=cols, @@ -2028,7 +2028,7 @@ def import_bitmapr( If not provided, will be inferred from values or bitmap if either is 2d. is_iso : bool, default False Is the Matrix iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. take_ownership : bool, default False If True, perform a zero-copy data transfer from input numpy arrays to GraphBLAS if possible. To give ownership of the underlying @@ -2043,7 +2043,7 @@ def import_bitmapr( read-only and will no longer own the data. dtype : dtype, optional dtype of the new Matrix. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "bitmapr" or None. This is included to be compatible with the dict returned from exporting. @@ -2090,10 +2090,10 @@ def pack_bitmapr( """ GxB_Matrix_pack_BitmapR. - `pack_bitmapr` is like `import_bitmapr` except it "packs" data into an + ``pack_bitmapr`` is like ``import_bitmapr`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("bitmapr")`` - See `Matrix.ss.import_bitmapr` documentation for more details. + See ``Matrix.ss.import_bitmapr`` documentation for more details. """ return self._import_bitmapr( bitmap=bitmap, @@ -2221,7 +2221,7 @@ def import_bitmapc( If not provided, will be inferred from values or bitmap if either is 2d. is_iso : bool, default False Is the Matrix iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. take_ownership : bool, default False If True, perform a zero-copy data transfer from input numpy arrays to GraphBLAS if possible. To give ownership of the underlying @@ -2236,7 +2236,7 @@ def import_bitmapc( read-only and will no longer own the data. dtype : dtype, optional dtype of the new Matrix. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "bitmapc" or None. This is included to be compatible with the dict returned from exporting. @@ -2283,10 +2283,10 @@ def pack_bitmapc( """ GxB_Matrix_pack_BitmapC. - `pack_bitmapc` is like `import_bitmapc` except it "packs" data into an + ``pack_bitmapc`` is like ``import_bitmapc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("bitmapc")`` - See `Matrix.ss.import_bitmapc` documentation for more details. + See ``Matrix.ss.import_bitmapc`` documentation for more details. """ return self._import_bitmapc( bitmap=bitmap, @@ -2407,7 +2407,7 @@ def import_fullr( If not provided, will be inferred from values if it is 2d. is_iso : bool, default False Is the Matrix iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. take_ownership : bool, default False If True, perform a zero-copy data transfer from input numpy arrays to GraphBLAS if possible. 
To give ownership of the underlying @@ -2422,7 +2422,7 @@ def import_fullr( read-only and will no longer own the data. dtype : dtype, optional dtype of the new Matrix. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "fullr" or None. This is included to be compatible with the dict returned from exporting. @@ -2465,10 +2465,10 @@ def pack_fullr( """ GxB_Matrix_pack_FullR. - `pack_fullr` is like `import_fullr` except it "packs" data into an + ``pack_fullr`` is like ``import_fullr`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("fullr")`` - See `Matrix.ss.import_fullr` documentation for more details. + See ``Matrix.ss.import_fullr`` documentation for more details. """ return self._import_fullr( values=values, @@ -2566,7 +2566,7 @@ def import_fullc( If not provided, will be inferred from values if it is 2d. is_iso : bool, default False Is the Matrix iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. take_ownership : bool, default False If True, perform a zero-copy data transfer from input numpy arrays to GraphBLAS if possible. To give ownership of the underlying @@ -2581,7 +2581,7 @@ def import_fullc( read-only and will no longer own the data. dtype : dtype, optional dtype of the new Matrix. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "fullc" or None. This is included to be compatible with the dict returned from exporting. @@ -2624,10 +2624,10 @@ def pack_fullc( """ GxB_Matrix_pack_FullC. - `pack_fullc` is like `import_fullc` except it "packs" data into an + ``pack_fullc`` is like ``import_fullc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("fullc")`` - See `Matrix.ss.import_fullc` documentation for more details. + See ``Matrix.ss.import_fullc`` documentation for more details. """ return self._import_fullc( values=values, @@ -2727,7 +2727,7 @@ def import_coo( The number of columns for the Matrix. is_iso : bool, default False Is the Matrix iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. sorted_rows : bool, default False True if rows are sorted or when (cols, rows) are sorted lexicographically sorted_cols : bool, default False @@ -2736,7 +2736,7 @@ def import_coo( Ignored. Zero-copy is not possible for "coo" format. dtype : dtype, optional dtype of the new Matrix. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "coo" or None. This is included to be compatible with the dict returned from exporting. @@ -2787,10 +2787,10 @@ def pack_coo( """ GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar. - `pack_coo` is like `import_coo` except it "packs" data into an + ``pack_coo`` is like ``import_coo`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("coo")`` - See `Matrix.ss.import_coo` documentation for more details. + See ``Matrix.ss.import_coo`` documentation for more details. """ return self._import_coo( nrows=self._parent._nrows, @@ -2914,7 +2914,7 @@ def import_coor( The number of columns for the Matrix. is_iso : bool, default False Is the Matrix iso-valued (meaning all the same value)? 
- If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. sorted_cols : bool, default False True indicates indices are sorted by column, then row. take_ownership : bool, default False @@ -2932,7 +2932,7 @@ def import_coor( For "coor", ownership of "rows" will never change. dtype : dtype, optional dtype of the new Matrix. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "coor" or None. This is included to be compatible with the dict returned from exporting. @@ -2983,10 +2983,10 @@ def pack_coor( """ GxB_Matrix_pack_CSR. - `pack_coor` is like `import_coor` except it "packs" data into an + ``pack_coor`` is like ``import_coor`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("coor")`` - See `Matrix.ss.import_coor` documentation for more details. + See ``Matrix.ss.import_coor`` documentation for more details. """ return self._import_coor( rows=rows, @@ -3083,7 +3083,7 @@ def import_cooc( The number of columns for the Matrix. is_iso : bool, default False Is the Matrix iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. sorted_rows : bool, default False True indicates indices are sorted by column, then row. take_ownership : bool, default False @@ -3101,7 +3101,7 @@ def import_cooc( For "cooc", ownership of "cols" will never change. dtype : dtype, optional dtype of the new Matrix. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "cooc" or None. This is included to be compatible with the dict returned from exporting. @@ -3152,10 +3152,10 @@ def pack_cooc( """ GxB_Matrix_pack_CSC. - `pack_cooc` is like `import_cooc` except it "packs" data into an + ``pack_cooc`` is like ``import_cooc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("cooc")`` - See `Matrix.ss.import_cooc` documentation for more details. + See ``Matrix.ss.import_cooc`` documentation for more details. """ return self._import_cooc( ncols=self._parent._ncols, @@ -3255,7 +3255,7 @@ def import_any( GxB_Matrix_import_xxx. Dispatch to appropriate import method inferred from inputs. - See the other import functions and `Matrix.ss.export`` for details. + See the other import functions and ``Matrix.ss.export`` for details. Returns ------- @@ -3352,10 +3352,10 @@ def pack_any( """ GxB_Matrix_pack_xxx. - `pack_any` is like `import_any` except it "packs" data into an + ``pack_any`` is like ``import_any`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack()`` - See `Matrix.ss.import_any` documentation for more details. + See ``Matrix.ss.import_any`` documentation for more details. """ return self._import_any( values=values, @@ -3701,8 +3701,8 @@ def head(self, n=10, dtype=None, *, sort=False): def scan(self, op=monoid.plus, order="rowwise", *, name=None, **opts): """Perform a prefix scan across rows (default) or columns with the given monoid. - For example, use `monoid.plus` (the default) to perform a cumulative sum, - and `monoid.times` for cumulative product. Works with any monoid. + For example, use ``monoid.plus`` (the default) to perform a cumulative sum, + and ``monoid.times`` for cumulative product. Works with any monoid. 
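A tiny worked example of such a prefix scan (a sketch; the ``ss`` namespace is SuiteSparse-specific, and results are shown densely for readability):

```python
from graphblas import Matrix, monoid

A = Matrix.from_coo([0, 0, 1, 1], [0, 1, 0, 1], [1, 2, 3, 4])  # [[1, 2], [3, 4]]
A.ss.scan()                            # plus, rowwise:     [[1, 3], [3, 7]]
A.ss.scan(monoid.times, "columnwise")  # times, columnwise: [[1, 2], [3, 8]]
```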
Returns ------- @@ -3718,12 +3718,12 @@ def scan_columnwise(self, op=monoid.plus, *, name=None, **opts): """Perform a prefix scan across columns with the given monoid. .. deprecated:: 2022.11.1 - `Matrix.ss.scan_columnwise` will be removed in a future release. - Use `Matrix.ss.scan(order="columnwise")` instead. + ``Matrix.ss.scan_columnwise`` will be removed in a future release. + Use ``Matrix.ss.scan(order="columnwise")`` instead. Will be removed in version 2023.7.0 or later - For example, use `monoid.plus` (the default) to perform a cumulative sum, - and `monoid.times` for cumulative product. Works with any monoid. + For example, use ``monoid.plus`` (the default) to perform a cumulative sum, + and ``monoid.times`` for cumulative product. Works with any monoid. Returns ------- @@ -3741,12 +3741,12 @@ def scan_rowwise(self, op=monoid.plus, *, name=None, **opts): """Perform a prefix scan across rows with the given monoid. .. deprecated:: 2022.11.1 - `Matrix.ss.scan_rowwise` will be removed in a future release. - Use `Matrix.ss.scan` instead. + ``Matrix.ss.scan_rowwise`` will be removed in a future release. + Use ``Matrix.ss.scan`` instead. Will be removed in version 2023.7.0 or later - For example, use `monoid.plus` (the default) to perform a cumulative sum, - and `monoid.times` for cumulative product. Works with any monoid. + For example, use ``monoid.plus`` (the default) to perform a cumulative sum, + and ``monoid.times`` for cumulative product. Works with any monoid. Returns ------- @@ -3904,8 +3904,8 @@ def selectk_rowwise(self, how, k, *, name=None): # pragma: no cover (deprecated """Select (up to) k elements from each row. .. deprecated:: 2022.11.1 - `Matrix.ss.selectk_rowwise` will be removed in a future release. - Use `Matrix.ss.selectk` instead. + ``Matrix.ss.selectk_rowwise`` will be removed in a future release. + Use ``Matrix.ss.selectk`` instead. Will be removed in version 2023.7.0 or later Parameters @@ -3950,8 +3950,8 @@ def selectk_columnwise(self, how, k, *, name=None): # pragma: no cover (depreca """Select (up to) k elements from each column. .. deprecated:: 2022.11.1 - `Matrix.ss.selectk_columnwise` will be removed in a future release. - Use `Matrix.ss.selectk(order="columnwise")` instead. + ``Matrix.ss.selectk_columnwise`` will be removed in a future release. + Use ``Matrix.ss.selectk(order="columnwise")`` instead. Will be removed in version 2023.7.0 or later Parameters @@ -4216,23 +4216,23 @@ def sort(self, op=binary.lt, order="rowwise", *, values=True, permutation=True, """GxB_Matrix_sort to sort values along the rows (default) or columns of the Matrix. Sorting moves all the elements to the left (if rowwise) or top (if columnwise) just - like `compactify`. The returned matrices will be the same shape as the input Matrix. + like ``compactify``. The returned matrices will be the same shape as the input Matrix. Parameters ---------- op : :class:`~graphblas.core.operator.BinaryOp`, optional Binary operator with a bool return type used to sort the values. - For example, `binary.lt` (the default) sorts the smallest elements first. + For example, ``binary.lt`` (the default) sorts the smallest elements first. Ties are broken according to indices (smaller first). order : {"rowwise", "columnwise"}, optional Whether to sort rowwise or columnwise. Rowwise shifts all values to the left, and columnwise shifts all values to the top. The default is "rowwise". values : bool, default=True - Whether to return values; will return `None` for values if `False`. 
+ Whether to return values; will return ``None`` for values if ``False``. permutation : bool, default=True Whether to compute the permutation Matrix that has the original column indices (if rowwise) or row indices (if columnwise) of the sorted values. - Will return None if `False`. + Will return None if ``False``. nthreads : int, optional The maximum number of threads to use for this operation. None, 0 or negative nthreads means to use the default number of threads. @@ -4301,7 +4301,7 @@ def serialize(self, compression="default", level=None, **opts): None, 0 or negative nthreads means to use the default number of threads. For best performance, this function returns a numpy array with uint8 dtype. - Use `Matrix.ss.deserialize(blob)` to create a Matrix from the result of serialization + Use ``Matrix.ss.deserialize(blob)`` to create a Matrix from the result of serialization This method is intended to support all serialization options from SuiteSparse:GraphBLAS. @@ -4327,7 +4327,7 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts): """Deserialize a Matrix from bytes, buffer, or numpy array using GxB_Matrix_deserialize. The data should have been previously serialized with a compatible version of - SuiteSparse:GraphBLAS. For example, from the result of `data = matrix.ss.serialize()`. + SuiteSparse:GraphBLAS. For example, from the result of ``data = matrix.ss.serialize()``. Examples -------- diff --git a/graphblas/core/ss/vector.py b/graphblas/core/ss/vector.py index 2b1e8bf05..1babc556e 100644 --- a/graphblas/core/ss/vector.py +++ b/graphblas/core/ss/vector.py @@ -155,8 +155,8 @@ def build_diag(self, matrix, k=0, **opts): matrix : Matrix or TransposedMatrix Extract a diagonal from this matrix. k : int, default 0 - Diagonal in question. Use `k>0` for diagonals above the main diagonal, - and `k<0` for diagonals below the main diagonal. + Diagonal in question. Use ``k>0`` for diagonals above the main diagonal, + and ``k<0`` for diagonals below the main diagonal. See Also -------- @@ -185,12 +185,12 @@ def split(self, chunks, *, name=None, **opts): """ GxB_Matrix_split. - Split a Vector into a 1D array of sub-vectors according to `chunks`. + Split a Vector into a 1D array of sub-vectors according to ``chunks``. This performs the opposite operation as ``concat``. - `chunks` is short for "chunksizes" and indicates the chunk sizes. - `chunks` may be a single integer, or a tuple or list. Example chunks: + ``chunks`` is short for "chunksizes" and indicates the chunk sizes. + ``chunks`` may be a single integer, or a tuple or list. Example chunks: - ``chunks=10`` - Split vector into chunks of size 10 (the last chunk may be smaller). @@ -253,7 +253,7 @@ def concat(self, tiles, **opts): Concatenate a 1D list of Vector objects into the current Vector. Any existing values in the current Vector will be discarded. - To concatenate into a new Vector, use `graphblas.ss.concat`. + To concatenate into a new Vector, use ``graphblas.ss.concat``. This performs the opposite operation as ``split``. @@ -415,8 +415,8 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** Parameters ---------- format : str or None, default None - If `format` is not specified, this method exports in the currently stored format. - To control the export format, set `format` to one of: + If ``format`` is not specified, this method exports in the currently stored format. 
+ To control the export format, set ``format`` to one of: - "sparse" - "bitmap" - "full" @@ -434,7 +434,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** Returns ------- - dict; keys depend on `format` and `raw` arguments (see below). + dict; keys depend on ``format`` and ``raw`` arguments (see below). See Also -------- @@ -442,7 +442,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** Vector.ss.import_any Return values - - Note: for `raw=True`, arrays may be larger than specified. + - Note: for ``raw=True``, arrays may be larger than specified. - "sparse" format - indices : ndarray(dtype=uint64, size=nvals) - values : ndarray(size=nvals) @@ -481,10 +481,10 @@ def unpack(self, format=None, *, sort=False, raw=False, **opts): """ GxB_Vector_unpack_xxx. - `unpack` is like `export`, except that the Vector remains valid but empty. - `pack_*` methods are the opposite of `unpack`. + ``unpack`` is like ``export``, except that the Vector remains valid but empty. + ``pack_*`` methods are the opposite of ``unpack``. - See `Vector.ss.export` documentation for more details. + See ``Vector.ss.export`` documentation for more details. """ return self._export( format=format, sort=sort, give_ownership=True, raw=raw, method="unpack", opts=opts @@ -658,7 +658,7 @@ def import_any( GxB_Vector_import_xxx. Dispatch to appropriate import method inferred from inputs. - See the other import functions and `Vector.ss.export`` for details. + See the other import functions and ``Vector.ss.export`` for details. Returns ------- @@ -724,10 +724,10 @@ def pack_any( """ GxB_Vector_pack_xxx. - `pack_any` is like `import_any` except it "packs" data into an + ``pack_any`` is like ``import_any`` except it "packs" data into an existing Vector. This is the opposite of ``unpack()`` - See `Vector.ss.import_any` documentation for more details. + See ``Vector.ss.import_any`` documentation for more details. """ return self._import_any( values=values, @@ -858,7 +858,7 @@ def import_sparse( If not specified, will be set to ``len(values)``. is_iso : bool, default False Is the Vector iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. sorted_index : bool, default False Indicate whether the values in "col_indices" are sorted. take_ownership : bool, default False @@ -875,7 +875,7 @@ def import_sparse( read-only and will no longer own the data. dtype : dtype, optional dtype of the new Vector. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "sparse" or None. This is included to be compatible with the dict returned from exporting. @@ -922,10 +922,10 @@ def pack_sparse( """ GxB_Vector_pack_CSC. - `pack_sparse` is like `import_sparse` except it "packs" data into an + ``pack_sparse`` is like ``import_sparse`` except it "packs" data into an existing Vector. This is the opposite of ``unpack("sparse")`` - See `Vector.ss.import_sparse` documentation for more details. + See ``Vector.ss.import_sparse`` documentation for more details. """ return self._import_sparse( indices=indices, @@ -1045,7 +1045,7 @@ def import_bitmap( If not specified, it will be set to the size of values. is_iso : bool, default False Is the Vector iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. 
take_ownership : bool, default False If True, perform a zero-copy data transfer from input numpy arrays to GraphBLAS if possible. To give ownership of the underlying @@ -1060,7 +1060,7 @@ def import_bitmap( read-only and will no longer own the data. dtype : dtype, optional dtype of the new Vector. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "bitmap" or None. This is included to be compatible with the dict returned from exporting. @@ -1105,10 +1105,10 @@ def pack_bitmap( """ GxB_Vector_pack_Bitmap. - `pack_bitmap` is like `import_bitmap` except it "packs" data into an + ``pack_bitmap`` is like ``import_bitmap`` except it "packs" data into an existing Vector. This is the opposite of ``unpack("bitmap")`` - See `Vector.ss.import_bitmap` documentation for more details. + See ``Vector.ss.import_bitmap`` documentation for more details. """ return self._import_bitmap( bitmap=bitmap, @@ -1226,7 +1226,7 @@ def import_full( If not specified, it will be set to the size of values. is_iso : bool, default False Is the Vector iso-valued (meaning all the same value)? - If true, then `values` should be a length 1 array. + If true, then ``values`` should be a length 1 array. take_ownership : bool, default False If True, perform a zero-copy data transfer from input numpy arrays to GraphBLAS if possible. To give ownership of the underlying @@ -1241,7 +1241,7 @@ def import_full( read-only and will no longer own the data. dtype : dtype, optional dtype of the new Vector. - If not specified, this will be inferred from `values`. + If not specified, this will be inferred from ``values``. format : str, optional Must be "full" or None. This is included to be compatible with the dict returned from exporting. @@ -1282,10 +1282,10 @@ def pack_full( """ GxB_Vector_pack_Full. - `pack_full` is like `import_full` except it "packs" data into an + ``pack_full`` is like ``import_full`` except it "packs" data into an existing Vector. This is the opposite of ``unpack("full")`` - See `Vector.ss.import_full` documentation for more details. + See ``Vector.ss.import_full`` documentation for more details. """ return self._import_full( values=values, @@ -1364,8 +1364,8 @@ def head(self, n=10, dtype=None, *, sort=False): def scan(self, op=monoid.plus, *, name=None, **opts): """Perform a prefix scan with the given monoid. - For example, use `monoid.plus` (the default) to perform a cumulative sum, - and `monoid.times` for cumulative product. Works with any monoid. + For example, use ``monoid.plus`` (the default) to perform a cumulative sum, + and ``monoid.times`` for cumulative product. Works with any monoid. Returns ------- @@ -1561,20 +1561,20 @@ def compactify(self, how="first", size=None, *, reverse=False, asindex=False, na def sort(self, op=binary.lt, *, values=True, permutation=True, **opts): """GxB_Vector_sort to sort values of the Vector. - Sorting moves all the elements to the left just like `compactify`. + Sorting moves all the elements to the left just like ``compactify``. The returned vectors will be the same size as the input Vector. Parameters ---------- op : :class:`~graphblas.core.operator.BinaryOp`, optional Binary operator with a bool return type used to sort the values. - For example, `binary.lt` (the default) sorts the smallest elements first. + For example, ``binary.lt`` (the default) sorts the smallest elements first. Ties are broken according to indices (smaller first). 
values : bool, default=True
- Whether to return values; will return `None` for values if `False`.
+ Whether to return values; will return ``None`` for values if ``False``.
permutation : bool, default=True
Whether to compute the permutation Vector that has the original indices of the
- sorted values. Will return None if `False`.
+ sorted values. Will return None if ``False``.
nthreads : int, optional
The maximum number of threads to use for this operation.
None, 0 or negative nthreads means to use the default number of threads.
@@ -1642,7 +1642,7 @@ def serialize(self, compression="default", level=None, **opts):
None, 0 or negative nthreads means to use the default number of threads.
For best performance, this function returns a numpy array with uint8 dtype.
- Use `Vector.ss.deserialize(blob)` to create a Vector from the result of serialization·
+ Use ``Vector.ss.deserialize(blob)`` to create a Vector from the result of serialization.
This method is intended to support all serialization options from SuiteSparse:GraphBLAS.
@@ -1668,7 +1668,7 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts):
"""Deserialize a Vector from bytes, buffer, or numpy array using GxB_Vector_deserialize.
The data should have been previously serialized with a compatible version of
- SuiteSparse:GraphBLAS. For example, from the result of `data = vector.ss.serialize()`.
+ SuiteSparse:GraphBLAS. For example, from the result of ``data = vector.ss.serialize()``.
Examples
--------
diff --git a/graphblas/core/utils.py b/graphblas/core/utils.py
index 77c64a7ac..74e03f2f9 100644
--- a/graphblas/core/utils.py
+++ b/graphblas/core/utils.py
@@ -22,7 +22,7 @@ def libget(name):
def wrapdoc(func_with_doc):
- """Decorator to copy `__doc__` from a function onto the wrapped function."""
+ """Decorator to copy ``__doc__`` from a function onto the wrapped function."""
def inner(func_wo_doc):
func_wo_doc.__doc__ = func_with_doc.__doc__
@@ -159,7 +159,7 @@ def get_order(order):
def normalize_chunks(chunks, shape):
- """Normalize chunks argument for use by `Matrix.ss.split`.
+ """Normalize chunks argument for use by ``Matrix.ss.split``.
Examples
--------
@@ -249,17 +249,17 @@ def normalize_chunks(chunks, shape):
def ensure_type(x, types):
- """Try to ensure `x` is one of the given types, computing if necessary.
+ """Try to ensure ``x`` is one of the given types, computing if necessary.
- `types` must be a type or a tuple of types as used in `isinstance`.
+ ``types`` must be a type or a tuple of types as used in ``isinstance``.
- For example, if `types` is a Vector, then a Vector input will be returned,
- and a `VectorExpression` input will be computed and returned as a Vector.
+ For example, if ``types`` is a Vector, then a Vector input will be returned,
+ and a ``VectorExpression`` input will be computed and returned as a Vector.
TypeError will be raised if the input is not or can't be converted to types.
- This function ignores `graphblas.config["autocompute"]`; it always computes
- if the return type will match `types`.
+ This function ignores ``graphblas.config["autocompute"]``; it always computes
+ if the return type will match ``types``.
""" if isinstance(x, types): return x @@ -358,6 +358,7 @@ def _autogenerate_code( specializer=None, begin="# Begin auto-generated code", end="# End auto-generated code", + callblack=True, ): """Super low-tech auto-code generation used by automethods.py and infixmethods.py.""" with filepath.open() as f: # pragma: no branch (flaky) @@ -384,7 +385,8 @@ def _autogenerate_code( f.write(new_text) import subprocess - try: - subprocess.check_call(["black", filepath]) - except FileNotFoundError: # pragma: no cover (safety) - pass # It's okay if `black` isn't installed; pre-commit hooks will do linting + if callblack: + try: + subprocess.check_call(["black", filepath]) + except FileNotFoundError: # pragma: no cover (safety) + pass # It's okay if `black` isn't installed; pre-commit hooks will do linting diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py index 57851420d..d2ddee372 100644 --- a/graphblas/core/vector.py +++ b/graphblas/core/vector.py @@ -453,17 +453,17 @@ def to_values(self, dtype=None, *, indices=True, values=True, sort=True): """Extract the indices and values as a 2-tuple of numpy arrays. .. deprecated:: 2022.11.0 - `Vector.to_values` will be removed in a future release. - Use `Vector.to_coo` instead. Will be removed in version 2023.9.0 or later + ``Vector.to_values`` will be removed in a future release. + Use ``Vector.to_coo`` instead. Will be removed in version 2023.9.0 or later Parameters ---------- dtype : Requested dtype for the output values array. indices :bool, default=True - Whether to return indices; will return `None` for indices if `False` + Whether to return indices; will return ``None`` for indices if ``False`` values : bool, default=True - Whether to return values; will return `None` for values if `False` + Whether to return values; will return ``None`` for values if ``False`` sort : bool, default=True Whether to require sorted indices. @@ -487,9 +487,9 @@ def to_coo(self, dtype=None, *, indices=True, values=True, sort=True): dtype : Requested dtype for the output values array. indices :bool, default=True - Whether to return indices; will return `None` for indices if `False` + Whether to return indices; will return ``None`` for indices if ``False`` values : bool, default=True - Whether to return values; will return `None` for values if `False` + Whether to return values; will return ``None`` for values if ``False`` sort : bool, default=True Whether to require sorted indices. @@ -539,7 +539,7 @@ def build(self, indices, values, *, dup_op=None, clear=False, size=None): """Rarely used method to insert values into an existing Vector. The typical use case is to create a new Vector and insert values at the same time using :meth:`from_coo`. - All the arguments are used identically in :meth:`from_coo`, except for `clear`, which + All the arguments are used identically in :meth:`from_coo`, except for ``clear``, which indicates whether to clear the Vector prior to adding the new values. """ # TODO: accept `dtype` keyword to match the dtype of `values`? @@ -695,8 +695,8 @@ def from_values(cls, indices, values, dtype=None, *, size=None, dup_op=None, nam """Create a new Vector from indices and values. .. deprecated:: 2022.11.0 - `Vector.from_values` will be removed in a future release. - Use `Vector.from_coo` instead. Will be removed in version 2023.9.0 or later + ``Vector.from_values`` will be removed in a future release. + Use ``Vector.from_coo`` instead. 
Will be removed in version 2023.9.0 or later Parameters ---------- diff --git a/graphblas/io/_numpy.py b/graphblas/io/_numpy.py index 1c40e1633..954d28df7 100644 --- a/graphblas/io/_numpy.py +++ b/graphblas/io/_numpy.py @@ -11,14 +11,14 @@ def from_numpy(m): # pragma: no cover (deprecated) """Create a sparse Vector or Matrix from a dense numpy array. .. deprecated:: 2023.2.0 - `from_numpy` will be removed in a future release. - Use `Vector.from_dense` or `Matrix.from_dense` instead. + ``from_numpy`` will be removed in a future release. + Use ``Vector.from_dense`` or ``Matrix.from_dense`` instead. Will be removed in version 2023.10.0 or later A value of 0 is considered as "missing". - - m.ndim == 1 returns a `Vector` - - m.ndim == 2 returns a `Matrix` + - m.ndim == 1 returns a ``Vector`` + - m.ndim == 2 returns a ``Matrix`` - m.ndim > 2 raises an error dtype is inferred from m.dtype @@ -65,8 +65,8 @@ def to_numpy(m): # pragma: no cover (deprecated) """Create a dense numpy array from a sparse Vector or Matrix. .. deprecated:: 2023.2.0 - `to_numpy` will be removed in a future release. - Use `Vector.to_dense` or `Matrix.to_dense` instead. + ``to_numpy`` will be removed in a future release. + Use ``Vector.to_dense`` or ``Matrix.to_dense`` instead. Will be removed in version 2023.10.0 or later Missing values will become 0 in the output. diff --git a/graphblas/select/__init__.py b/graphblas/select/__init__.py index c7a1897f5..72aa8d226 100644 --- a/graphblas/select/__init__.py +++ b/graphblas/select/__init__.py @@ -57,9 +57,9 @@ def _resolve_expr(expr, callname, opname): def _match_expr(parent, expr): - """Match expressions to rewrite `A.select(A < 5)` into select expression. + """Match expressions to rewrite ``A.select(A < 5)`` into select expression. - The argument must match the parent, so this _won't_ be rewritten: `A.select(B < 5)` + The argument must match the parent, so this _won't_ be rewritten: ``A.select(B < 5)`` """ args = expr.args op = expr.op @@ -83,7 +83,7 @@ def value(expr): Example usage: >>> gb.select.value(A > 0) - The example will dispatch to `gb.select.valuegt(A, 0)` + The example will dispatch to ``gb.select.valuegt(A, 0)`` while being nicer to read. """ return _resolve_expr(expr, "value", "value") @@ -97,7 +97,7 @@ def row(expr): Example usage: >>> gb.select.row(A <= 5) - The example will dispatch to `gb.select.rowle(A, 5)` + The example will dispatch to ``gb.select.rowle(A, 5)`` while being potentially nicer to read. """ return _resolve_expr(expr, "row", "row") @@ -111,7 +111,7 @@ def column(expr): Example usage: >>> gb.select.column(A <= 5) - The example will dispatch to `gb.select.colle(A, 5)` + The example will dispatch to ``gb.select.colle(A, 5)`` while being potentially nicer to read. """ return _resolve_expr(expr, "column", "col") @@ -125,7 +125,7 @@ def index(expr): Example usage: >>> gb.select.index(v <= 5) - The example will dispatch to `gb.select.indexle(v, 5)` + The example will dispatch to ``gb.select.indexle(v, 5)`` while being potentially nicer to read. """ return _resolve_expr(expr, "index", "index") diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py index 441458a42..ec5a89504 100644 --- a/graphblas/ss/_core.py +++ b/graphblas/ss/_core.py @@ -12,7 +12,7 @@ class _graphblas_ss: - """Used in `_expect_type`.""" + """Used in ``_expect_type``.""" _graphblas_ss.__name__ = "graphblas.ss" @@ -33,8 +33,8 @@ def diag(x, k=0, dtype=None, *, name=None, **opts): The Vector to assign to the diagonal, or the Matrix from which to extract the diagonal. 
k : int, default 0 - Diagonal in question. Use `k>0` for diagonals above the main diagonal, - and `k<0` for diagonals below the main diagonal. + Diagonal in question. Use ``k>0`` for diagonals above the main diagonal, + and ``k<0`` for diagonals below the main diagonal. See Also -------- @@ -71,9 +71,9 @@ def concat(tiles, dtype=None, *, name=None, **opts): Concatenate a 2D list of Matrix objects into a new Matrix, or a 1D list of Vector objects into a new Vector. To concatenate into existing objects, - use ``Matrix.ss.concat`` or `Vector.ss.concat`. + use ``Matrix.ss.concat`` or ``Vector.ss.concat``. - Vectors may be used as `Nx1` Matrix objects when creating a new Matrix. + Vectors may be used as ``Nx1`` Matrix objects when creating a new Matrix. This performs the opposite operation as ``split``. diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py index 24df55e9d..df94c6469 100644 --- a/graphblas/tests/test_io.py +++ b/graphblas/tests/test_io.py @@ -167,7 +167,11 @@ def test_matrix_to_from_networkx(): def test_mmread_mmwrite(engine): if engine == "fmm" and fmm is None: # pragma: no cover (import) pytest.skip("needs fast_matrix_market") - from scipy.io.tests import test_mmio + try: + from scipy.io.tests import test_mmio + except ImportError: + # Test files are mysteriously missing from some conda-forge builds + pytest.skip("scipy.io.tests.test_mmio unavailable :(") p31 = 2**31 p63 = 2**63 diff --git a/graphblas/viz.py b/graphblas/viz.py index d8a96d343..fafeae5f0 100644 --- a/graphblas/viz.py +++ b/graphblas/viz.py @@ -67,7 +67,7 @@ def draw(m): # pragma: no cover def spy(M, *, centered=False, show=True, figure=None, axes=None, figsize=None, **kwargs): - """Plot the sparsity pattern of a Matrix using `matplotlib.spy`. + """Plot the sparsity pattern of a Matrix using ``matplotlib.spy``. See: - https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.spy.html @@ -106,8 +106,8 @@ def spy(M, *, centered=False, show=True, figure=None, axes=None, figsize=None, * def datashade(M, agg="count", *, width=None, height=None, opts_kwargs=None, **kwargs): """Interactive plot of the sparsity pattern of a Matrix using hvplot and datashader. - The `datashader` library rasterizes large data into a 2d grid of pixels. Each pixel - may contain multiple data points, which are combined by an aggregator (`agg="count"`). + The ``datashader`` library rasterizes large data into a 2d grid of pixels. Each pixel + may contain multiple data points, which are combined by an aggregator (``agg="count"``). Common aggregators are "count", "sum", "mean", "min", and "max". 
See full list here: - https://datashader.org/api.html#reductions diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 3809eb805..dda7adbaa 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -7,11 +7,11 @@ conda search 'numpy[channel=conda-forge]>=1.24.3' conda search 'pandas[channel=conda-forge]>=2.0.1' conda search 'scipy[channel=conda-forge]>=1.10.1' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.1.4' +conda search 'awkward[channel=conda-forge]>=2.2.0' conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.5.1' conda search 'numba[channel=conda-forge]>=0.56.4' conda search 'pyyaml[channel=conda-forge]>=6.0' -conda search 'flake8-bugbear[channel=conda-forge]>=23.3.23' +conda search 'flake8-bugbear[channel=conda-forge]>=23.5.9' conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' # conda search 'python[channel=conda-forge]>=3.8 *pypy*' From f8682ffd936b9fef4db896d1d3bf1272285eddff Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Fri, 19 May 2023 13:47:51 -0500 Subject: [PATCH 05/66] blacken docs (and other misc) (#455) * blacken docs (and other misc) --- .github/workflows/test_and_build.yml | 7 +- .pre-commit-config.yaml | 28 +++-- README.md | 7 +- docs/getting_started/primer.rst | 31 ++--- docs/user_guide/init.rst | 3 +- docs/user_guide/operations.rst | 172 ++++++++++++++++++--------- docs/user_guide/operators.rst | 4 +- docs/user_guide/recorder.rst | 4 +- docs/user_guide/udf.rst | 2 +- graphblas/core/matrix.py | 2 +- graphblas/viz.py | 3 +- pyproject.toml | 8 ++ 12 files changed, 176 insertions(+), 95 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 064dd93d8..d129ef26f 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -17,6 +17,10 @@ on: branches: - main +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: rngs: # To achieve consistent coverage, we need a little bit of correlated collaboration. 
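The "correlated collaboration" comment in the ``rngs`` job above refers to seeding each job's random choices from a shared value so that sibling jobs land on complementary configurations. A minimal sketch of that idea, with a made-up seed and job names (nothing here is taken from the workflow itself):

```python
import random

# Two jobs that build their RNG from the same shared seed draw the same value,
# so job B can deliberately run the complementary configuration to job A.
seed = 42  # in CI this might be published by a shared "rngs" job; illustrative only
pick = random.Random(seed).choice(["suitesparse", "suitesparse-vanilla"])
other = {"suitesparse": "suitesparse-vanilla", "suitesparse-vanilla": "suitesparse"}[pick]
print(f"job A tests {pick}; job B covers {other}")
```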
@@ -248,8 +252,7 @@ jobs: fi echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psgver${psgver}" - # TODO: remove `-c numba` when numba 0.57 is properly released on conda-forge - $(command -v mamba || command -v conda) install -c numba packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli \ + $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 10fcca649..4588ed4f4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,10 +5,12 @@ # To update: `pre-commit autoupdate` # - &flake8_dependencies below needs updated manually ci: - # See: https://pre-commit.ci/#configuration - autofix_prs: false - autoupdate_schedule: monthly - skip: [pylint, no-commit-to-branch] + # See: https://pre-commit.ci/#configuration + autofix_prs: false + autoupdate_schedule: monthly + autoupdate_commit_msg: "chore: update pre-commit hooks" + autofix_commit_msg: "style: pre-commit fixes" + skip: [pylint, no-commit-to-branch] fail_fast: true default_language_version: python: python3 @@ -17,15 +19,21 @@ repos: rev: v4.4.0 hooks: - id: check-added-large-files + - id: check-case-conflict + - id: check-merge-conflict + - id: check-symlinks - id: check-ast - id: check-toml - id: check-yaml - id: debug-statements - id: end-of-file-fixer + exclude_types: [svg] - id: mixed-line-ending - id: trailing-whitespace + - id: name-tests-test + args: ["--pytest-test-first"] - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.12.2 + rev: v0.13 hooks: - id: validate-pyproject name: Validate pyproject.toml @@ -58,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.267 + rev: v0.0.269 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -86,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.267 + rev: v0.0.269 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint @@ -101,6 +109,10 @@ repos: hooks: - id: pyroma args: [-n, "10", .] + - repo: https://github.com/shellcheck-py/shellcheck-py + rev: "v0.9.0.2" + hooks: + - id: shellcheck - repo: local hooks: # Add `--hook-stage manual` to pre-commit command to run (very slow) @@ -137,4 +149,4 @@ repos: # hooks: # - id: bandit # -# blacken-docs, blackdoc mypy, pydocstringformatter, velin, flynt, yamllint +# blacken-docs, blackdoc prettier, mypy, pydocstringformatter, velin, flynt, yamllint diff --git a/README.md b/README.md index 570a82de5..13067df6e 100644 --- a/README.md +++ b/README.md @@ -176,8 +176,9 @@ use as well as the blocking/non-blocking mode. If the context is not initialized be performed automatically. 
```python import graphblas as gb + # Context initialization must happen before any other imports -gb.init('suitesparse', blocking=True) +gb.init("suitesparse", blocking=True) # Now we can import other items from graphblas from graphblas import binary, semiring @@ -195,7 +196,7 @@ def force_odd_func(x): return x + 1 return x -unary.register_new('force_odd', force_odd_func) +unary.register_new("force_odd", force_odd_func) v = Vector.from_coo([0, 1, 3], [1, 2, 3]) w = v.apply(unary.force_odd).new() @@ -210,7 +211,7 @@ import graphblas as gb # scipy.sparse matrices A = gb.io.from_scipy_sparse(m) -m = gb.io.to_scipy_sparse(m, format='csr') +m = gb.io.to_scipy_sparse(m, format="csr") # networkx graphs A = gb.io.from_networkx(g) diff --git a/docs/getting_started/primer.rst b/docs/getting_started/primer.rst index 710dca702..104eb5738 100644 --- a/docs/getting_started/primer.rst +++ b/docs/getting_started/primer.rst @@ -89,26 +89,13 @@ makes for faster graph algorithms. # networkx-style storage of an undirected graph G = { - 0: {1: {'weight': 5.6}, - 2: {'weight': 2.3}, - 3: {'weight': 4.6}}, - 1: {0: {'weight': 5.6}, - 2: {'weight': 1.9}, - 3: {'weight': 6.2}}, - 2: {0: {'weight': 2.3}, - 1: {'weight': 1.9}, - 3: {'weight': 3.0}}, - 3: {0: {'weight': 4.6}, - 1: {'weight': 6.2}, - 2: {'weight': 3.0}, - 4: {'weight': 1.4}}, - 4: {3: {'weight': 1.4}, - 5: {'weight': 4.4}, - 6: {'weight': 1.0}}, - 5: {4: {'weight': 4.4}, - 6: {'weight': 2.8}}, - 6: {4: {'weight': 1.0}, - 5: {'weight': 2.8}} + 0: {1: {"weight": 5.6}, 2: {"weight": 2.3}, 3: {"weight": 4.6}}, + 1: {0: {"weight": 5.6}, 2: {"weight": 1.9}, 3: {"weight": 6.2}}, + 2: {0: {"weight": 2.3}, 1: {"weight": 1.9}, 3: {"weight": 3.0}}, + 3: {0: {"weight": 4.6}, 1: {"weight": 6.2}, 2: {"weight": 3.0}, 4: {"weight": 1.4}}, + 4: {3: {"weight": 1.4}, 5: {"weight": 4.4}, 6: {"weight": 1.0}}, + 5: {4: {"weight": 4.4}, 6: {"weight": 2.8}}, + 6: {4: {"weight": 1.0}, 5: {"weight": 2.8}}, } An alternative way to store a graph is as an adjacency matrix. Each node becomes both a row @@ -240,7 +227,9 @@ node 0. [0, 0, 1, 1, 2], [1, 2, 2, 3, 3], [2.0, 5.0, 1.5, 4.25, 0.5], - nrows=4, ncols=4) + nrows=4, + ncols=4 + ) v = Vector.from_coo([start_node], [0.0], size=4) # Compute SSSP diff --git a/docs/user_guide/init.rst b/docs/user_guide/init.rst index 62f81b50f..ffb6a3463 100644 --- a/docs/user_guide/init.rst +++ b/docs/user_guide/init.rst @@ -8,8 +8,9 @@ GraphBLAS must be initialized before it can be used. This is done with the .. code-block:: python import graphblas as gb + # Context initialization must happen before any other imports - gb.init('suitesparse', blocking=False) + gb.init("suitesparse", blocking=False) # Now we can import other items from graphblas from graphblas import binary, semiring diff --git a/docs/user_guide/operations.rst b/docs/user_guide/operations.rst index 9ee76ab4c..ede2efb06 100644 --- a/docs/user_guide/operations.rst +++ b/docs/user_guide/operations.rst @@ -26,14 +26,22 @@ a Vector is treated as an nx1 column matrix. .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2], [1, 2, 2, 3, 3], - [2., 5., 1.5, 4.25, 0.5], nrows=4, ncols=4) - B = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2, 3, 3], [1, 2, 0, 1, 1, 2, 0, 1], - [3., 2., 9., 6., 3., 1., 0., 5.]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2], + [1, 2, 2, 3, 3], + [2., 5., 1.5, 4.25, 0.5], + nrows=4, + ncols=4 + ) + B = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2, 3, 3], + [1, 2, 0, 1, 1, 2, 0, 1], + [3., 2., 9., 6., 3., 1., 0., 5.] 
+ ) C = gb.Matrix(float, A.nrows, B.ncols) # These are equivalent - C << A.mxm(B, op='min_plus') # method style + C << A.mxm(B, op="min_plus") # method style C << gb.semiring.min_plus(A @ B) # functional style .. csv-table:: A @@ -67,13 +75,18 @@ a Vector is treated as an nx1 column matrix. .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2], [1, 2, 2, 3, 3], - [2., 5., 1.5, 4.25, 0.5], nrows=4, ncols=4) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2], + [1, 2, 2, 3, 3], + [2., 5., 1.5, 4.25, 0.5], + nrows=4, + ncols=4 + ) v = gb.Vector.from_coo([0, 1, 3], [10., 20., 40.]) w = gb.Vector(float, A.nrows) # These are equivalent - w << A.mxv(v, op='plus_times') # method style + w << A.mxv(v, op="plus_times") # method style w << gb.semiring.plus_times(A @ v) # functional style .. csv-table:: A @@ -102,12 +115,15 @@ a Vector is treated as an nx1 column matrix. .. code-block:: python v = gb.Vector.from_coo([0, 1, 3], [10., 20., 40.]) - B = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2, 3, 3], [1, 2, 0, 1, 1, 2, 0, 1], - [3., 2., 9., 6., 3., 1., 0., 5.]) + B = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2, 3, 3], + [1, 2, 0, 1, 1, 2, 0, 1], + [3., 2., 9., 6., 3., 1., 0., 5.] + ) u = gb.Vector(float, B.ncols) # These are equivalent - u << v.vxm(B, op='plus_plus') # method style + u << v.vxm(B, op="plus_plus") # method style u << gb.semiring.plus_plus(v @ B) # functional style .. csv-table:: v @@ -148,14 +164,20 @@ Example usage: .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2], [1, 2, 0, 2, 1], - [2.0, 5.0, 1.5, 4.0, 0.5]) - B = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 1, 2], - [3., -2., 0., 6., 3., 1.]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2], + [1, 2, 0, 2, 1], + [2., 5., 1.5, 4., 0.5] + ) + B = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 2, 0, 1, 1, 2], + [3., -2., 0., 6., 3., 1.] + ) C = gb.Matrix(float, A.nrows, A.ncols) # These are equivalent - C << A.ewise_mult(B, op='min') # method style + C << A.ewise_mult(B, op="min") # method style C << gb.binary.min(A & B) # functional style .. csv-table:: A @@ -225,14 +247,21 @@ should be used with the functional syntax, ``left_default`` and ``right_default` .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 0, 1, 1], [0, 1, 2, 0, 2], - [9.0, 2.0, 5.0, 1.5, 4.0], nrows=3) - B = gb.Matrix.from_coo([0, 0, 0, 2, 2, 2], [0, 1, 2, 0, 1, 2], - [4., 0., -2., 6., 3., 1.]) + A = gb.Matrix.from_coo( + [0, 0, 0, 1, 1], + [0, 1, 2, 0, 2], + [9., 2., 5., 1.5, 4.], + nrows=3 + ) + B = gb.Matrix.from_coo( + [0, 0, 0, 2, 2, 2], + [0, 1, 2, 0, 1, 2], + [4., 0., -2., 6., 3., 1.] + ) C = gb.Matrix(float, A.nrows, A.ncols) # These are equivalent - C << A.ewise_add(B, op='minus') # method style + C << A.ewise_add(B, op="minus") # method style C << gb.binary.minus(A | B) # functional style .. csv-table:: A @@ -263,14 +292,21 @@ should be used with the functional syntax, ``left_default`` and ``right_default` .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 0, 1, 1], [0, 1, 2, 0, 2], - [9.0, 2.0, 5.0, 1.5, 4.0], nrows=3) - B = gb.Matrix.from_coo([0, 0, 0, 2, 2, 2], [0, 1, 2, 0, 1, 2], - [4., 0., -2., 6., 3., 1.]) + A = gb.Matrix.from_coo( + [0, 0, 0, 1, 1], + [0, 1, 2, 0, 2], + [9., 2., 5., 1.5, 4.], + nrows=3 + ) + B = gb.Matrix.from_coo( + [0, 0, 0, 2, 2, 2], + [0, 1, 2, 0, 1, 2], + [4., 0., -2., 6., 3., 1.] 
+ ) C = gb.Matrix(float, A.nrows, A.ncols) # These are equivalent - C << A.ewise_union(B, op='minus', left_default=0, right_default=0) # method style + C << A.ewise_union(B, op="minus", left_default=0, right_default=0) # method style C << gb.binary.minus(A | B, left_default=0, right_default=0) # functional style .. csv-table:: A @@ -341,8 +377,11 @@ Matrix List Example: .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 0, 2], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 2, 0, 1, 0, 2], + [2., 5., 1.5, 4., 0.5, -7.] + ) C = gb.Matrix(float, 2, A.ncols) C << A[[0, 2], :] @@ -382,11 +421,16 @@ Matrix-Matrix Assignment Example: .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 0, 2], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) - B = gb.Matrix.from_coo([0, 0, 1, 1], [0, 1, 0, 1], - [-99., -98., -97., -96.]) - + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 2, 0, 1, 0, 2], + [2., 5., 1.5, 4., 0.5, -7.] + ) + B = gb.Matrix.from_coo( + [0, 0, 1, 1], + [0, 1, 0, 1], + [-99., -98., -97., -96.] + ) A[::2, ::2] << B .. csv-table:: A @@ -416,8 +460,11 @@ Matrix-Vector Assignment Example: .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 0, 2], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 2, 0, 1, 0, 2], + [2., 5., 1.5, 4., 0.5, -7.] + ) v = gb.Vector.from_coo([2], [-99.]) A[1, :] << v @@ -530,7 +577,7 @@ function with the collection as the argument. w = gb.Vector(float, v.size) # These are all equivalent - w << v.apply('minus', right=15) + w << v.apply("minus", right=15) w << gb.binary.minus(v, right=15) w << v - 15 @@ -557,12 +604,15 @@ Upper Triangle Example: .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 1, 2], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 2, 0, 2, 1, 2], + [2., 5., 1.5, 4., 0.5, -7.] + ) C = gb.Matrix(float, A.nrows, A.ncols) # These are equivalent - C << A.select('triu') + C << A.select("triu") C << gb.select.triu(A) .. csv-table:: A @@ -589,7 +639,7 @@ Select by Value Example: w = gb.Vector(float, v.size) # These are equivalent - w << v.select('>=', 5) + w << v.select(">=", 5) w << gb.select.value(v >= 5) .. csv-table:: v @@ -618,11 +668,14 @@ A monoid or aggregator is used to perform the reduction. .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 3, 0, 1, 0, 1], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 3, 0, 1, 0, 1], + [2., 5., 1.5, 4., 0.5, -7.] + ) w = gb.Vector(float, A.ncols) - w << A.reduce_columnwise('times') + w << A.reduce_columnwise("times") .. csv-table:: A :class: inline @@ -642,11 +695,14 @@ A monoid or aggregator is used to perform the reduction. .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 3, 0, 1, 0, 1], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 3, 0, 1, 0, 1], + [2., 5., 1.5, 4., 0.5, -7.] + ) s = gb.Scalar(float) - s << A.reduce_scalar('max') + s << A.reduce_scalar("max") .. csv-table:: A :class: inline @@ -670,7 +726,7 @@ A monoid or aggregator is used to perform the reduction. s = gb.Scalar(int) # These are equivalent - s << v.reduce('argmin') + s << v.reduce("argmin") s << gb.agg.argmin(v) .. csv-table:: v @@ -695,8 +751,11 @@ To force the transpose to be computed by itself, use it by itself as the right-h .. 
code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 3, 0, 1, 0, 2], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 3, 0, 1, 0, 2], + [2., 5., 1.5, 4., 0.5, -7.] + ) C = gb.Matrix(float, A.ncols, A.nrows) C << A.T @@ -728,12 +787,19 @@ The Kronecker product uses a binary operator. .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1], [0, 1, 0], [1., -2., 3.]) - B = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 0, 2], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1], + [0, 1, 0], + [1., -2., 3.] + ) + B = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 2, 0, 1, 0, 2], + [2., 5., 1.5, 4., 0.5, -7.] + ) C = gb.Matrix(float, A.nrows * B.nrows, A.ncols * B.ncols) - C << A.kronecker(B, 'times') + C << A.kronecker(B, "times") .. csv-table:: A :class: inline diff --git a/docs/user_guide/operators.rst b/docs/user_guide/operators.rst index 84fe9312c..9499562f2 100644 --- a/docs/user_guide/operators.rst +++ b/docs/user_guide/operators.rst @@ -273,7 +273,7 @@ Example usage: minval = v.reduce(gb.monoid.min).value # This will force the FP32 version of min to be used, possibly type casting the elements - minvalFP32 = v.reduce(gb.monoid.min['FP32']).value + minvalFP32 = v.reduce(gb.monoid.min["FP32"]).value The gb.op Namespace @@ -431,7 +431,7 @@ the power of y for overlapping elements. .. code-block:: python - v ** w + v**w .. csv-table:: :header: 0,1,2,3,4,5 diff --git a/docs/user_guide/recorder.rst b/docs/user_guide/recorder.rst index ee6d2bbb9..3355d93ce 100644 --- a/docs/user_guide/recorder.rst +++ b/docs/user_guide/recorder.rst @@ -25,7 +25,9 @@ Instead, only the calls from the last iteration will be returned. [0, 0, 1, 1, 2], [1, 2, 2, 3, 3], [2.0, 5.0, 1.5, 4.25, 0.5], - nrows=4, ncols=4) + nrows=4, + ncols=4 + ) v = Vector.from_coo([start_node], [0.0], size=4) # Compute SSSP diff --git a/docs/user_guide/udf.rst b/docs/user_guide/udf.rst index 6c72535fc..b96097a85 100644 --- a/docs/user_guide/udf.rst +++ b/docs/user_guide/udf.rst @@ -21,7 +21,7 @@ Example user-defined UnaryOp: return x + 1 return x - unary.register_new('force_odd', force_odd_func) + unary.register_new("force_odd", force_odd_func) v = Vector.from_coo([0, 1, 3, 4, 5], [1, 2, 3, 8, 14]) w = v.apply(unary.force_odd).new() diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index b74ca347a..2542ad00e 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -457,7 +457,7 @@ def to_values(self, dtype=None, *, rows=True, columns=True, values=True, sort=Tr Requested dtype for the output values array. 
rows : bool, default=True Whether to return rows; will return ``None`` for rows if ``False`` - columns :bool, default=True + columns : bool, default=True Whether to return columns; will return ``None`` for columns if ``False`` values : bool, default=True Whether to return values; will return ``None`` for values if ``False`` diff --git a/graphblas/viz.py b/graphblas/viz.py index fafeae5f0..f0367e119 100644 --- a/graphblas/viz.py +++ b/graphblas/viz.py @@ -35,8 +35,7 @@ def _get_imports(names, within): except ImportError: modname = _LAZY_IMPORTS[name].split(".")[0] raise ImportError(f"`{within}` requires {modname} to be installed") from None - finally: - globals()[name] = val + globals()[name] = val rv.append(val) if is_string: return rv[0] diff --git a/pyproject.toml b/pyproject.toml index 245dc35bd..9d635c778 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -168,11 +168,18 @@ known_first_party = "graphblas" line_length = 100 [tool.pytest.ini_options] +minversion = "6.0" testpaths = "graphblas/tests" xfail_strict = true +addopts = [ + "--strict-config", # Force error if config is mispelled + "--strict-markers", # Force error if marker is mispelled (must be defined in config) + "-ra", # Print summary of all fails/errors +] markers = [ "slow: Skipped unless --runslow passed", ] +log_cli_level = "info" filterwarnings = [ # See: https://docs.python.org/3/library/warnings.html#describing-warning-filters # and: https://docs.pytest.org/en/7.2.x/how-to/capture-warnings.html#controlling-warnings @@ -342,6 +349,7 @@ ignore = [ "TID", # flake8-tidy-imports (Rely on isort and our own judgement) "TCH", # flake8-type-checking (Note: figure out type checking later) "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) + "TD", # flake8-todos (Maybe okay to add some of these) "ERA", # eradicate (We like code in comments!) 
"PD", # pandas-vet (Intended for scripts that use pandas, not libraries) ] From f0e03249fab46f36e12b66806a01f6e8e94cfde1 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Fri, 2 Jun 2023 19:47:47 -0500 Subject: [PATCH 06/66] Don't install graphblas=8.0 yet (#459) * Don't install graphblas=8.0 yet Fixes #458 * Don't install python-suitesparse-graphblas from upstream (which needs ss:gb 8.0) --- .github/workflows/test_and_build.yml | 19 +++++++++++-------- .pre-commit-config.yaml | 6 +++--- graphblas/tests/test_vector.py | 2 +- scripts/check_versions.sh | 8 ++++---- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index d129ef26f..ac541294f 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -17,9 +17,9 @@ on: branches: - main -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true +# concurrency: +# group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} +# cancel-in-progress: true jobs: rngs: @@ -131,9 +131,9 @@ jobs: source upstream weights: | - 1 - 1 - 1 + 1000000 + 1000000 + 1000000 1 - name: Setup mamba uses: conda-incubator/setup-miniconda@v2 @@ -170,7 +170,7 @@ jobs: nxver=$(python -c 'import random ; print(random.choice(["=2.7", "=2.8", "=3.0", "=3.1", ""]))') yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') sparsever=$(python -c 'import random ; print(random.choice(["=0.13", "=0.14", ""]))') - fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", ""]))') + fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", ""]))') if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.8') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') @@ -252,13 +252,14 @@ jobs: fi echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psgver${psgver}" + set -x # echo on $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ ${{ matrix.slowtask == 'notebooks' && 'matplotlib nbconvert jupyter "ipython>=7"' || '' }} \ ${{ steps.sourcetype.outputs.selected == 'upstream' && 'cython' || '' }} \ - ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4.0"' || '' }} \ + ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas=7.4"' || '' }} \ ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} - name: Build extension module run: | @@ -308,6 +309,7 @@ jobs: if [[ $H && $bizarro ]] ; then if [[ $macos ]] ; then echo " $suitesparse" ; elif [[ $windows ]] ; then echo " $vanilla" ; fi ; fi) echo ${args} pytest -v --pyargs suitesparse_graphblas + set -x # echo on coverage run -m pytest --color=yes --randomly -v ${args} \ ${{ matrix.slowtask == 'pytest_normal' && '--runslow' || '' }} - name: Unit tests (bizarro scalars) @@ -343,6 +345,7 @@ jobs: if [[ $H && $normal ]] ; then if [[ $macos ]] ; then echo " $suitesparse" ; elif [[ $windows ]] ; then echo " $vanilla" ; fi ; fi)$( \ if [[ $H && $bizarro ]] ; then if [[ $macos ]] ; then echo 
" $vanilla" ; elif [[ $windows ]] ; then echo " $suitesparse" ; fi ; fi) echo ${args} + set -x # echo on coverage run -a -m pytest --color=yes --randomly -v ${args} \ ${{ matrix.slowtask == 'pytest_bizarro' && '--runslow' || '' }} git checkout . # Undo changes to scalar default diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4588ed4f4..4d0e5c0b6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.269 + rev: v0.0.270 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.269 + rev: v0.0.270 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint @@ -110,7 +110,7 @@ repos: - id: pyroma args: [-n, "10", .] - repo: https://github.com/shellcheck-py/shellcheck-py - rev: "v0.9.0.2" + rev: "v0.9.0.5" hooks: - id: shellcheck - repo: local diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index bd2083fd1..36ab346b8 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -1432,7 +1432,7 @@ def test_vector_index_with_scalar(): s0 = Scalar.from_value(0, dtype=dtype) w = v[[s1, s0]].new() assert w.isequal(expected) - for dtype in ["bool", "fp32", "fp64"] + ["fc32", "fc64"] if dtypes._supports_complex else []: + for dtype in ["bool", "fp32", "fp64"] + (["fc32", "fc64"] if dtypes._supports_complex else []): s = Scalar.from_value(1, dtype=dtype) with pytest.raises(TypeError, match="An integer is required for indexing"): v[s] diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index dda7adbaa..af72f9655 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -4,13 +4,13 @@ # This may be helpful when updating dependency versions in CI. # Tip: add `--json` for more information. 
conda search 'numpy[channel=conda-forge]>=1.24.3' -conda search 'pandas[channel=conda-forge]>=2.0.1' +conda search 'pandas[channel=conda-forge]>=2.0.2' conda search 'scipy[channel=conda-forge]>=1.10.1' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.2.0' +conda search 'awkward[channel=conda-forge]>=2.2.1' conda search 'sparse[channel=conda-forge]>=0.14.0' -conda search 'fast_matrix_market[channel=conda-forge]>=1.5.1' -conda search 'numba[channel=conda-forge]>=0.56.4' +conda search 'fast_matrix_market[channel=conda-forge]>=1.6.0' +conda search 'numba[channel=conda-forge]>=0.57.0' conda search 'pyyaml[channel=conda-forge]>=6.0' conda search 'flake8-bugbear[channel=conda-forge]>=23.5.9' conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' From a762caef6e694746988c14945c2cb223b7d1b414 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Thu, 22 Jun 2023 00:52:33 -0500 Subject: [PATCH 07/66] Fix testing fix_test_suitesparse_graphblas (#464) * Fix testing fix_test_suitesparse_graphblas * Support numpy 1.25 (`np.find_common_type` is deprecated) * Ignore `np.find_common_type` DeprecationWarning for now, b/c other dependencies use it * Don't make xfail strict while awkward is failing * Don't install scipy.sparse 1.8 with numpy 1.25 * Retry coveralls again, and don't fail if retry fails --- .github/workflows/test_and_build.yml | 35 ++++++++++++++++++++++------ graphblas/dtypes.py | 20 +++++----------- graphblas/tests/test_io.py | 2 ++ pyproject.toml | 10 ++++++-- scripts/check_versions.sh | 2 +- 5 files changed, 45 insertions(+), 24 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index ac541294f..bfad18a2e 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -85,7 +85,7 @@ jobs: shell: bash -l {0} strategy: # To "stress test" in CI, set `fail-fast` to `false` and perhaps add more items to `matrix.slowtask` - fail-fast: true + fail-fast: false # Every service seems super-flaky right now... # The build matrix is [os]x[slowtask] and then randomly chooses [pyver] and [sourcetype]. # This should ensure we'll have full code coverage (i.e., no chance of getting unlucky), # since we need to run all slow tests on Windows and non-Windoes OSes. 
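The commit message above notes that ``np.find_common_type`` is deprecated in numpy 1.25; the ``dtypes.py`` hunk later in this patch replaces it with ``np.promote_types`` for array/array promotion and ``np.result_type`` with a 0-d array standing in for a scalar operand. A rough sketch of the difference, under numpy 1.x value-based promotion rules (the outputs noted in comments assume numpy 1.x; NEP 50 in numpy 2 changes scalar promotion):

```python
import numpy as np

# Array/array promotion: smallest dtype that can hold both uint8 and int8.
print(np.promote_types(np.uint8, np.int8))  # int16

# Scalar/array promotion: a 0-d array triggers value-based casting in numpy 1.x,
# so a uint8 operand holding 0 fits the int8 side without widening.
print(np.result_type(np.array(0, np.uint8), np.int8))  # int8 under numpy 1.x
```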
@@ -177,17 +177,17 @@ jobs: pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') else # Python 3.11 - npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", ""]))') @@ -214,7 +214,12 @@ jobs: else psgver="" fi - if [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then + if [[ ${npver} == "=1.25" ]] ; then + numbaver="" + if [[ ${spver} == "=1.8" ]] ; then + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", ""]))') + fi + elif [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then numbaver=$(python -c 'import random ; print(random.choice(["=0.57", ""]))') elif [[ ${npver} == "=1.21" ]] ; then numbaver=$(python -c 'import random ; print(random.choice(["=0.55", "=0.56", "=0.57", ""]))') @@ -246,6 +251,11 @@ jobs: pdver="" yamlver="" fi + elif [[ ${npver} == "=1.25" ]] ; then + numba="" + numbaver=NA + sparse="" + sparsever=NA else numba=numba${numbaver} sparse=sparse${sparsever} @@ -308,7 +318,7 @@ jobs: if [[ $H && $normal ]] ; then if [[ $macos ]] ; then echo " $vanilla" ; elif [[ $windows ]] ; then echo " $suitesparse" ; fi ; fi)$( \ if [[ $H && $bizarro ]] ; then if [[ $macos ]] ; then echo " $suitesparse" ; elif [[ $windows ]] ; then echo " $vanilla" ; fi ; fi) echo ${args} - pytest -v --pyargs suitesparse_graphblas + (cd .. 
&& pytest -v --pyargs suitesparse_graphblas) # Don't use our conftest.py set -x # echo on coverage run -m pytest --color=yes --randomly -v ${args} \ ${{ matrix.slowtask == 'pytest_normal' && '--runslow' || '' }} @@ -400,7 +410,18 @@ jobs: - name: Coverage2 id: coverageAttempt2 if: steps.coverageAttempt1.outcome == 'failure' - continue-on-error: false + continue-on-error: true + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + COVERALLS_FLAG_NAME: ${{ matrix.os }}/${{ matrix.slowtask }} + COVERALLS_PARALLEL: true + run: | + coveralls --service=github + - name: Coverage3 + id: coverageAttempt3 + if: steps.coverageAttempt2.outcome == 'failure' + # Continue even if it failed 3 times... (sheesh! use codecov instead) + continue-on-error: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} COVERALLS_FLAG_NAME: ${{ matrix.os }}/${{ matrix.slowtask }} diff --git a/graphblas/dtypes.py b/graphblas/dtypes.py index 920610b95..61b297c13 100644 --- a/graphblas/dtypes.py +++ b/graphblas/dtypes.py @@ -1,8 +1,8 @@ import warnings as _warnings import numpy as _np -from numpy import find_common_type as _find_common_type from numpy import promote_types as _promote_types +from numpy import result_type as _result_type from . import backend from .core import NULL as _NULL @@ -389,19 +389,11 @@ def unify(type1, type2, *, is_left_scalar=False, is_right_scalar=False): if type1 is type2: return type1 if is_left_scalar: - scalar_types = [type1.np_type] - array_types = [] - elif not is_right_scalar: - # Using `promote_types` is faster than `find_common_type` - return lookup_dtype(_promote_types(type1.np_type, type2.np_type)) - else: - scalar_types = [] - array_types = [type1.np_type] - if is_right_scalar: - scalar_types.append(type2.np_type) - else: - array_types.append(type2.np_type) - return lookup_dtype(_find_common_type(array_types, scalar_types)) + if not is_right_scalar: + return lookup_dtype(_result_type(_np.array(0, type1.np_type), type2.np_type)) + elif is_right_scalar: + return lookup_dtype(_result_type(type1.np_type, _np.array(0, type2.np_type))) + return lookup_dtype(_promote_types(type1.np_type, type2.np_type)) def _default_name(dtype): diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py index df94c6469..bda41759b 100644 --- a/graphblas/tests/test_io.py +++ b/graphblas/tests/test_io.py @@ -369,6 +369,7 @@ def test_scipy_sparse(): @pytest.mark.skipif("not ak") +@pytest.mark.xfail(reason="Need to investigate test failure") def test_awkward_roundtrip(): # Vector v = gb.Vector.from_coo([1, 3, 5], [20, 21, -5], size=22) @@ -390,6 +391,7 @@ def test_awkward_roundtrip(): @pytest.mark.skipif("not ak") +@pytest.mark.xfail(reason="Need to investigate test failure") def test_awkward_iso_roundtrip(): # Vector v = gb.Vector.from_coo([1, 3, 5], [20, 20, 20], size=22) diff --git a/pyproject.toml b/pyproject.toml index 9d635c778..ba8ec0095 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -170,7 +170,7 @@ line_length = 100 [tool.pytest.ini_options] minversion = "6.0" testpaths = "graphblas/tests" -xfail_strict = true +xfail_strict = false # TODO: re-enable this when awkward test failures are fixed addopts = [ "--strict-config", # Force error if config is mispelled "--strict-markers", # Force error if marker is mispelled (must be defined in config) @@ -184,6 +184,7 @@ filterwarnings = [ # See: https://docs.python.org/3/library/warnings.html#describing-warning-filters # and: https://docs.pytest.org/en/7.2.x/how-to/capture-warnings.html#controlling-warnings "error", + # sparse 0.14.0 (2022-02-24) began 
raising this warning; it has been reported and fixed upstream. "ignore:coords should be an ndarray. This will raise a ValueError:DeprecationWarning:sparse._coo.core", @@ -191,7 +192,8 @@ filterwarnings = [ # https://setuptools.pypa.io/en/latest/history.html#v67-3-0 # MAINT: check if this is still necessary in 2025 "ignore:Deprecated call to `pkg_resources.declare_namespace:DeprecationWarning:pkg_resources", - # And this deprecation warning was added in setuptools v67.5.0 (8 Mar 2023). See: + + # This deprecation warning was added in setuptools v67.5.0 (8 Mar 2023). See: # https://setuptools.pypa.io/en/latest/history.html#v67-5-0 "ignore:pkg_resources is deprecated as an API:DeprecationWarning:pkg_resources", @@ -199,6 +201,10 @@ filterwarnings = [ "ignore:module 'sre_parse' is deprecated:DeprecationWarning:", "ignore:module 'sre_constants' is deprecated:DeprecationWarning:", + # numpy 1.25.0 (2023-06-17) deprecated `np.find_common_type`; many other dependencies use it. + # See if we can remove this filter in 2025. + "ignore:np.find_common_type is deprecated:DeprecationWarning:", + # pypy gives this warning "ignore:can't resolve package from __spec__ or __package__:ImportWarning:", ] diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index af72f9655..f0e648fd9 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -3,7 +3,7 @@ # Use, adjust, copy/paste, etc. as necessary to answer your questions. # This may be helpful when updating dependency versions in CI. # Tip: add `--json` for more information. -conda search 'numpy[channel=conda-forge]>=1.24.3' +conda search 'numpy[channel=conda-forge]>=1.25.0' conda search 'pandas[channel=conda-forge]>=2.0.2' conda search 'scipy[channel=conda-forge]>=1.10.1' conda search 'networkx[channel=conda-forge]>=3.1' From f89c72dc3283a41933afd2ccbcdf82e6a371dd70 Mon Sep 17 00:00:00 2001 From: Sultan Orazbayev Date: Thu, 22 Jun 2023 05:55:33 +0000 Subject: [PATCH 08/66] Add missing CLI keyword. (#462) --- docs/contributor_guide/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/contributor_guide/index.rst b/docs/contributor_guide/index.rst index e8078f933..3b94f2f35 100644 --- a/docs/contributor_guide/index.rst +++ b/docs/contributor_guide/index.rst @@ -58,7 +58,7 @@ Here are instructions for two popular environment managers: :: # Create a conda environment named ``graphblas-dev`` using environment.yml in the repository root - conda create -f environment.yml + conda env create -f environment.yml # Activate it conda activate graphblas-dev # Install python-graphblas from source From 6bbf0cd51f509850565f9c326de2287c090e7538 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Fri, 23 Jun 2023 09:09:19 -0500 Subject: [PATCH 09/66] xfail awkward tests when using numpy 1.25 (#467) --- .github/workflows/test_and_build.yml | 2 +- graphblas/tests/test_io.py | 4 ++-- pyproject.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index bfad18a2e..d0c3f71fb 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -421,7 +421,7 @@ jobs: id: coverageAttempt3 if: steps.coverageAttempt2.outcome == 'failure' # Continue even if it failed 3 times... (sheesh! 
use codecov instead) - continue-on-error: true + continue-on-error: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} COVERALLS_FLAG_NAME: ${{ matrix.os }}/${{ matrix.slowtask }} diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py index bda41759b..671b12bd6 100644 --- a/graphblas/tests/test_io.py +++ b/graphblas/tests/test_io.py @@ -369,7 +369,7 @@ def test_scipy_sparse(): @pytest.mark.skipif("not ak") -@pytest.mark.xfail(reason="Need to investigate test failure") +@pytest.mark.xfail(np.__version__[:5] == "1.25.", reason="awkward bug with numpy 1.25") def test_awkward_roundtrip(): # Vector v = gb.Vector.from_coo([1, 3, 5], [20, 21, -5], size=22) @@ -391,7 +391,7 @@ def test_awkward_roundtrip(): @pytest.mark.skipif("not ak") -@pytest.mark.xfail(reason="Need to investigate test failure") +@pytest.mark.xfail(np.__version__[:5] == "1.25.", reason="awkward bug with numpy 1.25") def test_awkward_iso_roundtrip(): # Vector v = gb.Vector.from_coo([1, 3, 5], [20, 20, 20], size=22) diff --git a/pyproject.toml b/pyproject.toml index ba8ec0095..9e57b8296 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -170,7 +170,7 @@ line_length = 100 [tool.pytest.ini_options] minversion = "6.0" testpaths = "graphblas/tests" -xfail_strict = false # TODO: re-enable this when awkward test failures are fixed +xfail_strict = true addopts = [ "--strict-config", # Force error if config is mispelled "--strict-markers", # Force error if marker is mispelled (must be defined in config) From cd451978361504a62b3987afa2504d8a357817bc Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 28 Jun 2023 09:27:56 -0500 Subject: [PATCH 10/66] Backport many of the SS:GB 8 changes to run on 7 (#478) * Backport many of the SS:GB 8 changes to run on 7 * Make fast_matrix_market <1.7 and scipy >=1.11 play nicely together * fix mmread to handle sparse and dense arrays --- .github/workflows/imports.yml | 4 +- .github/workflows/publish_pypi.yml | 2 +- .github/workflows/test_and_build.yml | 21 +-- .pre-commit-config.yaml | 16 +- docs/env.yml | 2 +- graphblas/binary/ss.py | 2 + graphblas/core/base.py | 6 +- graphblas/core/descriptor.py | 1 + graphblas/core/expr.py | 2 +- graphblas/core/matrix.py | 4 +- graphblas/core/operator/binary.py | 6 +- graphblas/core/operator/indexunary.py | 7 +- graphblas/core/operator/semiring.py | 4 +- graphblas/core/operator/unary.py | 6 +- graphblas/core/ss/config.py | 57 +++++-- graphblas/core/ss/descriptor.py | 1 - graphblas/core/ss/matrix.py | 3 +- graphblas/core/ss/vector.py | 3 +- graphblas/core/vector.py | 7 +- graphblas/dtypes/__init__.py | 43 +++++ graphblas/{dtypes.py => dtypes/_core.py} | 196 +++++++++++------------ graphblas/dtypes/ss.py | 0 graphblas/indexunary/__init__.py | 14 +- graphblas/indexunary/ss.py | 5 + graphblas/io/_matrixmarket.py | 15 +- graphblas/monoid/__init__.py | 14 +- graphblas/monoid/ss.py | 5 + graphblas/op/ss.py | 2 + graphblas/select/__init__.py | 14 +- graphblas/select/ss.py | 5 + graphblas/semiring/ss.py | 2 + graphblas/ss/_core.py | 6 +- graphblas/tests/conftest.py | 22 +++ graphblas/tests/test_dtype.py | 27 +++- graphblas/tests/test_io.py | 18 ++- graphblas/tests/test_matrix.py | 2 +- graphblas/tests/test_op.py | 14 +- graphblas/tests/test_vector.py | 11 +- graphblas/unary/ss.py | 2 + pyproject.toml | 4 + scripts/check_versions.sh | 10 +- 41 files changed, 408 insertions(+), 177 deletions(-) create mode 100644 graphblas/dtypes/__init__.py rename graphblas/{dtypes.py => dtypes/_core.py} (69%) create mode 100644 graphblas/dtypes/ss.py create mode 100644 
graphblas/indexunary/ss.py create mode 100644 graphblas/monoid/ss.py create mode 100644 graphblas/select/ss.py diff --git a/.github/workflows/imports.yml b/.github/workflows/imports.yml index 2b0b0ed9f..18e6f637c 100644 --- a/.github/workflows/imports.yml +++ b/.github/workflows/imports.yml @@ -54,5 +54,7 @@ jobs: python-version: ${{ needs.rngs.outputs.pyver }} # python-version: ${{ matrix.python-version }} - run: python -m pip install --upgrade pip + # - run: pip install --pre suitesparse-graphblas # Use if we need pre-release - run: pip install -e .[default] - - run: ./scripts/test_imports.sh + - name: Run test imports + run: ./scripts/test_imports.sh diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index eca456c28..ffac645f5 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -35,7 +35,7 @@ jobs: - name: Check with twine run: python -m twine check --strict dist/* - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@v1.8.6 + uses: pypa/gh-action-pypi-publish@v1.8.7 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index d0c3f71fb..209060521 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -85,7 +85,7 @@ jobs: shell: bash -l {0} strategy: # To "stress test" in CI, set `fail-fast` to `false` and perhaps add more items to `matrix.slowtask` - fail-fast: false # Every service seems super-flaky right now... + fail-fast: true # The build matrix is [os]x[slowtask] and then randomly chooses [pyver] and [sourcetype]. # This should ensure we'll have full code coverage (i.e., no chance of getting unlucky), # since we need to run all slow tests on Windows and non-Windoes OSes. 
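PATCH 09's ``test_io.py`` hunks gate the awkward round-trip xfails on the installed numpy version. A minimal runnable sketch of that marker pattern; the test body is a hypothetical stand-in, but the condition mirrors the one used in the diff:

```python
import numpy as np
import pytest

# The marker only takes effect when the installed numpy is from the
# affected 1.25 release line.
@pytest.mark.xfail(np.__version__[:5] == "1.25.", reason="awkward bug with numpy 1.25")
def test_roundtrip_placeholder():  # hypothetical stand-in for the awkward tests
    assert np.add(1, 1) == 2
```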
@@ -170,7 +170,7 @@ jobs: nxver=$(python -c 'import random ; print(random.choice(["=2.7", "=2.8", "=3.0", "=3.1", ""]))') yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') sparsever=$(python -c 'import random ; print(random.choice(["=0.13", "=0.14", ""]))') - fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", ""]))') + fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.8') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') @@ -178,17 +178,17 @@ jobs: akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') else # Python 3.11 npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", ""]))') fi @@ -204,20 +204,20 @@ jobs: # But, it's still useful for us to test with different versions! 
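The version-pinning lines above use inline ``python -c`` one-liners built on ``random.choice``, where the empty string means "no pin". An equivalent, more readable form of the same trick (package and versions chosen for illustration):

```python
import random

# One entry per supported pin; the empty string leaves the package unpinned.
spver = random.choice(["=1.9", "=1.10", "=1.11", ""])
print(f"scipy{spver}" if spver else "scipy (unpinned)")
```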
psg="" if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2"]))') psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2"]))') elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions - psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2"]))') else psgver="" fi if [[ ${npver} == "=1.25" ]] ; then numbaver="" if [[ ${spver} == "=1.8" ]] ; then - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') fi elif [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then numbaver=$(python -c 'import random ; print(random.choice(["=0.57", ""]))') @@ -374,6 +374,9 @@ jobs: # Tests lazy loading of lib, ffi, and NULL in gb.core echo "from graphblas.core import base" > script.py coverage run -a script.py + # Test another code pathway for loading lib + echo "from graphblas.core import lib" > script.py + coverage run -a script.py rm script.py # Tests whose coverage depends on order of tests :/ # TODO: understand why these are order-dependent and try to fix diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4d0e5c0b6..f0ca307e8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,7 +21,7 @@ repos: - id: check-added-large-files - id: check-case-conflict - id: check-merge-conflict - - id: check-symlinks + # - id: check-symlinks - id: check-ast - id: check-toml - id: check-yaml @@ -39,7 +39,7 @@ repos: name: Validate pyproject.toml # I don't yet trust ruff to do what autoflake does - repo: https://github.com/PyCQA/autoflake - rev: v2.1.1 + rev: v2.2.0 hooks: - id: autoflake args: [--in-place] @@ -51,7 +51,7 @@ repos: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.4.0 + rev: v3.7.0 hooks: - id: pyupgrade args: [--py38-plus] @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.270 + rev: v0.0.275 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -79,22 +79,22 @@ repos: additional_dependencies: &flake8_dependencies # These versions need to be updated manually - flake8==6.0.0 - - flake8-bugbear==23.5.9 + - flake8-bugbear==23.6.5 - flake8-simplify==0.20.0 - repo: https://github.com/asottile/yesqa - rev: v1.4.0 + rev: v1.5.0 hooks: - id: yesqa additional_dependencies: *flake8_dependencies - repo: https://github.com/codespell-project/codespell - rev: v2.2.4 + rev: v2.2.5 hooks: - id: codespell types_or: [python, rst, markdown] additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.270 + rev: v0.0.275 hooks: - id: ruff - repo: 
https://github.com/sphinx-contrib/sphinx-lint diff --git a/docs/env.yml b/docs/env.yml index c0c4c8999..3636cfa2d 100644 --- a/docs/env.yml +++ b/docs/env.yml @@ -8,7 +8,7 @@ dependencies: # python-graphblas dependencies - donfig - numba - - python-suitesparse-graphblas>=7.4.0.0 + - python-suitesparse-graphblas>=7.4.0.0,<8 - pyyaml # extra dependencies - matplotlib diff --git a/graphblas/binary/ss.py b/graphblas/binary/ss.py index e45cbcda0..97852fc12 100644 --- a/graphblas/binary/ss.py +++ b/graphblas/binary/ss.py @@ -1,3 +1,5 @@ from ..core import operator +_delayed = {} + del operator diff --git a/graphblas/core/base.py b/graphblas/core/base.py index a4e48b612..42a4de9a1 100644 --- a/graphblas/core/base.py +++ b/graphblas/core/base.py @@ -348,7 +348,7 @@ def _update(self, expr, mask=None, accum=None, replace=False, input_mask=None, * return if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) self.value = expr return @@ -371,7 +371,7 @@ def _update(self, expr, mask=None, accum=None, replace=False, input_mask=None, * else: if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) self.value = expr return else: @@ -571,7 +571,7 @@ def _new(self, dtype, mask, name, is_cscalar=None, **opts): ): if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) if self._is_scalar and self._value._is_cscalar != is_cscalar: return self._value.dup(is_cscalar=is_cscalar, name=name) rv = self._value diff --git a/graphblas/core/descriptor.py b/graphblas/core/descriptor.py index 1e195e3fe..11f634afd 100644 --- a/graphblas/core/descriptor.py +++ b/graphblas/core/descriptor.py @@ -26,6 +26,7 @@ def __init__( self.mask_structure = mask_structure self.transpose_first = transpose_first self.transpose_second = transpose_second + self._context = None # Used by SuiteSparse:GraphBLAS 8 @property def _carg(self): diff --git a/graphblas/core/expr.py b/graphblas/core/expr.py index 48839bcff..d803939a5 100644 --- a/graphblas/core/expr.py +++ b/graphblas/core/expr.py @@ -421,7 +421,7 @@ def _setitem(self, resolved_indexes, obj, *, is_submask): # Fast path using assignElement if self.opts: # Ignore opts for now - descriptor_lookup(**self.opts) + desc = descriptor_lookup(**self.opts) # noqa: F841 (keep desc in scope for context) self.parent._assign_element(resolved_indexes, obj) else: mask = self.kwargs.get("mask") diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 2542ad00e..4696d8ead 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -665,7 +665,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): else: if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) new_mat = ffi_new("GrB_Matrix*") rv = Matrix._from_obj(new_mat, self.dtype, self._nrows, self._ncols, name=name) call("GrB_Matrix_dup", [_Pointer(rv), self]) @@ -2707,7 +2707,7 @@ def _extract_element( result = Scalar(dtype, is_cscalar=is_cscalar, name=name) if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) if is_cscalar: dtype_name = "UDT" if dtype._is_udt else dtype.name if ( diff --git a/graphblas/core/operator/binary.py b/graphblas/core/operator/binary.py index 406405a80..434ad91cb 100644 --- a/graphblas/core/operator/binary.py +++ b/graphblas/core/operator/binary.py @@ 
-19,10 +19,10 @@ UINT16, UINT32, UINT64, - _sample_values, _supports_complex, lookup_dtype, ) +from ...dtypes._core import _sample_values from ...exceptions import UdfParseError, check_status_carg from .. import _has_numba, _supports_udfs, ffi, lib from ..expr import InfixExprBase @@ -506,7 +506,7 @@ def binary_wrapper(z, x, y): # pragma: no cover (numba) type_.gb_obj, ), "BinaryOp", - new_binary, + new_binary[0], ) op = TypedUserBinaryOp(new_type_obj, name, type_, ret_type, new_binary[0]) new_type_obj._add(op) @@ -611,7 +611,7 @@ def binary_wrapper(z_ptr, x_ptr, y_ptr): # pragma: no cover (numba) new_binary, binary_wrapper.cffi, ret_type._carg, dtype._carg, dtype2._carg ), "BinaryOp", - new_binary, + new_binary[0], ) op = TypedUserBinaryOp( self, diff --git a/graphblas/core/operator/indexunary.py b/graphblas/core/operator/indexunary.py index f6637ae6d..8b1211258 100644 --- a/graphblas/core/operator/indexunary.py +++ b/graphblas/core/operator/indexunary.py @@ -3,7 +3,8 @@ from types import FunctionType from ... import _STANDARD_OPERATOR_NAMES, indexunary, select -from ...dtypes import BOOL, FP64, INT8, INT64, UINT64, _sample_values, lookup_dtype +from ...dtypes import BOOL, FP64, INT8, INT64, UINT64, lookup_dtype +from ...dtypes._core import _sample_values from ...exceptions import UdfParseError, check_status_carg from .. import _has_numba, ffi, lib from .base import OpBase, ParameterizedUdf, TypedOpBase, _call_op, _deserialize_parameterized @@ -193,7 +194,7 @@ def indexunary_wrapper(z, x, row, col, y): # pragma: no cover (numba) type_.gb_obj, ), "IndexUnaryOp", - new_indexunary, + new_indexunary[0], ) op = cls._typed_user_class(new_type_obj, name, type_, ret_type, new_indexunary[0]) new_type_obj._add(op) @@ -225,7 +226,7 @@ def _compile_udt(self, dtype, dtype2): new_indexunary, indexunary_wrapper.cffi, ret_type._carg, dtype._carg, dtype2._carg ), "IndexUnaryOp", - new_indexunary, + new_indexunary[0], ) op = TypedUserIndexUnaryOp( self, diff --git a/graphblas/core/operator/semiring.py b/graphblas/core/operator/semiring.py index 035a1c43b..d367461f6 100644 --- a/graphblas/core/operator/semiring.py +++ b/graphblas/core/operator/semiring.py @@ -228,7 +228,7 @@ def _build(cls, name, monoid, binaryop, *, anonymous=False): check_status_carg( lib.GrB_Semiring_new(new_semiring, monoid[binary_out].gb_obj, binary_func.gb_obj), "Semiring", - new_semiring, + new_semiring[0], ) ret_type = monoid[binary_out].return_type op = TypedUserSemiring( @@ -254,7 +254,7 @@ def _compile_udt(self, dtype, dtype2): ret_type = monoid.return_type new_semiring = ffi_new("GrB_Semiring*") status = lib.GrB_Semiring_new(new_semiring, monoid.gb_obj, binaryop.gb_obj) - check_status_carg(status, "Semiring", new_semiring) + check_status_carg(status, "Semiring", new_semiring[0]) op = TypedUserSemiring( new_semiring, self.name, diff --git a/graphblas/core/operator/unary.py b/graphblas/core/operator/unary.py index a02445836..11ada4e48 100644 --- a/graphblas/core/operator/unary.py +++ b/graphblas/core/operator/unary.py @@ -15,10 +15,10 @@ UINT16, UINT32, UINT64, - _sample_values, _supports_complex, lookup_dtype, ) +from ...dtypes._core import _sample_values from ...exceptions import UdfParseError, check_status_carg from .. 
import _has_numba, ffi, lib from ..utils import output_type @@ -239,7 +239,7 @@ def unary_wrapper(z, x): new_unary, unary_wrapper.cffi, ret_type.gb_obj, type_.gb_obj ), "UnaryOp", - new_unary, + new_unary[0], ) op = TypedUserUnaryOp(new_type_obj, name, type_, ret_type, new_unary[0]) new_type_obj._add(op) @@ -264,7 +264,7 @@ def _compile_udt(self, dtype, dtype2): check_status_carg( lib.GrB_UnaryOp_new(new_unary, unary_wrapper.cffi, ret_type._carg, dtype._carg), "UnaryOp", - new_unary, + new_unary[0], ) op = TypedUserUnaryOp(self, self.name, dtype, ret_type, new_unary[0]) self._udt_types[dtype] = ret_type diff --git a/graphblas/core/ss/config.py b/graphblas/core/ss/config.py index ca91cc198..89536479d 100644 --- a/graphblas/core/ss/config.py +++ b/graphblas/core/ss/config.py @@ -12,6 +12,9 @@ class BaseConfig(MutableMapping): # Subclasses should redefine these _get_function = None _set_function = None + _context_get_function = "GxB_Context_get" + _context_set_function = "GxB_Context_set" + _context_keys = set() _null_valid = {} _options = {} _defaults = {} @@ -28,7 +31,7 @@ class BaseConfig(MutableMapping): "GxB_Format_Value", } - def __init__(self, parent=None): + def __init__(self, parent=None, context=None): cls = type(self) if not cls._initialized: cls._reverse_enumerations = {} @@ -51,6 +54,7 @@ def __init__(self, parent=None): rd[k] = k cls._initialized = True self._parent = parent + self._context = context def __delitem__(self, key): raise TypeError("Configuration options can't be deleted.") @@ -61,19 +65,27 @@ def __getitem__(self, key): raise KeyError(key) key_obj, ctype = self._options[key] is_bool = ctype == "bool" + if is_context := (key in self._context_keys): # pragma: no cover (suitesparse 8) + get_function_base = self._context_get_function + else: + get_function_base = self._get_function if ctype in self._int32_ctypes: ctype = "int32_t" - get_function_name = f"{self._get_function}_INT32" + get_function_name = f"{get_function_base}_INT32" elif ctype.startswith("int64_t"): - get_function_name = f"{self._get_function}_INT64" + get_function_name = f"{get_function_base}_INT64" elif ctype.startswith("double"): - get_function_name = f"{self._get_function}_FP64" + get_function_name = f"{get_function_base}_FP64" + elif ctype.startswith("char"): # pragma: no cover (suitesparse 8) + get_function_name = f"{get_function_base}_CHAR" else: # pragma: no cover (sanity) raise ValueError(ctype) get_function = getattr(lib, get_function_name) is_array = "[" in ctype val_ptr = ffi.new(ctype if is_array else f"{ctype}*") - if self._parent is None: + if is_context: # pragma: no cover (suitesparse 8) + info = get_function(self._context._carg, key_obj, val_ptr) + elif self._parent is None: info = get_function(key_obj, val_ptr) else: info = get_function(self._parent._carg, key_obj, val_ptr) @@ -93,6 +105,8 @@ def __getitem__(self, key): return rv if is_bool: return bool(val_ptr[0]) + if ctype.startswith("char"): # pragma: no cover (suitesparse 8) + return ffi.string(val_ptr[0]).decode() return val_ptr[0] raise _error_code_lookup[info](f"Failed to get info for {key!r}") # pragma: no cover @@ -103,15 +117,21 @@ def __setitem__(self, key, val): if key in self._read_only: raise ValueError(f"Config option {key!r} is read-only") key_obj, ctype = self._options[key] + if is_context := (key in self._context_keys): # pragma: no cover (suitesparse 8) + set_function_base = self._context_set_function + else: + set_function_base = self._set_function if ctype in self._int32_ctypes: ctype = "int32_t" - 
set_function_name = f"{self._set_function}_INT32" + set_function_name = f"{set_function_base}_INT32" elif ctype == "double": - set_function_name = f"{self._set_function}_FP64" + set_function_name = f"{set_function_base}_FP64" elif ctype.startswith("int64_t["): - set_function_name = f"{self._set_function}_INT64_ARRAY" + set_function_name = f"{set_function_base}_INT64_ARRAY" elif ctype.startswith("double["): - set_function_name = f"{self._set_function}_FP64_ARRAY" + set_function_name = f"{set_function_base}_FP64_ARRAY" + elif ctype.startswith("char"): # pragma: no cover (suitesparse 8) + set_function_name = f"{set_function_base}_CHAR" else: # pragma: no cover (sanity) raise ValueError(ctype) set_function = getattr(lib, set_function_name) @@ -154,9 +174,19 @@ def __setitem__(self, key, val): f"expected {size}, got {vals.size}: {val}" ) val_obj = ffi.from_buffer(ctype, vals) + elif ctype.startswith("char"): # pragma: no cover (suitesparse 8) + val_obj = ffi.new("char[]", val.encode()) else: val_obj = ffi.cast(ctype, val) - if self._parent is None: + if is_context: # pragma: no cover (suitesparse 8) + if self._context is None: + from .context import Context + + self._context = Context(engage=False) + self._context._engage() # Disengage when context goes out of scope + self._parent._context = self._context # Set context to descriptor + info = set_function(self._context._carg, key_obj, val_obj) + elif self._parent is None: info = set_function(key_obj, val_obj) else: info = set_function(self._parent._carg, key_obj, val_obj) @@ -174,7 +204,12 @@ def __len__(self): return len(self._options) def __repr__(self): - return "{" + ",\n ".join(f"{k!r}: {v!r}" for k, v in self.items()) + "}" + return ( + type(self).__name__ + + "({" + + ",\n ".join(f"{k!r}: {v!r}" for k, v in self.items()) + + "})" + ) def _ipython_key_completions_(self): # pragma: no cover (ipython) return list(self) diff --git a/graphblas/core/ss/descriptor.py b/graphblas/core/ss/descriptor.py index 2f7d11ffa..43553f5ea 100644 --- a/graphblas/core/ss/descriptor.py +++ b/graphblas/core/ss/descriptor.py @@ -90,7 +90,6 @@ class _DescriptorConfig(BaseConfig): "sort": False, "secure_import": False, } - _count = 0 def __init__(self): gb_obj = ffi_new("GrB_Descriptor*") diff --git a/graphblas/core/ss/matrix.py b/graphblas/core/ss/matrix.py index 64aa43a96..990d692b9 100644 --- a/graphblas/core/ss/matrix.py +++ b/graphblas/core/ss/matrix.py @@ -7,7 +7,8 @@ import graphblas as gb from ... import binary, monoid -from ...dtypes import _INDEX, BOOL, INT64, UINT64, _string_to_dtype, lookup_dtype +from ...dtypes import _INDEX, BOOL, INT64, UINT64, lookup_dtype +from ...dtypes._core import _string_to_dtype from ...exceptions import _error_code_lookup, check_status, check_status_carg from .. import NULL, _has_numba, ffi, lib from ..base import call diff --git a/graphblas/core/ss/vector.py b/graphblas/core/ss/vector.py index 1babc556e..ff9e233eb 100644 --- a/graphblas/core/ss/vector.py +++ b/graphblas/core/ss/vector.py @@ -6,7 +6,8 @@ import graphblas as gb from ... import binary, monoid -from ...dtypes import _INDEX, INT64, UINT64, _string_to_dtype, lookup_dtype +from ...dtypes import _INDEX, INT64, UINT64, lookup_dtype +from ...dtypes._core import _string_to_dtype from ...exceptions import _error_code_lookup, check_status, check_status_carg from .. 
import NULL, ffi, lib from ..base import call diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py index d2ddee372..cd5b992ba 100644 --- a/graphblas/core/vector.py +++ b/graphblas/core/vector.py @@ -612,7 +612,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): else: if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) rv = Vector._from_obj(ffi_new("GrB_Vector*"), self.dtype, self._size, name=name) call("GrB_Vector_dup", [_Pointer(rv), self]) return rv @@ -1757,7 +1757,7 @@ def _extract_element( result = Scalar(dtype, is_cscalar=is_cscalar, name=name) if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) if is_cscalar: dtype_name = "UDT" if dtype._is_udt else dtype.name if ( @@ -2177,6 +2177,9 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): if clear: if dtype is None: dtype = self.dtype + if opts: + # Ignore opts for now + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) return self.output_type(dtype, *self.shape, name=name) return self.new(dtype, mask=mask, name=name, **opts) diff --git a/graphblas/dtypes/__init__.py b/graphblas/dtypes/__init__.py new file mode 100644 index 000000000..0d26a44a0 --- /dev/null +++ b/graphblas/dtypes/__init__.py @@ -0,0 +1,43 @@ +from ._core import ( + _INDEX, + BOOL, + FP32, + FP64, + INT8, + INT16, + INT32, + INT64, + UINT8, + UINT16, + UINT32, + UINT64, + DataType, + _supports_complex, + lookup_dtype, + register_anonymous, + register_new, + unify, +) + +if _supports_complex: + from ._core import FC32, FC64 + + +def __dir__(): + return globals().keys() | {"ss"} + + +def __getattr__(key): + if key == "ss": + from .. import backend + + if backend != "suitesparse": + raise AttributeError( + f'module {__name__!r} only has attribute "ss" when backend is "suitesparse"' + ) + from importlib import import_module + + ss = import_module(".ss", __name__) + globals()["ss"] = ss + return ss + raise AttributeError(f"module {__name__!r} has no attribute {key!r}") diff --git a/graphblas/dtypes.py b/graphblas/dtypes/_core.py similarity index 69% rename from graphblas/dtypes.py rename to graphblas/dtypes/_core.py index 61b297c13..345c1be81 100644 --- a/graphblas/dtypes.py +++ b/graphblas/dtypes/_core.py @@ -1,20 +1,16 @@ -import warnings as _warnings +import warnings -import numpy as _np -from numpy import promote_types as _promote_types -from numpy import result_type as _result_type +import numpy as np +from numpy import promote_types, result_type -from . import backend -from .core import NULL as _NULL -from .core import _has_numba -from .core import ffi as _ffi -from .core import lib as _lib +from .. 
import backend, dtypes +from ..core import NULL, _has_numba, ffi, lib if _has_numba: - import numba as _numba + import numba # Default assumption unless FC32/FC64 are found in lib -_supports_complex = hasattr(_lib, "GrB_FC64") or hasattr(_lib, "GxB_FC64") +_supports_complex = hasattr(lib, "GrB_FC64") or hasattr(lib, "GxB_FC64") class DataType: @@ -26,7 +22,7 @@ def __init__(self, name, gb_obj, gb_name, c_type, numba_type, np_type): self.gb_name = gb_name self.c_type = c_type self.numba_type = numba_type - self.np_type = _np.dtype(np_type) + self.np_type = np.dtype(np_type) def __repr__(self): return self.name @@ -62,7 +58,7 @@ def _carg(self): @property def _is_anonymous(self): - return globals().get(self.name) is not self + return getattr(dtypes, self.name, None) is not self @property def _is_udt(self): @@ -80,27 +76,29 @@ def _deserialize(name, dtype, is_anonymous): def register_new(name, dtype): if not name.isidentifier(): raise ValueError(f"`name` argument must be a valid Python identifier; got: {name!r}") - if name in _registry or name in globals(): + if name in _registry or hasattr(dtypes, name): raise ValueError(f"{name!r} name for dtype is unavailable") rv = register_anonymous(dtype, name) _registry[name] = rv - globals()[name] = rv + setattr(dtypes, name, rv) return rv def register_anonymous(dtype, name=None): try: - dtype = _np.dtype(dtype) + dtype = np.dtype(dtype) except TypeError: if isinstance(dtype, dict): # Allow dtypes such as `{'x': int, 'y': float}` for convenience - dtype = _np.dtype([(key, lookup_dtype(val).np_type) for key, val in dtype.items()]) + dtype = np.dtype( + [(key, lookup_dtype(val).np_type) for key, val in dtype.items()], align=True + ) elif isinstance(dtype, str) and "[" in dtype and dtype.endswith("]"): # Allow dtypes such as `"INT64[3, 4]"` for convenience base_dtype, shape = dtype.split("[", 1) base_dtype = lookup_dtype(base_dtype) - shape = _np.lib.format.safe_eval(f"[{shape}") - dtype = _np.dtype((base_dtype.np_type, shape)) + shape = np.lib.format.safe_eval(f"[{shape}") + dtype = np.dtype((base_dtype.np_type, shape)) else: raise if dtype in _registry: @@ -114,36 +112,36 @@ def register_anonymous(dtype, name=None): if dtype.hasobject: raise ValueError("dtype must not allow Python objects") - from .exceptions import check_status_carg + from ..exceptions import check_status_carg - gb_obj = _ffi.new("GrB_Type*") + gb_obj = ffi.new("GrB_Type*") if backend == "suitesparse": # We name this so that we can serialize and deserialize UDTs # We don't yet have C definitions np_repr = _dtype_to_string(dtype).encode() - if len(np_repr) > _lib.GxB_MAX_NAME_LEN: + if len(np_repr) > lib.GxB_MAX_NAME_LEN: msg = ( f"UDT repr is too large to serialize ({len(repr(dtype).encode())} > " - f"{_lib.GxB_MAX_NAME_LEN})." + f"{lib.GxB_MAX_NAME_LEN})." ) if name is not None: - np_repr = name.encode()[: _lib.GxB_MAX_NAME_LEN] + np_repr = name.encode()[: lib.GxB_MAX_NAME_LEN] else: - np_repr = np_repr[: _lib.GxB_MAX_NAME_LEN] - _warnings.warn( + np_repr = np_repr[: lib.GxB_MAX_NAME_LEN] + warnings.warn( f"{msg}. It will use the following name, " f"and the dtype may need to be specified when deserializing: {np_repr}", stacklevel=2, ) - status = _lib.GxB_Type_new(gb_obj, dtype.itemsize, np_repr, _NULL) + status = lib.GxB_Type_new(gb_obj, dtype.itemsize, np_repr, NULL) else: - status = _lib.GrB_Type_new(gb_obj, dtype.itemsize) + status = lib.GrB_Type_new(gb_obj, dtype.itemsize) check_status_carg(status, "Type", gb_obj[0]) # For now, let's use "opaque" unsigned bytes for the c type. 
if name is None: name = _default_name(dtype) - numba_type = _numba.typeof(dtype).dtype if _has_numba else None + numba_type = numba.typeof(dtype).dtype if _has_numba else None rv = DataType(name, gb_obj, None, f"uint8_t[{dtype.itemsize}]", numba_type, dtype) _registry[gb_obj] = rv _registry[dtype] = rv @@ -155,153 +153,153 @@ def register_anonymous(dtype, name=None): BOOL = DataType( "BOOL", - _lib.GrB_BOOL, + lib.GrB_BOOL, "GrB_BOOL", "_Bool", - _numba.types.bool_ if _has_numba else None, - _np.bool_, + numba.types.bool_ if _has_numba else None, + np.bool_, ) INT8 = DataType( - "INT8", _lib.GrB_INT8, "GrB_INT8", "int8_t", _numba.types.int8 if _has_numba else None, _np.int8 + "INT8", lib.GrB_INT8, "GrB_INT8", "int8_t", numba.types.int8 if _has_numba else None, np.int8 ) UINT8 = DataType( "UINT8", - _lib.GrB_UINT8, + lib.GrB_UINT8, "GrB_UINT8", "uint8_t", - _numba.types.uint8 if _has_numba else None, - _np.uint8, + numba.types.uint8 if _has_numba else None, + np.uint8, ) INT16 = DataType( "INT16", - _lib.GrB_INT16, + lib.GrB_INT16, "GrB_INT16", "int16_t", - _numba.types.int16 if _has_numba else None, - _np.int16, + numba.types.int16 if _has_numba else None, + np.int16, ) UINT16 = DataType( "UINT16", - _lib.GrB_UINT16, + lib.GrB_UINT16, "GrB_UINT16", "uint16_t", - _numba.types.uint16 if _has_numba else None, - _np.uint16, + numba.types.uint16 if _has_numba else None, + np.uint16, ) INT32 = DataType( "INT32", - _lib.GrB_INT32, + lib.GrB_INT32, "GrB_INT32", "int32_t", - _numba.types.int32 if _has_numba else None, - _np.int32, + numba.types.int32 if _has_numba else None, + np.int32, ) UINT32 = DataType( "UINT32", - _lib.GrB_UINT32, + lib.GrB_UINT32, "GrB_UINT32", "uint32_t", - _numba.types.uint32 if _has_numba else None, - _np.uint32, + numba.types.uint32 if _has_numba else None, + np.uint32, ) INT64 = DataType( "INT64", - _lib.GrB_INT64, + lib.GrB_INT64, "GrB_INT64", "int64_t", - _numba.types.int64 if _has_numba else None, - _np.int64, + numba.types.int64 if _has_numba else None, + np.int64, ) # _Index (like UINT64) is for internal use only and shouldn't be exposed to the user _INDEX = DataType( "UINT64", - _lib.GrB_UINT64, + lib.GrB_UINT64, "GrB_Index", "GrB_Index", - _numba.types.uint64 if _has_numba else None, - _np.uint64, + numba.types.uint64 if _has_numba else None, + np.uint64, ) UINT64 = DataType( "UINT64", - _lib.GrB_UINT64, + lib.GrB_UINT64, "GrB_UINT64", "uint64_t", - _numba.types.uint64 if _has_numba else None, - _np.uint64, + numba.types.uint64 if _has_numba else None, + np.uint64, ) FP32 = DataType( "FP32", - _lib.GrB_FP32, + lib.GrB_FP32, "GrB_FP32", "float", - _numba.types.float32 if _has_numba else None, - _np.float32, + numba.types.float32 if _has_numba else None, + np.float32, ) FP64 = DataType( "FP64", - _lib.GrB_FP64, + lib.GrB_FP64, "GrB_FP64", "double", - _numba.types.float64 if _has_numba else None, - _np.float64, + numba.types.float64 if _has_numba else None, + np.float64, ) -if _supports_complex and hasattr(_lib, "GxB_FC32"): +if _supports_complex and hasattr(lib, "GxB_FC32"): FC32 = DataType( "FC32", - _lib.GxB_FC32, + lib.GxB_FC32, "GxB_FC32", "float _Complex", - _numba.types.complex64 if _has_numba else None, - _np.complex64, + numba.types.complex64 if _has_numba else None, + np.complex64, ) -if _supports_complex and hasattr(_lib, "GrB_FC32"): # pragma: no cover (unused) +if _supports_complex and hasattr(lib, "GrB_FC32"): # pragma: no cover (unused) FC32 = DataType( "FC32", - _lib.GrB_FC32, + lib.GrB_FC32, "GrB_FC32", "float _Complex", - _numba.types.complex64 if 
_has_numba else None, - _np.complex64, + numba.types.complex64 if _has_numba else None, + np.complex64, ) -if _supports_complex and hasattr(_lib, "GxB_FC64"): +if _supports_complex and hasattr(lib, "GxB_FC64"): FC64 = DataType( "FC64", - _lib.GxB_FC64, + lib.GxB_FC64, "GxB_FC64", "double _Complex", - _numba.types.complex128 if _has_numba else None, - _np.complex128, + numba.types.complex128 if _has_numba else None, + np.complex128, ) -if _supports_complex and hasattr(_lib, "GrB_FC64"): # pragma: no cover (unused) +if _supports_complex and hasattr(lib, "GrB_FC64"): # pragma: no cover (unused) FC64 = DataType( "FC64", - _lib.GrB_FC64, + lib.GrB_FC64, "GrB_FC64", "double _Complex", - _numba.types.complex128 if _has_numba else None, - _np.complex128, + numba.types.complex128 if _has_numba else None, + np.complex128, ) # Used for testing user-defined functions _sample_values = { - INT8: _np.int8(1), - UINT8: _np.uint8(1), - INT16: _np.int16(1), - UINT16: _np.uint16(1), - INT32: _np.int32(1), - UINT32: _np.uint32(1), - INT64: _np.int64(1), - UINT64: _np.uint64(1), - FP32: _np.float32(0.5), - FP64: _np.float64(0.5), - BOOL: _np.bool_(True), + INT8: np.int8(1), + UINT8: np.uint8(1), + INT16: np.int16(1), + UINT16: np.uint16(1), + INT32: np.int32(1), + UINT32: np.uint32(1), + INT64: np.int64(1), + UINT64: np.uint64(1), + FP32: np.float32(0.5), + FP64: np.float64(0.5), + BOOL: np.bool_(True), } if _supports_complex: _sample_values.update( { - FC32: _np.complex64(complex(0, 0.5)), - FC64: _np.complex128(complex(0, 0.5)), + FC32: np.complex64(complex(0, 0.5)), + FC64: np.complex128(complex(0, 0.5)), } ) @@ -390,10 +388,10 @@ def unify(type1, type2, *, is_left_scalar=False, is_right_scalar=False): return type1 if is_left_scalar: if not is_right_scalar: - return lookup_dtype(_result_type(_np.array(0, type1.np_type), type2.np_type)) + return lookup_dtype(result_type(np.array(0, type1.np_type), type2.np_type)) elif is_right_scalar: - return lookup_dtype(_result_type(type1.np_type, _np.array(0, type2.np_type))) - return lookup_dtype(_promote_types(type1.np_type, type2.np_type)) + return lookup_dtype(result_type(type1.np_type, np.array(0, type2.np_type))) + return lookup_dtype(promote_types(type1.np_type, type2.np_type)) def _default_name(dtype): @@ -423,7 +421,7 @@ def _dtype_to_string(dtype): >>> dtype == new_dtype True """ - if isinstance(dtype, _np.dtype) and dtype not in _registry: + if isinstance(dtype, np.dtype) and dtype not in _registry: np_type = dtype else: dtype = lookup_dtype(dtype) @@ -432,11 +430,11 @@ def _dtype_to_string(dtype): np_type = dtype.np_type s = str(np_type) try: - if _np.dtype(_np.lib.format.safe_eval(s)) == np_type: # pragma: no branch (safety) + if np.dtype(np.lib.format.safe_eval(s)) == np_type: # pragma: no branch (safety) return s except Exception: pass - if _np.dtype(np_type.str) != np_type: # pragma: no cover (safety) + if np.dtype(np_type.str) != np_type: # pragma: no cover (safety) raise ValueError(f"Unable to reliably convert dtype to string and back: {dtype}") return repr(np_type.str) @@ -451,5 +449,5 @@ def _string_to_dtype(s): return lookup_dtype(s) except Exception: pass - np_type = _np.dtype(_np.lib.format.safe_eval(s)) + np_type = np.dtype(np.lib.format.safe_eval(s)) return lookup_dtype(np_type) diff --git a/graphblas/dtypes/ss.py b/graphblas/dtypes/ss.py new file mode 100644 index 000000000..e69de29bb diff --git a/graphblas/indexunary/__init__.py b/graphblas/indexunary/__init__.py index 472231597..a3cb06608 100644 --- a/graphblas/indexunary/__init__.py +++ 
b/graphblas/indexunary/__init__.py @@ -4,7 +4,7 @@ def __dir__(): - return globals().keys() | _delayed.keys() + return globals().keys() | _delayed.keys() | {"ss"} def __getattr__(key): @@ -13,6 +13,18 @@ def __getattr__(key): rv = func(**kwargs) globals()[key] = rv return rv + if key == "ss": + from .. import backend + + if backend != "suitesparse": + raise AttributeError( + f'module {__name__!r} only has attribute "ss" when backend is "suitesparse"' + ) + from importlib import import_module + + ss = import_module(".ss", __name__) + globals()["ss"] = ss + return ss raise AttributeError(f"module {__name__!r} has no attribute {key!r}") diff --git a/graphblas/indexunary/ss.py b/graphblas/indexunary/ss.py new file mode 100644 index 000000000..97852fc12 --- /dev/null +++ b/graphblas/indexunary/ss.py @@ -0,0 +1,5 @@ +from ..core import operator + +_delayed = {} + +del operator diff --git a/graphblas/io/_matrixmarket.py b/graphblas/io/_matrixmarket.py index 294bcfa1e..558605328 100644 --- a/graphblas/io/_matrixmarket.py +++ b/graphblas/io/_matrixmarket.py @@ -36,7 +36,6 @@ def mmread(source, engine="auto", *, dup_op=None, name=None, **kwargs): try: # scipy is currently needed for *all* engines from scipy.io import mmread - from scipy.sparse import isspmatrix_coo except ImportError: # pragma: no cover (import) raise ImportError("scipy is required to read Matrix Market files") from None engine = engine.lower() @@ -54,7 +53,7 @@ def mmread(source, engine="auto", *, dup_op=None, name=None, **kwargs): f'Bad engine value: {engine!r}. Must be "auto", "scipy", "fmm", or "fast_matrix_market"' ) array = mmread(source, **kwargs) - if isspmatrix_coo(array): + if getattr(array, "format", None) == "coo": nrows, ncols = array.shape return Matrix.from_coo( array.row, array.col, array.data, nrows=nrows, ncols=ncols, dup_op=dup_op, name=name @@ -105,13 +104,17 @@ def mmwrite( engine = engine.lower() if engine in {"auto", "fmm", "fast_matrix_market"}: try: - from fast_matrix_market import mmwrite # noqa: F811 + from fast_matrix_market import __version__, mmwrite # noqa: F811 except ImportError: # pragma: no cover (import) if engine != "auto": raise ImportError( "fast_matrix_market is required to write Matrix Market files " f'using the "{engine}" engine' ) from None + else: + import scipy as sp + + engine = "fast_matrix_market" elif engine != "scipy": raise ValueError( f'Bad engine value: {engine!r}. Must be "auto", "scipy", "fmm", or "fast_matrix_market"' ) @@ -120,6 +123,12 @@ def mmwrite( array = matrix.ss.export()["values"] else: array = to_scipy_sparse(matrix, format="coo") + if engine == "fast_matrix_market" and __version__ < "1.7." and sp.__version__ > "1.11.": + # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. `ss.isspmatrix_coo`. + # fast_matrix_market updated to handle this in version 1.7.0 + # Also, it looks like fast_matrix_market has special writers for csr and csc; + # should we see if using those is faster? + array = sp.sparse.coo_matrix(array) # FLAKY COVERAGE mmwrite( target, array, diff --git a/graphblas/monoid/__init__.py b/graphblas/monoid/__init__.py index 007aba416..ed028c5d9 100644 --- a/graphblas/monoid/__init__.py +++ b/graphblas/monoid/__init__.py @@ -4,7 +4,7 @@ def __dir__(): - return globals().keys() | _delayed.keys() + return globals().keys() | _delayed.keys() | {"ss"} def __getattr__(key): @@ -17,6 +17,18 @@ def __getattr__(key): rv = func(**kwargs) globals()[key] = rv return rv + if key == "ss": + from ..
import backend + + if backend != "suitesparse": + raise AttributeError( + f'module {__name__!r} only has attribute "ss" when backend is "suitesparse"' + ) + from importlib import import_module + + ss = import_module(".ss", __name__) + globals()["ss"] = ss + return ss raise AttributeError(f"module {__name__!r} has no attribute {key!r}") diff --git a/graphblas/monoid/ss.py b/graphblas/monoid/ss.py new file mode 100644 index 000000000..97852fc12 --- /dev/null +++ b/graphblas/monoid/ss.py @@ -0,0 +1,5 @@ +from ..core import operator + +_delayed = {} + +del operator diff --git a/graphblas/op/ss.py b/graphblas/op/ss.py index e45cbcda0..97852fc12 100644 --- a/graphblas/op/ss.py +++ b/graphblas/op/ss.py @@ -1,3 +1,5 @@ from ..core import operator +_delayed = {} + del operator diff --git a/graphblas/select/__init__.py b/graphblas/select/__init__.py index 72aa8d226..aaf8e12d0 100644 --- a/graphblas/select/__init__.py +++ b/graphblas/select/__init__.py @@ -8,7 +8,7 @@ def __dir__(): - return globals().keys() | _delayed.keys() + return globals().keys() | _delayed.keys() | {"ss"} def __getattr__(key): @@ -17,6 +17,18 @@ def __getattr__(key): rv = func(**kwargs) globals()[key] = rv return rv + if key == "ss": + from .. import backend + + if backend != "suitesparse": + raise AttributeError( + f'module {__name__!r} only has attribute "ss" when backend is "suitesparse"' + ) + from importlib import import_module + + ss = import_module(".ss", __name__) + globals()["ss"] = ss + return ss raise AttributeError(f"module {__name__!r} has no attribute {key!r}") diff --git a/graphblas/select/ss.py b/graphblas/select/ss.py new file mode 100644 index 000000000..97852fc12 --- /dev/null +++ b/graphblas/select/ss.py @@ -0,0 +1,5 @@ +from ..core import operator + +_delayed = {} + +del operator diff --git a/graphblas/semiring/ss.py b/graphblas/semiring/ss.py index e45cbcda0..97852fc12 100644 --- a/graphblas/semiring/ss.py +++ b/graphblas/semiring/ss.py @@ -1,3 +1,5 @@ from ..core import operator +_delayed = {} + del operator diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py index ec5a89504..53287f1a5 100644 --- a/graphblas/ss/_core.py +++ b/graphblas/ss/_core.py @@ -2,6 +2,7 @@ from ..core import ffi, lib from ..core.base import _expect_type +from ..core.descriptor import lookup as descriptor_lookup from ..core.matrix import Matrix, TransposedMatrix from ..core.scalar import _as_scalar from ..core.ss.config import BaseConfig @@ -52,6 +53,9 @@ def diag(x, k=0, dtype=None, *, name=None, **opts): dtype = x.dtype typ = type(x) if typ is Vector: + if opts: + # Ignore opts for now + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) size = x._size + abs(k.value) rv = Matrix(dtype, nrows=size, ncols=size, name=name) rv.ss.build_diag(x, k) @@ -120,7 +124,7 @@ class GlobalConfig(BaseConfig): memory_pool : List[int] burble : bool Enable diagnostic printing from SuiteSparse:GraphBLAS - print_1based: bool + print_1based : bool gpu_control : str, {"always", "never"} gpu_chunk : double diff --git a/graphblas/tests/conftest.py b/graphblas/tests/conftest.py index 0d1f4008a..ce9e6488f 100644 --- a/graphblas/tests/conftest.py +++ b/graphblas/tests/conftest.py @@ -1,4 +1,5 @@ import atexit +import contextlib import functools import itertools import platform @@ -114,6 +115,27 @@ def ic(): # pragma: no cover (debug) return icecream.ic +@contextlib.contextmanager +def burble(): # pragma: no cover (debug) + """Show the burble diagnostics within a context.""" + if gb.backend != "suitesparse": + yield + return 
+ prev = gb.ss.config["burble"] + gb.ss.config["burble"] = True + try: + yield + finally: + gb.ss.config["burble"] = prev + + +@pytest.fixture(scope="session") +def burble_all(): # pragma: no cover (debug) + """Show the burble diagnostics for the entire test.""" + with burble(): + yield burble + + def autocompute(func): @functools.wraps(func) def inner(*args, **kwargs): diff --git a/graphblas/tests/test_dtype.py b/graphblas/tests/test_dtype.py index 66c19cce5..47a226313 100644 --- a/graphblas/tests/test_dtype.py +++ b/graphblas/tests/test_dtype.py @@ -123,7 +123,7 @@ def test_dtype_bad_comparison(): def test_dtypes_match_numpy(): - for key, val in dtypes._registry.items(): + for key, val in dtypes._core._registry.items(): try: if key is int or (isinstance(key, str) and key == "int"): # For win64, numpy treats int as int32, not int64 @@ -137,7 +137,7 @@ def test_dtypes_match_numpy(): def test_pickle(): - for val in dtypes._registry.values(): + for val in dtypes._core._registry.values(): s = pickle.dumps(val) val2 = pickle.loads(s) if val._is_udt: # pragma: no cover @@ -205,7 +205,7 @@ def test_auto_register(): def test_default_names(): - from graphblas.dtypes import _default_name + from graphblas.dtypes._core import _default_name assert _default_name(np.dtype([("x", np.int32), ("y", np.float64)], align=True)) == ( "{'x': INT32, 'y': FP64}" @@ -230,9 +230,9 @@ def test_dtype_to_from_string(): except Exception: pass for dtype in types: - s = dtypes._dtype_to_string(dtype) + s = dtypes._core._dtype_to_string(dtype) try: - dtype2 = dtypes._string_to_dtype(s) + dtype2 = dtypes._core._string_to_dtype(s) except Exception: with pytest.raises(ValueError, match="Unknown dtype"): lookup_dtype(dtype) @@ -253,3 +253,20 @@ def test_has_complex(): from packaging.version import parse assert dtypes._supports_complex == (parse(ssgb.__version__) >= parse("7.4.3.1")) + + +def test_has_ss_attribute(): + if suitesparse: + assert dtypes.ss is not None + else: + with pytest.raises(AttributeError): + dtypes.ss + + +def test_dir(): + must_have = {"DataType", "lookup_dtype", "register_anonymous", "register_new", "ss", "unify"} + must_have.update({"FP32", "FP64", "INT8", "INT16", "INT32", "INT64"}) + must_have.update({"BOOL", "UINT8", "UINT16", "UINT32", "UINT64"}) + if dtypes._supports_complex: + must_have.update({"FC32", "FC64"}) + assert set(dir(dtypes)) & must_have == must_have diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py index 671b12bd6..bf2ca2015 100644 --- a/graphblas/tests/test_io.py +++ b/graphblas/tests/test_io.py @@ -59,18 +59,24 @@ def test_vector_to_from_numpy(): csr = gb.io.to_scipy_sparse(v, "csr") assert csr.nnz == 2 - assert ss.isspmatrix_csr(csr) + # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. `ss.isspmatrix_csr` + assert isinstance(csr, getattr(ss, "sparray", ss.spmatrix)) + assert csr.format == "csr" np.testing.assert_array_equal(csr.toarray(), np.array([[0.0, 2.0, 4.1]])) csc = gb.io.to_scipy_sparse(v, "csc") assert csc.nnz == 2 - assert ss.isspmatrix_csc(csc) + # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. `ss.isspmatrix_csc` + assert isinstance(csc, getattr(ss, "sparray", ss.spmatrix)) + assert csc.format == "csc" np.testing.assert_array_equal(csc.toarray(), np.array([[0.0, 2.0, 4.1]]).T) # default to csr-like coo = gb.io.to_scipy_sparse(v, "coo") assert coo.shape == csr.shape - assert ss.isspmatrix_coo(coo) + # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. 
`ss.isspmatrix_coo` + assert isinstance(coo, getattr(ss, "sparray", ss.spmatrix)) + assert coo.format == "coo" assert coo.nnz == 2 np.testing.assert_array_equal(coo.toarray(), np.array([[0.0, 2.0, 4.1]])) @@ -99,7 +105,9 @@ def test_matrix_to_from_numpy(): for format in ["csr", "csc", "coo"]: sparse = gb.io.to_scipy_sparse(M, format) - assert getattr(ss, f"isspmatrix_{format}")(sparse) + # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. `ss.isspmatrix_csr` + assert isinstance(sparse, getattr(ss, "sparray", ss.spmatrix)) + assert sparse.format == format assert sparse.nnz == 3 np.testing.assert_array_equal(sparse.toarray(), a) M2 = gb.io.from_scipy_sparse(sparse) @@ -435,6 +443,7 @@ def test_awkward_errors(): @pytest.mark.skipif("not sparse") +@pytest.mark.slow def test_vector_to_from_pydata_sparse(): coords = np.array([0, 1, 2, 3, 4], dtype="int64") data = np.array([10, 20, 30, 40, 50], dtype="int64") @@ -448,6 +457,7 @@ def test_vector_to_from_pydata_sparse(): @pytest.mark.skipif("not sparse") +@pytest.mark.slow def test_matrix_to_from_pydata_sparse(): coords = np.array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], dtype="int64") data = np.array([10, 20, 30, 40, 50], dtype="int64") diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index 26017f364..bc942bc49 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -4298,7 +4298,7 @@ def test_ss_descriptors(A): A(nthreads=4, axb_method="dot", sort=True) << A @ A assert A.isequal(C2) # Bad option should show list of valid options - with pytest.raises(ValueError, match="nthreads"): + with pytest.raises(ValueError, match="axb_method"): C1(bad_opt=True) << A with pytest.raises(ValueError, match="Duplicate descriptor"): (A @ A).new(nthreads=4, Nthreads=5) diff --git a/graphblas/tests/test_op.py b/graphblas/tests/test_op.py index a80012ab7..b54ea76c4 100644 --- a/graphblas/tests/test_op.py +++ b/graphblas/tests/test_op.py @@ -1006,7 +1006,7 @@ def myplus(x, y): def test_create_semiring(): # stress test / sanity check - monoid_names = {x for x in dir(monoid) if not x.startswith("_")} + monoid_names = {x for x in dir(monoid) if not x.startswith("_") and x != "ss"} binary_names = {x for x in dir(binary) if not x.startswith("_") and x != "ss"} for monoid_name, binary_name in itertools.product(monoid_names, binary_names): cur_monoid = getattr(monoid, monoid_name) @@ -1433,6 +1433,7 @@ def test_deprecated(): import graphblas.core.agg # noqa: F401 +@pytest.mark.slow def test_is_idempotent(): assert monoid.min.is_idempotent assert monoid.max[int].is_idempotent @@ -1446,3 +1447,14 @@ def test_is_idempotent(): assert not monoid.numpy.equal.is_idempotent with pytest.raises(AttributeError): binary.min.is_idempotent + + +def test_ops_have_ss(): + modules = [unary, binary, monoid, semiring, indexunary, select, op] + if suitesparse: + for mod in modules: + assert mod.ss is not None + else: + for mod in modules: + with pytest.raises(AttributeError): + mod.ss diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index 36ab346b8..a1aabd183 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -1448,14 +1448,14 @@ def test_diag(v): expected = Matrix.from_coo(rows, cols, values, nrows=size, ncols=size, dtype=v.dtype) # Construct diagonal matrix A if suitesparse: - A = gb.ss.diag(v, k=k) + A = gb.ss.diag(v, k=k, nthreads=2) assert expected.isequal(A) A = v.diag(k) assert expected.isequal(A) # Extract diagonal from A if suitesparse: - w = gb.ss.diag(A, 
Scalar.from_value(k)) + w = gb.ss.diag(A, Scalar.from_value(k), nthreads=2) assert v.isequal(w) assert w.dtype == "INT64" @@ -1737,6 +1737,13 @@ def test_dup_expr(v): assert result.isequal(b) result = (b | b).dup(clear=True) assert result.isequal(b.dup(clear=True)) + result = v[:5].dup() + assert result.isequal(v[:5].new()) + if suitesparse: + result = v[:5].dup(nthreads=2) + assert result.isequal(v[:5].new()) + result = v[:5].dup(clear=True, nthreads=2) + assert result.isequal(Vector(v.dtype, size=5)) @pytest.mark.skipif("not suitesparse") diff --git a/graphblas/unary/ss.py b/graphblas/unary/ss.py index e45cbcda0..97852fc12 100644 --- a/graphblas/unary/ss.py +++ b/graphblas/unary/ss.py @@ -1,3 +1,5 @@ from ..core import operator +_delayed = {} + del operator diff --git a/pyproject.toml b/pyproject.toml index 9e57b8296..ddd718ef6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -138,6 +138,7 @@ packages = [ "graphblas.core", "graphblas.core.operator", "graphblas.core.ss", + "graphblas.dtypes", "graphblas.indexunary", "graphblas.io", "graphblas.monoid", @@ -311,6 +312,7 @@ ignore = [ # "SIM401", # Use dict.get ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer) "TRY004", # Prefer `TypeError` exception for invalid type (Note: good advice, but not worth the nuisance) "TRY200", # Use `raise from` to specify exception cause (Note: sometimes okay to raise original exception) + "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` (Note: no annotations yet) # Intentionally ignored "COM812", # Trailing comma missing @@ -322,6 +324,7 @@ ignore = [ "N806", # Variable ... in function should be lowercase "N807", # Function name should not start and end with `__` "N818", # Exception name ... should be named with an Error suffix (Note: good advice) + "PLR0124", # Name compared with itself, consider replacing `x == x` (Note: too strict) "PLR0911", # Too many return statements "PLR0912", # Too many branches "PLR0913", # Too many arguments to function call @@ -356,6 +359,7 @@ ignore = [ "TCH", # flake8-type-checking (Note: figure out type checking later) "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) "TD", # flake8-todos (Maybe okay to add some of these) + "FIX", # flake8-fixme (like flake8-todos) "ERA", # eradicate (We like code in comments!) "PD", # pandas-vet (Intended for scripts that use pandas, not libraries) ] diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index f0e648fd9..22f0b3cca 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -5,13 +5,13 @@ # Tip: add `--json` for more information. 
conda search 'numpy[channel=conda-forge]>=1.25.0' conda search 'pandas[channel=conda-forge]>=2.0.2' -conda search 'scipy[channel=conda-forge]>=1.10.1' +conda search 'scipy[channel=conda-forge]>=1.11.0' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.2.1' +conda search 'awkward[channel=conda-forge]>=2.2.4' conda search 'sparse[channel=conda-forge]>=0.14.0' -conda search 'fast_matrix_market[channel=conda-forge]>=1.6.0' -conda search 'numba[channel=conda-forge]>=0.57.0' +conda search 'fast_matrix_market[channel=conda-forge]>=1.7.2' +conda search 'numba[channel=conda-forge]>=0.57.1' conda search 'pyyaml[channel=conda-forge]>=6.0' -conda search 'flake8-bugbear[channel=conda-forge]>=23.5.9' +conda search 'flake8-bugbear[channel=conda-forge]>=23.6.5' conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' # conda search 'python[channel=conda-forge]>=3.8 *pypy*' From da016617bd8413fe2dc28f6f9c041983b81512b2 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 28 Jun 2023 09:28:23 -0500 Subject: [PATCH 11/66] Update copyright to include contributors (#470) --- LICENSE | 2 +- docs/conf.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LICENSE b/LICENSE index 74a8ba6c6..935875c92 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2020 Anaconda, Inc + Copyright 2020-2023 Anaconda, Inc. and contributors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/docs/conf.py b/docs/conf.py index 3e1a8c85b..07a373203 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -19,7 +19,7 @@ # -- Project information ----------------------------------------------------- project = "python-graphblas" -copyright = "2022, Anaconda, Inc" +copyright = "2020-2023, Anaconda, Inc. and contributors" author = "Anaconda, Inc" # The full version, including alpha/beta/rc tags From 5e18a9c5dae8cecd40c6e08419cd30c54299db8f Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 28 Jun 2023 09:54:46 -0500 Subject: [PATCH 12/66] Clarify in docs that monoids are commutative and associative (#469) * Clarify in docs that monoids are commutative and associative --- docs/user_guide/operators.rst | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/user_guide/operators.rst b/docs/user_guide/operators.rst index 9499562f2..ec28e2fba 100644 --- a/docs/user_guide/operators.rst +++ b/docs/user_guide/operators.rst @@ -89,9 +89,12 @@ registered from numpy are located in ``graphblas.binary.numpy``. Monoids ------- -Monoids extend the concept of a binary operator to require a single domain for all inputs and -the output. Monoids are also associative, so the order of the inputs does not matter. And finally, -monoids have a default identity such that ``A op identity == A``. +Monoids extend the concept of a binary operator to require a single domain for all inputs and the output. +Monoids are also associative, so the order of operations does not matter +(for example, ``(a + b) + c == a + (b + c)``). +GraphBLAS primarily uses *commutative monoids* (for example, ``a + b == b + a``), +and all standard monoids in python-graphblas commute. +And finally, monoids have a default identity such that ``A op identity == A``. Monoids are commonly used for reductions, collapsing all elements down to a single value. 
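(Editor's note: a minimal sketch of the reduction behavior described in the doc change above, using the public `Vector.from_coo` and `monoid` APIs that appear elsewhere in this patch series; the specific values are illustrative only.)

```python
import graphblas as gb
from graphblas import monoid

# A monoid collapses all stored elements down to a single value. Because the
# operation is associative and commutative, the grouping and order of the
# pairwise combinations do not change the result.
v = gb.Vector.from_coo([0, 1, 3], [1, 2, 3], size=5)
total = v.reduce(monoid.plus).new()  # 1 + 2 + 3
print(total.value)  # 6
```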
From b7b25b7a26c54451a871be9d8488d76580e5c08f Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Thu, 29 Jun 2023 12:15:08 -0500 Subject: [PATCH 13/66] Move `dtypes._core` to `core.dtypes` (#479) * Move `dtypes._core` to `core.dtypes` I think this follows established patterns that we typically use. --- graphblas/{dtypes/_core.py => core/dtypes.py} | 0 graphblas/core/operator/binary.py | 2 +- graphblas/core/operator/indexunary.py | 2 +- graphblas/core/operator/unary.py | 2 +- graphblas/core/ss/matrix.py | 2 +- graphblas/core/ss/vector.py | 2 +- graphblas/dtypes/__init__.py | 4 ++-- graphblas/tests/test_dtype.py | 12 ++++++------ pyproject.toml | 2 +- 9 files changed, 14 insertions(+), 14 deletions(-) rename graphblas/{dtypes/_core.py => core/dtypes.py} (100%) diff --git a/graphblas/dtypes/_core.py b/graphblas/core/dtypes.py similarity index 100% rename from graphblas/dtypes/_core.py rename to graphblas/core/dtypes.py diff --git a/graphblas/core/operator/binary.py b/graphblas/core/operator/binary.py index 434ad91cb..88191c39b 100644 --- a/graphblas/core/operator/binary.py +++ b/graphblas/core/operator/binary.py @@ -22,9 +22,9 @@ _supports_complex, lookup_dtype, ) -from ...dtypes._core import _sample_values from ...exceptions import UdfParseError, check_status_carg from .. import _has_numba, _supports_udfs, ffi, lib +from ..dtypes import _sample_values from ..expr import InfixExprBase from .base import ( _SS_OPERATORS, diff --git a/graphblas/core/operator/indexunary.py b/graphblas/core/operator/indexunary.py index 8b1211258..b5351e916 100644 --- a/graphblas/core/operator/indexunary.py +++ b/graphblas/core/operator/indexunary.py @@ -4,9 +4,9 @@ from ... import _STANDARD_OPERATOR_NAMES, indexunary, select from ...dtypes import BOOL, FP64, INT8, INT64, UINT64, lookup_dtype -from ...dtypes._core import _sample_values from ...exceptions import UdfParseError, check_status_carg from .. import _has_numba, ffi, lib +from ..dtypes import _sample_values from .base import OpBase, ParameterizedUdf, TypedOpBase, _call_op, _deserialize_parameterized if _has_numba: diff --git a/graphblas/core/operator/unary.py b/graphblas/core/operator/unary.py index 11ada4e48..437334ccc 100644 --- a/graphblas/core/operator/unary.py +++ b/graphblas/core/operator/unary.py @@ -18,9 +18,9 @@ _supports_complex, lookup_dtype, ) -from ...dtypes._core import _sample_values from ...exceptions import UdfParseError, check_status_carg from .. import _has_numba, ffi, lib +from ..dtypes import _sample_values from ..utils import output_type from .base import ( _SS_OPERATORS, diff --git a/graphblas/core/ss/matrix.py b/graphblas/core/ss/matrix.py index 990d692b9..56c28f52f 100644 --- a/graphblas/core/ss/matrix.py +++ b/graphblas/core/ss/matrix.py @@ -8,10 +8,10 @@ from ... import binary, monoid from ...dtypes import _INDEX, BOOL, INT64, UINT64, lookup_dtype -from ...dtypes._core import _string_to_dtype from ...exceptions import _error_code_lookup, check_status, check_status_carg from .. import NULL, _has_numba, ffi, lib from ..base import call +from ..dtypes import _string_to_dtype from ..operator import get_typed_op from ..scalar import Scalar, _as_scalar, _scalar_index from ..utils import ( diff --git a/graphblas/core/ss/vector.py b/graphblas/core/ss/vector.py index ff9e233eb..a8bff4ee5 100644 --- a/graphblas/core/ss/vector.py +++ b/graphblas/core/ss/vector.py @@ -7,10 +7,10 @@ from ... 
import binary, monoid from ...dtypes import _INDEX, INT64, UINT64, lookup_dtype -from ...dtypes._core import _string_to_dtype from ...exceptions import _error_code_lookup, check_status, check_status_carg from .. import NULL, ffi, lib from ..base import call +from ..dtypes import _string_to_dtype from ..operator import get_typed_op from ..scalar import Scalar, _as_scalar from ..utils import ( diff --git a/graphblas/dtypes/__init__.py b/graphblas/dtypes/__init__.py index 0d26a44a0..49e46d787 100644 --- a/graphblas/dtypes/__init__.py +++ b/graphblas/dtypes/__init__.py @@ -1,4 +1,4 @@ -from ._core import ( +from ..core.dtypes import ( _INDEX, BOOL, FP32, @@ -20,7 +20,7 @@ ) if _supports_complex: - from ._core import FC32, FC64 + from ..core.dtypes import FC32, FC64 def __dir__(): diff --git a/graphblas/tests/test_dtype.py b/graphblas/tests/test_dtype.py index 47a226313..5797dda10 100644 --- a/graphblas/tests/test_dtype.py +++ b/graphblas/tests/test_dtype.py @@ -7,7 +7,7 @@ import pytest import graphblas as gb -from graphblas import dtypes +from graphblas import core, dtypes from graphblas.core import lib from graphblas.dtypes import lookup_dtype @@ -123,7 +123,7 @@ def test_dtype_bad_comparison(): def test_dtypes_match_numpy(): - for key, val in dtypes._core._registry.items(): + for key, val in core.dtypes._registry.items(): try: if key is int or (isinstance(key, str) and key == "int"): # For win64, numpy treats int as int32, not int64 @@ -137,7 +137,7 @@ def test_dtypes_match_numpy(): def test_pickle(): - for val in dtypes._core._registry.values(): + for val in core.dtypes._registry.values(): s = pickle.dumps(val) val2 = pickle.loads(s) if val._is_udt: # pragma: no cover @@ -205,7 +205,7 @@ def test_auto_register(): def test_default_names(): - from graphblas.dtypes._core import _default_name + from graphblas.core.dtypes import _default_name assert _default_name(np.dtype([("x", np.int32), ("y", np.float64)], align=True)) == ( "{'x': INT32, 'y': FP64}" @@ -230,9 +230,9 @@ def test_dtype_to_from_string(): except Exception: pass for dtype in types: - s = dtypes._core._dtype_to_string(dtype) + s = core.dtypes._dtype_to_string(dtype) try: - dtype2 = dtypes._core._string_to_dtype(s) + dtype2 = core.dtypes._string_to_dtype(s) except Exception: with pytest.raises(ValueError, match="Unknown dtype"): lookup_dtype(dtype) diff --git a/pyproject.toml b/pyproject.toml index ddd718ef6..8d3f0b213 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -196,7 +196,7 @@ filterwarnings = [ # This deprecation warning was added in setuptools v67.5.0 (8 Mar 2023). See: # https://setuptools.pypa.io/en/latest/history.html#v67-5-0 - "ignore:pkg_resources is deprecated as an API:DeprecationWarning:pkg_resources", + "ignore:pkg_resources is deprecated as an API:DeprecationWarning:", # sre_parse deprecated in 3.11; this is triggered by awkward 0.10 "ignore:module 'sre_parse' is deprecated:DeprecationWarning:", From e3118dadf1abfde29d2ec3360cfd5de81420a39b Mon Sep 17 00:00:00 2001 From: Sultan Orazbayev Date: Wed, 5 Jul 2023 14:54:54 +0000 Subject: [PATCH 14/66] Update README.md (#442) * Update README.md Follows up on #438: >Regarding "comparing to similar packages in the ecosystem", I think it would actually be informative to compare to e.g. scipy.sparse, networkx, and igraph. I think such a comparison could go in the README and/or documentation. 
* Update README.md

* Update README.md

Co-authored-by: Erik Welch
---
 README.md | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/README.md b/README.md
index 13067df6e..3756fbb0c 100644
--- a/README.md
+++ b/README.md
@@ -204,6 +204,22 @@ w
 # indexes=[0, 1, 3], values=[1, 3, 3]
 ```
 Similar methods exist for BinaryOp, Monoid, and Semiring.
 
+## Relation to other network analysis libraries
+Python-graphblas aims to provide an efficient and consistent expression
+of graph operations using linear algebra. This allows the development of
+high-performance implementations of existing and new graph algorithms
+(also see [`graphblas-algorithms`](https://github.com/python-graphblas/graphblas-algorithms)).
+
+While end-to-end analysis can be done using `python-graphblas`, users
+might find that other libraries in the Python ecosystem provide a more
+convenient high-level interface for data pre-processing and transformation
+(e.g. `pandas`, `scipy.sparse`), visualization (e.g. `networkx`, `igraph`),
+interactive exploration and analysis (e.g. `networkx`, `igraph`), or for
+algorithms that are not (yet) implemented in `graphblas-algorithms` (e.g.
+`networkx`, `igraph`, `scipy.sparse.csgraph`). To facilitate communication with
+other libraries, `graphblas.io` contains multiple connectors; see the
+following section.
+
 ## Import/Export connectors to the Python ecosystem
 `graphblas.io` contains functions for converting to and from:
 ```python

From 79cf3fadc281e21a6fcbffd02c3bb0ab5d21060a Mon Sep 17 00:00:00 2001
From: Sultan Orazbayev
Date: Wed, 5 Jul 2023 14:55:41 +0000
Subject: [PATCH 15/66] doc: update io.rst for awkward array (#457)

* Update io.rst

As raised in #436

* Update documentation for `awkward-array`-related io functions.

* Update the doc string and change the order of the functions to reflect
their intended use case.

Remove trailing blanks

* Use double backticks
---
 docs/api_reference/io.rst |  17 ++++
 graphblas/io/_awkward.py  | 110 ++++++++++++++++++++------------
 2 files changed, 75 insertions(+), 52 deletions(-)

diff --git a/docs/api_reference/io.rst b/docs/api_reference/io.rst
index 1b42c0648..e8f1748fd 100644
--- a/docs/api_reference/io.rst
+++ b/docs/api_reference/io.rst
@@ -49,6 +49,23 @@ These methods require `scipy `_ to be installed.
 
 .. autofunction:: graphblas.io.mmwrite
 
+Awkward Array
+~~~~~~~~~~~~~
+
+`Awkward Array `_ is a library for nested,
+variable-sized data, including arbitrary-length lists, records, mixed types,
+and missing data, using NumPy-like idioms. Note that the intended use of the
+``awkward-array``-related ``io`` functions is to convert ``graphblas`` objects to awkward,
+perform necessary computations/transformations and, if required, convert the
+awkward array back to ``graphblas`` format. To facilitate this conversion process,
+``graphblas.io.to_awkward`` adds a top-level attribute ``format``, describing the
+format of the ``graphblas`` object (this attribute is used by the
+``graphblas.io.from_awkward`` function to reconstruct the ``graphblas`` object).
+
+.. autofunction:: graphblas.io.to_awkward
+
+.. autofunction:: graphblas.io.from_awkward
+
 Visualization
 ~~~~~~~~~~~~~
 
diff --git a/graphblas/io/_awkward.py b/graphblas/io/_awkward.py
index 3119bdf3b..6c476817f 100644
--- a/graphblas/io/_awkward.py
+++ b/graphblas/io/_awkward.py
@@ -7,58 +7,6 @@
 _AwkwardDoublyCompressedMatrix = None
 
 
-def from_awkward(A, *, name=None):
-    """Create a Matrix or Vector from an Awkward Array.
-
-    The Awkward Array must have top-level parameters: format, shape
-
-    The Awkward Array must have top-level attributes based on format:
-    - vec/csr/csc: values, indices
-    - hypercsr/hypercsc: values, indices, offset_labels
-
-    Parameters
-    ----------
-    A : awkward.Array
-        Awkward Array with values and indices
-    name : str, optional
-        Name of resulting Matrix or Vector
-
-    Returns
-    -------
-    Vector or Matrix
-    """
-    params = A.layout.parameters
-    if missing := {"format", "shape"} - params.keys():
-        raise ValueError(f"Missing parameters: {missing}")
-    format = params["format"]
-    shape = params["shape"]
-
-    if len(shape) == 1:
-        if format != "vec":
-            raise ValueError(f"Invalid format for Vector: {format}")
-        return Vector.from_coo(
-            A.indices.layout.data, A.values.layout.data, size=shape[0], name=name
-        )
-    nrows, ncols = shape
-    values = A.values.layout.content.data
-    indptr = A.values.layout.offsets.data
-    if format == "csr":
-        cols = A.indices.layout.content.data
-        return Matrix.from_csr(indptr, cols, values, ncols=ncols, name=name)
-    if format == "csc":
-        rows = A.indices.layout.content.data
-        return Matrix.from_csc(indptr, rows, values, nrows=nrows, name=name)
-    if format == "hypercsr":
-        rows = A.offset_labels.layout.data
-        cols = A.indices.layout.content.data
-        return Matrix.from_dcsr(rows, indptr, cols, values, nrows=nrows, ncols=ncols, name=name)
-    if format == "hypercsc":
-        cols = A.offset_labels.layout.data
-        rows = A.indices.layout.content.data
-        return Matrix.from_dcsc(cols, indptr, rows, values, nrows=nrows, ncols=ncols, name=name)
-    raise ValueError(f"Invalid format for Matrix: {format}")
-
-
 def to_awkward(A, format=None):
     """Create an Awkward Array from a GraphBLAS Matrix.
 
@@ -179,3 +127,61 @@ def indices(self):  # pragma: no branch (???)
     if classname:
         ret = ak.with_name(ret, classname)
     return ret
+
+
+def from_awkward(A, *, name=None):
+    """Create a Matrix or Vector from an Awkward Array.
+
+    The Awkward Array must have top-level parameters: format, shape
+
+    The Awkward Array must have top-level attributes based on format:
+    - vec/csr/csc: values, indices
+    - hypercsr/hypercsc: values, indices, offset_labels
+
+    Parameters
+    ----------
+    A : awkward.Array
+        Awkward Array with values and indices
+    name : str, optional
+        Name of resulting Matrix or Vector
+
+    Returns
+    -------
+    Vector or Matrix
+
+    Note: the intended purpose of this function is to facilitate
+    conversion of an `awkward-array` that was created via the
+    `to_awkward` function. If attempting to convert an arbitrary
+    `awkward-array`, make sure that the top-level attributes and
+    parameters contain the expected values.
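+
+    A minimal round-trip sketch (values are only illustrative; requires the
+    ``awkward`` package)::
+
+        v = Vector.from_coo([0, 2], [1.5, 2.5], size=4)
+        ak_array = to_awkward(v)    # top-level parameter "format" is "vec"
+        w = from_awkward(ak_array)  # reconstructs an equivalent Vector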
+ """ + params = A.layout.parameters + if missing := {"format", "shape"} - params.keys(): + raise ValueError(f"Missing parameters: {missing}") + format = params["format"] + shape = params["shape"] + + if len(shape) == 1: + if format != "vec": + raise ValueError(f"Invalid format for Vector: {format}") + return Vector.from_coo( + A.indices.layout.data, A.values.layout.data, size=shape[0], name=name + ) + nrows, ncols = shape + values = A.values.layout.content.data + indptr = A.values.layout.offsets.data + if format == "csr": + cols = A.indices.layout.content.data + return Matrix.from_csr(indptr, cols, values, ncols=ncols, name=name) + if format == "csc": + rows = A.indices.layout.content.data + return Matrix.from_csc(indptr, rows, values, nrows=nrows, name=name) + if format == "hypercsr": + rows = A.offset_labels.layout.data + cols = A.indices.layout.content.data + return Matrix.from_dcsr(rows, indptr, cols, values, nrows=nrows, ncols=ncols, name=name) + if format == "hypercsc": + cols = A.offset_labels.layout.data + rows = A.indices.layout.content.data + return Matrix.from_dcsc(cols, indptr, rows, values, nrows=nrows, ncols=ncols, name=name) + raise ValueError(f"Invalid format for Matrix: {format}") From f14cbace199a33b10ea010ba5cdcb5b3b5037ac9 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 5 Jul 2023 17:26:30 -0500 Subject: [PATCH 16/66] Update to support SuiteSparse:GraphBLAS 7 and 8 (#456) --- .github/workflows/test_and_build.yml | 36 ++-- .pre-commit-config.yaml | 6 +- docs/env.yml | 2 +- graphblas/binary/ss.py | 1 + graphblas/core/dtypes.py | 2 +- graphblas/core/ss/__init__.py | 3 + graphblas/core/ss/binary.py | 72 +++++++ graphblas/core/ss/config.py | 16 +- graphblas/core/ss/context.py | 146 +++++++++++++++ graphblas/core/ss/descriptor.py | 27 ++- graphblas/core/ss/dtypes.py | 88 +++++++++ graphblas/core/ss/indexunary.py | 77 ++++++++ graphblas/core/ss/select.py | 45 +++++ graphblas/core/ss/unary.py | 62 ++++++ graphblas/dtypes/ss.py | 1 + graphblas/indexunary/ss.py | 1 + graphblas/select/ss.py | 1 + graphblas/ss/__init__.py | 6 +- graphblas/ss/_core.py | 64 ++++++- graphblas/tests/test_ssjit.py | 269 +++++++++++++++++++++++++++ graphblas/unary/ss.py | 1 + pyproject.toml | 5 +- scripts/check_versions.sh | 6 +- 23 files changed, 890 insertions(+), 47 deletions(-) create mode 100644 graphblas/core/ss/binary.py create mode 100644 graphblas/core/ss/context.py create mode 100644 graphblas/core/ss/dtypes.py create mode 100644 graphblas/core/ss/indexunary.py create mode 100644 graphblas/core/ss/select.py create mode 100644 graphblas/core/ss/unary.py create mode 100644 graphblas/tests/test_ssjit.py diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 209060521..d93b4c25c 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -131,9 +131,9 @@ jobs: source upstream weights: | - 1000000 - 1000000 - 1000000 + 1 + 1 + 1 1 - name: Setup mamba uses: conda-incubator/setup-miniconda@v2 @@ -175,22 +175,22 @@ jobs: npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", 
""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", ""]))') else # Python 3.11 npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", ""]))') fi if [[ ${{ steps.sourcetype.outputs.selected }} == "source" || ${{ steps.sourcetype.outputs.selected }} == "upstream" ]]; then # TODO: there are currently issues with some numpy versions when @@ -204,13 +204,13 @@ jobs: # But, it's still useful for us to test with different versions! 
psg="" if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2"]))') + psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", ""]))') psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2"]))') + psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", ""]))') elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions - psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2"]))') + psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", ""]))') else psgver="" fi @@ -260,17 +260,18 @@ jobs: numba=numba${numbaver} sparse=sparse${sparsever} fi - echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psgver${psgver}" + echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psg${psgver}" set -x # echo on - $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli \ + $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler make \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ ${{ matrix.slowtask == 'notebooks' && 'matplotlib nbconvert jupyter "ipython>=7"' || '' }} \ ${{ steps.sourcetype.outputs.selected == 'upstream' && 'cython' || '' }} \ - ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas=7.4"' || '' }} \ - ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} + ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4"' || '' }} \ + ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} \ + ${{ matrix.os == 'windows-latest' && 'cmake' || 'm4' }} - name: Build extension module run: | if [[ ${{ steps.sourcetype.outputs.selected }} == "wheel" ]]; then @@ -291,6 +292,12 @@ jobs: pip install --no-deps git+https://github.com/GraphBLAS/python-suitesparse-graphblas.git@main#egg=suitesparse-graphblas fi pip install --no-deps -e . + - name: python-suitesparse-graphblas tests + run: | + # Don't use our conftest.py ; allow `test_print_jit_config` to fail if it doesn't exist + (cd .. + pytest --pyargs suitesparse_graphblas -s -k test_print_jit_config || true + pytest -v --pyargs suitesparse_graphblas) - name: Unit tests run: | A=${{ needs.rngs.outputs.mapnumpy == 'A' || '' }} ; B=${{ needs.rngs.outputs.mapnumpy == 'B' || '' }} @@ -318,7 +325,6 @@ jobs: if [[ $H && $normal ]] ; then if [[ $macos ]] ; then echo " $vanilla" ; elif [[ $windows ]] ; then echo " $suitesparse" ; fi ; fi)$( \ if [[ $H && $bizarro ]] ; then if [[ $macos ]] ; then echo " $suitesparse" ; elif [[ $windows ]] ; then echo " $vanilla" ; fi ; fi) echo ${args} - (cd .. 
&& pytest -v --pyargs suitesparse_graphblas) # Don't use our conftest.py set -x # echo on coverage run -m pytest --color=yes --randomly -v ${args} \ ${{ matrix.slowtask == 'pytest_normal' && '--runslow' || '' }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f0ca307e8..726538e16 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.7.0 + rev: v3.8.0 hooks: - id: pyupgrade args: [--py38-plus] @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.275 + rev: v0.0.277 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.275 + rev: v0.0.277 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/docs/env.yml b/docs/env.yml index 3636cfa2d..c0c4c8999 100644 --- a/docs/env.yml +++ b/docs/env.yml @@ -8,7 +8,7 @@ dependencies: # python-graphblas dependencies - donfig - numba - - python-suitesparse-graphblas>=7.4.0.0,<8 + - python-suitesparse-graphblas>=7.4.0.0 - pyyaml # extra dependencies - matplotlib diff --git a/graphblas/binary/ss.py b/graphblas/binary/ss.py index 97852fc12..0c294e322 100644 --- a/graphblas/binary/ss.py +++ b/graphblas/binary/ss.py @@ -1,4 +1,5 @@ from ..core import operator +from ..core.ss.binary import register_new # noqa: F401 _delayed = {} diff --git a/graphblas/core/dtypes.py b/graphblas/core/dtypes.py index 345c1be81..d7a83c99b 100644 --- a/graphblas/core/dtypes.py +++ b/graphblas/core/dtypes.py @@ -22,7 +22,7 @@ def __init__(self, name, gb_obj, gb_name, c_type, numba_type, np_type): self.gb_name = gb_name self.c_type = c_type self.numba_type = numba_type - self.np_type = np.dtype(np_type) + self.np_type = np.dtype(np_type) if np_type is not None else None def __repr__(self): return self.name diff --git a/graphblas/core/ss/__init__.py b/graphblas/core/ss/__init__.py index e69de29bb..c2e83ddcc 100644 --- a/graphblas/core/ss/__init__.py +++ b/graphblas/core/ss/__init__.py @@ -0,0 +1,3 @@ +import suitesparse_graphblas as _ssgb + +_IS_SSGB7 = _ssgb.__version__.split(".", 1)[0] == "7" diff --git a/graphblas/core/ss/binary.py b/graphblas/core/ss/binary.py new file mode 100644 index 000000000..898257fac --- /dev/null +++ b/graphblas/core/ss/binary.py @@ -0,0 +1,72 @@ +from ... import backend +from ...dtypes import lookup_dtype +from ...exceptions import check_status_carg +from .. import NULL, ffi, lib +from ..operator.base import TypedOpBase +from ..operator.binary import BinaryOp, TypedUserBinaryOp +from . 
import _IS_SSGB7 + +ffi_new = ffi.new + + +class TypedJitBinaryOp(TypedOpBase): + __slots__ = "_monoid", "_jit_c_definition" + opclass = "BinaryOp" + + def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition, dtype2=None): + super().__init__(parent, name, type_, return_type, gb_obj, name, dtype2=dtype2) + self._monoid = None + self._jit_c_definition = jit_c_definition + + @property + def jit_c_definition(self): + return self._jit_c_definition + + monoid = TypedUserBinaryOp.monoid + commutes_to = TypedUserBinaryOp.commutes_to + _semiring_commutes_to = TypedUserBinaryOp._semiring_commutes_to + is_commutative = TypedUserBinaryOp.is_commutative + type2 = TypedUserBinaryOp.type2 + __call__ = TypedUserBinaryOp.__call__ + + +def register_new(name, jit_c_definition, left_type, right_type, ret_type): + if backend != "suitesparse": # pragma: no cover (safety) + raise RuntimeError( + "`gb.binary.ss.register_new` invalid when not using 'suitesparse' backend" + ) + if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise RuntimeError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) + left_type = lookup_dtype(left_type) + right_type = lookup_dtype(right_type) + ret_type = lookup_dtype(ret_type) + name = name if name.startswith("ss.") else f"ss.{name}" + module, funcname = BinaryOp._remove_nesting(name) + + rv = BinaryOp(name) + gb_obj = ffi_new("GrB_BinaryOp*") + check_status_carg( + lib.GxB_BinaryOp_new( + gb_obj, + NULL, + ret_type._carg, + left_type._carg, + right_type._carg, + ffi_new("char[]", funcname.encode()), + ffi_new("char[]", jit_c_definition.encode()), + ), + "BinaryOp", + gb_obj[0], + ) + op = TypedJitBinaryOp( + rv, funcname, left_type, ret_type, gb_obj[0], jit_c_definition, dtype2=right_type + ) + rv._add(op) + setattr(module, funcname, rv) + return rv diff --git a/graphblas/core/ss/config.py b/graphblas/core/ss/config.py index 89536479d..433716bb3 100644 --- a/graphblas/core/ss/config.py +++ b/graphblas/core/ss/config.py @@ -65,7 +65,7 @@ def __getitem__(self, key): raise KeyError(key) key_obj, ctype = self._options[key] is_bool = ctype == "bool" - if is_context := (key in self._context_keys): # pragma: no cover (suitesparse 8) + if is_context := (key in self._context_keys): get_function_base = self._context_get_function else: get_function_base = self._get_function @@ -76,14 +76,14 @@ def __getitem__(self, key): get_function_name = f"{get_function_base}_INT64" elif ctype.startswith("double"): get_function_name = f"{get_function_base}_FP64" - elif ctype.startswith("char"): # pragma: no cover (suitesparse 8) + elif ctype.startswith("char"): get_function_name = f"{get_function_base}_CHAR" else: # pragma: no cover (sanity) raise ValueError(ctype) get_function = getattr(lib, get_function_name) is_array = "[" in ctype val_ptr = ffi.new(ctype if is_array else f"{ctype}*") - if is_context: # pragma: no cover (suitesparse 8) + if is_context: info = get_function(self._context._carg, key_obj, val_ptr) elif self._parent is None: info = get_function(key_obj, val_ptr) @@ -105,7 +105,7 @@ def __getitem__(self, key): return rv if is_bool: return bool(val_ptr[0]) - if ctype.startswith("char"): # pragma: no cover (suitesparse 8) + if ctype.startswith("char"): return ffi.string(val_ptr[0]).decode() return val_ptr[0] raise _error_code_lookup[info](f"Failed to get info for {key!r}") # pragma: no cover @@ -117,7 +117,7 @@ def __setitem__(self, key, val): if key in 
self._read_only: raise ValueError(f"Config option {key!r} is read-only") key_obj, ctype = self._options[key] - if is_context := (key in self._context_keys): # pragma: no cover (suitesparse 8) + if is_context := (key in self._context_keys): set_function_base = self._context_set_function else: set_function_base = self._set_function @@ -130,7 +130,7 @@ def __setitem__(self, key, val): set_function_name = f"{set_function_base}_INT64_ARRAY" elif ctype.startswith("double["): set_function_name = f"{set_function_base}_FP64_ARRAY" - elif ctype.startswith("char"): # pragma: no cover (suitesparse 8) + elif ctype.startswith("char"): set_function_name = f"{set_function_base}_CHAR" else: # pragma: no cover (sanity) raise ValueError(ctype) @@ -174,11 +174,11 @@ def __setitem__(self, key, val): f"expected {size}, got {vals.size}: {val}" ) val_obj = ffi.from_buffer(ctype, vals) - elif ctype.startswith("char"): # pragma: no cover (suitesparse 8) + elif ctype.startswith("char"): val_obj = ffi.new("char[]", val.encode()) else: val_obj = ffi.cast(ctype, val) - if is_context: # pragma: no cover (suitesparse 8) + if is_context: if self._context is None: from .context import Context diff --git a/graphblas/core/ss/context.py b/graphblas/core/ss/context.py new file mode 100644 index 000000000..9b48bcaa4 --- /dev/null +++ b/graphblas/core/ss/context.py @@ -0,0 +1,146 @@ +import threading + +from ...exceptions import InvalidValue, check_status, check_status_carg +from .. import ffi, lib +from . import _IS_SSGB7 +from .config import BaseConfig + +ffi_new = ffi.new +if _IS_SSGB7: + # Context was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise ImportError( + "Context was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) + + +class Context(BaseConfig): + _context_keys = {"chunk", "gpu_id", "nthreads"} + _options = { + "chunk": (lib.GxB_CONTEXT_CHUNK, "double"), + "gpu_id": (lib.GxB_CONTEXT_GPU_ID, "int"), + "nthreads": (lib.GxB_CONTEXT_NTHREADS, "int"), + } + _defaults = { + "nthreads": 0, + "chunk": 0, + "gpu_id": -1, # -1 means no GPU + } + + def __init__(self, engage=True, *, stack=True, nthreads=None, chunk=None, gpu_id=None): + super().__init__() + self.gb_obj = ffi_new("GxB_Context*") + check_status_carg(lib.GxB_Context_new(self.gb_obj), "Context", self.gb_obj[0]) + if stack: + context = threadlocal.context + self["nthreads"] = context["nthreads"] if nthreads is None else nthreads + self["chunk"] = context["chunk"] if chunk is None else chunk + self["gpu_id"] = context["gpu_id"] if gpu_id is None else gpu_id + else: + if nthreads is not None: + self["nthreads"] = nthreads + if chunk is not None: + self["chunk"] = chunk + if gpu_id is not None: + self["gpu_id"] = gpu_id + self._prev_context = None + if engage: + self.engage() + + @classmethod + def _from_obj(cls, gb_obj=None): + self = object.__new__(cls) + self.gb_obj = gb_obj + self._prev_context = None + super().__init__(self) + return self + + @property + def _carg(self): + return self.gb_obj[0] + + def dup(self, engage=True, *, nthreads=None, chunk=None, gpu_id=None): + if nthreads is None: + nthreads = self["nthreads"] + if chunk is None: + chunk = self["chunk"] + if gpu_id is None: + gpu_id = self["gpu_id"] + return type(self)(engage, stack=False, nthreads=nthreads, chunk=chunk, gpu_id=gpu_id) + + def __del__(self): + gb_obj = getattr(self, "gb_obj", None) + if gb_obj is not None and lib is not None: # pragma: no branch (safety) + try: + self.disengage() + except 
InvalidValue: + pass + lib.GxB_Context_free(gb_obj) + + def engage(self): + if self._prev_context is None and (context := threadlocal.context) is not self: + self._prev_context = context + check_status(lib.GxB_Context_engage(self._carg), self) + threadlocal.context = self + + def _engage(self): + """Like engage, but don't set to threadlocal.context. + + This is useful if you want to disengage when the object is deleted by going out of scope. + """ + if self._prev_context is None and (context := threadlocal.context) is not self: + self._prev_context = context + check_status(lib.GxB_Context_engage(self._carg), self) + + def disengage(self): + prev_context = self._prev_context + self._prev_context = None + if threadlocal.context is self: + if prev_context is not None: + threadlocal.context = prev_context + prev_context.engage() + else: + threadlocal.context = global_context + check_status(lib.GxB_Context_disengage(self._carg), self) + elif prev_context is not None and threadlocal.context is prev_context: + prev_context.engage() + else: + check_status(lib.GxB_Context_disengage(self._carg), self) + + def __enter__(self): + self.engage() + + def __exit__(self, exc_type, exc, exc_tb): + self.disengage() + + @property + def _context(self): + return self + + @_context.setter + def _context(self, val): + if val is not None and val is not self: + raise AttributeError("'_context' attribute is read-only") + + +class GlobalContext(Context): + @property + def _carg(self): + return self.gb_obj + + def __del__(self): # pragma: no cover (safety) + pass + + +global_context = GlobalContext._from_obj(lib.GxB_CONTEXT_WORLD) + + +class ThreadLocal(threading.local): + """Hold the active context for the current thread.""" + + context = global_context + + +threadlocal = ThreadLocal() diff --git a/graphblas/core/ss/descriptor.py b/graphblas/core/ss/descriptor.py index 43553f5ea..52c43b95d 100644 --- a/graphblas/core/ss/descriptor.py +++ b/graphblas/core/ss/descriptor.py @@ -1,6 +1,7 @@ from ...exceptions import check_status, check_status_carg from .. import ffi, lib from ..descriptor import Descriptor +from . import _IS_SSGB7 from .config import BaseConfig ffi_new = ffi.new @@ -18,6 +19,8 @@ class _DescriptorConfig(BaseConfig): _get_function = "GxB_Desc_get" _set_function = "GxB_Desc_set" + if not _IS_SSGB7: + _context_keys = {"chunk", "gpu_id", "nthreads"} _options = { # GrB "output_replace": (lib.GrB_OUTP, "GrB_Desc_Value"), @@ -26,13 +29,25 @@ class _DescriptorConfig(BaseConfig): "transpose_first": (lib.GrB_INP0, "GrB_Desc_Value"), "transpose_second": (lib.GrB_INP1, "GrB_Desc_Value"), # GxB - "nthreads": (lib.GxB_DESCRIPTOR_NTHREADS, "int"), - "chunk": (lib.GxB_DESCRIPTOR_CHUNK, "double"), "axb_method": (lib.GxB_AxB_METHOD, "GrB_Desc_Value"), "sort": (lib.GxB_SORT, "int"), "secure_import": (lib.GxB_IMPORT, "int"), - # "gpu_control": (GxB_DESCRIPTOR_GPU_CONTROL, "GrB_Desc_Value"), # Coming soon... } + if _IS_SSGB7: + _options.update( + { + "nthreads": (lib.GxB_DESCRIPTOR_NTHREADS, "int"), + "chunk": (lib.GxB_DESCRIPTOR_CHUNK, "double"), + } + ) + else: + _options.update( + { + "chunk": (lib.GxB_CONTEXT_CHUNK, "double"), + "gpu_id": (lib.GxB_CONTEXT_GPU_ID, "int"), + "nthreads": (lib.GxB_CONTEXT_NTHREADS, "int"), + } + ) _enumerations = { # GrB "output_replace": { @@ -71,10 +86,6 @@ class _DescriptorConfig(BaseConfig): False: False, True: lib.GxB_SORT, }, - # "gpu_control": { # Coming soon... 
-    #     "always": lib.GxB_GPU_ALWAYS,
-    #     "never": lib.GxB_GPU_NEVER,
-    # },
     }
     _defaults = {
         # GrB
@@ -90,6 +101,8 @@ class _DescriptorConfig(BaseConfig):
         "sort": False,
         "secure_import": False,
     }
+    if not _IS_SSGB7:
+        _defaults["gpu_id"] = -1
 
     def __init__(self):
         gb_obj = ffi_new("GrB_Descriptor*")
diff --git a/graphblas/core/ss/dtypes.py b/graphblas/core/ss/dtypes.py
new file mode 100644
index 000000000..d2eb5b416
--- /dev/null
+++ b/graphblas/core/ss/dtypes.py
@@ -0,0 +1,88 @@
+import numpy as np
+
+from ... import backend, core, dtypes
+from ...exceptions import check_status_carg
+from .. import _has_numba, ffi, lib
+from . import _IS_SSGB7
+
+ffi_new = ffi.new
+if _has_numba:
+    import numba
+    from cffi import FFI
+    from numba.core.typing import cffi_utils
+
+    jit_ffi = FFI()
+
+
+def register_new(name, jit_c_definition, *, np_type=None):
+    if backend != "suitesparse":  # pragma: no cover (safety)
+        raise RuntimeError(
+            "`gb.dtypes.ss.register_new` invalid when not using 'suitesparse' backend"
+        )
+    if _IS_SSGB7:
+        # JIT was introduced in SuiteSparse:GraphBLAS 8.0
+        import suitesparse_graphblas as ssgb
+
+        raise RuntimeError(
+            "JIT was added to SuiteSparse:GraphBLAS in version 8; "
+            f"current version is {ssgb.__version__}"
+        )
+    if not name.isidentifier():
+        raise ValueError(f"`name` argument must be a valid Python identifier; got: {name!r}")
+    if name in core.dtypes._registry or hasattr(dtypes.ss, name):
+        raise ValueError(f"{name!r} name for dtype is unavailable")
+    if len(name) > lib.GxB_MAX_NAME_LEN:
+        raise ValueError(
+            f"`name` argument is too large. Max size is {lib.GxB_MAX_NAME_LEN}; got {len(name)}"
+        )
+    if name not in jit_c_definition:
+        raise ValueError("`name` argument must be same name as the typedef in `jit_c_definition`")
+    if "struct" not in jit_c_definition:
+        raise ValueError("Only struct typedefs are currently allowed for JIT dtypes")
+
+    gb_obj = ffi.new("GrB_Type*")
+    status = lib.GxB_Type_new(
+        gb_obj, 0, ffi_new("char[]", name.encode()), ffi_new("char[]", jit_c_definition.encode())
+    )
+    check_status_carg(status, "Type", gb_obj[0])
+
+    # Let SuiteSparse:GraphBLAS determine the size (we gave 0 as size above)
+    size_ptr = ffi_new("size_t*")
+    check_status_carg(lib.GxB_Type_size(size_ptr, gb_obj[0]), "Type", gb_obj[0])
+    size = size_ptr[0]
+
+    save_np_type = True
+    if np_type is None and _has_numba and numba.__version__[:5] > "0.56.":
+        jit_ffi.cdef(jit_c_definition)
+        numba_type = cffi_utils.map_type(jit_ffi.typeof(name), use_record_dtype=True)
+        np_type = numba_type.dtype
+        if np_type.itemsize != size:  # pragma: no cover
+            raise RuntimeError(
+                "Size of compiled user-defined type does not match size of inferred numpy type: "
+                f"{size} != {np_type.itemsize}.\n\n"
+                f"UDT C definition: {jit_c_definition}\n"
+                f"numpy dtype: {np_type}\n\n"
+                "To get around this, you may pass the `np_type=` keyword argument."
+            )
+    else:
+        if np_type is not None:
+            np_type = np.dtype(np_type)
+        else:
+            # Not an ideal numpy type, but minimally useful
+            np_type = np.dtype((np.uint8, size))
+            save_np_type = False
+        if _has_numba:
+            numba_type = numba.typeof(np_type).dtype
+        else:
+            numba_type = None
+
+    # For now, let's use "opaque" unsigned bytes for the c type.
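+    # (The `None` passed below is `gb_name`: JIT-compiled dtypes have no
+    # built-in GraphBLAS name. The `uint8_t[{size}]` c type matches the size
+    # reported by GxB_Type_size above.)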
+ rv = core.dtypes.DataType(name, gb_obj, None, f"uint8_t[{size}]", numba_type, np_type) + core.dtypes._registry[gb_obj] = rv + if save_np_type or np_type not in core.dtypes._registry: + core.dtypes._registry[np_type] = rv + if numba_type is not None and (save_np_type or numba_type not in core.dtypes._registry): + core.dtypes._registry[numba_type] = rv + core.dtypes._registry[numba_type.name] = rv + setattr(dtypes.ss, name, rv) + return rv diff --git a/graphblas/core/ss/indexunary.py b/graphblas/core/ss/indexunary.py new file mode 100644 index 000000000..c0f185737 --- /dev/null +++ b/graphblas/core/ss/indexunary.py @@ -0,0 +1,77 @@ +from ... import backend +from ...dtypes import BOOL, lookup_dtype +from ...exceptions import check_status_carg +from .. import NULL, ffi, lib +from ..operator.base import TypedOpBase +from ..operator.indexunary import IndexUnaryOp, TypedUserIndexUnaryOp +from . import _IS_SSGB7 + +ffi_new = ffi.new + + +class TypedJitIndexUnaryOp(TypedOpBase): + __slots__ = "_jit_c_definition" + opclass = "IndexUnaryOp" + + def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition, dtype2=None): + super().__init__(parent, name, type_, return_type, gb_obj, name, dtype2=dtype2) + self._jit_c_definition = jit_c_definition + + @property + def jit_c_definition(self): + return self._jit_c_definition + + __call__ = TypedUserIndexUnaryOp.__call__ + + +def register_new(name, jit_c_definition, input_type, thunk_type, ret_type): + if backend != "suitesparse": # pragma: no cover (safety) + raise RuntimeError( + "`gb.indexunary.ss.register_new` invalid when not using 'suitesparse' backend" + ) + if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise RuntimeError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) + input_type = lookup_dtype(input_type) + thunk_type = lookup_dtype(thunk_type) + ret_type = lookup_dtype(ret_type) + name = name if name.startswith("ss.") else f"ss.{name}" + module, funcname = IndexUnaryOp._remove_nesting(name) + + rv = IndexUnaryOp(name) + gb_obj = ffi_new("GrB_IndexUnaryOp*") + check_status_carg( + lib.GxB_IndexUnaryOp_new( + gb_obj, + NULL, + ret_type._carg, + input_type._carg, + thunk_type._carg, + ffi_new("char[]", funcname.encode()), + ffi_new("char[]", jit_c_definition.encode()), + ), + "IndexUnaryOp", + gb_obj[0], + ) + op = TypedJitIndexUnaryOp( + rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition, dtype2=thunk_type + ) + rv._add(op) + if ret_type == BOOL: + from ..operator.select import SelectOp + from .select import TypedJitSelectOp + + select_module, funcname = SelectOp._remove_nesting(name, strict=False) + selectop = SelectOp(name) + op2 = TypedJitSelectOp( + rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition, dtype2=thunk_type + ) + selectop._add(op2) + setattr(select_module, funcname, selectop) + setattr(module, funcname, rv) + return rv diff --git a/graphblas/core/ss/select.py b/graphblas/core/ss/select.py new file mode 100644 index 000000000..37c352b67 --- /dev/null +++ b/graphblas/core/ss/select.py @@ -0,0 +1,45 @@ +from ... import backend, indexunary +from ...dtypes import BOOL, lookup_dtype +from .. import ffi +from ..operator.base import TypedOpBase +from ..operator.select import SelectOp, TypedUserSelectOp +from . 
import _IS_SSGB7 + +ffi_new = ffi.new + + +class TypedJitSelectOp(TypedOpBase): + __slots__ = "_jit_c_definition" + opclass = "SelectOp" + + def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition, dtype2=None): + super().__init__(parent, name, type_, return_type, gb_obj, name, dtype2=dtype2) + self._jit_c_definition = jit_c_definition + + @property + def jit_c_definition(self): + return self._jit_c_definition + + __call__ = TypedUserSelectOp.__call__ + + +def register_new(name, jit_c_definition, input_type, thunk_type): + if backend != "suitesparse": # pragma: no cover (safety) + raise RuntimeError( + "`gb.select.ss.register_new` invalid when not using 'suitesparse' backend" + ) + if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise RuntimeError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) + input_type = lookup_dtype(input_type) + thunk_type = lookup_dtype(thunk_type) + name = name if name.startswith("ss.") else f"ss.{name}" + # Register to both `gb.indexunary.ss` and `gb.select.ss.` + indexunary.ss.register_new(name, jit_c_definition, input_type, thunk_type, BOOL) + module, funcname = SelectOp._remove_nesting(name, strict=False) + return getattr(module, funcname) diff --git a/graphblas/core/ss/unary.py b/graphblas/core/ss/unary.py new file mode 100644 index 000000000..97c4614c0 --- /dev/null +++ b/graphblas/core/ss/unary.py @@ -0,0 +1,62 @@ +from ... import backend +from ...dtypes import lookup_dtype +from ...exceptions import check_status_carg +from .. import NULL, ffi, lib +from ..operator.base import TypedOpBase +from ..operator.unary import TypedUserUnaryOp, UnaryOp +from . import _IS_SSGB7 + +ffi_new = ffi.new + + +class TypedJitUnaryOp(TypedOpBase): + __slots__ = "_jit_c_definition" + opclass = "UnaryOp" + + def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition): + super().__init__(parent, name, type_, return_type, gb_obj, name) + self._jit_c_definition = jit_c_definition + + @property + def jit_c_definition(self): + return self._jit_c_definition + + __call__ = TypedUserUnaryOp.__call__ + + +def register_new(name, jit_c_definition, input_type, ret_type): + if backend != "suitesparse": # pragma: no cover (safety) + raise RuntimeError( + "`gb.unary.ss.register_new` invalid when not using 'suitesparse' backend" + ) + if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise RuntimeError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) + input_type = lookup_dtype(input_type) + ret_type = lookup_dtype(ret_type) + name = name if name.startswith("ss.") else f"ss.{name}" + module, funcname = UnaryOp._remove_nesting(name) + + rv = UnaryOp(name) + gb_obj = ffi_new("GrB_UnaryOp*") + check_status_carg( + lib.GxB_UnaryOp_new( + gb_obj, + NULL, + ret_type._carg, + input_type._carg, + ffi_new("char[]", funcname.encode()), + ffi_new("char[]", jit_c_definition.encode()), + ), + "UnaryOp", + gb_obj[0], + ) + op = TypedJitUnaryOp(rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition) + rv._add(op) + setattr(module, funcname, rv) + return rv diff --git a/graphblas/dtypes/ss.py b/graphblas/dtypes/ss.py index e69de29bb..9f6083e01 100644 --- a/graphblas/dtypes/ss.py +++ b/graphblas/dtypes/ss.py @@ -0,0 +1 @@ +from ..core.ss.dtypes import register_new # noqa: F401 diff --git a/graphblas/indexunary/ss.py 
b/graphblas/indexunary/ss.py
index 97852fc12..58218df6f 100644
--- a/graphblas/indexunary/ss.py
+++ b/graphblas/indexunary/ss.py
@@ -1,4 +1,5 @@
 from ..core import operator
+from ..core.ss.indexunary import register_new  # noqa: F401
 
 _delayed = {}
diff --git a/graphblas/select/ss.py b/graphblas/select/ss.py
index 97852fc12..173067382 100644
--- a/graphblas/select/ss.py
+++ b/graphblas/select/ss.py
@@ -1,4 +1,5 @@
 from ..core import operator
+from ..core.ss.select import register_new  # noqa: F401
 
 _delayed = {}
diff --git a/graphblas/ss/__init__.py b/graphblas/ss/__init__.py
index b36bc1bdc..b723d9cb8 100644
--- a/graphblas/ss/__init__.py
+++ b/graphblas/ss/__init__.py
@@ -1 +1,5 @@
-from ._core import about, concat, config, diag
+from ._core import _IS_SSGB7, about, concat, config, diag
+
+if not _IS_SSGB7:
+    # Context was introduced in SuiteSparse:GraphBLAS 8.0
+    from ..core.ss.context import Context, global_context
diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py
index 53287f1a5..2639a7709 100644
--- a/graphblas/ss/_core.py
+++ b/graphblas/ss/_core.py
@@ -5,6 +5,7 @@
 from ..core.descriptor import lookup as descriptor_lookup
 from ..core.matrix import Matrix, TransposedMatrix
 from ..core.scalar import _as_scalar
+from ..core.ss import _IS_SSGB7
 from ..core.ss.config import BaseConfig
 from ..core.ss.matrix import _concat_mn
 from ..core.vector import Vector
@@ -126,13 +127,23 @@ class GlobalConfig(BaseConfig):
         Enable diagnostic printing from SuiteSparse:GraphBLAS
     print_1based : bool
     gpu_control : str, {"always", "never"}
+        Only available for SuiteSparse:GraphBLAS 7
+        **GPU support is a work in progress--not recommended for use**
     gpu_chunk : double
+        Only available for SuiteSparse:GraphBLAS 7
+        **GPU support is a work in progress--not recommended for use**
+    gpu_id : int
+        Which GPU to use; default is -1, which means do not run on the GPU.
+        Only available for SuiteSparse:GraphBLAS 8
+        **GPU support is a work in progress--not recommended for use**
 
     Setting values to None restores the default value for most configurations.
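+
+    A small usage sketch (assuming the suitesparse backend)::
+
+        import graphblas as gb
+
+        gb.ss.config["burble"] = True   # enable diagnostic printing
+        gb.ss.config["burble"] = None   # restore the default (off)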
""" _get_function = "GxB_Global_Option_get" _set_function = "GxB_Global_Option_set" + if not _IS_SSGB7: + _context_keys = {"chunk", "gpu_id", "nthreads"} _null_valid = {"bitmap_switch"} _options = { # Matrix/Vector format @@ -147,10 +158,32 @@ class GlobalConfig(BaseConfig): # Diagnostics (skipping "printf" and "flush" for now) "burble": (lib.GxB_BURBLE, "bool"), "print_1based": (lib.GxB_PRINT_1BASED, "bool"), - # CUDA GPU control - "gpu_control": (lib.GxB_GLOBAL_GPU_CONTROL, "GrB_Desc_Value"), - "gpu_chunk": (lib.GxB_GLOBAL_GPU_CHUNK, "double"), } + if _IS_SSGB7: + _options.update( + { + "gpu_control": (lib.GxB_GLOBAL_GPU_CONTROL, "GrB_Desc_Value"), + "gpu_chunk": (lib.GxB_GLOBAL_GPU_CHUNK, "double"), + } + ) + else: + _options.update( + { + # JIT control + "jit_c_control": (lib.GxB_JIT_C_CONTROL, "int"), + "jit_use_cmake": (lib.GxB_JIT_USE_CMAKE, "bool"), + "jit_c_compiler_name": (lib.GxB_JIT_C_COMPILER_NAME, "char*"), + "jit_c_compiler_flags": (lib.GxB_JIT_C_COMPILER_FLAGS, "char*"), + "jit_c_linker_flags": (lib.GxB_JIT_C_LINKER_FLAGS, "char*"), + "jit_c_libraries": (lib.GxB_JIT_C_LIBRARIES, "char*"), + "jit_c_cmake_libs": (lib.GxB_JIT_C_CMAKE_LIBS, "char*"), + "jit_c_preface": (lib.GxB_JIT_C_PREFACE, "char*"), + "jit_error_log": (lib.GxB_JIT_ERROR_LOG, "char*"), + "jit_cache_path": (lib.GxB_JIT_CACHE_PATH, "char*"), + # CUDA GPU control + "gpu_id": (lib.GxB_GLOBAL_GPU_ID, "int"), + } + ) # Values to restore defaults _defaults = { "hyper_switch": lib.GxB_HYPER_DEFAULT, @@ -161,17 +194,28 @@ class GlobalConfig(BaseConfig): "burble": 0, "print_1based": 0, } + if not _IS_SSGB7: + _defaults["gpu_id"] = -1 # -1 means no GPU _enumerations = { "format": { "by_row": lib.GxB_BY_ROW, "by_col": lib.GxB_BY_COL, # "no_format": lib.GxB_NO_FORMAT, # Used by iterators; not valid here }, - "gpu_control": { + } + if _IS_SSGB7: + _enumerations["gpu_control"] = { "always": lib.GxB_GPU_ALWAYS, "never": lib.GxB_GPU_NEVER, - }, - } + } + else: + _enumerations["jit_c_control"] = { + "off": lib.GxB_JIT_OFF, + "pause": lib.GxB_JIT_PAUSE, + "run": lib.GxB_JIT_RUN, + "load": lib.GxB_JIT_LOAD, + "on": lib.GxB_JIT_ON, + } class About(Mapping): @@ -258,4 +302,10 @@ def __len__(self): about = About() -config = GlobalConfig() +if _IS_SSGB7: + config = GlobalConfig() +else: + # Context was introduced in SuiteSparse:GraphBLAS 8.0 + from ..core.ss.context import global_context + + config = GlobalConfig(context=global_context) diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py new file mode 100644 index 000000000..57cb2bbba --- /dev/null +++ b/graphblas/tests/test_ssjit.py @@ -0,0 +1,269 @@ +import os +import sys + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +import graphblas as gb +from graphblas import backend, binary, dtypes, indexunary, select, unary +from graphblas.core import _supports_udfs as supports_udfs +from graphblas.core.ss import _IS_SSGB7 + +from .conftest import autocompute, burble + +from graphblas import Vector # isort:skip (for dask-graphblas) + +try: + import numba +except ImportError: + numba = None + +if backend != "suitesparse": + pytest.skip("not suitesparse backend", allow_module_level=True) + + +@pytest.fixture(scope="module", autouse=True) +def _setup_jit(): + # Configuration values below were obtained from the output of the JIT config + # in CI, but with paths changed to use `{conda_prefix}` where appropriate. 
+ if "CONDA_PREFIX" not in os.environ or _IS_SSGB7: + return + conda_prefix = os.environ["CONDA_PREFIX"] + gb.ss.config["jit_c_control"] = "on" + if sys.platform == "linux": + gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/x86_64-conda-linux-gnu-cc" + gb.ss.config["jit_c_compiler_flags"] = ( + "-march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong " + f"-fno-plt -O2 -ffunction-sections -pipe -isystem {conda_prefix}/include -Wundef " + "-std=c11 -lm -Wno-pragmas -fexcess-precision=fast -fcx-limited-range " + "-fno-math-errno -fwrapv -O3 -DNDEBUG -fopenmp -fPIC" + ) + gb.ss.config["jit_c_linker_flags"] = ( + "-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now " + "-Wl,--disable-new-dtags -Wl,--gc-sections -Wl,--allow-shlib-undefined " + f"-Wl,-rpath,{conda_prefix}/lib -Wl,-rpath-link,{conda_prefix}/lib " + f"-L{conda_prefix}/lib -shared" + ) + gb.ss.config["jit_c_libraries"] = ( + f"-lm -ldl {conda_prefix}/lib/libgomp.so " + f"{conda_prefix}/x86_64-conda-linux-gnu/sysroot/usr/lib/libpthread.so" + ) + gb.ss.config["jit_c_cmake_libs"] = ( + f"m;dl;{conda_prefix}/lib/libgomp.so;" + f"{conda_prefix}/x86_64-conda-linux-gnu/sysroot/usr/lib/libpthread.so" + ) + elif sys.platform == "darwin": + gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/clang" + gb.ss.config["jit_c_compiler_flags"] = ( + "-march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE " + f"-fstack-protector-strong -O2 -pipe -isystem {conda_prefix}/include -DGBNCPUFEAT " + "-Wno-pointer-sign -O3 -DNDEBUG -fopenmp=libomp -fPIC -arch x86_64" + ) + gb.ss.config["jit_c_linker_flags"] = ( + "-Wl,-pie -Wl,-headerpad_max_install_names -Wl,-dead_strip_dylibs " + f"-Wl,-rpath,{conda_prefix}/lib -L{conda_prefix}/lib -dynamiclib" + ) + gb.ss.config["jit_c_libraries"] = f"-lm -ldl {conda_prefix}/lib/libomp.dylib" + gb.ss.config["jit_c_cmake_libs"] = f"m;dl;{conda_prefix}/lib/libomp.dylib" + elif sys.platform == "win32": # pragma: no branch (sanity) + if "mingw" in gb.ss.config["jit_c_libraries"]: + # This probably means we're testing a `python-suitesparse-graphblas` wheel + # in a conda environment. This is not yet working. 
+ gb.ss.config["jit_c_control"] = "off" + return + + gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" + gb.ss.config["jit_c_compiler_flags"] = ( + '/DWIN32 /D_WINDOWS -DGBNCPUFEAT /O2 -wd"4244" -wd"4146" -wd"4018" ' + '-wd"4996" -wd"4047" -wd"4554" /O2 /Ob2 /DNDEBUG -openmp' + ) + gb.ss.config["jit_c_linker_flags"] = "/machine:x64" + gb.ss.config["jit_c_libraries"] = "" + gb.ss.config["jit_c_cmake_libs"] = "" + + +@pytest.fixture +def v(): + return Vector.from_coo([1, 3, 4, 6], [1, 1, 2, 0]) + + +@autocompute +def test_jit_udt(): + if _IS_SSGB7: + with pytest.raises(RuntimeError, match="JIT was added"): + dtypes.ss.register_new( + "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;" + ) + return + if gb.ss.config["jit_c_control"] == "off": + return + with burble(): + dtype = dtypes.ss.register_new( + "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;" + ) + assert not hasattr(dtypes, "myquaternion") + assert dtypes.ss.myquaternion is dtype + assert dtype.name == "myquaternion" + assert str(dtype) == "myquaternion" + assert dtype.gb_name is None + v = Vector(dtype, 2) + np_type = np.dtype([("x", "=1.25.0' -conda search 'pandas[channel=conda-forge]>=2.0.2' -conda search 'scipy[channel=conda-forge]>=1.11.0' +conda search 'pandas[channel=conda-forge]>=2.0.3' +conda search 'scipy[channel=conda-forge]>=1.11.1' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.2.4' +conda search 'awkward[channel=conda-forge]>=2.3.0' conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.2' conda search 'numba[channel=conda-forge]>=0.57.1' From cdd9bb40a008d8be79ebb0dd5d415f39eef969b5 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 16 Jul 2023 10:59:57 -0500 Subject: [PATCH 17/66] Add pyopensci badge to README (#482) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 3756fbb0c..4581ef54a 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ [![Tests](https://github.com/python-graphblas/python-graphblas/workflows/Tests/badge.svg?branch=main)](https://github.com/python-graphblas/python-graphblas/actions) [![Docs](https://readthedocs.org/projects/python-graphblas/badge/?version=latest)](https://python-graphblas.readthedocs.io/en/latest/) [![Coverage](https://coveralls.io/repos/python-graphblas/python-graphblas/badge.svg?branch=main)](https://coveralls.io/r/python-graphblas/python-graphblas) +[![pyOpenSci](https://tinyurl.com/y22nb8up)](https://github.com/pyOpenSci/software-review/issues/81)
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7328791.svg)](https://doi.org/10.5281/zenodo.7328791) [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/python-graphblas/python-graphblas/HEAD?filepath=notebooks%2FIntro%20to%20GraphBLAS%20%2B%20SSSP%20example.ipynb) From 8bd80f6e6c9fa60da31347bc3de8cdab4a2a35a9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Jul 2023 13:37:44 -0500 Subject: [PATCH 18/66] Bump pypa/gh-action-pypi-publish from 1.8.7 to 1.8.8 (#484) Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.8.7 to 1.8.8. - [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases) - [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/v1.8.7...v1.8.8) --- .github/workflows/publish_pypi.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index ffac645f5..cbe403724 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -35,7 +35,7 @@ jobs: - name: Check with twine run: python -m twine check --strict dist/* - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@v1.8.7 + uses: pypa/gh-action-pypi-publish@v1.8.8 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} From ae0366ad72ed2d24c6d8990c728945e0266b6390 Mon Sep 17 00:00:00 2001 From: Paul Nguyen Date: Wed, 26 Jul 2023 09:39:57 -0500 Subject: [PATCH 19/66] Removed deprecated draw (#485) --- docs/api_reference/io.rst | 2 +- graphblas/io/__init__.py | 1 - graphblas/io/_viz.py | 21 --------------------- 3 files changed, 1 insertion(+), 23 deletions(-) delete mode 100644 graphblas/io/_viz.py diff --git a/docs/api_reference/io.rst b/docs/api_reference/io.rst index e8f1748fd..cd6057a31 100644 --- a/docs/api_reference/io.rst +++ b/docs/api_reference/io.rst @@ -69,4 +69,4 @@ format of the ``graphblas`` object (this attributed is used by the Visualization ~~~~~~~~~~~~~ -.. autofunction:: graphblas.io.draw +.. autofunction:: graphblas.viz.draw diff --git a/graphblas/io/__init__.py b/graphblas/io/__init__.py index 0eafd45c8..b21b20963 100644 --- a/graphblas/io/__init__.py +++ b/graphblas/io/__init__.py @@ -4,4 +4,3 @@ from ._numpy import from_numpy, to_numpy # deprecated from ._scipy import from_scipy_sparse, to_scipy_sparse from ._sparse import from_pydata_sparse, to_pydata_sparse -from ._viz import draw # deprecated diff --git a/graphblas/io/_viz.py b/graphblas/io/_viz.py deleted file mode 100644 index 19211573f..000000000 --- a/graphblas/io/_viz.py +++ /dev/null @@ -1,21 +0,0 @@ -from warnings import warn - - -def draw(m): # pragma: no cover (deprecated) - """Draw a square adjacency Matrix as a graph. - - Requires `networkx `_ and - `matplotlib `_ to be installed. - - Example output: - - .. image:: /_static/img/draw-example.png - """ - from .. 
import viz - - warn( - "`graphblas.io.draw` is deprecated; it has been moved to `graphblas.viz.draw`", - DeprecationWarning, - stacklevel=2, - ) - viz.draw(m) From 347673157ee8afcb6abd9dff2bdca41c0df7560f Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 26 Jul 2023 11:40:20 -0500 Subject: [PATCH 20/66] Allow `__index__` only for integral dtypes on Scalars (#481) --- .pre-commit-config.yaml | 14 ++++++------- graphblas/core/scalar.py | 8 ++++++-- graphblas/core/ss/config.py | 7 +++---- graphblas/core/utils.py | 36 ++++++++++++++++++++++------------ graphblas/dtypes/__init__.py | 3 +++ graphblas/tests/test_scalar.py | 2 ++ scripts/check_versions.sh | 4 ++-- 7 files changed, 46 insertions(+), 28 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 726538e16..b8d767f05 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,7 +7,7 @@ ci: # See: https://pre-commit.ci/#configuration autofix_prs: false - autoupdate_schedule: monthly + autoupdate_schedule: quarterly autoupdate_commit_msg: "chore: update pre-commit hooks" autofix_commit_msg: "style: pre-commit fixes" skip: [pylint, no-commit-to-branch] @@ -51,7 +51,7 @@ repos: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.8.0 + rev: v3.9.0 hooks: - id: pyupgrade args: [--py38-plus] @@ -61,12 +61,12 @@ repos: - id: auto-walrus args: [--line-length, "100"] - repo: https://github.com/psf/black - rev: 23.3.0 + rev: 23.7.0 hooks: - id: black - id: black-jupyter - - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.277 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.0.278 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -93,8 +93,8 @@ repos: types_or: [python, rst, markdown] additional_dependencies: [tomli] files: ^(graphblas|docs)/ - - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.277 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.0.278 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/graphblas/core/scalar.py b/graphblas/core/scalar.py index b55d601af..8a95e1d71 100644 --- a/graphblas/core/scalar.py +++ b/graphblas/core/scalar.py @@ -3,7 +3,7 @@ import numpy as np from .. import backend, binary, config, monoid -from ..dtypes import _INDEX, FP64, lookup_dtype, unify +from ..dtypes import _INDEX, FP64, _index_dtypes, lookup_dtype, unify from ..exceptions import EmptyObject, check_status from . import _has_numba, _supports_udfs, automethods, ffi, lib, utils from .base import BaseExpression, BaseType, call @@ -158,7 +158,11 @@ def __int__(self): def __complex__(self): return complex(self.value) - __index__ = __int__ + @property + def __index__(self): + if self.dtype in _index_dtypes: + return self.__int__ + raise AttributeError("Scalar object only has `__index__` for integral dtypes") def __array__(self, dtype=None): if dtype is None: diff --git a/graphblas/core/ss/config.py b/graphblas/core/ss/config.py index 433716bb3..20cf318e8 100644 --- a/graphblas/core/ss/config.py +++ b/graphblas/core/ss/config.py @@ -1,10 +1,9 @@ from collections.abc import MutableMapping -from numbers import Integral from ...dtypes import lookup_dtype from ...exceptions import _error_code_lookup, check_status from .. 
import NULL, ffi, lib -from ..utils import values_to_numpy_buffer +from ..utils import maybe_integral, values_to_numpy_buffer class BaseConfig(MutableMapping): @@ -147,8 +146,8 @@ def __setitem__(self, key, val): bitwise = self._bitwise[key] if isinstance(val, str): val = bitwise[val.lower()] - elif isinstance(val, Integral): - val = bitwise.get(val, val) + elif (x := maybe_integral(val)) is not None: + val = bitwise.get(x, x) else: bits = 0 for x in val: diff --git a/graphblas/core/utils.py b/graphblas/core/utils.py index 74e03f2f9..7bb1a1fb0 100644 --- a/graphblas/core/utils.py +++ b/graphblas/core/utils.py @@ -1,4 +1,4 @@ -from numbers import Integral, Number +from operator import index import numpy as np @@ -158,6 +158,17 @@ def get_order(order): ) +def maybe_integral(val): + """Ensure ``val`` is an integer or return None if it's not.""" + try: + return index(val) + except TypeError: + pass + if isinstance(val, float) and val.is_integer(): + return int(val) + return None + + def normalize_chunks(chunks, shape): """Normalize chunks argument for use by ``Matrix.ss.split``. @@ -175,8 +186,8 @@ def normalize_chunks(chunks, shape): """ if isinstance(chunks, (list, tuple)): pass - elif isinstance(chunks, Number): - chunks = (chunks,) * len(shape) + elif (chunk := maybe_integral(chunks)) is not None: + chunks = (chunk,) * len(shape) elif isinstance(chunks, np.ndarray): chunks = chunks.tolist() else: @@ -192,22 +203,21 @@ def normalize_chunks(chunks, shape): for size, chunk in zip(shape, chunks): if chunk is None: cur_chunks = [size] - elif isinstance(chunk, Integral) or isinstance(chunk, float) and chunk.is_integer(): - chunk = int(chunk) - if chunk < 0: - raise ValueError(f"Chunksize must be greater than 0; got: {chunk}") - div, mod = divmod(size, chunk) - cur_chunks = [chunk] * div + elif (c := maybe_integral(chunk)) is not None: + if c < 0: + raise ValueError(f"Chunksize must be greater than 0; got: {c}") + div, mod = divmod(size, c) + cur_chunks = [c] * div if mod: cur_chunks.append(mod) elif isinstance(chunk, (list, tuple)): cur_chunks = [] none_index = None for c in chunk: - if isinstance(c, Integral) or isinstance(c, float) and c.is_integer(): - c = int(c) - if c < 0: - raise ValueError(f"Chunksize must be greater than 0; got: {c}") + if (val := maybe_integral(c)) is not None: + if val < 0: + raise ValueError(f"Chunksize must be greater than 0; got: {val}") + c = val elif c is None: if none_index is not None: raise TypeError( diff --git a/graphblas/dtypes/__init__.py b/graphblas/dtypes/__init__.py index 49e46d787..f9c144f13 100644 --- a/graphblas/dtypes/__init__.py +++ b/graphblas/dtypes/__init__.py @@ -41,3 +41,6 @@ def __getattr__(key): globals()["ss"] = ss return ss raise AttributeError(f"module {__name__!r} has no attribute {key!r}") + + +_index_dtypes = {BOOL, INT8, UINT8, INT16, UINT16, INT32, UINT32, INT64, UINT64, _INDEX} diff --git a/graphblas/tests/test_scalar.py b/graphblas/tests/test_scalar.py index 7b7c77177..cf4c6fd41 100644 --- a/graphblas/tests/test_scalar.py +++ b/graphblas/tests/test_scalar.py @@ -132,6 +132,8 @@ def test_casting(s): assert float(s) == 5.0 assert type(float(s)) is float assert range(s) == range(5) + with pytest.raises(AttributeError, match="Scalar .* only .*__index__.*integral"): + range(s.dup(float)) assert complex(s) == complex(5) assert type(complex(s)) is complex diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index ef1a76135..263b1d8f7 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -3,11 +3,11 @@ # 
Use, adjust, copy/paste, etc. as necessary to answer your questions. # This may be helpful when updating dependency versions in CI. # Tip: add `--json` for more information. -conda search 'numpy[channel=conda-forge]>=1.25.0' +conda search 'numpy[channel=conda-forge]>=1.25.1' conda search 'pandas[channel=conda-forge]>=2.0.3' conda search 'scipy[channel=conda-forge]>=1.11.1' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.3.0' +conda search 'awkward[channel=conda-forge]>=2.3.1' conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.2' conda search 'numba[channel=conda-forge]>=0.57.1' From 8e42ded1b9b4965ad413073e348a9cdab7c58a3b Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 26 Jul 2023 17:39:08 -0500 Subject: [PATCH 21/66] Add `matrix.power` to compute e.g. `A @ A @ A @ ...` (#483) --- .github/workflows/test_and_build.yml | 2 +- .pre-commit-config.yaml | 6 +- graphblas/core/automethods.py | 5 ++ graphblas/core/infix.py | 1 + graphblas/core/matrix.py | 122 ++++++++++++++++++++++++++- graphblas/tests/test_matrix.py | 31 +++++++ graphblas/tests/test_vector.py | 4 +- scripts/check_versions.sh | 2 +- 8 files changed, 164 insertions(+), 9 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index d93b4c25c..4c1c0e312 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -430,7 +430,7 @@ jobs: id: coverageAttempt3 if: steps.coverageAttempt2.outcome == 'failure' # Continue even if it failed 3 times... (sheesh! use codecov instead) - continue-on-error: false + continue-on-error: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} COVERALLS_FLAG_NAME: ${{ matrix.os }}/${{ matrix.slowtask }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b8d767f05..fef625a70 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.278 + rev: v0.0.280 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -79,7 +79,7 @@ repos: additional_dependencies: &flake8_dependencies # These versions need updated manually - flake8==6.0.0 - - flake8-bugbear==23.6.5 + - flake8-bugbear==23.7.10 - flake8-simplify==0.20.0 - repo: https://github.com/asottile/yesqa rev: v1.5.0 @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.278 + rev: v0.0.280 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/graphblas/core/automethods.py b/graphblas/core/automethods.py index 937e331fd..0a2aa208a 100644 --- a/graphblas/core/automethods.py +++ b/graphblas/core/automethods.py @@ -213,6 +213,10 @@ def outer(self): return self._get_value("outer") +def power(self): + return self._get_value("power") + + def reduce(self): return self._get_value("reduce") @@ -410,6 +414,7 @@ def _main(): "kronecker", "mxm", "mxv", + "power", "reduce_columnwise", "reduce_rowwise", "reduce_scalar", diff --git a/graphblas/core/infix.py b/graphblas/core/infix.py index bd1d10a92..88fc52dbe 100644 --- a/graphblas/core/infix.py +++ b/graphblas/core/infix.py @@ -330,6 +330,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): mxv = wrapdoc(Matrix.mxv)(property(automethods.mxv)) name = wrapdoc(Matrix.name)(property(automethods.name)).setter(automethods._set_name) nvals = 
wrapdoc(Matrix.nvals)(property(automethods.nvals)) + power = wrapdoc(Matrix.power)(property(automethods.power)) reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(automethods.reduce_columnwise)) reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(automethods.reduce_rowwise)) reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(automethods.reduce_scalar)) diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 4696d8ead..d820ca424 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -28,6 +28,7 @@ class_property, get_order, ints_to_numpy_buffer, + maybe_integral, normalize_values, output_type, values_to_numpy_buffer, @@ -91,6 +92,68 @@ def _reposition(updater, indices, chunk): updater[indices] = chunk +def _power(updater, A, n, op): + opts = updater.opts + if n == 1: + updater << A + return + # Use repeated squaring: compute A^2, A^4, A^8, etc., and combine terms as needed. + # See `numpy.linalg.matrix_power` for a simpler implementation to understand how this works. + # We reuse `result` and `square` outputs, and use `square_expr` so masks can be applied. + result = square = square_expr = None + n, bit = divmod(n, 2) + while True: + if bit != 0: + # Need to multiply `square_expr` or `A` into the result + if square_expr is not None: + # Need to evaluate `square_expr`; either into final result, or into `square` + if n == 0 and result is None: + # Handle `updater << A @ A` without an intermediate value + updater << square_expr + return + if square is None: + # Create `square = A @ A` + square = square_expr.new(name="Squares", **opts) + else: + # Compute `square << square @ square` + square(**opts) << square_expr + square_expr = None + if result is None: + # First time needing the intermediate result! + if square is None: + # Use `A` if possible to avoid unnecessary copying + # We will detect and handle `result is A` below + result = A + else: + # Copy square as intermediate result + result = square.dup(name="Power", **opts) + elif n == 0: + # All done! No more terms to compute + updater << op(result @ square) + return + elif result is A: + # Now we need to create a new matrix for the intermediate result + result = op(result @ square).new(name="Power", **opts) + else: + # Main branch: multiply `square` into `result` + result(**opts) << op(result @ square) + n, bit = divmod(n, 2) + if square_expr is not None: + # We need to perform another squaring, so evaluate current `square_expr` first + if square is None: + # Create `square` + square = square_expr.new(name="Squares", **opts) + else: + # Compute `square` + square << square_expr + if square is None: + # First iteration! Create expression for first square + square_expr = op(A @ A) + else: + # Expression for repeated squaring + square_expr = op(square @ square) + + class Matrix(BaseType): """Create a new GraphBLAS Sparse Matrix. @@ -155,8 +218,6 @@ def _as_vector(self, *, name=None): This is SuiteSparse-specific and may change in the future. This does not copy the matrix. """ - from .vector import Vector - if self._ncols != 1: raise ValueError( f"Matrix must have a single column (not {self._ncols}) to be cast to a Vector" @@ -2690,6 +2751,60 @@ def reposition(self, row_offset, column_offset, *, nrows=None, ncols=None): dtype=self.dtype, ) + def power(self, n, op=semiring.plus_times): + """Raise a square Matrix to the (positive integer) power ``n``. + + Matrix power is computed by repeated matrix squaring and matrix multiplication. 
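+        For example, ``A.power(8)`` needs only three matrix multiplications
+        (compute ``A @ A``, square the result, then square once more) instead
+        of the seven it would take to multiply by ``A`` one step at a time.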
+        For a graph as an adjacency matrix, matrix power with the default ``plus_times``
+        semiring computes the number of walks connecting each pair of nodes.
+        The result can grow very quickly for large matrices and with larger ``n``.
+
+        Parameters
+        ----------
+        n : int
+            The exponent must be a positive integer.
+        op : :class:`~graphblas.core.operator.Semiring`
+            Semiring used in the computation.
+
+        Returns
+        -------
+        MatrixExpression
+
+        Examples
+        --------
+        .. code-block:: python
+
+            C << A.power(4, op=semiring.plus_times)
+
+            # Is equivalent to:
+            tmp = (A @ A).new()
+            C << tmp @ tmp
+
+            # And is more efficient than the naive implementation:
+            C = A.dup()
+            for i in range(1, 4):
+                C << A @ C
+        """
+        method_name = "power"
+        if self._nrows != self._ncols:
+            raise DimensionMismatch(f"power only works for square Matrix; shape is {self.shape}")
+        if (N := maybe_integral(n)) is None:
+            raise TypeError(f"n must be a positive integer; got bad type: {type(n)}")
+        if N <= 0:
+            raise ValueError(f"n must be a positive integer; got: {N}")
+        op = get_typed_op(op, self.dtype, kind="semiring")
+        self._expect_op(op, "Semiring", within=method_name, argname="op")
+        return MatrixExpression(
+            "power",
+            None,
+            [self, _power, (self, N, op)],  # [*expr_args, func, args]
+            expr_repr=f"{{0.name}}.power({N}, op={op})",
+            nrows=self._nrows,
+            ncols=self._ncols,
+            dtype=self.dtype,
+        )
+
     ##################################
     # Extract and Assign index methods
     ##################################
@@ -3358,6 +3473,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
     mxv = wrapdoc(Matrix.mxv)(property(automethods.mxv))
     name = wrapdoc(Matrix.name)(property(automethods.name)).setter(automethods._set_name)
     nvals = wrapdoc(Matrix.nvals)(property(automethods.nvals))
+    power = wrapdoc(Matrix.power)(property(automethods.power))
     reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(automethods.reduce_columnwise))
     reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(automethods.reduce_rowwise))
     reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(automethods.reduce_scalar))
@@ -3458,6 +3574,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
     mxv = wrapdoc(Matrix.mxv)(property(automethods.mxv))
     name = wrapdoc(Matrix.name)(property(automethods.name)).setter(automethods._set_name)
     nvals = wrapdoc(Matrix.nvals)(property(automethods.nvals))
+    power = wrapdoc(Matrix.power)(property(automethods.power))
     reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(automethods.reduce_columnwise))
     reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(automethods.reduce_rowwise))
     reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(automethods.reduce_scalar))
@@ -3619,6 +3736,7 @@ def to_dicts(self, order="rowwise"):
     reduce_columnwise = Matrix.reduce_columnwise
     reduce_scalar = Matrix.reduce_scalar
     reposition = Matrix.reposition
+    power = Matrix.power

     # Operator sugar
     __or__ = Matrix.__or__
diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py
index bc942bc49..80a66a524 100644
--- a/graphblas/tests/test_matrix.py
+++ b/graphblas/tests/test_matrix.py
@@ -4375,3 +4375,34 @@ def test_subarray_dtypes():
     if suitesparse:
         Full2 = Matrix.ss.import_fullr(b2)
         assert Full1.isequal(Full2, check_dtype=True)
+
+
+def test_power(A):
+    expected = A.dup()
+    for i in range(1, 50):
+        result = A.power(i).new()
+        assert result.isequal(expected)
+        expected << A @ expected
+    # Test transpose
+    expected = A.T.new()
+    for i in range(1, 10):
+        result = 
A.T.power(i).new() + assert result.isequal(expected) + expected << A.T @ expected + # Test other semiring + expected = A.dup() + for i in range(1, 10): + result = A.power(i, semiring.min_plus).new() + assert result.isequal(expected) + expected << semiring.min_plus(A @ expected) + # Exceptional + with pytest.raises(TypeError, match="must be a positive integer"): + A.power(1.5) + with pytest.raises(ValueError, match="must be a positive integer"): + A.power(-1) + with pytest.raises(ValueError, match="must be a positive integer"): + # Not implemented yet... could create identity matrix + A.power(0) + B = A[:2, :3].new() + with pytest.raises(DimensionMismatch): + B.power(2) diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index a1aabd183..e321d3e9b 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -999,10 +999,10 @@ def test_reduce_agg_firstlast_index(v): def test_reduce_agg_empty(): v = Vector("UINT8", size=3) - for _attr, aggr in vars(agg).items(): + for attr, aggr in vars(agg).items(): if not isinstance(aggr, agg.Aggregator): continue - s = v.reduce(aggr).new() + s = v.reduce(aggr).new(name=attr) assert compute(s.value) is None diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 263b1d8f7..ffa440c22 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -12,6 +12,6 @@ conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.2' conda search 'numba[channel=conda-forge]>=0.57.1' conda search 'pyyaml[channel=conda-forge]>=6.0' -conda search 'flake8-bugbear[channel=conda-forge]>=23.6.5' +conda search 'flake8-bugbear[channel=conda-forge]>=23.7.10' conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' # conda search 'python[channel=conda-forge]>=3.8 *pypy*' From c753947fe82f99aae2d8e19e0ce47e9f7e9b7338 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 2 Aug 2023 09:59:28 -0500 Subject: [PATCH 22/66] Remove deprecated `{scan,selectk,compactify}_{row,column}wise` (#486) --- graphblas/core/ss/matrix.py | 225 --------------------------------- graphblas/tests/test_matrix.py | 13 -- 2 files changed, 238 deletions(-) diff --git a/graphblas/core/ss/matrix.py b/graphblas/core/ss/matrix.py index 56c28f52f..64914cf02 100644 --- a/graphblas/core/ss/matrix.py +++ b/graphblas/core/ss/matrix.py @@ -1,5 +1,4 @@ import itertools -import warnings import numpy as np from suitesparse_graphblas.utils import claim_buffer, claim_buffer_2d, unclaim_buffer @@ -3715,51 +3714,6 @@ def scan(self, op=monoid.plus, order="rowwise", *, name=None, **opts): parent = parent.T return prefix_scan(parent, op, name=name, within="scan", **opts) - def scan_columnwise(self, op=monoid.plus, *, name=None, **opts): - """Perform a prefix scan across columns with the given monoid. - - .. deprecated:: 2022.11.1 - ``Matrix.ss.scan_columnwise`` will be removed in a future release. - Use ``Matrix.ss.scan(order="columnwise")`` instead. - Will be removed in version 2023.7.0 or later - - For example, use ``monoid.plus`` (the default) to perform a cumulative sum, - and ``monoid.times`` for cumulative product. Works with any monoid. 
- - Returns - ------- - Matrix - """ - warnings.warn( - "`Matrix.ss.scan_columnwise` is deprecated; " - 'please use `Matrix.ss.scan(order="columnwise")` instead.', - DeprecationWarning, - stacklevel=2, - ) - return prefix_scan(self._parent.T, op, name=name, within="scan_columnwise", **opts) - - def scan_rowwise(self, op=monoid.plus, *, name=None, **opts): - """Perform a prefix scan across rows with the given monoid. - - .. deprecated:: 2022.11.1 - ``Matrix.ss.scan_rowwise`` will be removed in a future release. - Use ``Matrix.ss.scan`` instead. - Will be removed in version 2023.7.0 or later - - For example, use ``monoid.plus`` (the default) to perform a cumulative sum, - and ``monoid.times`` for cumulative product. Works with any monoid. - - Returns - ------- - Matrix - """ - warnings.warn( - "`Matrix.ss.scan_rowwise` is deprecated; please use `Matrix.ss.scan` instead.", - DeprecationWarning, - stacklevel=2, - ) - return prefix_scan(self._parent, op, name=name, within="scan_rowwise", **opts) - def flatten(self, order="rowwise", *, name=None, **opts): """Return a copy of the Matrix collapsed into a Vector. @@ -3901,99 +3855,6 @@ def selectk(self, how, k, order="rowwise", *, name=None): k, fmt, indices, sort_axis, choose_func, is_random, do_sort, name ) - def selectk_rowwise(self, how, k, *, name=None): # pragma: no cover (deprecated) - """Select (up to) k elements from each row. - - .. deprecated:: 2022.11.1 - ``Matrix.ss.selectk_rowwise`` will be removed in a future release. - Use ``Matrix.ss.selectk`` instead. - Will be removed in version 2023.7.0 or later - - Parameters - ---------- - how : str - "random": choose k elements with equal probability - "first": choose the first k elements - "last": choose the last k elements - k : int - The number of elements to choose from each row - - **THIS API IS EXPERIMENTAL AND MAY CHANGE** - """ - warnings.warn( - "`Matrix.ss.selectk_rowwise` is deprecated; please use `Matrix.ss.selectk` instead.", - DeprecationWarning, - stacklevel=2, - ) - how = how.lower() - fmt = "hypercsr" - indices = "col_indices" - sort_axis = "sorted_cols" - if how == "random": - choose_func = choose_random - is_random = True - do_sort = False - elif how == "first": - choose_func = choose_first - is_random = False - do_sort = True - elif how == "last": - choose_func = choose_last - is_random = False - do_sort = True - else: - raise ValueError('`how` argument must be one of: "random", "first", "last"') - return self._select_random( - k, fmt, indices, sort_axis, choose_func, is_random, do_sort, name - ) - - def selectk_columnwise(self, how, k, *, name=None): # pragma: no cover (deprecated) - """Select (up to) k elements from each column. - - .. deprecated:: 2022.11.1 - ``Matrix.ss.selectk_columnwise`` will be removed in a future release. - Use ``Matrix.ss.selectk(order="columnwise")`` instead. 
- Will be removed in version 2023.7.0 or later - - Parameters - ---------- - how : str - - "random": choose elements with equal probability - - "first": choose the first k elements - - "last": choose the last k elements - k : int - The number of elements to choose from each column - - **THIS API IS EXPERIMENTAL AND MAY CHANGE** - """ - warnings.warn( - "`Matrix.ss.selectk_columnwise` is deprecated; " - 'please use `Matrix.ss.selectk(order="columnwise")` instead.', - DeprecationWarning, - stacklevel=2, - ) - how = how.lower() - fmt = "hypercsc" - indices = "row_indices" - sort_axis = "sorted_rows" - if how == "random": - choose_func = choose_random - is_random = True - do_sort = False - elif how == "first": - choose_func = choose_first - is_random = False - do_sort = True - elif how == "last": - choose_func = choose_last - is_random = False - do_sort = True - else: - raise ValueError('`how` argument must be one of: "random", "first", "last"') - return self._select_random( - k, fmt, indices, sort_axis, choose_func, is_random, do_sort, name - ) - def _select_random(self, k, fmt, indices, sort_axis, choose_func, is_random, do_sort, name): if k < 0: raise ValueError("negative k is not allowed") @@ -4058,92 +3919,6 @@ def compactify( indices = "row_indices" return self._compactify(how, reverse, asindex, dimname, k, fmt, indices, name) - def compactify_rowwise( - self, how="first", ncols=None, *, reverse=False, asindex=False, name=None - ): - """Shift all values to the left so all values in a row are contiguous. - - This returns a new Matrix. - - Parameters - ---------- - how : {"first", "last", "smallest", "largest", "random"}, optional - How to compress the values: - - first : take the values furthest to the left - - last : take the values furthest to the right - - smallest : take the smallest values (if tied, may take any) - - largest : take the largest values (if tied, may take any) - - random : take values randomly with equal probability and without replacement - Chosen values may not be ordered randomly - reverse : bool, default False - Reverse the values in each row when True - asindex : bool, default False - Return the column index of the value when True. If there are ties for - "smallest" and "largest", then any valid index may be returned. - ncols : int, optional - The number of columns of the returned Matrix. If not specified, then - the Matrix will be "compacted" to the smallest ncols that doesn't lose - values. - - **THIS API IS EXPERIMENTAL AND MAY CHANGE** - - See Also - -------- - Matrix.ss.sort - """ - warnings.warn( - "`Matrix.ss.compactify_rowwise` is deprecated; " - "please use `Matrix.ss.compactify` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self._compactify( - how, reverse, asindex, "ncols", ncols, "hypercsr", "col_indices", name - ) - - def compactify_columnwise( - self, how="first", nrows=None, *, reverse=False, asindex=False, name=None - ): - """Shift all values to the top so all values in a column are contiguous. - - This returns a new Matrix. 
- - Parameters - ---------- - how : {"first", "last", "smallest", "largest", "random"}, optional - How to compress the values: - - first : take the values furthest to the top - - last : take the values furthest to the bottom - - smallest : take the smallest values (if tied, may take any) - - largest : take the largest values (if tied, may take any) - - random : take values randomly with equal probability and without replacement - Chosen values may not be ordered randomly - reverse : bool, default False - Reverse the values in each column when True - asindex : bool, default False - Return the row index of the value when True. If there are ties for - "smallest" and "largest", then any valid index may be returned. - nrows : int, optional - The number of rows of the returned Matrix. If not specified, then - the Matrix will be "compacted" to the smallest nrows that doesn't lose - values. - - **THIS API IS EXPERIMENTAL AND MAY CHANGE** - - See Also - -------- - Matrix.ss.sort - """ - warnings.warn( - "`Matrix.ss.compactify_columnwise` is deprecated; " - 'please use `Matrix.ss.compactify(order="columnwise")` instead.', - DeprecationWarning, - stacklevel=2, - ) - return self._compactify( - how, reverse, asindex, "nrows", nrows, "hypercsc", "row_indices", name - ) - def _compactify(self, how, reverse, asindex, nkey, nval, fmt, indices_name, name): how = how.lower() if how not in {"first", "last", "smallest", "largest", "random"}: diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index 80a66a524..cd70479cc 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -3538,19 +3538,6 @@ def compare(A, expected, isequal=True, **kwargs): def test_deprecated(A): - if suitesparse: - with pytest.warns(DeprecationWarning): - A.ss.compactify_rowwise() - with pytest.warns(DeprecationWarning): - A.ss.compactify_columnwise() - with pytest.warns(DeprecationWarning): - A.ss.scan_rowwise() - with pytest.warns(DeprecationWarning): - A.ss.scan_columnwise() - with pytest.warns(DeprecationWarning): - A.ss.selectk_rowwise("first", 3) - with pytest.warns(DeprecationWarning): - A.ss.selectk_columnwise("first", 3) with pytest.warns(DeprecationWarning): A.to_values() with pytest.warns(DeprecationWarning): From 9e1a390130fcece898bf66b852edd2540f4c88b0 Mon Sep 17 00:00:00 2001 From: William Zijie Zhang <89562186+Transurgeon@users.noreply.github.com> Date: Wed, 9 Aug 2023 17:07:27 -0400 Subject: [PATCH 23/66] dropping support for python3.8 according to wiki (#489) Co-authored-by: Transurgeon --- .github/workflows/debug.yml | 2 +- .github/workflows/imports.yml | 4 +--- .github/workflows/publish_pypi.yml | 2 +- .github/workflows/test_and_build.yml | 10 +--------- .pre-commit-config.yaml | 2 +- pyproject.toml | 9 ++++----- scripts/check_versions.sh | 2 +- 7 files changed, 10 insertions(+), 21 deletions(-) diff --git a/.github/workflows/debug.yml b/.github/workflows/debug.yml index 794746f77..389905db5 100644 --- a/.github/workflows/debug.yml +++ b/.github/workflows/debug.yml @@ -15,7 +15,7 @@ jobs: strategy: fail-fast: false matrix: - pyver: [3.8] + pyver: [3.9] testopts: - "--blocking" # - "--non-blocking --record --runslow" diff --git a/.github/workflows/imports.yml b/.github/workflows/imports.yml index 18e6f637c..de9a7361d 100644 --- a/.github/workflows/imports.yml +++ b/.github/workflows/imports.yml @@ -30,7 +30,6 @@ jobs: id: pyver with: contents: | - 3.8 3.9 3.10 3.11 @@ -38,14 +37,13 @@ jobs: 1 1 1 - 1 test_imports: needs: rngs runs-on: ${{ needs.rngs.outputs.os }} # 
runs-on: ${{ matrix.os }} # strategy: # matrix: - # python-version: ["3.8", "3.9", "3.10", "3.11"] + # python-version: ["3.9", "3.10", "3.11"] # os: ["ubuntu-latest", "macos-latest", "windows-latest"] steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index cbe403724..abf3057ac 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -20,7 +20,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: "3.8" + python-version: "3.9" - name: Install build dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 4c1c0e312..cc2eb27b6 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -107,10 +107,8 @@ jobs: with: # We should support major Python versions for at least 36-42 months # We may be able to support pypy if anybody asks for it - # 3.8.16 0_73_pypy # 3.9.16 0_73_pypy contents: | - 3.8 3.9 3.10 3.11 @@ -118,7 +116,6 @@ jobs: 1 1 1 - 1 - name: RNG for source of python-suitesparse-graphblas uses: ddradar/choose-random-action@v2.0.2 id: sourcetype @@ -171,12 +168,7 @@ jobs: yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') sparsever=$(python -c 'import random ; print(random.choice(["=0.13", "=0.14", ""]))') fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') - if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.8') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", ""]))') - elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fef625a70..5fffc6f8f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -54,7 +54,7 @@ repos: rev: v3.9.0 hooks: - id: pyupgrade - args: [--py38-plus] + args: [--py39-plus] - repo: https://github.com/MarcoGorelli/auto-walrus rev: v0.2.2 hooks: diff --git a/pyproject.toml b/pyproject.toml index fdd3a7a94..499faa2c3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ name = "python-graphblas" dynamic = ["version"] description = "Python library for GraphBLAS: high-performance sparse linear algebra for scalable graph analytics" readme = "README.md" -requires-python = ">=3.8" +requires-python = ">=3.9" license = {file = "LICENSE"} authors = [ {name = "Erik Welch", email = "erik.n.welch@gmail.com"}, @@ -44,7 +44,6 @@ classifiers = [ "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language 
:: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -157,7 +156,7 @@ dirty_template = "{tag}+{ccount}.g{sha}.dirty" [tool.black] line-length = 100 -target-version = ["py38", "py39", "py310", "py311"] +target-version = ["py39", "py310", "py311"] [tool.isort] sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] @@ -235,7 +234,7 @@ ignore-words-list = "coo,ba" [tool.ruff] # https://github.com/charliermarsh/ruff/ line-length = 100 -target-version = "py38" +target-version = "py39" select = [ # Have we enabled too many checks that they'll become a nuisance? We'll see... "F", # pyflakes @@ -396,7 +395,7 @@ convention = "numpy" [tool.pylint.messages_control] # To run a single check, do: pylint graphblas --disable E,W,R,C,I --enable assignment-from-no-return max-line-length = 100 -py-version = "3.8" +py-version = "3.9" enable = ["I"] disable = [ # Error diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index ffa440c22..f849c1329 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -14,4 +14,4 @@ conda search 'numba[channel=conda-forge]>=0.57.1' conda search 'pyyaml[channel=conda-forge]>=6.0' conda search 'flake8-bugbear[channel=conda-forge]>=23.7.10' conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' -# conda search 'python[channel=conda-forge]>=3.8 *pypy*' +# conda search 'python[channel=conda-forge]>=3.9 *pypy*' From c8f391757df4cd50420a0b7f16b29bf2e22e1853 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Aug 2023 10:40:25 -0500 Subject: [PATCH 24/66] Bump pypa/gh-action-pypi-publish from 1.8.8 to 1.8.10 (#491) Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.8.8 to 1.8.10. 
- [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases) - [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/v1.8.8...v1.8.10) --- .github/workflows/publish_pypi.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index abf3057ac..e99dc2c0f 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -35,7 +35,7 @@ jobs: - name: Check with twine run: python -m twine check --strict dist/* - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@v1.8.8 + uses: pypa/gh-action-pypi-publish@v1.8.10 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} From 45ccf4b37d2ba6b70c8a8f47ee2d1603605e8497 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Tue, 15 Aug 2023 11:32:31 -0500 Subject: [PATCH 25/66] Add tests for `gb.ss.context` (#488) --- .pre-commit-config.yaml | 12 +++---- graphblas/core/operator/agg.py | 4 +-- graphblas/core/operator/binary.py | 2 +- graphblas/core/operator/utils.py | 2 +- graphblas/core/ss/context.py | 1 + graphblas/core/utils.py | 2 +- graphblas/monoid/__init__.py | 2 +- graphblas/monoid/numpy.py | 5 ++- graphblas/semiring/__init__.py | 4 +-- graphblas/semiring/numpy.py | 4 +-- graphblas/tests/test_matrix.py | 2 +- graphblas/tests/test_scalar.py | 8 ++--- graphblas/tests/test_ss_utils.py | 60 +++++++++++++++++++++++++++++++ graphblas/tests/test_vector.py | 2 +- scripts/check_versions.sh | 4 +-- 15 files changed, 87 insertions(+), 27 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5fffc6f8f..e80d3e817 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.9.0 + rev: v3.10.1 hooks: - id: pyupgrade args: [--py39-plus] @@ -66,19 +66,19 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.280 + rev: v0.0.284 hooks: - id: ruff args: [--fix-only, --show-fixes] # Let's keep `flake8` even though `ruff` does much of the same. # `flake8-bugbear` and `flake8-simplify` have caught things missed by `ruff`. 
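  # The `flake8-bugbear` and `flake8-simplify` pins below are also tracked by
  # `scripts/check_versions.sh`, which can help when bumping them manually.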
- repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 + rev: 6.1.0 hooks: - id: flake8 additional_dependencies: &flake8_dependencies # These versions need updated manually - - flake8==6.0.0 + - flake8==6.1.0 - flake8-bugbear==23.7.10 - flake8-simplify==0.20.0 - repo: https://github.com/asottile/yesqa @@ -94,11 +94,11 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.280 + rev: v0.0.284 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint - rev: v0.6.7 + rev: v0.6.8 hooks: - id: sphinx-lint args: [--enable, all, "--disable=line-too-long,leaked-markup"] diff --git a/graphblas/core/operator/agg.py b/graphblas/core/operator/agg.py index 09d644c32..6b463a8a6 100644 --- a/graphblas/core/operator/agg.py +++ b/graphblas/core/operator/agg.py @@ -76,9 +76,9 @@ def __init__( @property def types(self): if self._types is None: - if type(self._semiring) is str: + if isinstance(self._semiring, str): self._semiring = semiring.from_string(self._semiring) - if type(self._types_orig[0]) is str: # pragma: no branch + if isinstance(self._types_orig[0], str): # pragma: no branch self._types_orig[0] = semiring.from_string(self._types_orig[0]) self._types = _get_types( self._types_orig, None if self._initval_orig is None else self._initdtype diff --git a/graphblas/core/operator/binary.py b/graphblas/core/operator/binary.py index 88191c39b..77a686868 100644 --- a/graphblas/core/operator/binary.py +++ b/graphblas/core/operator/binary.py @@ -200,7 +200,7 @@ def monoid(self): @property def commutes_to(self): - if type(self._commutes_to) is str: + if isinstance(self._commutes_to, str): self._commutes_to = BinaryOp._find(self._commutes_to) return self._commutes_to diff --git a/graphblas/core/operator/utils.py b/graphblas/core/operator/utils.py index 00bc86cea..00df31db8 100644 --- a/graphblas/core/operator/utils.py +++ b/graphblas/core/operator/utils.py @@ -340,7 +340,7 @@ def _from_string(string, module, mapping, example): ) if base in mapping: op = mapping[base] - if type(op) is str: + if isinstance(op, str): op = mapping[base] = module.from_string(op) elif hasattr(module, base): op = getattr(module, base) diff --git a/graphblas/core/ss/context.py b/graphblas/core/ss/context.py index 9b48bcaa4..f93d1ec1c 100644 --- a/graphblas/core/ss/context.py +++ b/graphblas/core/ss/context.py @@ -111,6 +111,7 @@ def disengage(self): def __enter__(self): self.engage() + return self def __exit__(self, exc_type, exc, exc_tb): self.disengage() diff --git a/graphblas/core/utils.py b/graphblas/core/utils.py index 7bb1a1fb0..42fcf0685 100644 --- a/graphblas/core/utils.py +++ b/graphblas/core/utils.py @@ -11,7 +11,7 @@ def libget(name): try: return getattr(lib, name) except AttributeError: - if name[-4:] not in {"FC32", "FC64", "error"}: + if name[-4:] not in {"FC32", "FC64", "rror"}: raise ext_name = f"GxB_{name[4:]}" try: diff --git a/graphblas/monoid/__init__.py b/graphblas/monoid/__init__.py index ed028c5d9..027fc0afe 100644 --- a/graphblas/monoid/__init__.py +++ b/graphblas/monoid/__init__.py @@ -10,7 +10,7 @@ def __dir__(): def __getattr__(key): if key in _delayed: func, kwargs = _delayed.pop(key) - if type(kwargs["binaryop"]) is str: + if isinstance(kwargs["binaryop"], str): from ..binary import from_string kwargs["binaryop"] = from_string(kwargs["binaryop"]) diff --git a/graphblas/monoid/numpy.py b/graphblas/monoid/numpy.py index f46d57143..5f6895e5d 100644 --- a/graphblas/monoid/numpy.py +++ b/graphblas/monoid/numpy.py @@ -90,8 
+90,7 @@ if ( _config.get("mapnumpy") or _has_numba - and type(_numba.njit(lambda x, y: _np.fmax(x, y))(1, 2)) # pragma: no branch (numba) - is not float + and not isinstance(_numba.njit(lambda x, y: _np.fmax(x, y))(1, 2), float) # pragma: no branch ): # Incorrect behavior was introduced in numba 0.56.2 and numpy 1.23 # See: https://github.com/numba/numba/issues/8478 @@ -170,7 +169,7 @@ def __dir__(): def __getattr__(name): if name in _delayed: func, kwargs = _delayed.pop(name) - if type(kwargs["binaryop"]) is str: + if isinstance(kwargs["binaryop"], str): from ..binary import from_string kwargs["binaryop"] = from_string(kwargs["binaryop"]) diff --git a/graphblas/semiring/__init__.py b/graphblas/semiring/__init__.py index 538136406..95a44261a 100644 --- a/graphblas/semiring/__init__.py +++ b/graphblas/semiring/__init__.py @@ -46,11 +46,11 @@ def __getattr__(key): return rv if key in _delayed: func, kwargs = _delayed.pop(key) - if type(kwargs["binaryop"]) is str: + if isinstance(kwargs["binaryop"], str): from ..binary import from_string kwargs["binaryop"] = from_string(kwargs["binaryop"]) - if type(kwargs["monoid"]) is str: + if isinstance(kwargs["monoid"], str): from ..monoid import from_string kwargs["monoid"] = from_string(kwargs["monoid"]) diff --git a/graphblas/semiring/numpy.py b/graphblas/semiring/numpy.py index 3a59090cc..97b90874b 100644 --- a/graphblas/semiring/numpy.py +++ b/graphblas/semiring/numpy.py @@ -151,11 +151,11 @@ def __getattr__(name): if name in _delayed: func, kwargs = _delayed.pop(name) - if type(kwargs["binaryop"]) is str: + if isinstance(kwargs["binaryop"], str): from ..binary import from_string kwargs["binaryop"] = from_string(kwargs["binaryop"]) - if type(kwargs["monoid"]) is str: + if isinstance(kwargs["monoid"], str): from ..monoid import from_string kwargs["monoid"] = from_string(kwargs["monoid"]) diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index cd70479cc..fe85bb9bf 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -3878,7 +3878,7 @@ def test_get(A): assert compute(A.T.get(0, 1)) is None assert A.T.get(1, 0) == 2 assert A.get(0, 1, "mittens") == 2 - assert type(compute(A.get(0, 1))) is int + assert isinstance(compute(A.get(0, 1)), int) with pytest.raises(ValueError, match="Bad row, col"): # Not yet supported A.get(0, [0, 1]) diff --git a/graphblas/tests/test_scalar.py b/graphblas/tests/test_scalar.py index cf4c6fd41..ba9903169 100644 --- a/graphblas/tests/test_scalar.py +++ b/graphblas/tests/test_scalar.py @@ -128,14 +128,14 @@ def test_equal(s): def test_casting(s): assert int(s) == 5 - assert type(int(s)) is int + assert isinstance(int(s), int) assert float(s) == 5.0 - assert type(float(s)) is float + assert isinstance(float(s), float) assert range(s) == range(5) with pytest.raises(AttributeError, match="Scalar .* only .*__index__.*integral"): range(s.dup(float)) assert complex(s) == complex(5) - assert type(complex(s)) is complex + assert isinstance(complex(s), complex) def test_truthy(s): @@ -580,7 +580,7 @@ def test_record_from_dict(): def test_get(s): assert s.get() == 5 assert s.get("mittens") == 5 - assert type(compute(s.get())) is int + assert isinstance(compute(s.get()), int) s.clear() assert compute(s.get()) is None assert s.get("mittens") == "mittens" diff --git a/graphblas/tests/test_ss_utils.py b/graphblas/tests/test_ss_utils.py index 12c8c6329..81abe5804 100644 --- a/graphblas/tests/test_ss_utils.py +++ b/graphblas/tests/test_ss_utils.py @@ -4,6 +4,7 @@ import graphblas as gb 
from graphblas import Matrix, Vector, backend +from graphblas.exceptions import InvalidValue if backend != "suitesparse": pytest.skip("gb.ss and A.ss only available with suitesparse backend", allow_module_level=True) @@ -234,3 +235,62 @@ def test_global_config(): with pytest.raises(ValueError, match="Wrong number"): config["memory_pool"] = [1, 2] assert "format" in repr(config) + + +@pytest.mark.skipif("gb.core.ss._IS_SSGB7") +def test_context(): + context = gb.ss.Context() + prev = dict(context) + context["chunk"] += 1 + context["nthreads"] += 1 + assert context["chunk"] == prev["chunk"] + 1 + assert context["nthreads"] == prev["nthreads"] + 1 + context2 = gb.ss.Context(stack=True) + assert context2 == context + context3 = gb.ss.Context(stack=False) + assert context3 == prev + context4 = gb.ss.Context( + chunk=context["chunk"] + 1, nthreads=context["nthreads"] + 1, stack=False + ) + assert context4["chunk"] == context["chunk"] + 1 + assert context4["nthreads"] == context["nthreads"] + 1 + assert context == context.dup() + assert context4 == context.dup(chunk=context["chunk"] + 1, nthreads=context["nthreads"] + 1) + assert context.dup(gpu_id=-1)["gpu_id"] == -1 + + context.engage() + assert gb.core.ss.context.threadlocal.context is context + with gb.ss.Context(nthreads=1) as ctx: + assert gb.core.ss.context.threadlocal.context is ctx + v = Vector(int, 5) + v(nthreads=2) << v + v + assert gb.core.ss.context.threadlocal.context is ctx + assert gb.core.ss.context.threadlocal.context is context + with pytest.raises(InvalidValue): + # Wait, why does this raise?! + ctx.disengage() + assert gb.core.ss.context.threadlocal.context is context + context.disengage() + assert gb.core.ss.context.threadlocal.context is gb.core.ss.context.global_context + assert context._prev_context is None + + # hackery + gb.core.ss.context.threadlocal.context = context + context.disengage() + context.disengage() + context.disengage() + assert gb.core.ss.context.threadlocal.context is gb.core.ss.context.global_context + + # Actually engaged, but not set in threadlocal + context._engage() + assert gb.core.ss.context.threadlocal.context is gb.core.ss.context.global_context + context.disengage() + + context.engage() + context._engage() + assert gb.core.ss.context.threadlocal.context is context + context.disengage() + + context._context = context # This is allowed to work with config + with pytest.raises(AttributeError, match="_context"): + context._context = ctx # This is not diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index e321d3e9b..2571f288b 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -2440,7 +2440,7 @@ def test_get(v): assert v.get(0, "mittens") == "mittens" assert v.get(1) == 1 assert v.get(1, "mittens") == 1 - assert type(compute(v.get(1))) is int + assert isinstance(compute(v.get(1)), int) with pytest.raises(ValueError, match="Bad index in Vector.get"): # Not yet supported v.get([0, 1]) diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index f849c1329..56bac1b64 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -3,11 +3,11 @@ # Use, adjust, copy/paste, etc. as necessary to answer your questions. # This may be helpful when updating dependency versions in CI. # Tip: add `--json` for more information. 
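# For example, a hypothetical query for full build metadata:
#   conda search 'numpy[channel=conda-forge]>=1.25.2' --json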
-conda search 'numpy[channel=conda-forge]>=1.25.1' +conda search 'numpy[channel=conda-forge]>=1.25.2' conda search 'pandas[channel=conda-forge]>=2.0.3' conda search 'scipy[channel=conda-forge]>=1.11.1' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.3.1' +conda search 'awkward[channel=conda-forge]>=2.3.2' conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.2' conda search 'numba[channel=conda-forge]>=0.57.1' From 578cab9e2f76dff79f3c3fc9835b822d380396d0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 12:51:35 -0500 Subject: [PATCH 26/66] Bump actions/checkout from 3 to 4 (#499) Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. --- .github/workflows/debug.yml | 2 +- .github/workflows/imports.yml | 2 +- .github/workflows/lint.yml | 2 +- .github/workflows/publish_pypi.yml | 2 +- .github/workflows/test_and_build.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/debug.yml b/.github/workflows/debug.yml index 389905db5..c9dc231fe 100644 --- a/.github/workflows/debug.yml +++ b/.github/workflows/debug.yml @@ -26,7 +26,7 @@ jobs: # - "conda-forge" steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Setup conda env diff --git a/.github/workflows/imports.yml b/.github/workflows/imports.yml index de9a7361d..753ce5162 100644 --- a/.github/workflows/imports.yml +++ b/.github/workflows/imports.yml @@ -46,7 +46,7 @@ jobs: # python-version: ["3.9", "3.10", "3.11"] # os: ["ubuntu-latest", "macos-latest", "windows-latest"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: ${{ needs.rngs.outputs.pyver }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 81d9415ad..e0945022c 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -16,7 +16,7 @@ jobs: name: pre-commit-hooks runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: "3.10" diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index e99dc2c0f..45a2b7880 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -14,7 +14,7 @@ jobs: shell: bash -l {0} steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Set up Python diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index cc2eb27b6..b1f0cfdba 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -98,7 +98,7 @@ jobs: KMP_DUPLICATE_LIB_OK: ${{ contains(matrix.os, 'macos') && 'TRUE' || 'FALSE' }} steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: RNG for Python version From 36a25badc1bf15f3b015422279af072e02c64c94 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Tue, 5 Sep 2023 13:59:52 -0500 Subject: [PATCH 27/66] MAINT: update versions of pandas, awkward, and pre-commit linting (#500) --- .github/workflows/test_and_build.yml | 12 ++++++------ .pre-commit-config.yaml | 8 ++++---- graphblas/exceptions.py | 2 +- pyproject.toml | 1 + scripts/check_versions.sh | 8 ++++---- 5 files changed, 16 insertions(+), 15 deletions(-) diff --git 
a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index b1f0cfdba..2f48048de 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -171,18 +171,18 @@ jobs: if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", ""]))') else # Python 3.11 npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", ""]))') fi if [[ ${{ steps.sourcetype.outputs.selected }} == "source" || ${{ steps.sourcetype.outputs.selected }} == "upstream" ]]; then # TODO: there are currently issues with some numpy versions when diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e80d3e817..5a499e8f8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -33,13 +33,13 @@ repos: - id: name-tests-test args: ["--pytest-test-first"] - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.13 + rev: v0.14 hooks: - id: validate-pyproject name: Validate pyproject.toml # I don't yet trust ruff to do what autoflake does - repo: https://github.com/PyCQA/autoflake - rev: v2.2.0 + rev: v2.2.1 hooks: - id: autoflake args: [--in-place] @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.284 + rev: v0.0.287 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.284 + rev: v0.0.287 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/graphblas/exceptions.py b/graphblas/exceptions.py index 
0acc9ed0b..e7f3b3a83 100644 --- a/graphblas/exceptions.py +++ b/graphblas/exceptions.py @@ -121,7 +121,7 @@ def check_status(response_code, args): return if response_code == GrB_NO_VALUE: return NoValue - if type(args) is list: + if isinstance(args, list): arg = args[0] else: arg = args diff --git a/pyproject.toml b/pyproject.toml index 499faa2c3..619ce18f2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -291,6 +291,7 @@ select = [ ] external = [ # noqa codes that ruff doesn't know about: https://github.com/charliermarsh/ruff#external + "F811", ] ignore = [ # Would be nice to fix these diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 56bac1b64..1a3e894a6 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -4,14 +4,14 @@ # This may be helpful when updating dependency versions in CI. # Tip: add `--json` for more information. conda search 'numpy[channel=conda-forge]>=1.25.2' -conda search 'pandas[channel=conda-forge]>=2.0.3' -conda search 'scipy[channel=conda-forge]>=1.11.1' +conda search 'pandas[channel=conda-forge]>=2.1.0' +conda search 'scipy[channel=conda-forge]>=1.11.2' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.3.2' +conda search 'awkward[channel=conda-forge]>=2.4.1' conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.2' conda search 'numba[channel=conda-forge]>=0.57.1' -conda search 'pyyaml[channel=conda-forge]>=6.0' +conda search 'pyyaml[channel=conda-forge]>=6.0.1' conda search 'flake8-bugbear[channel=conda-forge]>=23.7.10' conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' # conda search 'python[channel=conda-forge]>=3.9 *pypy*' From 717cbdac9efda857ebb3756d20a7a673646d473e Mon Sep 17 00:00:00 2001 From: William Zijie Zhang <89562186+Transurgeon@users.noreply.github.com> Date: Thu, 7 Sep 2023 14:57:28 -0400 Subject: [PATCH 28/66] fixing broken link to graphblas.org (#503) Co-authored-by: Transurgeon --- docs/getting_started/primer.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting_started/primer.rst b/docs/getting_started/primer.rst index 104eb5738..b5bec26ee 100644 --- a/docs/getting_started/primer.rst +++ b/docs/getting_started/primer.rst @@ -263,7 +263,7 @@ and showing that linear algebra can be used to compute graph algorithms with the of semirings. This is a somewhat new field of research, so many academic papers and talks are being given every year. -`Graphblas.org `_ remains the best source for keeping up-to-date with the latest +`Graphblas.org `_ remains the best source for keeping up-to-date with the latest developments in this area. 
Many people will benefit from faster graph algorithms written in GraphBLAS, but for those that want

From ad6c2da98cb309e5ca640f0f376b970a594a5f9c Mon Sep 17 00:00:00 2001
From: Adam Lugowski
Date: Thu, 7 Sep 2023 12:44:08 -0700
Subject: [PATCH 29/66] DOC: add a matrix style (#502)

---
 docs/_static/matrix.css        | 104 ++++++++++++++++++++++++++
 docs/conf.py                   |   2 +-
 docs/user_guide/io.rst         |   1 +
 docs/user_guide/operations.rst | 132 ++++++++++++++++++++------------
 docs/user_guide/operators.rst  |  11 +++
 docs/user_guide/udf.rst        |   2 +
 6 files changed, 200 insertions(+), 52 deletions(-)
 create mode 100644 docs/_static/matrix.css

diff --git a/docs/_static/matrix.css b/docs/_static/matrix.css
new file mode 100644
index 000000000..5700ea3fc
--- /dev/null
+++ b/docs/_static/matrix.css
@@ -0,0 +1,104 @@
+/* Based on the stylesheet used by matrepr (https://github.com/alugowski/matrepr) and modified for sphinx */
+
+table.matrix {
+    border-collapse: collapse;
+    border: 0px;
+}
+
+/* Disable a horizontal line from the default stylesheet */
+.table.matrix > :not(caption) > * > * {
+    border-bottom-width: 0px;
+}
+
+/* row indices */
+table.matrix > tbody tr th {
+    font-size: smaller;
+    font-weight: bolder;
+    vertical-align: middle;
+    text-align: right;
+}
+/* row indices are often made bold in the source data; here make them match the boldness of the th column label style */
+table.matrix strong {
+    font-weight: bold;
+}
+
+/* column indices */
+table.matrix > thead tr th {
+    font-size: smaller;
+    font-weight: bolder;
+    vertical-align: middle;
+    text-align: center;
+}
+
+/* cells */
+table.matrix > tbody tr td {
+    vertical-align: middle;
+    text-align: center;
+    position: relative;
+}
+
+/* left border */
+table.matrix > tbody tr td:first-of-type {
+    border-left: solid 2px var(--pst-color-text-base);
+}
+/* right border */
+table.matrix > tbody tr td:last-of-type {
+    border-right: solid 2px var(--pst-color-text-base);
+}
+
+/* prevents empty cells from collapsing, especially empty rows */
+table.matrix > tbody tr td:empty::before {
+    /* basically fills empty cells with &nbsp; */
+    content: "\00a0\00a0\00a0";
+    visibility: hidden;
+}
+table.matrix > tbody tr td:empty::after {
+    content: "\00a0\00a0\00a0";
+    visibility: hidden;
+}
+
+/* matrix bracket ticks */
+table.matrix > tbody > tr:first-child > td:first-of-type::before {
+    content: "";
+    width: 4px;
+    position: absolute;
+    top: 0;
+    bottom: 0;
+    visibility: visible;
+    left: 0;
+    right: auto;
+    border-top: solid 2px var(--pst-color-text-base);
+}
+table.matrix > tbody > tr:last-child > td:first-of-type::before {
+    content: "";
+    width: 4px;
+    position: absolute;
+    top: 0;
+    bottom: 0;
+    visibility: visible;
+    left: 0;
+    right: auto;
+    border-bottom: solid 2px var(--pst-color-text-base);
+}
+table.matrix > tbody > tr:first-child > td:last-of-type::after {
+    content: "";
+    width: 4px;
+    position: absolute;
+    top: 0;
+    bottom: 0;
+    visibility: visible;
+    left: auto;
+    right: 0;
+    border-top: solid 2px var(--pst-color-text-base);
+}
+table.matrix > tbody > tr:last-child > td:last-of-type::after {
+    content: "";
+    width: 4px;
+    position: absolute;
+    top: 0;
+    bottom: 0;
+    visibility: visible;
+    left: auto;
+    right: 0;
+    border-bottom: solid 2px var(--pst-color-text-base);
+}
diff --git a/docs/conf.py b/docs/conf.py
index 07a373203..2e6f616d8 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -36,7 +36,7 @@
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones. 
extensions = ["sphinx.ext.autodoc", "numpydoc", "sphinx_panels", "nbsphinx"] -html_css_files = ["custom.css"] +html_css_files = ["custom.css", "matrix.css"] html_js_files = ["custom.js"] # Add any paths that contain templates here, relative to this directory. diff --git a/docs/user_guide/io.rst b/docs/user_guide/io.rst index c13fda5d6..ecb4c0862 100644 --- a/docs/user_guide/io.rst +++ b/docs/user_guide/io.rst @@ -29,6 +29,7 @@ array will match the collection dtype. v = gb.Vector.from_coo([1, 3, 6], [2, 3, 4], float, size=10) .. csv-table:: + :class: matrix :header: 0,1,2,3,4,5,6,7,8,9,10 ,2.0,,3.0,,,4.0,,, diff --git a/docs/user_guide/operations.rst b/docs/user_guide/operations.rst index ede2efb06..3f710dc23 100644 --- a/docs/user_guide/operations.rst +++ b/docs/user_guide/operations.rst @@ -45,8 +45,9 @@ a Vector is treated as an nx1 column matrix. C << gb.semiring.min_plus(A @ B) # functional style .. csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1,2,3 + :stub-columns: 1 **0**,,2.0,5.0, **1**,,,1.5,4.25 @@ -54,8 +55,9 @@ a Vector is treated as an nx1 column matrix. **3**,,,, .. csv-table:: B - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,3.0,2.0 **1**,9.0,6.0, @@ -63,8 +65,9 @@ a Vector is treated as an nx1 column matrix. **3**,0.0,5.0, .. csv-table:: C << min_plus(A @ B) - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,11.0,8.0,6.0 **1**,4.25,4.5,2.5 @@ -90,8 +93,9 @@ a Vector is treated as an nx1 column matrix. w << gb.semiring.plus_times(A @ v) # functional style .. csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1,2,3 + :stub-columns: 1 **0**,,2.0,5.0, **1**,,,1.5,4.25 @@ -99,13 +103,13 @@ a Vector is treated as an nx1 column matrix. **3**,,,, .. csv-table:: v - :class: inline + :class: inline matrix :header: 0,1,2,3 10.0,20.0,,40.0 .. csv-table:: w << plus_times(A @ v) - :class: inline + :class: inline matrix :header: 0,1,2,3 40.0,170.0,20.0, @@ -127,14 +131,15 @@ a Vector is treated as an nx1 column matrix. u << gb.semiring.plus_plus(v @ B) # functional style .. csv-table:: v - :class: inline + :class: inline matrix :header: 0,1,2,3 10.0,20.0,,40.0 .. csv-table:: B - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,3.0,2.0 **1**,9.0,6.0, @@ -142,7 +147,7 @@ a Vector is treated as an nx1 column matrix. **3**,0.0,5.0, .. csv-table:: u << plus_plus(v @ B) - :class: inline + :class: inline matrix :header: 0,1,2 69.0,84.0,12.0 @@ -181,24 +186,27 @@ Example usage: C << gb.binary.min(A & B) # functional style .. csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,2.0,5.0 **1**,1.5,,4.0 **2**,,0.5, .. csv-table:: B - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,3.0,-2.0 **1**,0.0,6.0, **2**,,3.0,1.0 .. csv-table:: C << min(A & B) - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,2.0,-2.0 **1**,0.0,, @@ -265,24 +273,27 @@ should be used with the functional syntax, ``left_default`` and ``right_default` C << gb.binary.minus(A | B) # functional style .. csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,9.0,2.0,5.0 **1**,1.5,,4.0 **2**,,, .. csv-table:: B - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,4.0,0.0,-2.0 **1**,,, **2**,6.0,3.0,1.0 .. 
csv-table:: C << A.ewise_add(B, 'minus') - :class: inline + :class: inline matrix :header: ,0,1,2, + :stub-columns: 1 **0**,5.0,2.0,7.0 **1**,1.5,,4.0 @@ -310,24 +321,27 @@ should be used with the functional syntax, ``left_default`` and ``right_default` C << gb.binary.minus(A | B, left_default=0, right_default=0) # functional style .. csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,9.0,2.0,5.0 **1**,1.5,,4.0 **2**,,, .. csv-table:: B - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,4.0,0.0,-2.0 **1**,,, **2**,6.0,3.0,1.0 .. csv-table:: C << A.ewise_union(B, 'minus', 0, 0) - :class: inline + :class: inline matrix :header: ,0,1,2, + :stub-columns: 1 **0**,5.0,2.0,7.0 **1**,1.5,,4.0 @@ -362,13 +376,13 @@ Vector Slice Example: w << v[:4] .. csv-table:: v - :class: inline + :class: inline matrix :header: 0,1,2,3,4,5,6 10.0,2.0,,40.0,-5.0,,24.0 .. csv-table:: w << v[:4] - :class: inline + :class: inline matrix :header: 0,1,2,3 10.0,2.0,,40.0 @@ -387,16 +401,18 @@ Matrix List Example: C << A[[0, 2], :] .. csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,2.0,5.0 **1**,1.5,4.0, **2**,0.5,,-7.0 .. csv-table:: C << A[[0, 2], :] - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,2.0,5.0 **1**,0.5,,-7.0 @@ -434,23 +450,26 @@ Matrix-Matrix Assignment Example: A[::2, ::2] << B .. csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,2.0,5.0 **1**,1.5,4.0, **2**,0.5,,-7.0 .. csv-table:: B - :class: inline + :class: inline matrix :header: ,0,1 + :stub-columns: 1 **0**,-99.0,-98.0 **1**,-97.0,-96.0 .. csv-table:: A[::2, ::2] << B - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,-99.0,2.0,-98.0 **1**,1.5,4.0, @@ -470,22 +489,24 @@ Matrix-Vector Assignment Example: A[1, :] << v .. csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,2.0,5.0 **1**,1.5,4.0, **2**,0.5,,-7.0 .. csv-table:: v - :class: inline + :class: inline matrix :header: 0,1,2 ,,-99.0 .. csv-table:: A[1, :] << v - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,2.0,5.0 **1**,,,-99.0 @@ -500,13 +521,13 @@ Vector-Scalar Assignment Example: v[:4] << 99 .. csv-table:: v - :class: inline + :class: inline matrix :header: 0,1,2,3,4,5,6 10,2,,40,-5,,24 .. csv-table:: v[:4] << 99 - :class: inline + :class: inline matrix :header: 0,1,2,3,4,5,6 99,99,99,99,-5,,24 @@ -535,13 +556,13 @@ function with the collection as the argument. w << gb.unary.minv(v) .. csv-table:: v - :class: inline + :class: inline matrix :header: 0,1,2,3 10.0,20.0,,40.0 .. csv-table:: w << minv(v) - :class: inline + :class: inline matrix :header: 0,1,2,3 0.1,0.05,,0.025 @@ -558,13 +579,13 @@ function with the collection as the argument. w << gb.indexunary.index(v) .. csv-table:: v - :class: inline + :class: inline matrix :header: 0,1,2,3 10.0,20.0,,40.0 .. csv-table:: w << index(v) - :class: inline + :class: inline matrix :header: 0,1,2,3 0,1,,3 @@ -582,13 +603,13 @@ function with the collection as the argument. w << v - 15 .. csv-table:: v - :class: inline + :class: inline matrix :header: 0,1,2,3 10.0,20.0,,40.0 .. csv-table:: w << v.apply('minus', right=15) - :class: inline + :class: inline matrix :header: 0,1,2,3, -5.0,5.0,,25.0 @@ -616,16 +637,18 @@ Upper Triangle Example: C << gb.select.triu(A) .. 
csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,2.0,5.0 **1**,1.5,,4.0 **2**,,0.5,-7.0 .. csv-table:: C << select.triu(A) - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,2.0,5.0 **1**,,,4.0 @@ -643,13 +666,13 @@ Select by Value Example: w << gb.select.value(v >= 5) .. csv-table:: v - :class: inline + :class: inline matrix :header: 0,1,2,3,4,5,6 10.0,2.0,,40.0,-5.0,,24.0 .. csv-table:: w << select.value(v >= 5) - :class: inline + :class: inline matrix :header: 0,1,2,3,4,5,6 10.0,,,40.0,,,24.0 @@ -678,15 +701,16 @@ A monoid or aggregator is used to perform the reduction. w << A.reduce_columnwise("times") .. csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1,2,3 + :stub-columns: 1 **0**,,2.0,,5.0 **1**,1.5,4.0,, **2**,0.5,-7.0,, .. csv-table:: w << A.reduce_columnwise('times') - :class: inline + :class: inline matrix :header: ,0,1,2,3 ,0.75,-56.0,,5.0 @@ -705,15 +729,16 @@ A monoid or aggregator is used to perform the reduction. s << A.reduce_scalar("max") .. csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1,2,3 + :stub-columns: 1 **0**,,2.0,,5.0 **1**,1.5,4.0,, **2**,0.5,-7.0,, .. csv-table:: s << A.reduce_scalar('max') - :class: inline + :class: inline matrix :header: ,,,, 5.0 @@ -730,13 +755,13 @@ A monoid or aggregator is used to perform the reduction. s << gb.agg.argmin(v) .. csv-table:: v - :class: inline + :class: inline matrix :header: 0,1,2,3,4,5,6 10.0,2.0,,40.0,-5.0,,24.0 .. csv-table:: s << argmin(v) - :class: inline + :class: inline matrix :header: ,,, 4 @@ -761,16 +786,18 @@ To force the transpose to be computed by itself, use it by itself as the right-h C << A.T .. csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1,2,3 + :stub-columns: 1 **0**,,2.0,,5.0 **1**,1.5,4.0,, **2**,0.5,,-7.0, .. csv-table:: C << A.T - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,1.5,0.5 **1**,2.0,4.0, @@ -802,23 +829,26 @@ The Kronecker product uses a binary operator. C << A.kronecker(B, "times") .. csv-table:: A - :class: inline + :class: inline matrix :header: ,0,1 + :stub-columns: 1 **0**,1.0,-2.0 **1**,3.0, .. csv-table:: B - :class: inline + :class: inline matrix :header: ,0,1,2 + :stub-columns: 1 **0**,,2.0,5.0 **1**,1.5,4.0, **2**,0.5,,-7.0 .. csv-table:: C << A.kronecker(B, 'times') - :class: inline + :class: inline matrix :header: ,0,1,2,3,4,5 + :stub-columns: 1 **0**,,2.0,5.0,,-4.0,-10.0 **1**,1.5,4.0,,-3.0,-8.0, diff --git a/docs/user_guide/operators.rst b/docs/user_guide/operators.rst index ec28e2fba..8bb5e9fa8 100644 --- a/docs/user_guide/operators.rst +++ b/docs/user_guide/operators.rst @@ -314,12 +314,14 @@ each symbol. Each is detailed below. The following objects will be used to demonstrate the behavior. .. csv-table:: Vector v + :class: matrix :header: 0,1,2,3,4,5 1.0,,2.0,3.5,,9.0 .. csv-table:: Vector w + :class: matrix :header: 0,1,2,3,4,5 7.0,5.2,,3.0,,2.5 @@ -343,6 +345,7 @@ Addition performs an element-wise union between collections, adding overlapping v + w .. csv-table:: + :class: matrix :header: 0,1,2,3,4,5 8.0,5.2,2.0,6.5,,11.5 @@ -358,6 +361,7 @@ and negating any standalone elements from the right-hand object. v - w .. csv-table:: + :class: matrix :header: 0,1,2,3,4,5 -6.0,-5.2,2.0,0.5,,6.5 @@ -373,6 +377,7 @@ overlapping elements. v * w .. csv-table:: + :class: matrix :header: 0,1,2,3,4,5 7.0,,,10.5,,22.5 @@ -392,6 +397,7 @@ elements and always results in a floating-point dtype. v / w .. 
csv-table:: + :class: matrix :header: 0,1,2,3,4,5 0.142857,,,1.166667,,3.6 @@ -407,6 +413,7 @@ Dividing by zero with floor division will raise a ``ZeroDivisionError``. v // w .. csv-table:: + :class: matrix :header: 0,1,2,3,4,5 0.0,,,1.0,,3.0 @@ -422,6 +429,7 @@ of dividing overlapping elements. v % w .. csv-table:: + :class: matrix :header: 0,1,2,3,4,5 1.0,,,0.5,,1.5 @@ -437,6 +445,7 @@ the power of y for overlapping elements. v**w .. csv-table:: + :class: matrix :header: 0,1,2,3,4,5 1.0,,,42.875,,243.0 @@ -455,6 +464,7 @@ rather than ``all(A == B)`` v > w .. csv-table:: + :class: matrix :header: 0,1,2,3,4,5 False,,,True,,True @@ -464,6 +474,7 @@ rather than ``all(A == B)`` v == w .. csv-table:: + :class: matrix :header: 0,1,2,3,4,5 False,,,False,,False diff --git a/docs/user_guide/udf.rst b/docs/user_guide/udf.rst index b96097a85..e7b984b44 100644 --- a/docs/user_guide/udf.rst +++ b/docs/user_guide/udf.rst @@ -27,6 +27,7 @@ Example user-defined UnaryOp: w = v.apply(unary.force_odd).new() .. csv-table:: w + :class: matrix :header: 0,1,2,3,4,5 1,3,,3,9,15 @@ -48,6 +49,7 @@ Example lambda usage: v.apply(lambda x: x % 5 - 2).new() .. csv-table:: + :class: matrix :header: 0,1,2,3,4,5 -1,0,,1,1,2 From bf28a799fdb0f9f91179036df469b26b3f04c5e6 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Fri, 22 Sep 2023 09:56:58 -0500 Subject: [PATCH 30/66] Add docstring for SuiteSparse:GraphBLAS 8 configs (#504) --- graphblas/ss/_core.py | 43 +++++++++++++++++++++++++++++--- graphblas/tests/test_ss_utils.py | 4 +-- 2 files changed, 42 insertions(+), 5 deletions(-) diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py index 2639a7709..29a67e08b 100644 --- a/graphblas/ss/_core.py +++ b/graphblas/ss/_core.py @@ -122,7 +122,9 @@ class GlobalConfig(BaseConfig): Threshold that determines when to switch to bitmap format nthreads : int Maximum number of OpenMP threads to use - memory_pool : List[int] + chunk : double + Control the number of threads used for small problems. + For example, ``nthreads = floor(work / chunk)``. burble : bool Enable diagnostic printing from SuiteSparse:GraphBLAS print_1based : bool @@ -134,8 +136,43 @@ class GlobalConfig(BaseConfig): **GPU support is a work in progress--not recommended to use** gpu_id : int Which GPU to use; default is -1, which means do not run on the GPU. - Only available for SuiteSparse:GraphBLAS 8 + Only available for SuiteSparse:GraphBLAS >=8 **GPU support is a work in progress--not recommended to use** + jit_c_control : {"off", "pause", "run", "load", "on"} + Control the CPU JIT: + "off" : do not use the JIT and free all JIT kernels if loaded + "pause" : do not run JIT kernels, but keep any loaded + "run" : run JIT kernels if already loaded, but don't load or compile + "load" : able to load and run JIT kernels; may not compile + "on" : full JIT: able to compile, load, and run + Only available for SuiteSparse:GraphBLAS >=8 + jit_use_cmake : bool + Whether to use cmake to compile the JIT kernels. + Only available for SuiteSparse:GraphBLAS >=8 + jit_c_compiler_name : str + C compiler for JIT kernels. + Only available for SuiteSparse:GraphBLAS >=8 + jit_c_compiler_flags : str + Flags for the C compiler. + Only available for SuiteSparse:GraphBLAS >=8 + jit_c_linker_flags : str + Link flags for the C compiler. + Only available for SuiteSparse:GraphBLAS >=8 + jit_c_libraries : str + Libraries to link against. + Only available for SuiteSparse:GraphBLAS >=8 + jit_c_cmake_libs : str + Libraries to link against when cmake is used.
+ Only available for SuiteSparse:GraphBLAS >=8 + jit_c_preface : str + C code as preface to JIT kernels. + Only available for SuiteSparse:GraphBLAS >=8 + jit_error_log : str + Error log file. + Only available for SuiteSparse:GraphBLAS >=8 + jit_cache_path : str + The folder with the compiled kernels. + Only available for SuiteSparse:GraphBLAS >=8 Setting values to None restores the default value for most configurations. """ @@ -154,7 +191,7 @@ class GlobalConfig(BaseConfig): "nthreads": (lib.GxB_GLOBAL_NTHREADS, "int"), "chunk": (lib.GxB_GLOBAL_CHUNK, "double"), # Memory pool control - "memory_pool": (lib.GxB_MEMORY_POOL, "int64_t[64]"), + # "memory_pool": (lib.GxB_MEMORY_POOL, "int64_t[64]"), # No longer used # Diagnostics (skipping "printf" and "flush" for now) "burble": (lib.GxB_BURBLE, "bool"), "print_1based": (lib.GxB_PRINT_1BASED, "bool"), diff --git a/graphblas/tests/test_ss_utils.py b/graphblas/tests/test_ss_utils.py index 81abe5804..2df7ab939 100644 --- a/graphblas/tests/test_ss_utils.py +++ b/graphblas/tests/test_ss_utils.py @@ -232,8 +232,8 @@ def test_global_config(): else: with pytest.raises(ValueError, match="Unable to set default value for"): config[k] = None - with pytest.raises(ValueError, match="Wrong number"): - config["memory_pool"] = [1, 2] + # with pytest.raises(ValueError, match="Wrong number"): + # config["memory_pool"] = [1, 2] # No longer used assert "format" in repr(config) From 2d6faf2c26144a56d170506709f404de54ebe056 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Fri, 22 Sep 2023 10:37:03 -0500 Subject: [PATCH 31/66] Add `A.setdiag(x, k)` (#493) Also, support numpy 1.26.0 --- .github/workflows/test_and_build.yml | 11 ++- .pre-commit-config.yaml | 10 +- graphblas/core/matrix.py | 113 ++++++++++++++++++++++ graphblas/tests/test_matrix.py | 136 ++++++++++++++++++++++++++- graphblas/tests/test_ssjit.py | 5 + pyproject.toml | 3 + scripts/check_versions.sh | 12 +-- 7 files changed, 273 insertions(+), 17 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 2f48048de..5f1ab7dde 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -169,17 +169,17 @@ jobs: sparsever=$(python -c 'import random ; print(random.choice(["=0.13", "=0.14", ""]))') fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') 
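          # (Illustrative note, not part of this patch: each *ver variable above
          # picks one conda version pin at random per run, so repeated CI runs
          # sample many dependency combinations rather than one fixed matrix.)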
akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", ""]))') else # Python 3.11 - npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", "=1.26", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", ""]))') @@ -206,7 +206,7 @@ jobs: else psgver="" fi - if [[ ${npver} == "=1.25" ]] ; then + if [[ ${npver} == "=1.25" || ${npver} == "=1.26" ]] ; then numbaver="" if [[ ${spver} == "=1.8" ]] ; then spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') @@ -243,7 +243,8 @@ jobs: pdver="" yamlver="" fi - elif [[ ${npver} == "=1.25" ]] ; then + elif [[ ${npver} == "=1.25" || ${npver} == "=1.26" ]] ; then + # Don't install numba for unsupported versions of numpy numba="" numbaver=NA sparse="" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5a499e8f8..a945fe49a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.10.1 + rev: v3.12.0 hooks: - id: pyupgrade args: [--py39-plus] @@ -61,12 +61,12 @@ repos: - id: auto-walrus args: [--line-length, "100"] - repo: https://github.com/psf/black - rev: 23.7.0 + rev: 23.9.1 hooks: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.287 + rev: v0.0.290 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -79,7 +79,7 @@ additional_dependencies: &flake8_dependencies # These versions need updated manually - flake8==6.1.0 - - flake8-bugbear==23.7.10 + - flake8-bugbear==23.9.16 - flake8-simplify==0.20.0 - repo: https://github.com/asottile/yesqa rev: v1.5.0 hooks: - id: yesqa additional_dependencies: *flake8_dependencies - repo: https://github.com/codespell-project/codespell rev: v2.2.5 hooks: - id: codespell types_or: [python, rst, markdown] additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.287 + rev: v0.0.290 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index d820ca424..aed98f57d 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -2805,6 +2805,119 @@ def power(self, n, op=semiring.plus_times): dtype=self.dtype, ) + def setdiag(self, values, k=0, *, mask=None, accum=None, **opts): + """Set k'th diagonal with a Scalar, Vector, or array. + + This is not a built-in GraphBLAS operation. It is implemented as a recipe. + + Parameters + ---------- + values : Vector or list or np.ndarray or scalar + New values to assign to the diagonal. The length of Vector and array + values must match the size of the diagonal being assigned to. + k : int, default=0 + Which diagonal or off-diagonal to set. For example, set the elements + ``A[i, i+k] = values[i]``. The default, k=0, is the main diagonal. + mask : Mask, optional + Vector or Matrix Mask to control which diagonal elements to set. + If it is a Matrix Mask, then only the diagonal is used as the mask. + accum : Monoid or BinaryOp, optional + Operator to use to combine existing diagonal values and new values.
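+
+        Example (an illustrative sketch mirroring the tests added with this
+        method, not part of the original docstring)::
+
+            A = Matrix(int, 2, 3)
+            A.setdiag(1)            # scalar fill: sets A[0, 0] and A[1, 1]
+            A.setdiag([10, 20], 1)  # dense list along the first superdiagonal
+            A.setdiag(v, accum=binary.plus)  # v: a length-2 Vector; accumulate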
+ """ + if (K := maybe_integral(k)) is None: + raise TypeError(f"k must be an integer; got bad type: {type(k)}") + k = K + if k < 0: + if (size := min(self._nrows + k, self._ncols)) <= 0 and k <= -self._nrows: + raise IndexError( + f"k={k} is too small; the k'th diagonal is out of range. " + f"Valid k for Matrix with shape {self._nrows}x{self._ncols}: " + f"{-self._nrows} {'<' if self._nrows else '<='} k " + f"{'<' if self._ncols else '<='} {self._ncols}" + ) + elif (size := min(self._ncols - k, self._nrows)) <= 0 and k > 0 and k >= self._ncols: + raise IndexError( + f"k={k} is too large; the k'th diagonal is out of range. " + f"Valid k for Matrix with shape {self._nrows}x{self._ncols}: " + f"{-self._nrows} {'<' if self._nrows else '<='} k " + f"{'<' if self._ncols else '<='} {self._ncols}" + ) + + # Convert `values` to Vector if necessary (i.e., it's scalar or array) + is_scalar = clear_diag = False + if output_type(values) is Vector: + v = values + clear_diag = accum is None and v._nvals != v._size + elif type(values) is Scalar: + is_scalar = True + else: + dtype = self.dtype if self.dtype._is_udt else None + try: + # Try to make it a Scalar + values = Scalar.from_value(values, dtype, is_cscalar=None, name="") + is_scalar = True + except (TypeError, ValueError): + try: + # Else try to make it a numpy array + values, dtype = values_to_numpy_buffer(values, dtype) + except Exception: + self._expect_type( + values, + (Scalar, Vector, np.ndarray), + within="setdiag", + argname="values", + extra_message="Literal scalars also accepted.", + ) + else: + v = Vector.from_dense(values, dtype=dtype, **opts) + + if is_scalar: + v = Vector.from_scalar(values, size, **opts) + elif v._size != size: + raise DimensionMismatch( + f"Dimensions not compatible for assigning length {v._size} Vector " + f"to {k}'th diagonal of Matrix with shape {self._nrows}x{self._ncols}." + f"The Vector should be size {size}." + ) + + if mask is not None: + mask = _check_mask(mask) + if mask.parent.ndim == 2: + if mask.parent.shape != self.shape: + raise DimensionMismatch( + "Matrix mask in setdiag is the wrong shape; " + f"expected shape {self._nrows}x{self._ncols}, " + f"got {mask.parent._nrows}x{mask.parent._ncols}" + ) + if mask.complement: + mval = type(mask)(mask.parent.diag(k)).new(**opts) + mask = mval.S + M = mval.diag() + else: + M = select.diag(mask.parent, k).new(**opts) + elif mask.parent._size != size: + raise DimensionMismatch( + "Vector mask in setdiag is the wrong length; " + f"expected size {size}, got size {mask.parent._size}." 
+ ) + else: + if mask.complement: + mask = mask.new(**opts).S + M = mask.parent.diag() + if M.shape != self.shape: + M.resize(self._nrows, self._ncols) + mask = type(mask)(M) + + if clear_diag: + self(mask=mask, **opts) << select.offdiag(self, k) + + Diag = v.diag(k) + if Diag.shape != self.shape: + Diag.resize(self._nrows, self._ncols) + if mask is None: + mask = Diag.S + self(accum=accum, mask=mask, **opts) << Diag + ################################## # Extract and Assign index methods ################################## diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index fe85bb9bf..e08f96b32 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -2940,6 +2940,7 @@ def test_expr_is_like_matrix(A): "from_scalar", "from_values", "resize", + "setdiag", "update", } ignore = {"__sizeof__"} @@ -3002,9 +3003,10 @@ def test_index_expr_is_like_matrix(A): "from_dense", "from_dicts", "from_edgelist", - "from_values", "from_scalar", + "from_values", "resize", + "setdiag", } ignore = {"__sizeof__"} assert attrs - expr_attrs - ignore == expected, ( @@ -4393,3 +4395,135 @@ def test_power(A): B = A[:2, :3].new() with pytest.raises(DimensionMismatch): B.power(2) + + +def test_setdiag(): + A = Matrix(int, 2, 3) + A.setdiag(1) + expected = Matrix(int, 2, 3) + expected[0, 0] = 1 + expected[1, 1] = 1 + assert A.isequal(expected) + A.setdiag(Scalar.from_value(2), 2) + expected[0, 2] = 2 + assert A.isequal(expected) + A.setdiag(3, k=-1) + expected[1, 0] = 3 + assert A.isequal(expected) + # List (or array) is treated as dense + A.setdiag([10, 20], 1) + expected[0, 1] = 10 + expected[1, 2] = 20 + assert A.isequal(expected) + # Size 0 diagonals, which do not set anything. + # This could be valid (esp. given a size 0 vector), but let's raise for now.
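+    # (Illustrative note: for this 2x3 matrix, k=3 and k=-2 name zero-length
+    # diagonals, while k=4 and k=-3 are entirely out of bounds; both raise.)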
+ with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag(-1, 3) + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag(-1, -2) + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag([], 3) + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag(Vector(int, 0), -2) + # Now we're definitely out of bounds + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag(-1, 4) + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag(-1, -3) + with pytest.raises(TypeError, match="k must be an integer"): + A.setdiag(-1, 0.5) + with pytest.raises(TypeError, match="Bad type for argument `values` in Matrix.setdiag"): + A.setdiag(object()) + with pytest.raises(DimensionMismatch, match="Dimensions not compatible"): + A.setdiag([10, 20, 30], 1) + with pytest.raises(DimensionMismatch, match="Dimensions not compatible"): + A.setdiag([10], 1) + + # Special care for dimensions of length 0 + A = Matrix(int, 0, 2, name="A") + A.setdiag(0, 0) + A.setdiag(0, 1) + A.setdiag([], 0) + A.setdiag([], 1) + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag(0, -1) + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag([], -1) + A = Matrix(int, 2, 0, name="A") + A.setdiag(0, 0) + A.setdiag(0, -1) + A.setdiag([], 0) + A.setdiag([], -1) + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag(0, 1) + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag([], 1) + A = Matrix(int, 0, 0, name="A") + A.setdiag(0, 0) + A.setdiag([], 0) + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag(0, 1) + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag([], 1) + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag(0, -1) + with pytest.raises(IndexError, match="diagonal is out of range"): + A.setdiag([], -1) + + A = Matrix(int, 2, 2, name="A") + expected = Matrix(int, 2, 2, name="expected") + v = Vector(int, 2, name="v") + Vector(int, 2) + v[0] = 1 + A.setdiag(v) + expected[0, 0] = 1 + assert A.isequal(expected) + A.setdiag(v, accum=binary.plus) + expected[0, 0] = 2 + assert A.isequal(expected) + A.setdiag(10, mask=v.S) + expected[0, 0] = 10 + assert A.isequal(expected) + A.setdiag(10, mask=v.S, accum="+") + expected[0, 0] = 20 + assert A.isequal(expected) + # Allow mask to be a matrix + A.setdiag(10, mask=A.S, accum="+") + expected[0, 0] = 30 + assert A.isequal(expected) + # Test how to clear or not clear missing elements + A.clear() + A.setdiag(99) + A.setdiag(v) + expected[0, 0] = 1 + assert A.isequal(expected) + A.setdiag(99) + A.setdiag(v, accum="second") + expected[1, 1] = 99 + assert A.isequal(expected) + A.setdiag(99) + A.setdiag(v, mask=v.S) + assert A.isequal(expected) + + # We handle complemented masks! 
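+    # (Illustrative note: `~v.S` selects diagonal positions where v has no
+    # value -- here only index 1 -- exercising the complemented-mask path.)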
+ A.clear() + expected.clear() + A.setdiag(42, mask=~v.S) + expected[1, 1] = 42 + assert A.isequal(expected) + A.setdiag(7, mask=~A.V) + expected[0, 0] = 7 + assert A.isequal(expected) + + with pytest.raises(DimensionMismatch, match="Matrix mask in setdiag is the wrong "): + A.setdiag(9, mask=Matrix(int, 3, 3).S) + with pytest.raises(DimensionMismatch, match="Vector mask in setdiag is the wrong "): + A.setdiag(10, mask=Vector(int, 3).S) + + A.clear() + A.resize(2, 3) + expected.clear() + expected.resize(2, 3) + A.setdiag(30, mask=v.S) + expected[0, 0] = 30 + assert A.isequal(expected) diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index 57cb2bbba..bd05cf2db 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -1,4 +1,5 @@ import os +import pathlib import sys import numpy as np @@ -82,6 +83,10 @@ def _setup_jit(): gb.ss.config["jit_c_libraries"] = "" gb.ss.config["jit_c_cmake_libs"] = "" + if not pathlib.Path(gb.ss.config["jit_c_compiler_name"]).exists(): + # Can't use the JIT if we don't have a compiler! + gb.ss.config["jit_c_control"] = "off" + @pytest.fixture def v(): diff --git a/pyproject.toml b/pyproject.toml index 619ce18f2..ff970cc0f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -235,6 +235,9 @@ ignore-words-list = "coo,ba" # https://github.com/charliermarsh/ruff/ line-length = 100 target-version = "py39" +unfixable = [ + "F841" # unused-variable (Note: can leave useless expression) +] select = [ # Have we enabled too many checks that they'll become a nuisance? We'll see... "F", # pyflakes diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 1a3e894a6..a76fee1d2 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -3,15 +3,15 @@ # Use, adjust, copy/paste, etc. as necessary to answer your questions. # This may be helpful when updating dependency versions in CI. # Tip: add `--json` for more information. 
-conda search 'numpy[channel=conda-forge]>=1.25.2' -conda search 'pandas[channel=conda-forge]>=2.1.0' +conda search 'flake8-bugbear[channel=conda-forge]>=23.9.16' +conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' +conda search 'numpy[channel=conda-forge]>=1.26.0' +conda search 'pandas[channel=conda-forge]>=2.1.1' conda search 'scipy[channel=conda-forge]>=1.11.2' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.4.1' +conda search 'awkward[channel=conda-forge]>=2.4.3' conda search 'sparse[channel=conda-forge]>=0.14.0' -conda search 'fast_matrix_market[channel=conda-forge]>=1.7.2' +conda search 'fast_matrix_market[channel=conda-forge]>=1.7.3' conda search 'numba[channel=conda-forge]>=0.57.1' conda search 'pyyaml[channel=conda-forge]>=6.0.1' -conda search 'flake8-bugbear[channel=conda-forge]>=23.7.10' -conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' # conda search 'python[channel=conda-forge]>=3.9 *pypy*' From add56c4dd0455f5d9660d713d12828884396ebfd Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Tue, 26 Sep 2023 13:29:22 -0500 Subject: [PATCH 32/66] Try to fix failing test (#505) --- graphblas/tests/test_io.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py index bf2ca2015..6ad92a950 100644 --- a/graphblas/tests/test_io.py +++ b/graphblas/tests/test_io.py @@ -377,7 +377,7 @@ def test_scipy_sparse(): @pytest.mark.skipif("not ak") -@pytest.mark.xfail(np.__version__[:5] == "1.25.", reason="awkward bug with numpy 1.25") +@pytest.mark.xfail(np.__version__[:5] in {"1.25.", "1.26."}, reason="awkward bug with numpy >=1.25") def test_awkward_roundtrip(): # Vector v = gb.Vector.from_coo([1, 3, 5], [20, 21, -5], size=22) @@ -399,7 +399,7 @@ def test_awkward_roundtrip(): @pytest.mark.skipif("not ak") -@pytest.mark.xfail(np.__version__[:5] == "1.25.", reason="awkward bug with numpy 1.25") +@pytest.mark.xfail(np.__version__[:5] in {"1.25.", "1.26."}, reason="awkward bug with numpy >=1.25") def test_awkward_iso_roundtrip(): # Vector v = gb.Vector.from_coo([1, 3, 5], [20, 20, 20], size=22) From 1964ebbbb87f1fd1c6aac16c851220f09a723881 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 18:00:40 -0500 Subject: [PATCH 33/66] chore: update pre-commit hooks (#507) --- .pre-commit-config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a945fe49a..bff5b80cd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.12.0 + rev: v3.13.0 hooks: - id: pyupgrade args: [--py39-plus] @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.290 + rev: v0.0.292 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.290 + rev: v0.0.292 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint @@ -110,7 +110,7 @@ repos: - id: pyroma args: [-n, "10", .] 
- repo: https://github.com/shellcheck-py/shellcheck-py - rev: "v0.9.0.5" + rev: "v0.9.0.6" hooks: - id: shellcheck - repo: local From 43deb66f5ded8f37e303e9dc1bf525653fe336a5 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sat, 7 Oct 2023 16:40:10 -0500 Subject: [PATCH 34/66] Add notebook for creating (and exploring) logos (#506) * Add notebook for creating (and exploring) logos * Install drawsvg when testing notebooks * Add docs/_static/img/python-graphblas-logo.svg * update text logos * Add horizontal and vertical logos (w/ img and text) and update usage --- .github/workflows/test_and_build.yml | 9 +- .pre-commit-config.yaml | 6 +- README.md | 2 +- binder/environment.yml | 4 +- docs/_static/img/logo-horizontal-dark.svg | 1 + docs/_static/img/logo-horizontal-light.svg | 1 + .../img/logo-horizontal-medium-big.svg | 1 + docs/_static/img/logo-horizontal-medium.svg | 1 + docs/_static/img/logo-name-light.svg | 2 +- docs/_static/img/logo-name-medium-big.svg | 1 + docs/_static/img/logo-name-medium.svg | 2 +- docs/_static/img/logo-vertical-dark.svg | 1 + docs/_static/img/logo-vertical-light.svg | 1 + docs/_static/img/logo-vertical-medium.svg | 1 + docs/_static/img/python-graphblas-logo.svg | 1 + docs/conf.py | 6 +- environment.yml | 3 + notebooks/logos_and_colors.ipynb | 1467 +++++++++++++++++ scripts/check_versions.sh | 6 +- 19 files changed, 1502 insertions(+), 14 deletions(-) create mode 100644 docs/_static/img/logo-horizontal-dark.svg create mode 100644 docs/_static/img/logo-horizontal-light.svg create mode 100644 docs/_static/img/logo-horizontal-medium-big.svg create mode 100644 docs/_static/img/logo-horizontal-medium.svg create mode 100644 docs/_static/img/logo-name-medium-big.svg create mode 100644 docs/_static/img/logo-vertical-dark.svg create mode 100644 docs/_static/img/logo-vertical-light.svg create mode 100644 docs/_static/img/logo-vertical-medium.svg create mode 100644 docs/_static/img/python-graphblas-logo.svg create mode 100644 notebooks/logos_and_colors.ipynb diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 5f1ab7dde..e3930a853 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -206,7 +206,12 @@ jobs: else psgver="" fi - if [[ ${npver} == "=1.25" || ${npver} == "=1.26" ]] ; then + if [[ ${npver} == "=1.26" ]] ; then + numbaver="" + if [[ ${spver} == "=1.8" || ${spver} == "=1.9" ]] ; then + spver=$(python -c 'import random ; print(random.choice(["=1.10", "=1.11", ""]))') + fi + elif [[ ${npver} == "=1.25" ]] ; then numbaver="" if [[ ${spver} == "=1.8" ]] ; then spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') @@ -260,7 +265,7 @@ jobs: pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ - ${{ matrix.slowtask == 'notebooks' && 'matplotlib nbconvert jupyter "ipython>=7"' || '' }} \ + ${{ matrix.slowtask == 'notebooks' && 'matplotlib nbconvert jupyter "ipython>=7" drawsvg' || '' }} \ ${{ steps.sourcetype.outputs.selected == 'upstream' && 'cython' || '' }} \ ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4"' || '' }} \ ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} \ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bff5b80cd..565e1dc0d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: isort # Let's keep `pyupgrade` 
even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.13.0 + rev: v3.14.0 hooks: - id: pyupgrade args: [--py39-plus] @@ -80,14 +80,14 @@ repos: # These versions need updated manually - flake8==6.1.0 - flake8-bugbear==23.9.16 - - flake8-simplify==0.20.0 + - flake8-simplify==0.21.0 - repo: https://github.com/asottile/yesqa rev: v1.5.0 hooks: - id: yesqa additional_dependencies: *flake8_dependencies - repo: https://github.com/codespell-project/codespell - rev: v2.2.5 + rev: v2.2.6 hooks: - id: codespell types_or: [python, rst, markdown] diff --git a/README.md b/README.md index 4581ef54a..4509e44ac 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -![Python-graphblas](https://raw.githubusercontent.com/python-graphblas/python-graphblas/main/docs/_static/img/logo-name-medium.svg) +![Python-graphblas](https://raw.githubusercontent.com/python-graphblas/python-graphblas/main/docs/_static/img/logo-horizontal-medium-big.svg) [![conda-forge](https://img.shields.io/conda/vn/conda-forge/python-graphblas.svg)](https://anaconda.org/conda-forge/python-graphblas) [![pypi](https://img.shields.io/pypi/v/python-graphblas.svg)](https://pypi.python.org/pypi/python-graphblas/) diff --git a/binder/environment.yml b/binder/environment.yml index ef72a4d2b..11cd98e0c 100644 --- a/binder/environment.yml +++ b/binder/environment.yml @@ -2,9 +2,11 @@ name: graphblas channels: - conda-forge dependencies: - - python=3.10 + - python=3.11 - python-graphblas - matplotlib - networkx - pandas - scipy + - drawsvg + - cairosvg diff --git a/docs/_static/img/logo-horizontal-dark.svg b/docs/_static/img/logo-horizontal-dark.svg new file mode 100644 index 000000000..be9e5ccca --- /dev/null +++ b/docs/_static/img/logo-horizontal-dark.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/_static/img/logo-horizontal-light.svg b/docs/_static/img/logo-horizontal-light.svg new file mode 100644 index 000000000..5894eed9a --- /dev/null +++ b/docs/_static/img/logo-horizontal-light.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/_static/img/logo-horizontal-medium-big.svg b/docs/_static/img/logo-horizontal-medium-big.svg new file mode 100644 index 000000000..649c2aef3 --- /dev/null +++ b/docs/_static/img/logo-horizontal-medium-big.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/_static/img/logo-horizontal-medium.svg b/docs/_static/img/logo-horizontal-medium.svg new file mode 100644 index 000000000..038781a3f --- /dev/null +++ b/docs/_static/img/logo-horizontal-medium.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/_static/img/logo-name-light.svg b/docs/_static/img/logo-name-light.svg index e9d9738ee..3331ae561 100644 --- a/docs/_static/img/logo-name-light.svg +++ b/docs/_static/img/logo-name-light.svg @@ -1 +1 @@ - + diff --git a/docs/_static/img/logo-name-medium-big.svg b/docs/_static/img/logo-name-medium-big.svg new file mode 100644 index 000000000..7bb245898 --- /dev/null +++ b/docs/_static/img/logo-name-medium-big.svg @@ -0,0 +1 @@ + diff --git a/docs/_static/img/logo-name-medium.svg b/docs/_static/img/logo-name-medium.svg index 2c718ba26..3128fda35 100644 --- a/docs/_static/img/logo-name-medium.svg +++ b/docs/_static/img/logo-name-medium.svg @@ -1 +1 @@ - + diff --git a/docs/_static/img/logo-vertical-dark.svg b/docs/_static/img/logo-vertical-dark.svg new file mode 100644 index 000000000..25dcefc17 --- /dev/null +++ b/docs/_static/img/logo-vertical-dark.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/docs/_static/img/logo-vertical-light.svg b/docs/_static/img/logo-vertical-light.svg new file mode 100644 index 000000000..1cb22644d --- /dev/null +++ b/docs/_static/img/logo-vertical-light.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/_static/img/logo-vertical-medium.svg b/docs/_static/img/logo-vertical-medium.svg new file mode 100644 index 000000000..db2fcaefe --- /dev/null +++ b/docs/_static/img/logo-vertical-medium.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/_static/img/python-graphblas-logo.svg b/docs/_static/img/python-graphblas-logo.svg new file mode 100644 index 000000000..2422973ff --- /dev/null +++ b/docs/_static/img/python-graphblas-logo.svg @@ -0,0 +1 @@ + diff --git a/docs/conf.py b/docs/conf.py index 2e6f616d8..283f6d047 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -55,14 +55,16 @@ # html_theme = "pydata_sphinx_theme" +html_favicon = "_static/img/python-graphblas-logo.svg" + # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] html_theme_options = { "logo": { - "image_light": "_static/img/logo-name-light.svg", - "image_dark": "_static/img/logo-name-dark.svg", + "image_light": "_static/img/logo-horizontal-light.svg", + "image_dark": "_static/img/logo-horizontal-dark.svg", }, "github_url": "https://github.com/python-graphblas/python-graphblas", } diff --git a/environment.yml b/environment.yml index 1a7fb6fa8..4455f4ac6 100644 --- a/environment.yml +++ b/environment.yml @@ -48,6 +48,9 @@ dependencies: - numpydoc - pydata-sphinx-theme - sphinx-panels + # For building logo + - drawsvg + - cairosvg # EXTRA (optional; uncomment as desired) # - autoflake # - black diff --git a/notebooks/logos_and_colors.ipynb b/notebooks/logos_and_colors.ipynb new file mode 100644 index 000000000..7b64a2208 --- /dev/null +++ b/notebooks/logos_and_colors.ipynb @@ -0,0 +1,1467 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "1ade2e62-38f4-4017-a0d3-e09f8587c376", + "metadata": {}, + "source": [ + "# Logos and Color Palette of Python-graphblas\n", + "\n", + "To create a minimal environment to run this notebook:\n", + "```bash\n", + "$ conda create -n drawsvg -c conda-forge drawsvg cairosvg scipy jupyter\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "bf42676c-190a-4803-a567-09e0ed260d6a", + "metadata": {}, + "outputs": [], + "source": [ + "import drawsvg as draw\n", + "import numpy as np\n", + "from scipy.spatial.transform import Rotation" + ] + }, + { + "cell_type": "markdown", + "id": "876a6128-94e4-4fb0-938d-0980a2033701", + "metadata": {}, + "source": [ + "## Define color palette" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "786f9c9e-d999-4286-bf79-009ca1681604", + "metadata": {}, + "outputs": [], + "source": [ + "# primary\n", + "blue = \"#409DC1\"\n", + "orange = \"#FF8552\"\n", + "dark_gray = \"#39393A\"\n", + "light_gray = \"#C3C3C7\"\n", + "\n", + "# Neutral, light/dark compatible\n", + "medium_gray = \"#848487\"\n", + "\n", + "# secondary\n", + "light_blue = \"#81B7CC\"\n", + "light_orange = \"#FFBB9E\"\n", + "red = \"#6D213C\"\n", + "light_red = \"#BA708A\"\n", + "green = \"#85FFC7\"\n", + "\n", + "french_rose = \"#FA4B88\" # ;)" + ] + }, + { + "cell_type": "markdown", + "id": "adb66550-f1e8-4846-a12a-e178fe801295", + "metadata": {}, + "source": [ + "## Display color palette" + 
] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "983b0cb8-db8b-4ad0-ad5a-36975d59289e", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "Primary\n", + "\n", + "#409DC1\n", + "\n", + "#FF8552\n", + "\n", + "#39393A\n", + "\n", + "#C3C3C7\n", + "\n", + "#848487\n", + "Secondary\n", + "\n", + "#81B7CC\n", + "\n", + "#FFBB9E\n", + "\n", + "#6D213C\n", + "\n", + "#BA708A\n", + "\n", + "#85FFC7\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "d = draw.Drawing(750, 500, origin=\"center\")\n", + "d.append(\n", + " draw.Rectangle(-375, -250, 750, 500, fill=\"white\")\n", + ") # Add `stroke=\"black\"` border to see boundaries for testing\n", + "\n", + "dy = 25\n", + "dx = 0\n", + "w = h = 150\n", + "b = 25\n", + "x = -400 + 62.5 + dx\n", + "y = -200 + dy\n", + "\n", + "d.draw(\n", + " draw.Text(\n", + " \"Primary\",\n", + " x=x + 1.5 * (b + w) + w / 2,\n", + " y=y - b,\n", + " font_size=1.5 * b,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Arial\",\n", + " fill=\"black\",\n", + " )\n", + ")\n", + "d.draw(draw.Rectangle(x, y, w, h, fill=blue))\n", + "d.draw(\n", + " draw.Text(\n", + " blue.upper(),\n", + " x=x + w / 2,\n", + " y=y + h - b,\n", + " font_size=b,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Arial\",\n", + " fill=\"black\",\n", + " )\n", + ")\n", + "d.draw(draw.Rectangle(x + b + w, y, w, h, fill=orange))\n", + "d.draw(\n", + " draw.Text(\n", + " orange.upper(),\n", + " x=x + (b + w) + w / 2,\n", + " y=y + h - b,\n", + " font_size=b,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Arial\",\n", + " fill=\"black\",\n", + " )\n", + ")\n", + "d.draw(draw.Rectangle(x + 2 * (b + w), y, w, h, fill=dark_gray))\n", + "d.draw(\n", + " draw.Text(\n", + " dark_gray.upper(),\n", + " x=x + 2 * (b + w) + w / 2,\n", + " y=y + h - b,\n", + " font_size=b,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Arial\",\n", + " fill=\"white\",\n", + " )\n", + ")\n", + "d.draw(draw.Rectangle(x + 3 * (b + w), y, w, h, fill=light_gray))\n", + "d.draw(\n", + " draw.Text(\n", + " light_gray.upper(),\n", + " x=x + 3 * (b + w) + w / 2,\n", + " y=y + h - b,\n", + " font_size=b,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Arial\",\n", + " fill=\"black\",\n", + " )\n", + ")\n", + "\n", + "d.draw(draw.Rectangle(x, -25 + dy, 675, 45, fill=medium_gray))\n", + "d.draw(\n", + " draw.Text(\n", + " medium_gray.upper(),\n", + " x=x + 675 / 2,\n", + " y=-25 + 30 + dy,\n", + " font_size=22.5,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Arial\",\n", + " fill=\"black\",\n", + " )\n", + ")\n", + "\n", + "y = 40 + dy\n", + "w = h = 119\n", + "b = 20\n", + "d.draw(\n", + " draw.Text(\n", + " \"Secondary\",\n", + " x=x + 2 * (b + w) + w / 2,\n", + " y=y + h + 2 * b,\n", + " font_size=1.5 * b,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Arial\",\n", + " fill=\"black\",\n", + " )\n", + ")\n", + "d.draw(draw.Rectangle(x, y, w, h, fill=light_blue))\n", + "d.draw(\n", + " draw.Text(\n", + " light_blue.upper(),\n", + " x=x + w / 2,\n", + " y=y + h - b,\n", + " font_size=b,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Arial\",\n", + " fill=\"black\",\n", + " )\n", + ")\n", + "d.draw(draw.Rectangle(x + b + w, y, w, h, fill=light_orange))\n", + "d.draw(\n", + " draw.Text(\n", + " light_orange.upper(),\n", + " x=x + (b + w) + w / 2,\n", + " y=y + h - b,\n", + " font_size=b,\n", + " 
text_anchor=\"middle\",\n", + " font_family=\"Arial\",\n", + " fill=\"black\",\n", + " )\n", + ")\n", + "d.draw(draw.Rectangle(x + 2 * (b + w), y, w, h, fill=red))\n", + "d.draw(\n", + " draw.Text(\n", + " red.upper(),\n", + " x=x + 2 * (b + w) + w / 2,\n", + " y=y + h - b,\n", + " font_size=b,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Arial\",\n", + " fill=\"white\",\n", + " )\n", + ")\n", + "d.draw(draw.Rectangle(x + 3 * (b + w), y, w, h, fill=light_red))\n", + "d.draw(\n", + " draw.Text(\n", + " light_red.upper(),\n", + " x=x + 3 * (b + w) + w / 2,\n", + " y=y + h - b,\n", + " font_size=b,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Arial\",\n", + " fill=\"black\",\n", + " )\n", + ")\n", + "d.draw(draw.Rectangle(x + 4 * (b + w), y, w, h, fill=green))\n", + "d.draw(\n", + " draw.Text(\n", + " green.upper(),\n", + " x=x + 4 * (b + w) + w / 2,\n", + " y=y + h - b,\n", + " font_size=b,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Arial\",\n", + " fill=\"black\",\n", + " )\n", + ")\n", + "\n", + "color_palette = d\n", + "d" + ] + }, + { + "cell_type": "markdown", + "id": "e59c3941-c73b-455e-88f2-4b3aae228421", + "metadata": {}, + "source": [ + "## Display color wheel" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "c27e8ef2-04f2-4752-9c3b-cf297a0c87a5", + "metadata": {}, + "outputs": [], + "source": [ + "def create_color_wheel(color_wheel):\n", + " d = draw.Drawing(300, 300, origin=\"center\")\n", + " theta = np.pi / 3\n", + "\n", + " angle = 0\n", + " for i, color in enumerate(color_wheel):\n", + " angle = i * np.pi / 3\n", + " clip = draw.ClipPath()\n", + " if i == 5:\n", + " angle_offset = theta\n", + " else:\n", + " angle_offset = theta * 1.05\n", + " clip.append(\n", + " draw.Lines(\n", + " 0,\n", + " 0,\n", + " 300 * np.sin(angle),\n", + " 300 * np.cos(angle),\n", + " 300 * np.sin(angle + angle_offset),\n", + " 300 * np.cos(angle + angle_offset),\n", + " close=True,\n", + " )\n", + " )\n", + " if i == 0:\n", + " clip = None\n", + " d.append(draw.Circle(0, 0, 145, fill=color, clip_path=clip))\n", + "\n", + " angle = 3 * theta\n", + " for i, color in enumerate(color_wheel):\n", + " angle = ((i + 3) % 6) * np.pi / 3\n", + " clip = draw.ClipPath()\n", + " if i == 5:\n", + " angle_offset = theta\n", + " else:\n", + " angle_offset = theta * 1.05\n", + " clip.append(\n", + " draw.Lines(\n", + " 0,\n", + " 0,\n", + " 300 * np.sin(angle),\n", + " 300 * np.cos(angle),\n", + " 300 * np.sin(angle + angle_offset),\n", + " 300 * np.cos(angle + angle_offset),\n", + " close=True,\n", + " )\n", + " )\n", + " if i == 0:\n", + " clip = None\n", + " d.append(draw.Circle(0, 0, 105, fill=color, clip_path=clip))\n", + "\n", + " angle = theta\n", + " for i, color in enumerate(color_wheel):\n", + " angle = ((i + 1) % 6) * np.pi / 3\n", + " clip = draw.ClipPath()\n", + " if i == 5:\n", + " angle_offset = theta\n", + " else:\n", + " angle_offset = theta * 1.05\n", + " clip.append(\n", + " draw.Lines(\n", + " 0,\n", + " 0,\n", + " 300 * np.sin(angle),\n", + " 300 * np.cos(angle),\n", + " 300 * np.sin(angle + angle_offset),\n", + " 300 * np.cos(angle + angle_offset),\n", + " close=True,\n", + " )\n", + " )\n", + " if i == 0:\n", + " clip = None\n", + " d.append(draw.Circle(0, 0, 65, fill=color, clip_path=clip))\n", + "\n", + " d.append(draw.Circle(0, 0, 25, fill=medium_gray))\n", + " return d" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "2564bf63-8293-4828-8e38-d00a3b96b067", + "metadata": {}, + "outputs": [ + { + "data": { + 
"image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Standard\n", + "standard_wheel = create_color_wheel(\n", + " [\n", + " blue,\n", + " light_gray,\n", + " light_blue,\n", + " dark_gray,\n", + " orange,\n", + " light_orange,\n", + " ]\n", + ")\n", + "standard_wheel" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "7a500a39-4114-49bb-aa19-912c6a8a8d95", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# High contrast\n", + "high_wheel = create_color_wheel(\n", + " [\n", + " light_gray,\n", + " blue,\n", + " green,\n", + " dark_gray,\n", + " orange,\n", + " red,\n", + " ]\n", + ")\n", + "high_wheel" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "8f404efe-2b88-4bdf-9102-2e6ad9389ca3", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Low contrast\n", + "low_wheel = create_color_wheel(\n", + " [\n", + " green,\n", + " light_red,\n", + " orange,\n", + " light_blue,\n", + " light_orange,\n", + " blue,\n", + " ]\n", + ")\n", + "low_wheel" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "fd913698-ea45-4219-8003-0fd30124d091", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", 
+ "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Warm :)\n", + "warm_wheel = create_color_wheel(\n", + " [\n", + " light_gray, # or dark_gray\n", + " light_red,\n", + " french_rose, # ;)\n", + " red,\n", + " orange,\n", + " light_orange,\n", + " ]\n", + ")\n", + "warm_wheel" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "c7a3a5e6-4be4-4def-9687-00d1e3f80375", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Cool\n", + "cool_wheel = create_color_wheel(\n", + " [\n", + " light_blue,\n", + " light_gray,\n", + " blue,\n", + " light_red,\n", + " green,\n", + " dark_gray,\n", + " ]\n", + ")\n", + "cool_wheel" + ] + }, + { + "cell_type": "markdown", + "id": "343256c8-35a7-4c89-aa60-c6bf60930c09", + "metadata": {}, + "source": [ + "## Create logos" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "7855cd3f-8155-4d11-9730-b6041578e112", + "metadata": {}, + "outputs": [], + "source": [ + "default_angles = [\n", + " 180, # Don't modify this\n", + " 30, # How much of the \"left face\" to see\n", + " 22.5, # How much of the \"top face\" to see\n", + "]\n", + "R = Rotation.from_euler(\"ZYX\", default_angles, degrees=True).as_matrix()\n", + "\n", + "gcube = np.array(\n", + " [\n", + " [-1, 1, -1],\n", + " [-1, 1, 1],\n", + " [1, 1, 1],\n", + " [-1, -1, 1],\n", + " [1, -1, 1],\n", + " [1, 0, 1],\n", + " [0, 0, 1],\n", + " ]\n", + ")\n", + "gcube_major = gcube[:5] # Big circles\n", + "gcube_minor = gcube[5:] # Small circles\n", + "lines = np.array(\n", + " [\n", + " [gcube[1], gcube[0]],\n", + " ]\n", + ")\n", + "Gpath = np.array(\n", + " [\n", + " gcube[2],\n", + " gcube[1],\n", + " gcube[3],\n", + " gcube[4],\n", + " gcube[5],\n", + " gcube[6],\n", + " ]\n", + ")\n", + "\n", + "\n", + "def create_logo(\n", + " *,\n", + " bracket_color=None,\n", + " bg_color=None,\n", + " edge_color=None,\n", + " edge_width=8,\n", + " edge_border_color=\"white\",\n", + " edge_border_width=16,\n", + " node_color=None,\n", + " large_node_width=16,\n", + " small_node_width=8,\n", + " node_border_color=\"white\",\n", + " node_stroke_width=4,\n", + " large_border=True,\n", + " g_color=None,\n", + " angles=None,\n", + "):\n", + " if angles is None:\n", + " angles = default_angles\n", + " if edge_color is None:\n", + " edge_color = blue\n", + " if bracket_color is None:\n", + " bracket_color = edge_color\n", + " if node_color is None:\n", + " node_color = orange\n", + " if g_color is None:\n", + " g_color = edge_color\n", + "\n", + " d = draw.Drawing(190, 190, 
origin=\"center\")\n", + " if bg_color:\n", + " d.append(\n", + " draw.Rectangle(-95, -95, 190, 190, fill=bg_color)\n", + " ) # Add `stroke=\"black\"` border to see boundaries for testing\n", + "\n", + " scale = 40\n", + " dx = 0\n", + " dy = -2\n", + "\n", + " if edge_border_width:\n", + " # Add white border around lines\n", + " d.append(\n", + " draw.Lines(\n", + " *(((Gpath @ R) * scale)[:, :2] * [-1, 1]).ravel().tolist(),\n", + " fill=\"none\",\n", + " stroke=edge_border_color,\n", + " stroke_width=edge_border_width,\n", + " )\n", + " )\n", + " for (x0, y0, z0), (x1, y1, z1) in ((lines @ R) * scale).tolist():\n", + " x0 = -x0\n", + " x1 = -x1 # Just live with this\n", + " d.append(\n", + " draw.Line(\n", + " x0 + dx,\n", + " y0 + dy,\n", + " x1 + dx,\n", + " y1 + dy,\n", + " stroke=edge_border_color,\n", + " stroke_width=edge_border_width,\n", + " )\n", + " )\n", + "\n", + " # Add edges\n", + " d.append(\n", + " draw.Lines(\n", + " *(((Gpath @ R) * scale)[:, :2] * [-1, 1]).ravel().tolist(),\n", + " fill=\"none\",\n", + " stroke=g_color,\n", + " stroke_width=edge_width,\n", + " )\n", + " )\n", + " for (x0, y0, z0), (x1, y1, z1) in ((lines @ R) * scale).tolist():\n", + " x0 = -x0\n", + " x1 = -x1\n", + " d.append(\n", + " draw.Line(\n", + " x0 + dx, y0 + dy, x1 + dx, y1 + dy, stroke=edge_color, stroke_width=edge_width\n", + " )\n", + " )\n", + "\n", + " # Add vertices\n", + " for x, y, z in ((gcube_major @ R) * scale).tolist():\n", + " x = -x\n", + " d.append(\n", + " draw.Circle(\n", + " x + dx,\n", + " y + dy,\n", + " large_node_width,\n", + " fill=node_color,\n", + " stroke=node_border_color,\n", + " stroke_width=node_stroke_width if large_border else 0,\n", + " )\n", + " )\n", + " for x, y, z in ((gcube_minor @ R) * scale).tolist():\n", + " x = -x\n", + " d.append(\n", + " draw.Circle(\n", + " x + dx,\n", + " y + dy,\n", + " small_node_width,\n", + " fill=node_color,\n", + " stroke=node_border_color,\n", + " stroke_width=node_stroke_width,\n", + " )\n", + " )\n", + "\n", + " # Add brackets\n", + " d.append(\n", + " draw.Text(\n", + " \"[\",\n", + " x=-85,\n", + " y=52,\n", + " font_size=214,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Courier New\",\n", + " fill=bracket_color,\n", + " )\n", + " )\n", + " d.append(\n", + " draw.Text(\n", + " \"]\",\n", + " x=85,\n", + " y=52,\n", + " font_size=214,\n", + " text_anchor=\"middle\",\n", + " font_family=\"Courier New\",\n", + " fill=bracket_color,\n", + " )\n", + " )\n", + "\n", + " return d" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "4325e0b8-dbbb-4219-a2b3-4d9cdee2bdc8", + "metadata": {}, + "outputs": [], + "source": [ + "logo_defaults = dict(\n", + " bracket_color=blue,\n", + " edge_color=blue,\n", + " node_color=orange,\n", + " edge_border_width=0,\n", + " edge_width=12,\n", + " small_node_width=11,\n", + " large_node_width=17,\n", + " node_border_color=\"none\",\n", + " node_stroke_width=0,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "f886df89-b3b5-4671-bcc0-98e8705feb5a", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[\n", + "]\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "create_logo(bg_color=\"white\", **logo_defaults)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": 
"68e01137-55e3-4973-bf97-4fcd36c8c662", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[\n", + "]\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "create_logo(bg_color=\"black\", **logo_defaults)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "b1d5e928-16c5-4377-aee1-1489ab45efc8", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[\n", + "]\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Transparent background\n", + "logo = create_logo(**logo_defaults)\n", + "logo" + ] + }, + { + "cell_type": "markdown", + "id": "b187c131-d337-4a7b-ab54-80ebe0f48ab4", + "metadata": {}, + "source": [ + "## Alternatives with gray brackets\n", + "### Background-agnostic (works with light and dark mode)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "acca9b2e-2f54-4b86-9a33-2c57502f6160", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[\n", + "]\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "medium_logo = create_logo(**{**logo_defaults, \"bracket_color\": medium_gray})\n", + "create_logo(bg_color=\"white\", **{**logo_defaults, \"bracket_color\": medium_gray})" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "f5d0086d-b50e-49eb-9aae-b0953cdc0045", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[\n", + "]\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "create_logo(bg_color=\"black\", **{**logo_defaults, \"bracket_color\": medium_gray})" + ] + }, + { + "cell_type": "markdown", + "id": "c4dce89d-e34c-4190-a068-7e78cdeea745", + "metadata": {}, + "source": [ + "### For light mode" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "27137343-141a-422e-abd6-123af3416ea4", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[\n", + "]\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "light_logo = create_logo(**{**logo_defaults, \"bracket_color\": dark_gray})\n", + "create_logo(bg_color=\"white\", **{**logo_defaults, \"bracket_color\": dark_gray})" + ] + }, + { + "cell_type": "markdown", + "id": "8a70b0f7-c3c4-44ae-af09-8992400f362e", + "metadata": {}, + "source": [ + "### For dark mode" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "3ab9bb40-d7a8-4788-9971-54a5779d284d", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + 
"[\n", + "]\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dark_logo = create_logo(**{**logo_defaults, \"bracket_color\": light_gray})\n", + "create_logo(bg_color=\"black\", **{**logo_defaults, \"bracket_color\": light_gray})" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "d53046c1-8cbb-47fa-a88b-4d98958df26b", + "metadata": {}, + "outputs": [], + "source": [ + "if False:\n", + " logo.save_svg(\"python-graphblas-logo.svg\")\n", + " light_logo.save_svg(\"python-graphblas-logo-light.svg\")\n", + " medium_logo.save_svg(\"python-graphblas-logo-medium.svg\")\n", + " dark_logo.save_svg(\"python-graphblas-logo-dark.svg\")\n", + " color_palette.save_svg(\"color-palette.svg\")\n", + " standard_wheel.save_svg(\"color-wheel.svg\")\n", + " high_wheel.save_svg(\"color-wheel-high.svg\")\n", + " low_wheel.save_svg(\"color-wheel-low.svg\")\n", + " warm_wheel.save_svg(\"color-wheel-warm.svg\")\n", + " cool_wheel.save_svg(\"color-wheel-cool.svg\")" + ] + }, + { + "cell_type": "markdown", + "id": "51093fab-600b-47d7-9809-fa0f16e7246f", + "metadata": {}, + "source": [ + "### *NOTE: The font in the SVG files should be converted to paths, because not all systems have Courier New*\n", + "Also, SVG files can be minified here: https://vecta.io/nano" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index a76fee1d2..9051ebe6e 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -4,12 +4,12 @@ # This may be helpful when updating dependency versions in CI. # Tip: add `--json` for more information. 
conda search 'flake8-bugbear[channel=conda-forge]>=23.9.16' -conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' +conda search 'flake8-simplify[channel=conda-forge]>=0.21.0' conda search 'numpy[channel=conda-forge]>=1.26.0' conda search 'pandas[channel=conda-forge]>=2.1.1' -conda search 'scipy[channel=conda-forge]>=1.11.2' +conda search 'scipy[channel=conda-forge]>=1.11.3' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.4.3' +conda search 'awkward[channel=conda-forge]>=2.4.4' conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.3' conda search 'numba[channel=conda-forge]>=0.57.1' From 39d52b10905919f22c5e735b8b268d880e743dfc Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 11 Oct 2023 09:22:50 -0500 Subject: [PATCH 35/66] Add support for Python 3.12 (#508) * Add support for Python 3.12 Also, test against python-suitesparse-graphblas 8.2.0.1 * Update codecov config --- .codecov.yml | 9 +++++++ .github/workflows/imports.yml | 4 ++- .github/workflows/test_and_build.yml | 39 ++++++++++++++++++++++------ .pre-commit-config.yaml | 6 ++--- environment.yml | 1 + pyproject.toml | 10 ++++--- scripts/check_versions.sh | 2 +- 7 files changed, 55 insertions(+), 16 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index 1720ac027..7a57a7568 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -1,3 +1,12 @@ +coverage: + status: + project: + default: + informational: true + patch: + default: + informational: true + changes: false comment: off ignore: - graphblas/viz.py diff --git a/.github/workflows/imports.yml b/.github/workflows/imports.yml index 753ce5162..18be6256a 100644 --- a/.github/workflows/imports.yml +++ b/.github/workflows/imports.yml @@ -33,17 +33,19 @@ jobs: 3.9 3.10 3.11 + 3.12 weights: | 1 1 1 + 1 test_imports: needs: rngs runs-on: ${{ needs.rngs.outputs.os }} # runs-on: ${{ matrix.os }} # strategy: # matrix: - # python-version: ["3.9", "3.10", "3.11"] + # python-version: ["3.9", "3.10", "3.11", "3.12"] # os: ["ubuntu-latest", "macos-latest", "windows-latest"] steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index e3930a853..b1ca58616 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -112,10 +112,12 @@ jobs: 3.9 3.10 3.11 + 3.12 weights: | 1 1 1 + 1 - name: RNG for source of python-suitesparse-graphblas uses: ddradar/choose-random-action@v2.0.2 id: sourcetype @@ -178,11 +180,16 @@ jobs: spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", ""]))') - else # Python 3.11 + elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", "=1.26", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", ""]))') + else # Python 3.12 + npver=$(python -c 'import random ; print(random.choice(["=1.26", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.11", ""]))') 
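+            # (These Python 3.12 choice lists are short because, as of this patch,
+            # only the newest releases of these dependencies support 3.12.)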
+ pdver=$(python -c 'import random ; print(random.choice(["=2.1", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=2.4", ""]))') fi if [[ ${{ steps.sourcetype.outputs.selected }} == "source" || ${{ steps.sourcetype.outputs.selected }} == "upstream" ]]; then # TODO: there are currently issues with some numpy versions when @@ -195,16 +202,23 @@ jobs: # That is, we don't need to support versions of it that are two years old. # But, it's still useful for us to test with different versions! psg="" - if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", ""]))') + if [[ ${{ steps.sourcetype.outputs.selected}} == "upstream" ]] ; then + psgver="" + elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]] ; then + if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["=8.2.0.1", ""]))') + psg=python-suitesparse-graphblas${psgver} + else + psgver=$(python -c 'import random ; print(random.choice(["==8.2.0.1", ""]))') + fi + elif [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", "=8.2.0.1", ""]))') psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", "==8.2.0.1", ""]))') elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions - psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", ""]))') - else - psgver="" + psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", "==8.2.0.1", ""]))') fi if [[ ${npver} == "=1.26" ]] ; then numbaver="" @@ -258,10 +272,15 @@ jobs: numba=numba${numbaver} sparse=sparse${sparsever} fi + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]]; then + coveralls="" + else + coveralls="coveralls=3.3.1" + fi echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psg${psgver}" set -x # echo on - $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler make \ + $(command -v mamba || command -v conda) install packaging pytest coverage ${coveralls} pytest-randomly cffi donfig tomli c-compiler make \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ @@ -397,11 +416,15 @@ jobs: if: matrix.slowtask == 'pytest_bizarro' run: | # This step uses `black` + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]]; then + pip install black # Latest version of black on conda-forge does not have builds for Python 3.12 + fi coverage run -a -m graphblas.core.automethods coverage run -a -m graphblas.core.infixmethods git diff --exit-code - name: Coverage1 id: coverageAttempt1 + if: 
startsWith(steps.pyver.outputs.selected, '3.12') != true continue-on-error: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 565e1dc0d..ee6600327 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ default_language_version: python: python3 repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: check-added-large-files - id: check-case-conflict @@ -51,7 +51,7 @@ repos: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.14.0 + rev: v3.15.0 hooks: - id: pyupgrade args: [--py39-plus] @@ -126,7 +126,7 @@ repos: args: [graphblas/] pass_filenames: false - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: no-commit-to-branch # no commit directly to main # diff --git a/environment.yml b/environment.yml index 4455f4ac6..1863d4006 100644 --- a/environment.yml +++ b/environment.yml @@ -69,6 +69,7 @@ dependencies: # - flake8-simplify # - gcc # - gh + # - git # - graph-tool # - xorg-libxcursor # for graph-tool # - grayskull diff --git a/pyproject.toml b/pyproject.toml index ff970cc0f..9579b1c16 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,6 +47,7 @@ classifiers = [ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3 :: Only", "Intended Audience :: Developers", "Intended Audience :: Other Audience", @@ -62,7 +63,7 @@ dependencies = [ "pyyaml >=5.4", # These won't be installed by default after 2024.3.0 # Use e.g. "python-graphblas[suitesparse]" or "python-graphblas[default]" instead - "suitesparse-graphblas >=7.4.0.0, <7.5", + "suitesparse-graphblas >=7.4.0.0, <9", "numba >=0.55; python_version<'3.12'", # make optional where numba is not supported ] @@ -74,7 +75,7 @@ changelog = "https://github.com/python-graphblas/python-graphblas/releases" [project.optional-dependencies] suitesparse = [ - "suitesparse-graphblas >=7.4.0.0, <7.5", + "suitesparse-graphblas >=7.4.0.0, <9", ] networkx = [ "networkx >=2.8", @@ -156,7 +157,7 @@ dirty_template = "{tag}+{ccount}.g{sha}.dirty" [tool.black] line-length = 100 -target-version = ["py39", "py310", "py311"] +target-version = ["py39", "py310", "py311", "py312"] [tool.isort] sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] @@ -207,6 +208,9 @@ filterwarnings = [ # pypy gives this warning "ignore:can't resolve package from __spec__ or __package__:ImportWarning:", + + # Python 3.12 introduced this deprecation, which is triggered by pandas 2.1.1 + "ignore:datetime.datetime.utcfromtimestamp:DeprecationWarning:dateutil", ] [tool.coverage.run] diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 9051ebe6e..c373692ed 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -9,7 +9,7 @@ conda search 'numpy[channel=conda-forge]>=1.26.0' conda search 'pandas[channel=conda-forge]>=2.1.1' conda search 'scipy[channel=conda-forge]>=1.11.3' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.4.4' +conda search 'awkward[channel=conda-forge]>=2.4.5' conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.3' conda search 'numba[channel=conda-forge]>=0.57.1' From 
486b422571df1bc7708f884fcdef84ae82fcdefc Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 11 Oct 2023 11:44:20 -0500 Subject: [PATCH 36/66] Drop coveralls (use codecov instead) (#509) --- .github/workflows/test_and_build.yml | 56 +--------------------------- README.md | 2 +- 2 files changed, 3 insertions(+), 55 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index b1ca58616..bf34f1ce5 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -272,15 +272,10 @@ jobs: numba=numba${numbaver} sparse=sparse${sparsever} fi - if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]]; then - coveralls="" - else - coveralls="coveralls=3.3.1" - fi echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psg${psgver}" set -x # echo on - $(command -v mamba || command -v conda) install packaging pytest coverage ${coveralls} pytest-randomly cffi donfig tomli c-compiler make \ + $(command -v mamba || command -v conda) install packaging pytest coverage pytest-randomly cffi donfig tomli c-compiler make \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ @@ -422,42 +417,10 @@ jobs: coverage run -a -m graphblas.core.automethods coverage run -a -m graphblas.core.infixmethods git diff --exit-code - - name: Coverage1 - id: coverageAttempt1 - if: startsWith(steps.pyver.outputs.selected, '3.12') != true - continue-on-error: true - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - COVERALLS_FLAG_NAME: ${{ matrix.os }}/${{ matrix.slowtask }} - COVERALLS_PARALLEL: true + - name: Coverage run: | coverage xml coverage report --show-missing - coveralls --service=github - # Retry upload if first attempt failed. - # This happens somewhat randomly and for irregular reasons. - # Logic is a duplicate of previous step. - - name: Coverage2 - id: coverageAttempt2 - if: steps.coverageAttempt1.outcome == 'failure' - continue-on-error: true - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - COVERALLS_FLAG_NAME: ${{ matrix.os }}/${{ matrix.slowtask }} - COVERALLS_PARALLEL: true - run: | - coveralls --service=github - - name: Coverage3 - id: coverageAttempt3 - if: steps.coverageAttempt2.outcome == 'failure' - # Continue even if it failed 3 times... (sheesh! use codecov instead) - continue-on-error: true - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - COVERALLS_FLAG_NAME: ${{ matrix.os }}/${{ matrix.slowtask }} - COVERALLS_PARALLEL: true - run: | - coveralls --service=github - name: codecov uses: codecov/codecov-action@v3 - name: Notebooks Execution check @@ -467,18 +430,3 @@ jobs: if python -c 'import numba' 2> /dev/null ; then jupyter nbconvert --to notebook --execute notebooks/*ipynb fi - - finish: - needs: build_and_test - if: always() - runs-on: ubuntu-latest - steps: - - uses: actions/setup-python@v4 - with: - python-version: "3.10" - - run: python -m pip install --upgrade pip - - run: pip install coveralls - - name: Coveralls Finished - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: coveralls --finish diff --git a/README.md b/README.md index 4509e44ac..0a4342dd3 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@
[![Tests](https://github.com/python-graphblas/python-graphblas/workflows/Tests/badge.svg?branch=main)](https://github.com/python-graphblas/python-graphblas/actions) [![Docs](https://readthedocs.org/projects/python-graphblas/badge/?version=latest)](https://python-graphblas.readthedocs.io/en/latest/) -[![Coverage](https://coveralls.io/repos/python-graphblas/python-graphblas/badge.svg?branch=main)](https://coveralls.io/r/python-graphblas/python-graphblas) +[![Coverage](https://codecov.io/gh/python-graphblas/python-graphblas/graph/badge.svg?token=D7HHLDPQ2Q)](https://codecov.io/gh/python-graphblas/python-graphblas) [![pyOpenSci](https://tinyurl.com/y22nb8up)](https://github.com/pyOpenSci/software-review/issues/81)
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7328791.svg)](https://doi.org/10.5281/zenodo.7328791) From c1dcc385d42e8f8925476c9c9143dd93e277999d Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sat, 14 Oct 2023 10:54:20 -0500 Subject: [PATCH 37/66] Add NumFOCUS badge (#510) --- .pre-commit-config.yaml | 4 ++-- README.md | 7 ++++--- scripts/check_versions.sh | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ee6600327..afdc6c75b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -33,7 +33,7 @@ repos: - id: name-tests-test args: ["--pytest-test-first"] - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.14 + rev: v0.15 hooks: - id: validate-pyproject name: Validate pyproject.toml @@ -98,7 +98,7 @@ repos: hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint - rev: v0.6.8 + rev: v0.7.0 hooks: - id: sphinx-lint args: [--enable, all, "--disable=line-too-long,leaked-markup"] diff --git a/README.md b/README.md index 0a4342dd3..c10562783 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,9 @@ ![Python-graphblas](https://raw.githubusercontent.com/python-graphblas/python-graphblas/main/docs/_static/img/logo-horizontal-medium-big.svg) +[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org) +[![pyOpenSci](https://tinyurl.com/y22nb8up)](https://github.com/pyOpenSci/software-review/issues/81) +[![Discord](https://img.shields.io/badge/Chat-Discord-Blue?color=5865f2)](https://discord.com/invite/vur45CbwMz) +
[![conda-forge](https://img.shields.io/conda/vn/conda-forge/python-graphblas.svg)](https://anaconda.org/conda-forge/python-graphblas) [![pypi](https://img.shields.io/pypi/v/python-graphblas.svg)](https://pypi.python.org/pypi/python-graphblas/) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/python-graphblas)](https://pypi.python.org/pypi/python-graphblas/) @@ -8,11 +12,8 @@ [![Tests](https://github.com/python-graphblas/python-graphblas/workflows/Tests/badge.svg?branch=main)](https://github.com/python-graphblas/python-graphblas/actions) [![Docs](https://readthedocs.org/projects/python-graphblas/badge/?version=latest)](https://python-graphblas.readthedocs.io/en/latest/) [![Coverage](https://codecov.io/gh/python-graphblas/python-graphblas/graph/badge.svg?token=D7HHLDPQ2Q)](https://codecov.io/gh/python-graphblas/python-graphblas) -[![pyOpenSci](https://tinyurl.com/y22nb8up)](https://github.com/pyOpenSci/software-review/issues/81) -
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7328791.svg)](https://doi.org/10.5281/zenodo.7328791) [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/python-graphblas/python-graphblas/HEAD?filepath=notebooks%2FIntro%20to%20GraphBLAS%20%2B%20SSSP%20example.ipynb) -[![Discord](https://img.shields.io/badge/Chat-Discord-blue)](https://discord.com/invite/vur45CbwMz) Python library for GraphBLAS: high-performance sparse linear algebra for scalable graph analytics. For algorithms, see diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index c373692ed..dc0331359 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -9,7 +9,7 @@ conda search 'numpy[channel=conda-forge]>=1.26.0' conda search 'pandas[channel=conda-forge]>=2.1.1' conda search 'scipy[channel=conda-forge]>=1.11.3' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.4.5' +conda search 'awkward[channel=conda-forge]>=2.4.6' conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.3' conda search 'numba[channel=conda-forge]>=0.57.1' From cb9be5406b529546d859638424100848f5d7fb64 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sat, 14 Oct 2023 12:00:44 -0500 Subject: [PATCH 38/66] Test against python-suitesparse-graphblas 8.2.1.0 in CI (#511) * Test against python-suitesparse-graphblas 8.2.1.0 in CI --- .github/workflows/test_and_build.yml | 13 +++++++------ CODE_OF_CONDUCT.md | 6 +++--- LICENSE | 4 ++-- README.md | 2 +- docs/make.bat | 2 +- notebooks/Example B.1 -- Level BFS.ipynb | 2 +- notebooks/Example B.3 -- Parent BFS.ipynb | 2 +- 7 files changed, 16 insertions(+), 15 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index bf34f1ce5..1a7dff313 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -144,7 +144,8 @@ jobs: use-mamba: true python-version: ${{ steps.pyver.outputs.selected }} channels: conda-forge,${{ contains(steps.pyver.outputs.selected, 'pypy') && 'defaults' || 'nodefaults' }} - channel-priority: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'flexible' || 'strict' }} + # mamba does not yet implement strict priority + # channel-priority: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'flexible' || 'strict' }} activate-environment: graphblas auto-activate-base: false - name: Setup conda @@ -206,19 +207,19 @@ jobs: psgver="" elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]] ; then if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=8.2.0.1", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["=8.2.0.1", "=8.2.1.0", ""]))') psg=python-suitesparse-graphblas${psgver} else - psgver=$(python -c 'import random ; print(random.choice(["==8.2.0.1", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==8.2.0.1", "==8.2.1.0", ""]))') fi elif [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", "=8.2.0.1", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", "=8.2.0.1", "=8.2.1.0", ""]))') psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] 
; then - psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", "==8.2.0.1", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0", ""]))') elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions - psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", "==8.2.0.1", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0", ""]))') fi if [[ ${npver} == "=1.26" ]] ; then numbaver="" diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 7cfcb10f9..814c8052a 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -54,10 +54,10 @@ incident. This Code of Conduct is adapted from the [Numba Code of Conduct][numba], which is based on the [Contributor Covenant][homepage], version 1.3.0, available at -[http://contributor-covenant.org/version/1/3/0/][version], +[https://contributor-covenant.org/version/1/3/0/][version], and the [Swift Code of Conduct][swift]. [numba]: https://github.com/numba/numba-governance/blob/accepted/code-of-conduct.md -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/3/0/ +[homepage]: https://contributor-covenant.org +[version]: https://contributor-covenant.org/version/1/3/0/ [swift]: https://swift.org/community/#code-of-conduct diff --git a/LICENSE b/LICENSE index 935875c92..21c605c21 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -192,7 +192,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/README.md b/README.md index c10562783..42ed0d41e 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ For algorithms, see - **Source:** [https://github.com/python-graphblas/python-graphblas](https://github.com/python-graphblas/python-graphblas) - **Bug reports:** [https://github.com/python-graphblas/python-graphblas/issues](https://github.com/python-graphblas/python-graphblas/issues) - **Github discussions:** [https://github.com/python-graphblas/python-graphblas/discussions](https://github.com/python-graphblas/python-graphblas/discussions) -- **Weekly community call:** [https://github.com/python-graphblas/python-graphblas/issues/247](https://github.com/python-graphblas/python-graphblas/issues/247) +- **Weekly community call:** [python-graphblas#247](https://github.com/python-graphblas/python-graphblas/issues/247) or [https://scientific-python.org/calendars/](https://scientific-python.org/calendars/) - **Chat via Discord:** [https://discord.com/invite/vur45CbwMz](https://discord.com/invite/vur45CbwMz) in the [#graphblas channel](https://discord.com/channels/786703927705862175/1024732940233605190)

diff --git a/docs/make.bat b/docs/make.bat index 2119f5109..153be5e2f 100644 --- a/docs/make.bat +++ b/docs/make.bat @@ -21,7 +21,7 @@ if errorlevel 9009 ( echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ + echo.https://www.sphinx-doc.org/ exit /b 1 ) diff --git a/notebooks/Example B.1 -- Level BFS.ipynb b/notebooks/Example B.1 -- Level BFS.ipynb index cdee2f2fc..e96d6d7d5 100644 --- a/notebooks/Example B.1 -- Level BFS.ipynb +++ b/notebooks/Example B.1 -- Level BFS.ipynb @@ -6,7 +6,7 @@ "source": [ "## Example B.1 Level Breadth-first Search\n", "\n", - "Examples come from http://people.eecs.berkeley.edu/~aydin/GraphBLAS_API_C_v13.pdf" + "Examples come from https://people.eecs.berkeley.edu/~aydin/GraphBLAS_API_C_v13.pdf" ] }, { diff --git a/notebooks/Example B.3 -- Parent BFS.ipynb b/notebooks/Example B.3 -- Parent BFS.ipynb index d1fbd82c5..d3c7c761f 100644 --- a/notebooks/Example B.3 -- Parent BFS.ipynb +++ b/notebooks/Example B.3 -- Parent BFS.ipynb @@ -6,7 +6,7 @@ "source": [ "## Example B.3 Parent Breadth-first Search\n", "\n", - "Examples come from http://people.eecs.berkeley.edu/~aydin/GraphBLAS_API_C_v13.pdf" + "Examples come from https://people.eecs.berkeley.edu/~aydin/GraphBLAS_API_C_v13.pdf" ] }, { From f0fd1945db3630504e2d208df65f4f291782d751 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Mon, 16 Oct 2023 22:08:21 -0500 Subject: [PATCH 39/66] Add docstrings for using SS JIT, and make better (#512) * Add docstrings for using SS JIT, and make better Also: - allow a SS JIT function to be registered under the same name with different input types - be extra-strict about input types for SS JIT - test numba JIT with select with udt - add codecov comment to PRs --- .codecov.yml | 6 +- .github/workflows/test_and_build.yml | 6 +- .pre-commit-config.yaml | 2 +- graphblas/core/operator/base.py | 17 +++- graphblas/core/operator/binary.py | 9 +- graphblas/core/operator/indexunary.py | 7 ++ graphblas/core/operator/select.py | 57 ++++++++++- graphblas/core/operator/unary.py | 2 + graphblas/core/ss/binary.py | 63 +++++++++++- graphblas/core/ss/indexunary.py | 89 +++++++++++++++-- graphblas/core/ss/select.py | 43 ++++++++ graphblas/core/ss/unary.py | 54 +++++++++- graphblas/tests/test_op.py | 23 ++++- graphblas/tests/test_ssjit.py | 136 ++++++++++++++++++++++++-- 14 files changed, 470 insertions(+), 44 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index 7a57a7568..1894009c1 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -7,6 +7,10 @@ coverage: default: informational: true changes: false -comment: off +comment: + layout: "header, diff" + behavior: default +github_checks: + annotations: false ignore: - graphblas/viz.py diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 1a7dff313..d4504e2fd 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -144,8 +144,7 @@ jobs: use-mamba: true python-version: ${{ steps.pyver.outputs.selected }} channels: conda-forge,${{ contains(steps.pyver.outputs.selected, 'pypy') && 'defaults' || 'nodefaults' }} - # mamba does not yet implement strict priority - # channel-priority: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'flexible' || 'strict' }} + channel-priority: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'flexible' || 'strict' }} activate-environment: graphblas auto-activate-base: false - name: Setup conda @@ -412,9 +411,6 @@ jobs: if: matrix.slowtask == 
'pytest_bizarro' run: | # This step uses `black` - if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]]; then - pip install black # Latest version of black on conda-forge does not have builds for Python 3.12 - fi coverage run -a -m graphblas.core.automethods coverage run -a -m graphblas.core.infixmethods git diff --exit-code diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index afdc6c75b..96c8b9aeb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -98,7 +98,7 @@ repos: hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint - rev: v0.7.0 + rev: v0.8.0 hooks: - id: sphinx-lint args: [--enable, all, "--disable=line-too-long,leaked-markup"] diff --git a/graphblas/core/operator/base.py b/graphblas/core/operator/base.py index cddee6a33..d66aa2f4a 100644 --- a/graphblas/core/operator/base.py +++ b/graphblas/core/operator/base.py @@ -336,15 +336,22 @@ def __getitem__(self, type_): raise KeyError(f"{self.name} does not work with {type_}") else: return self._typed_ops[type_] - if not _supports_udfs: - raise KeyError(f"{self.name} does not work with {type_}") # This is a UDT or is able to operate on UDTs such as `first` any `any` dtype = lookup_dtype(type_) return self._compile_udt(dtype, dtype) - def _add(self, op): - self._typed_ops[op.type] = op - self.types[op.type] = op.return_type + def _add(self, op, *, is_jit=False): + if is_jit: + if hasattr(op, "type2") or hasattr(op, "thunk_type"): + dtypes = (op.type, op._type2) + else: + dtypes = op.type + self.types[dtypes] = op.return_type # This is a different use of .types + self._udt_types[dtypes] = op.return_type + self._udt_ops[dtypes] = op + else: + self._typed_ops[op.type] = op + self.types[op.type] = op.return_type def __delitem__(self, type_): type_ = lookup_dtype(type_) diff --git a/graphblas/core/operator/binary.py b/graphblas/core/operator/binary.py index 77a686868..676ed0970 100644 --- a/graphblas/core/operator/binary.py +++ b/graphblas/core/operator/binary.py @@ -523,8 +523,8 @@ def _compile_udt(self, dtype, dtype2): if dtypes in self._udt_types: return self._udt_ops[dtypes] - nt = numba.types - if self.name == "eq" and not self._anonymous: + if self.name == "eq" and not self._anonymous and _has_numba: + nt = numba.types # assert dtype.np_type == dtype2.np_type itemsize = dtype.np_type.itemsize mask = _udt_mask(dtype.np_type) @@ -561,7 +561,8 @@ def binary_wrapper(z_ptr, x_ptr, y_ptr): # pragma: no cover (numba) # z_ptr[0] = True z_ptr[0] = (x[mask] == y[mask]).all() - elif self.name == "ne" and not self._anonymous: + elif self.name == "ne" and not self._anonymous and _has_numba: + nt = numba.types # assert dtype.np_type == dtype2.np_type itemsize = dtype.np_type.itemsize mask = _udt_mask(dtype.np_type) @@ -597,6 +598,8 @@ def binary_wrapper(z_ptr, x_ptr, y_ptr): # pragma: no cover (numba) # z_ptr[0] = False z_ptr[0] = (x[mask] != y[mask]).any() + elif self._numba_func is None: + raise KeyError(f"{self.name} does not work with {dtypes} types") else: numba_func = self._numba_func sig = (dtype.numba_type, dtype2.numba_type) diff --git a/graphblas/core/operator/indexunary.py b/graphblas/core/operator/indexunary.py index b5351e916..b6fc74e91 100644 --- a/graphblas/core/operator/indexunary.py +++ b/graphblas/core/operator/indexunary.py @@ -25,6 +25,10 @@ def __call__(self, val, thunk=None): thunk = False # most basic form of 0 when unifying dtypes return _call_op(self, val, right=thunk) + @property + def thunk_type(self): + return self.type if self._type2 is None else 
self._type2 + class TypedUserIndexUnaryOp(TypedOpBase): __slots__ = () @@ -41,6 +45,7 @@ def orig_func(self): def _numba_func(self): return self.parent._numba_func + thunk_type = TypedBuiltinIndexUnaryOp.thunk_type __call__ = TypedBuiltinIndexUnaryOp.__call__ @@ -210,6 +215,8 @@ def _compile_udt(self, dtype, dtype2): dtypes = (dtype, dtype2) if dtypes in self._udt_types: return self._udt_ops[dtypes] + if self._numba_func is None: + raise KeyError(f"{self.name} does not work with {dtypes} types") numba_func = self._numba_func sig = (dtype.numba_type, UINT64.numba_type, UINT64.numba_type, dtype2.numba_type) diff --git a/graphblas/core/operator/select.py b/graphblas/core/operator/select.py index 4c9cd4639..4dd65ef16 100644 --- a/graphblas/core/operator/select.py +++ b/graphblas/core/operator/select.py @@ -1,9 +1,17 @@ import inspect from ... import _STANDARD_OPERATOR_NAMES, select -from ...dtypes import BOOL +from ...dtypes import BOOL, UINT64 +from ...exceptions import check_status_carg +from .. import _has_numba, ffi, lib from .base import OpBase, ParameterizedUdf, TypedOpBase, _call_op, _deserialize_parameterized -from .indexunary import IndexUnaryOp +from .indexunary import IndexUnaryOp, TypedBuiltinIndexUnaryOp + +if _has_numba: + import numba + + from .base import _get_udt_wrapper +ffi_new = ffi.new class TypedBuiltinSelectOp(TypedOpBase): @@ -15,13 +23,15 @@ def __call__(self, val, thunk=None): thunk = False # most basic form of 0 when unifying dtypes return _call_op(self, val, thunk=thunk) + thunk_type = TypedBuiltinIndexUnaryOp.thunk_type + class TypedUserSelectOp(TypedOpBase): __slots__ = () opclass = "SelectOp" - def __init__(self, parent, name, type_, return_type, gb_obj): - super().__init__(parent, name, type_, return_type, gb_obj, f"{name}_{type_}") + def __init__(self, parent, name, type_, return_type, gb_obj, dtype2=None): + super().__init__(parent, name, type_, return_type, gb_obj, f"{name}_{type_}", dtype2=dtype2) @property def orig_func(self): @@ -31,6 +41,7 @@ def orig_func(self): def _numba_func(self): return self.parent._numba_func + thunk_type = TypedBuiltinSelectOp.thunk_type __call__ = TypedBuiltinSelectOp.__call__ @@ -120,6 +131,44 @@ def _from_indexunary(cls, iop): obj.types[type_] = op.return_type return obj + def _compile_udt(self, dtype, dtype2): + if dtype2 is None: # pragma: no cover + dtype2 = dtype + dtypes = (dtype, dtype2) + if dtypes in self._udt_types: + return self._udt_ops[dtypes] + if self._numba_func is None: + raise KeyError(f"{self.name} does not work with {dtypes} types") + + # It would be nice if we could reuse compiling done for IndexUnaryOp + numba_func = self._numba_func + sig = (dtype.numba_type, UINT64.numba_type, UINT64.numba_type, dtype2.numba_type) + numba_func.compile(sig) # Should we catch and give additional error message? 
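+        # Note: `_get_udt_wrapper` returns a wrapper function (plus its C signature)
+        # around `numba_func`; `numba.cfunc` compiles that wrapper below, and its
+        # `.cffi` entry point is what `GrB_IndexUnaryOp_new` registers as the
+        # implementation of this SelectOp.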
+ select_wrapper, wrapper_sig = _get_udt_wrapper( + numba_func, BOOL, dtype, dtype2, include_indexes=True + ) + + select_wrapper = numba.cfunc(wrapper_sig, nopython=True)(select_wrapper) + new_select = ffi_new("GrB_IndexUnaryOp*") + check_status_carg( + lib.GrB_IndexUnaryOp_new( + new_select, select_wrapper.cffi, BOOL._carg, dtype._carg, dtype2._carg + ), + "IndexUnaryOp", + new_select[0], + ) + op = TypedUserSelectOp( + self, + self.name, + dtype, + BOOL, + new_select[0], + dtype2=dtype2, + ) + self._udt_types[dtypes] = BOOL + self._udt_ops[dtypes] = op + return op + @classmethod def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=False): """Register a SelectOp without registering it in the ``graphblas.select`` namespace. diff --git a/graphblas/core/operator/unary.py b/graphblas/core/operator/unary.py index 437334ccc..7484f74d9 100644 --- a/graphblas/core/operator/unary.py +++ b/graphblas/core/operator/unary.py @@ -252,6 +252,8 @@ def unary_wrapper(z, x): def _compile_udt(self, dtype, dtype2): if dtype in self._udt_types: return self._udt_ops[dtype] + if self._numba_func is None: + raise KeyError(f"{self.name} does not work with {dtype}") numba_func = self._numba_func sig = (dtype.numba_type,) diff --git a/graphblas/core/ss/binary.py b/graphblas/core/ss/binary.py index 898257fac..6965aeaf1 100644 --- a/graphblas/core/ss/binary.py +++ b/graphblas/core/ss/binary.py @@ -31,6 +31,47 @@ def jit_c_definition(self): def register_new(name, jit_c_definition, left_type, right_type, ret_type): + """Register a new BinaryOp using the SuiteSparse:GraphBLAS JIT compiler. + + This creates a BinaryOp by compiling the C string definition of the function. + It requires a shell call to a C compiler. The resulting operator will be as + fast as if it were built-in to SuiteSparse:GraphBLAS and does not have the + overhead of additional function calls as when using ``gb.binary.register_new``. + + This is an advanced feature that requires a C compiler and proper configuration. + Configuration is handled by ``gb.ss.config``; see its docstring for details. + By default, the JIT caches results in ``~/.SuiteSparse/``. For more information, + see the SuiteSparse:GraphBLAS user guide. + + Only one type signature may be registered at a time, but repeated calls using + the same name with different input types is allowed. + + Parameters + ---------- + name : str + The name of the operator. This will show up as ``gb.binary.ss.{name}``. + The name may contain periods, ".", which will result in nested objects + such as ``gb.binary.ss.x.y.z`` for name ``"x.y.z"``. + jit_c_definition : str + The C definition as a string of the user-defined function. For example: + ``"void absdiff (double *z, double *x, double *y) { (*z) = fabs ((*x) - (*y)) ; }"``. + left_type : dtype + The dtype of the left operand of the binary operator. + right_type : dtype + The dtype of the right operand of the binary operator. + ret_type : dtype + The dtype of the result of the binary operator. 
+
+    Returns
+    -------
+    BinaryOp
+
+    See Also
+    --------
+    gb.binary.register_new
+    gb.binary.register_anonymous
+    gb.unary.ss.register_new
+    """
     if backend != "suitesparse":  # pragma: no cover (safety)
         raise RuntimeError(
             "`gb.binary.ss.register_new` invalid when not using 'suitesparse' backend"
@@ -47,9 +88,23 @@ def register_new(name, jit_c_definition, left_type, right_type, ret_type):
     right_type = lookup_dtype(right_type)
     ret_type = lookup_dtype(ret_type)
     name = name if name.startswith("ss.") else f"ss.{name}"
-    module, funcname = BinaryOp._remove_nesting(name)
-
-    rv = BinaryOp(name)
+    module, funcname = BinaryOp._remove_nesting(name, strict=False)
+    if hasattr(module, funcname):
+        rv = getattr(module, funcname)
+        if not isinstance(rv, BinaryOp):
+            BinaryOp._remove_nesting(name)
+        if (
+            (left_type, right_type) in rv.types
+            or rv._udt_types is not None
+            and (left_type, right_type) in rv._udt_types
+        ):
+            raise TypeError(
+                f"BinaryOp gb.binary.{name} already defined for "
+                f"({left_type}, {right_type}) input types"
+            )
+    else:
+        # We use `is_udt=True` to make dtype handling flexible and explicit.
+        rv = BinaryOp(name, is_udt=True)
     gb_obj = ffi_new("GrB_BinaryOp*")
     check_status_carg(
         lib.GxB_BinaryOp_new(
@@ -67,6 +122,6 @@
     op = TypedJitBinaryOp(
         rv, funcname, left_type, ret_type, gb_obj[0], jit_c_definition, dtype2=right_type
     )
-    rv._add(op)
+    rv._add(op, is_jit=True)
     setattr(module, funcname, rv)
     return rv
diff --git a/graphblas/core/ss/indexunary.py b/graphblas/core/ss/indexunary.py
index c0f185737..d5f709526 100644
--- a/graphblas/core/ss/indexunary.py
+++ b/graphblas/core/ss/indexunary.py
@@ -21,10 +21,56 @@ def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition, d
     def jit_c_definition(self):
         return self._jit_c_definition

+    thunk_type = TypedUserIndexUnaryOp.thunk_type
     __call__ = TypedUserIndexUnaryOp.__call__


 def register_new(name, jit_c_definition, input_type, thunk_type, ret_type):
+    """Register a new IndexUnaryOp using the SuiteSparse:GraphBLAS JIT compiler.
+
+    This creates an IndexUnaryOp by compiling the C string definition of the function.
+    It requires a shell call to a C compiler. The resulting operator will be as
+    fast as if it were built-in to SuiteSparse:GraphBLAS and does not have the
+    overhead of additional function calls as when using ``gb.indexunary.register_new``.
+
+    This is an advanced feature that requires a C compiler and proper configuration.
+    Configuration is handled by ``gb.ss.config``; see its docstring for details.
+    By default, the JIT caches results in ``~/.SuiteSparse/``. For more information,
+    see the SuiteSparse:GraphBLAS user guide.
+
+    Only one type signature may be registered at a time, but repeated calls using
+    the same name with different input types is allowed.
+
+    This will also create a SelectOp operator under ``gb.select.ss`` if the return
+    type is boolean.
+
+    Parameters
+    ----------
+    name : str
+        The name of the operator. This will show up as ``gb.indexunary.ss.{name}``.
+        The name may contain periods, ".", which will result in nested objects
+        such as ``gb.indexunary.ss.x.y.z`` for name ``"x.y.z"``.
+    jit_c_definition : str
+        The C definition as a string of the user-defined function. For example:
+        ``"void diffy (double *z, double *x, GrB_Index i, GrB_Index j, double *y) "``
+        ``"{ (*z) = (i + j) * fabs ((*x) - (*y)) ; }"``
+    input_type : dtype
+        The dtype of the operand of the indexunary operator.
+ thunk_type : dtype + The dtype of the thunk of the indexunary operator. + ret_type : dtype + The dtype of the result of the indexunary operator. + + Returns + ------- + IndexUnaryOp + + See Also + -------- + gb.indexunary.register_new + gb.indexunary.register_anonymous + gb.select.ss.register_new + """ if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( "`gb.indexunary.ss.register_new` invalid when not using 'suitesparse' backend" @@ -41,9 +87,23 @@ def register_new(name, jit_c_definition, input_type, thunk_type, ret_type): thunk_type = lookup_dtype(thunk_type) ret_type = lookup_dtype(ret_type) name = name if name.startswith("ss.") else f"ss.{name}" - module, funcname = IndexUnaryOp._remove_nesting(name) - - rv = IndexUnaryOp(name) + module, funcname = IndexUnaryOp._remove_nesting(name, strict=False) + if hasattr(module, funcname): + rv = getattr(module, funcname) + if not isinstance(rv, IndexUnaryOp): + IndexUnaryOp._remove_nesting(name) + if ( + (input_type, thunk_type) in rv.types + or rv._udt_types is not None + and (input_type, thunk_type) in rv._udt_types + ): + raise TypeError( + f"IndexUnaryOp gb.indexunary.{name} already defined for " + f"({input_type}, {thunk_type}) input types" + ) + else: + # We use `is_udt=True` to make dtype handling flexible and explicit. + rv = IndexUnaryOp(name, is_udt=True) gb_obj = ffi_new("GrB_IndexUnaryOp*") check_status_carg( lib.GxB_IndexUnaryOp_new( @@ -61,17 +121,32 @@ def register_new(name, jit_c_definition, input_type, thunk_type, ret_type): op = TypedJitIndexUnaryOp( rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition, dtype2=thunk_type ) - rv._add(op) + rv._add(op, is_jit=True) if ret_type == BOOL: from ..operator.select import SelectOp from .select import TypedJitSelectOp select_module, funcname = SelectOp._remove_nesting(name, strict=False) - selectop = SelectOp(name) + if hasattr(select_module, funcname): + selectop = getattr(select_module, funcname) + if not isinstance(selectop, SelectOp): + SelectOp._remove_nesting(name) + if ( + (input_type, thunk_type) in selectop.types + or selectop._udt_types is not None + and (input_type, thunk_type) in selectop._udt_types + ): + raise TypeError( + f"SelectOp gb.select.{name} already defined for " + f"({input_type}, {thunk_type}) input types" + ) + else: + # We use `is_udt=True` to make dtype handling flexible and explicit. + selectop = SelectOp(name, is_udt=True) op2 = TypedJitSelectOp( - rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition, dtype2=thunk_type + selectop, funcname, input_type, ret_type, gb_obj[0], jit_c_definition, dtype2=thunk_type ) - selectop._add(op2) + selectop._add(op2, is_jit=True) setattr(select_module, funcname, selectop) setattr(module, funcname, rv) return rv diff --git a/graphblas/core/ss/select.py b/graphblas/core/ss/select.py index 37c352b67..ff12f80fa 100644 --- a/graphblas/core/ss/select.py +++ b/graphblas/core/ss/select.py @@ -20,10 +20,53 @@ def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition, d def jit_c_definition(self): return self._jit_c_definition + thunk_type = TypedUserSelectOp.thunk_type __call__ = TypedUserSelectOp.__call__ def register_new(name, jit_c_definition, input_type, thunk_type): + """Register a new SelectOp using the SuiteSparse:GraphBLAS JIT compiler. + + This creates a SelectOp by compiling the C string definition of the function. + It requires a shell call to a C compiler. 
The resulting operator will be as + fast as if it were built-in to SuiteSparse:GraphBLAS and does not have the + overhead of additional function calls as when using ``gb.select.register_new``. + + This is an advanced feature that requires a C compiler and proper configuration. + Configuration is handled by ``gb.ss.config``; see its docstring for details. + By default, the JIT caches results in ``~/.SuiteSparse/``. For more information, + see the SuiteSparse:GraphBLAS user guide. + + Only one type signature may be registered at a time, but repeated calls using + the same name with different input types is allowed. + + This will also create an IndexUnary operator under ``gb.indexunary.ss`` + + Parameters + ---------- + name : str + The name of the operator. This will show up as ``gb.select.ss.{name}``. + The name may contain periods, ".", which will result in nested objects + such as ``gb.select.ss.x.y.z`` for name ``"x.y.z"``. + jit_c_definition : str + The C definition as a string of the user-defined function. For example: + ``"void woot (bool *z, const int32_t *x, GrB_Index i, GrB_Index j, int32_t *y) "`` + ``"{ (*z) = ((*x) + i + j == (*y)) ; }"`` + input_type : dtype + The dtype of the operand of the select operator. + thunk_type : dtype + The dtype of the thunk of the select operator. + + Returns + ------- + SelectOp + + See Also + -------- + gb.select.register_new + gb.select.register_anonymous + gb.indexunary.ss.register_new + """ if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( "`gb.select.ss.register_new` invalid when not using 'suitesparse' backend" diff --git a/graphblas/core/ss/unary.py b/graphblas/core/ss/unary.py index 97c4614c0..5a5c63632 100644 --- a/graphblas/core/ss/unary.py +++ b/graphblas/core/ss/unary.py @@ -25,6 +25,45 @@ def jit_c_definition(self): def register_new(name, jit_c_definition, input_type, ret_type): + """Register a new UnaryOp using the SuiteSparse:GraphBLAS JIT compiler. + + This creates a UnaryOp by compiling the C string definition of the function. + It requires a shell call to a C compiler. The resulting operator will be as + fast as if it were built-in to SuiteSparse:GraphBLAS and does not have the + overhead of additional function calls as when using ``gb.unary.register_new``. + + This is an advanced feature that requires a C compiler and proper configuration. + Configuration is handled by ``gb.ss.config``; see its docstring for details. + By default, the JIT caches results in ``~/.SuiteSparse/``. For more information, + see the SuiteSparse:GraphBLAS user guide. + + Only one type signature may be registered at a time, but repeated calls using + the same name with different input types is allowed. + + Parameters + ---------- + name : str + The name of the operator. This will show up as ``gb.unary.ss.{name}``. + The name may contain periods, ".", which will result in nested objects + such as ``gb.unary.ss.x.y.z`` for name ``"x.y.z"``. + jit_c_definition : str + The C definition as a string of the user-defined function. For example: + ``"void square (float *z, float *x) { (*z) = (*x) * (*x) ; } ;"`` + input_type : dtype + The dtype of the operand of the unary operator. + ret_type : dtype + The dtype of the result of the unary operator. 
+ + Returns + ------- + UnaryOp + + See Also + -------- + gb.unary.register_new + gb.unary.register_anonymous + gb.binary.ss.register_new + """ if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( "`gb.unary.ss.register_new` invalid when not using 'suitesparse' backend" @@ -40,9 +79,16 @@ def register_new(name, jit_c_definition, input_type, ret_type): input_type = lookup_dtype(input_type) ret_type = lookup_dtype(ret_type) name = name if name.startswith("ss.") else f"ss.{name}" - module, funcname = UnaryOp._remove_nesting(name) - - rv = UnaryOp(name) + module, funcname = UnaryOp._remove_nesting(name, strict=False) + if hasattr(module, funcname): + rv = getattr(module, funcname) + if not isinstance(rv, UnaryOp): + UnaryOp._remove_nesting(name) + if input_type in rv.types or rv._udt_types is not None and input_type in rv._udt_types: + raise TypeError(f"UnaryOp gb.unary.{name} already defined for {input_type} input type") + else: + # We use `is_udt=True` to make dtype handling flexible and explicit. + rv = UnaryOp(name, is_udt=True) gb_obj = ffi_new("GrB_UnaryOp*") check_status_carg( lib.GxB_UnaryOp_new( @@ -57,6 +103,6 @@ def register_new(name, jit_c_definition, input_type, ret_type): gb_obj[0], ) op = TypedJitUnaryOp(rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition) - rv._add(op) + rv._add(op, is_jit=True) setattr(module, funcname, rv) return rv diff --git a/graphblas/tests/test_op.py b/graphblas/tests/test_op.py index b54ea76c4..c7d1ce97c 100644 --- a/graphblas/tests/test_op.py +++ b/graphblas/tests/test_op.py @@ -19,7 +19,15 @@ ) from graphblas.core import _supports_udfs as supports_udfs from graphblas.core import lib, operator -from graphblas.core.operator import BinaryOp, IndexUnaryOp, Monoid, Semiring, UnaryOp, get_semiring +from graphblas.core.operator import ( + BinaryOp, + IndexUnaryOp, + Monoid, + SelectOp, + Semiring, + UnaryOp, + get_semiring, +) from graphblas.dtypes import ( BOOL, FP32, @@ -1336,6 +1344,19 @@ def badfunc2(x, y): # pragma: no cover (numba) assert binary.first[udt, dtypes.INT8].type2 is dtypes.INT8 assert monoid.any[udt].type2 is udt + def _this_or_that(val, idx, _, thunk): # pragma: no cover (numba) + return val["x"] + + sel = SelectOp.register_anonymous(_this_or_that, is_udt=True) + sel[udt] + assert udt in sel + result = v.select(sel, 0).new() + assert result.nvals == 0 + assert result.dtype == v.dtype + result = w.select(sel, 0).new() + assert result.nvals == 3 + assert result.isequal(w) + def test_dir(): for mod in [unary, binary, monoid, semiring, op]: diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index bd05cf2db..3c974c50d 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -165,6 +165,20 @@ def test_jit_unary(v): expected = Vector.from_coo([1, 3, 4, 6], [1, 1, 4, 0], dtype="FP32") assert expected.isequal(v) assert square["FP32"].jit_c_definition == cdef + assert "FP64" not in square + with burble(): + square_fp64 = unary.ss.register_new( + "square", cdef.replace("float", "double"), "FP64", "FP64" + ) + assert square_fp64 is square + assert "FP64" in square + with pytest.raises( + TypeError, match="UnaryOp gb.unary.ss.square already defined for FP32 input type" + ): + unary.ss.register_new("square", cdef, "FP32", "FP32") + unary.ss.register_new("nested.square", cdef, "FP32", "FP32") + with pytest.raises(AttributeError, match="nested is already defined"): + unary.ss.register_new("nested", cdef, "FP32", "FP32") def test_jit_binary(v): @@ -186,9 +200,11 @@ def 
test_jit_binary(v): assert not hasattr(binary, "absdiff") assert binary.ss.absdiff is absdiff assert absdiff.name == "ss.absdiff" - assert absdiff.types == {dtypes.FP64: dtypes.FP64} + assert absdiff.types == {(dtypes.FP64, dtypes.FP64): dtypes.FP64} # different than normal + assert "FP64" in absdiff + assert absdiff["FP64"].return_type == dtypes.FP64 # The JIT is unforgiving and does not coerce--use the correct types! - with pytest.raises(KeyError, match="absdiff does not work with INT64"): + with pytest.raises(KeyError, match="absdiff does not work with .INT64, INT64. types"): v << absdiff(v & v) w = (v - 1).new("FP64") v = v.dup("FP64") @@ -198,6 +214,36 @@ def test_jit_binary(v): res = absdiff(w & v).new() assert expected.isequal(res) assert absdiff["FP64"].jit_c_definition == cdef + assert "FP32" not in absdiff + with burble(): + absdiff_fp32 = binary.ss.register_new( + "absdiff", + cdef.replace("FP64", "FP32").replace("fabs", "fabsf"), + "FP32", + "FP32", + "FP32", + ) + assert absdiff_fp32 is absdiff + assert "FP32" in absdiff + with pytest.raises( + TypeError, + match="BinaryOp gb.binary.ss.absdiff already defined for .FP64, FP64. input types", + ): + binary.ss.register_new("absdiff", cdef, "FP64", "FP64", "FP64") + binary.ss.register_new("nested.absdiff", cdef, "FP64", "FP64", "FP64") + with pytest.raises(AttributeError, match="nested is already defined"): + binary.ss.register_new("nested", cdef, "FP64", "FP64", "FP64") + # Make sure we can be specific with left/right dtypes + absdiff_mixed = binary.ss.register_new( + "absdiff", + "void absdiff (double *z, double *x, float *y) { (*z) = fabs ((*x) - (double)(*y)) ; }", + "FP64", + "FP32", + "FP64", + ) + assert absdiff_mixed is absdiff + assert ("FP64", "FP32") in absdiff + assert ("FP32", "FP64") not in absdiff def test_jit_indexunary(v): @@ -218,15 +264,50 @@ def test_jit_indexunary(v): assert not hasattr(select, "diffy") assert not hasattr(select.ss, "diffy") assert diffy.name == "ss.diffy" - assert diffy.types == {dtypes.FP64: dtypes.FP64} + assert diffy.types == {(dtypes.FP64, dtypes.FP64): dtypes.FP64} + assert "FP64" in diffy + assert diffy["FP64"].return_type == dtypes.FP64 # The JIT is unforgiving and does not coerce--use the correct types! - with pytest.raises(KeyError, match="diffy does not work with INT64"): + with pytest.raises(KeyError, match="diffy does not work with .INT64, INT64. types"): v << diffy(v, 1) v = v.dup("FP64") - res = diffy(v, -1).new() + with pytest.raises(KeyError, match="diffy does not work with .FP64, INT64. types"): + v << diffy(v, -1) + res = diffy(v, -1.0).new() expected = Vector.from_coo([1, 3, 4, 6], [2, 6, 12, 6], dtype="FP64") assert expected.isequal(res) assert diffy["FP64"].jit_c_definition == cdef + assert "FP32" not in diffy + with burble(): + diffy_fp32 = indexunary.ss.register_new( + "diffy", + cdef.replace("double", "float").replace("fabs", "fabsf"), + "FP32", + "FP32", + "FP32", + ) + assert diffy_fp32 is diffy + assert "FP32" in diffy + with pytest.raises( + TypeError, + match="IndexUnaryOp gb.indexunary.ss.diffy already defined for .FP64, FP64. 
input types", + ): + indexunary.ss.register_new("diffy", cdef, "FP64", "FP64", "FP64") + indexunary.ss.register_new("nested.diffy", cdef, "FP64", "FP64", "FP64") + with pytest.raises(AttributeError, match="nested is already defined"): + indexunary.ss.register_new("nested", cdef, "FP64", "FP64", "FP64") + # Make sure we can be specific with left/right dtypes + diffy_mixed = indexunary.ss.register_new( + "diffy", + "void diffy (double *z, double *x, GrB_Index i, GrB_Index j, float *y) " + "{ (*z) = (i + j) * fabs ((*x) - (double)(*y)) ; }", + "FP64", + "FP32", + "FP64", + ) + assert diffy_mixed is diffy + assert ("FP64", "FP32") in diffy + assert ("FP32", "FP64") not in diffy def test_jit_select(v): @@ -248,20 +329,57 @@ def test_jit_select(v): assert not hasattr(indexunary, "woot") assert hasattr(indexunary.ss, "woot") assert woot.name == "ss.woot" - assert woot.types == {dtypes.INT32: dtypes.BOOL} + assert woot.types == {(dtypes.INT32, dtypes.INT32): dtypes.BOOL} + assert "INT32" in woot + assert woot["INT32"].return_type == dtypes.BOOL # The JIT is unforgiving and does not coerce--use the correct types! - with pytest.raises(KeyError, match="woot does not work with INT64"): + with pytest.raises(KeyError, match="woot does not work with .INT64, INT64. types"): v << woot(v, 1) v = v.dup("INT32") - res = woot(v, 6).new() + with pytest.raises(KeyError, match="woot does not work with .INT32, INT64. types"): + v << woot(v, 6) + res = woot(v, gb.Scalar.from_value(6, "INT32")).new() expected = Vector.from_coo([4, 6], [2, 0]) assert expected.isequal(res) - res = indexunary.ss.woot(v, 6).new() + res = indexunary.ss.woot(v, gb.Scalar.from_value(6, "INT32")).new() expected = Vector.from_coo([1, 3, 4, 6], [False, False, True, True]) assert expected.isequal(res) assert woot["INT32"].jit_c_definition == cdef + assert "INT64" not in woot + with burble(): + woot_int64 = select.ss.register_new( + "woot", cdef.replace("int32", "int64"), "INT64", "INT64" + ) + assert woot_int64 is woot + assert "INT64" in woot + with pytest.raises(TypeError, match="ss.woot already defined for .INT32, INT32. input types"): + select.ss.register_new("woot", cdef, "INT32", "INT32") + del indexunary.ss.woot + with pytest.raises(TypeError, match="ss.woot already defined for .INT32, INT32. 
input types"): + select.ss.register_new("woot", cdef, "INT32", "INT32") + select.ss.register_new("nested.woot", cdef, "INT32", "INT32") + with pytest.raises(AttributeError, match="nested is already defined"): + select.ss.register_new("nested", cdef, "INT32", "INT32") + del indexunary.ss.nested + with pytest.raises(AttributeError, match="nested is already defined"): + select.ss.register_new("nested", cdef.replace("woot", "nested"), "INT32", "INT32") + select.ss.haha = "haha" + with pytest.raises(AttributeError, match="haha is already defined"): + select.ss.register_new("haha", cdef.replace("woot", "haha"), "INT32", "INT32") + # Make sure we can be specific with left/right dtypes + woot_mixed = select.ss.register_new( + "woot", + "void woot (bool *z, const int64_t *x, GrB_Index i, GrB_Index j, int32_t *y) " + "{ (*z) = ((*x) + i + j == (*y)) ; }", + "INT64", + "INT32", + ) + assert woot_mixed is woot + assert ("INT64", "INT32") in woot + assert ("INT32", "INT64") not in woot + def test_context_importable(): if _IS_SSGB7: From 228faa1a67605bd9bfc0a6d9a64e6e49097fe0a0 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 18 Oct 2023 08:18:52 -0500 Subject: [PATCH 40/66] Add `gb.ss.burble` from python-suitesparse-graphblas (#514) --- graphblas/ss/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/graphblas/ss/__init__.py b/graphblas/ss/__init__.py index b723d9cb8..1f059771b 100644 --- a/graphblas/ss/__init__.py +++ b/graphblas/ss/__init__.py @@ -1,3 +1,5 @@ +from suitesparse_graphblas import burble + from ._core import _IS_SSGB7, about, concat, config, diag if not _IS_SSGB7: From bbb1f4fa2d26b9435a0921b20383c174a2491831 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 22 Oct 2023 10:29:38 -0500 Subject: [PATCH 41/66] Handle numpy 1.21 and pandas 2.1 incompatibility (& more maint) (#516) --- .github/workflows/test_and_build.yml | 7 ++++++- .pre-commit-config.yaml | 8 ++++---- pyproject.toml | 2 +- scripts/check_versions.sh | 4 ++-- 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index d4504e2fd..56d13557f 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -166,10 +166,11 @@ jobs: # # First let's randomly get versions of dependencies to install. # Consider removing old versions when they become problematic or very old (>=2 years). - nxver=$(python -c 'import random ; print(random.choice(["=2.7", "=2.8", "=3.0", "=3.1", ""]))') + nxver=$(python -c 'import random ; print(random.choice(["=2.7", "=2.8", "=3.0", "=3.1", "=3.2", ""]))') yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') sparsever=$(python -c 'import random ; print(random.choice(["=0.13", "=0.14", ""]))') fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') + # Randomly choosing versions of dependencies based on Python version works surprisingly well... 
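
# --- Illustration (not part of the patch): each "*ver" value in the script
# above is chosen by a one-line Python lottery embedded via `python -c`.
# A minimal runnable sketch of that pattern; the pin list here is copied from
# npver, and the empty string means "no pin, install the latest release":
import random

pins = ["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]
print(random.choice(pins))  # e.g. "=1.24"; "" adds no version constraint
# --- end illustration ---
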
          if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then
            npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))')
            spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))')
@@ -191,6 +192,10 @@ jobs:
            pdver=$(python -c 'import random ; print(random.choice(["=2.1", ""]))')
            akver=$(python -c 'import random ; print(random.choice(["=2.4", ""]))')
          fi
+          # But there may be edge cases of incompatibility we need to handle (more handled below)
+          if [[ ${pdver} == "=2.1" && ${npver} == "=1.21" ]]; then
+            pdver="=2.0"
+          fi
          if [[ ${{ steps.sourcetype.outputs.selected }} == "source" || ${{ steps.sourcetype.outputs.selected }} == "upstream" ]]; then
            # TODO: there are currently issues with some numpy versions when
            # installing python-suitesparse-graphblas from source or upstream.
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 96c8b9aeb..b2e08e638 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -61,12 +61,12 @@ repos:
       - id: auto-walrus
         args: [--line-length, "100"]
   - repo: https://github.com/psf/black
-    rev: 23.9.1
+    rev: 23.10.0
    hooks:
       - id: black
       - id: black-jupyter
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.0.292
+    rev: v0.1.1
    hooks:
       - id: ruff
         args: [--fix-only, --show-fixes]
@@ -94,11 +94,11 @@ repos:
         additional_dependencies: [tomli]
         files: ^(graphblas|docs)/
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.0.292
+    rev: v0.1.1
    hooks:
       - id: ruff
   - repo: https://github.com/sphinx-contrib/sphinx-lint
-    rev: v0.8.0
+    rev: v0.8.1
    hooks:
       - id: sphinx-lint
         args: [--enable, all, "--disable=line-too-long,leaked-markup"]
diff --git a/pyproject.toml b/pyproject.toml
index 9579b1c16..04ef28645 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -87,7 +87,7 @@ pandas = [
     "pandas >=1.2",
 ]
 scipy = [
-    "scipy >=1.8",
+    "scipy >=1.9",
 ]
 suitesparse-udf = [ # udf requires numba
     "python-graphblas[suitesparse,numba]",
diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh
index dc0331359..7c09bc168 100755
--- a/scripts/check_versions.sh
+++ b/scripts/check_versions.sh
@@ -8,10 +8,10 @@ conda search 'flake8-simplify[channel=conda-forge]>=0.21.0'
 conda search 'numpy[channel=conda-forge]>=1.26.0'
 conda search 'pandas[channel=conda-forge]>=2.1.1'
 conda search 'scipy[channel=conda-forge]>=1.11.3'
-conda search 'networkx[channel=conda-forge]>=3.1'
+conda search 'networkx[channel=conda-forge]>=3.2'
 conda search 'awkward[channel=conda-forge]>=2.4.6'
 conda search 'sparse[channel=conda-forge]>=0.14.0'
-conda search 'fast_matrix_market[channel=conda-forge]>=1.7.3'
+conda search 'fast_matrix_market[channel=conda-forge]>=1.7.4'
 conda search 'numba[channel=conda-forge]>=0.57.1'
 conda search 'pyyaml[channel=conda-forge]>=6.0.1'
 # conda search 'python[channel=conda-forge]>=3.9 *pypy*'

From 7935e50eeb8e9e479544e876e1e42d16a6fa90e7 Mon Sep 17 00:00:00 2001
From: Erik Welch
Date: Mon, 30 Oct 2023 16:14:48 -0500
Subject: [PATCH 42/66] Handle dtypes (esp. 
UDTs) better in ewise_union (#517)

---
 .pre-commit-config.yaml           |   6 +-
 graphblas/core/infix.py           | 123 +++++++++++++++++------------
 graphblas/core/matrix.py          |  29 ++++---
 graphblas/core/operator/monoid.py |  26 +------
 graphblas/core/scalar.py          |  33 +++++---
 graphblas/core/vector.py          |  29 ++++---
 graphblas/tests/test_matrix.py    |   7 ++
 graphblas/tests/test_vector.py    |   1 +
 scripts/check_versions.sh         |   4 +-
 9 files changed, 151 insertions(+), 107 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b2e08e638..3766e2e7c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -61,12 +61,12 @@ repos:
       - id: auto-walrus
         args: [--line-length, "100"]
   - repo: https://github.com/psf/black
-    rev: 23.10.0
+    rev: 23.10.1
    hooks:
       - id: black
       - id: black-jupyter
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.1.1
+    rev: v0.1.3
    hooks:
       - id: ruff
         args: [--fix-only, --show-fixes]
@@ -94,7 +94,7 @@ repos:
         additional_dependencies: [tomli]
         files: ^(graphblas|docs)/
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.1.1
+    rev: v0.1.3
    hooks:
       - id: ruff
   - repo: https://github.com/sphinx-contrib/sphinx-lint
diff --git a/graphblas/core/infix.py b/graphblas/core/infix.py
index 88fc52dbe..09b6a6811 100644
--- a/graphblas/core/infix.py
+++ b/graphblas/core/infix.py
@@ -1,8 +1,9 @@
 from .. import backend, binary
 from ..dtypes import BOOL
+from ..exceptions import DimensionMismatch
 from ..monoid import land, lor
 from ..semiring import any_pair
-from . import automethods, utils
+from . import automethods, recorder, utils
 from .base import _expect_op, _expect_type
 from .expr import InfixExprBase
 from .mask import Mask
@@ -402,43 +403,62 @@ def __init__(self, left, right, *, nrows, ncols):
 utils._output_types[MatrixMatMulExpr] = Matrix
 
 
+def _dummy(obj):
+    with recorder.skip_record:
+        return output_type(obj)(BOOL, *obj.shape, name="")
+
+
+def _mismatched(left, right, method, op):
+    # Create dummy expression to raise on incompatible dimensions
+    getattr(_dummy(left) if isinstance(left, InfixExprBase) else left, method)(
+        _dummy(right) if isinstance(right, InfixExprBase) else right, op
+    )
+    raise DimensionMismatch  # pragma: no cover
+
+
 def _ewise_infix_expr(left, right, *, method, within):
     left_type = output_type(left)
     right_type = output_type(right)
 
     types = {Vector, Matrix, TransposedMatrix}
     if left_type in types and right_type in types:
-        # Create dummy expression to check compatibility of dimensions, etc. 
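
# --- Illustration (not part of the patch): with the explicit shape checks
# added below, an infix expression with mismatched dimensions now raises
# eagerly via `_mismatched` instead of relying on a throwaway dummy expression.
# A minimal sketch, assuming python-graphblas with this change applied:
import graphblas as gb
from graphblas.exceptions import DimensionMismatch

v = gb.Vector.from_coo([0, 1], [1, 2], size=3)
w = gb.Vector.from_coo([0], [1], size=4)
try:
    v | w  # sizes 3 and 4 differ, so merely building the expression raises
except DimensionMismatch:
    print("mismatched dimensions detected at expression-build time")
# --- end illustration ---
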
- expr = getattr(left, method)(right, binary.any) - if expr.output_type is Vector: - if method == "ewise_mult": - return VectorEwiseMultExpr(left, right) - return VectorEwiseAddExpr(left, right) + if left_type is Vector: + if right_type is Vector: + if left._size != right._size: + _mismatched(left, right, method, binary.first) + if method == "ewise_mult": + return VectorEwiseMultExpr(left, right) + return VectorEwiseAddExpr(left, right) + if left._size != right._nrows: + _mismatched(left, right, method, binary.first) + elif right_type is Vector: + if left._ncols != right._size: + _mismatched(left, right, method, binary.first) + elif left.shape != right.shape: + _mismatched(left, right, method, binary.first) if method == "ewise_mult": return MatrixEwiseMultExpr(left, right) return MatrixEwiseAddExpr(left, right) + if within == "__or__" and isinstance(right, Mask): return right.__ror__(left) if within == "__and__" and isinstance(right, Mask): return right.__rand__(left) if left_type in types: left._expect_type(right, tuple(types), within=within, argname="right") - elif right_type in types: + if right_type in types: right._expect_type(left, tuple(types), within=within, argname="left") - elif left_type is Scalar: - # Create dummy expression to check compatibility of dimensions, etc. - expr = getattr(left, method)(right, binary.any) + if left_type is Scalar: if method == "ewise_mult": return ScalarEwiseMultExpr(left, right) return ScalarEwiseAddExpr(left, right) - elif right_type is Scalar: - # Create dummy expression to check compatibility of dimensions, etc. - expr = getattr(right, method)(left, binary.any) + if right_type is Scalar: if method == "ewise_mult": return ScalarEwiseMultExpr(right, left) return ScalarEwiseAddExpr(right, left) - else: # pragma: no cover (sanity) - raise TypeError(f"Bad types for ewise infix: {type(left).__name__}, {type(right).__name__}") + raise TypeError( # pragma: no cover (sanity) + f"Bad types for ewise infix: {type(left).__name__}, {type(right).__name__}" + ) def _matmul_infix_expr(left, right, *, within): @@ -447,54 +467,51 @@ def _matmul_infix_expr(left, right, *, within): if left_type is Vector: if right_type is Matrix or right_type is TransposedMatrix: - method = "vxm" - elif right_type is Vector: - method = "inner" - else: - right = left._expect_type( - right, - (Matrix, TransposedMatrix), - within=within, - argname="right", - ) - elif left_type is Matrix or left_type is TransposedMatrix: + if left._size != right._nrows: + _mismatched(left, right, "vxm", any_pair[BOOL]) + return VectorMatMulExpr(left, right, method_name="vxm", size=right._ncols) if right_type is Vector: - method = "mxv" - elif right_type is Matrix or right_type is TransposedMatrix: - method = "mxm" - else: - right = left._expect_type( - right, - (Vector, Matrix, TransposedMatrix), - within=within, - argname="right", - ) - elif right_type is Vector: - left = right._expect_type( + if left._size != right._size: + _mismatched(left, right, "inner", any_pair[BOOL]) + return ScalarMatMulExpr(left, right) + left._expect_type( + right, + (Matrix, TransposedMatrix, Vector), + within=within, + argname="right", + ) + if left_type is Matrix or left_type is TransposedMatrix: + if right_type is Vector: + if left._ncols != right._size: + _mismatched(left, right, "mxv", any_pair[BOOL]) + return VectorMatMulExpr(left, right, method_name="mxv", size=left._nrows) + if right_type is Matrix or right_type is TransposedMatrix: + if left._ncols != right._nrows: + _mismatched(left, right, "mxm", any_pair[BOOL]) + 
return MatrixMatMulExpr(left, right, nrows=left._nrows, ncols=right._ncols) + left._expect_type( + right, + (Vector, Matrix, TransposedMatrix), + within=within, + argname="right", + ) + if right_type is Vector: + right._expect_type( left, (Matrix, TransposedMatrix), within=within, argname="left", ) - elif right_type is Matrix or right_type is TransposedMatrix: - left = right._expect_type( + if right_type is Matrix or right_type is TransposedMatrix: + right._expect_type( left, (Vector, Matrix, TransposedMatrix), within=within, argname="left", ) - else: # pragma: no cover (sanity) - raise TypeError( - f"Bad types for matmul infix: {type(left).__name__}, {type(right).__name__}" - ) - - # Create dummy expression to check compatibility of dimensions, etc. - expr = getattr(left, method)(right, any_pair[bool]) - if expr.output_type is Vector: - return VectorMatMulExpr(left, right, method_name=method, size=expr._size) - if expr.output_type is Matrix: - return MatrixMatMulExpr(left, right, nrows=expr._nrows, ncols=expr._ncols) - return ScalarMatMulExpr(left, right) + raise TypeError( # pragma: no cover (sanity) + f"Bad types for matmul infix: {type(left).__name__}, {type(right).__name__}" + ) # Import infixmethods, which has side effects diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index aed98f57d..5e1a76720 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -67,13 +67,13 @@ def _m_mult_v(updater, left, right, op): updater << left.mxm(right.diag(name="M_temp"), get_semiring(monoid.any, op)) -def _m_union_m(updater, left, right, left_default, right_default, op, dtype): +def _m_union_m(updater, left, right, left_default, right_default, op): mask = updater.kwargs.get("mask") opts = updater.opts - new_left = left.dup(dtype, clear=True) + new_left = left.dup(op.type, clear=True) new_left(mask=mask, **opts) << binary.second(right, left_default) new_left(mask=mask, **opts) << binary.first(left | new_left) - new_right = right.dup(dtype, clear=True) + new_right = right.dup(op.type2, clear=True) new_right(mask=mask, **opts) << binary.second(left, right_default) new_right(mask=mask, **opts) << binary.first(right | new_right) updater << op(new_left & new_right) @@ -2078,7 +2078,10 @@ def ewise_union(self, other, op, left_default, right_default): other = self._expect_type( other, (Matrix, TransposedMatrix, Vector), within=method_name, argname="other", op=op ) - dtype = self.dtype if self.dtype._is_udt else None + temp_op = get_typed_op(op, self.dtype, other.dtype, kind="binary") + + left_dtype = temp_op.type + dtype = left_dtype if left_dtype._is_udt else None if type(left_default) is not Scalar: try: left = Scalar.from_value( @@ -2095,6 +2098,8 @@ def ewise_union(self, other, op, left_default, right_default): ) else: left = _as_scalar(left_default, dtype, is_cscalar=False) # pragma: is_grbscalar + right_dtype = temp_op.type2 + dtype = right_dtype if right_dtype._is_udt else None if type(right_default) is not Scalar: try: right = Scalar.from_value( @@ -2111,12 +2116,19 @@ def ewise_union(self, other, op, left_default, right_default): ) else: right = _as_scalar(right_default, dtype, is_cscalar=False) # pragma: is_grbscalar - scalar_dtype = unify(left.dtype, right.dtype) - nonscalar_dtype = unify(self.dtype, other.dtype) - op = get_typed_op(op, scalar_dtype, nonscalar_dtype, is_left_scalar=True, kind="binary") + + op1 = get_typed_op(op, self.dtype, right.dtype, kind="binary") + op2 = get_typed_op(op, left.dtype, other.dtype, kind="binary") + if op1 is not op2: + left_dtype 
= unify(op1.type, op2.type, is_right_scalar=True) + right_dtype = unify(op1.type2, op2.type2, is_left_scalar=True) + op = get_typed_op(op, left_dtype, right_dtype, kind="binary") + else: + op = op1 self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") if op.opclass == "Monoid": op = op.binaryop + expr_repr = "{0.name}.{method_name}({2.name}, {op}, {1._expr_name}, {3._expr_name})" if other.ndim == 1: # Broadcast rowwise from the right @@ -2146,11 +2158,10 @@ def ewise_union(self, other, op, left_default, right_default): expr_repr=expr_repr, ) else: - dtype = unify(scalar_dtype, nonscalar_dtype, is_left_scalar=True) expr = MatrixExpression( method_name, None, - [self, left, other, right, _m_union_m, (self, other, left, right, op, dtype)], + [self, left, other, right, _m_union_m, (self, other, left, right, op)], expr_repr=expr_repr, nrows=self._nrows, ncols=self._ncols, diff --git a/graphblas/core/operator/monoid.py b/graphblas/core/operator/monoid.py index fc327b4a7..21d2b7cac 100644 --- a/graphblas/core/operator/monoid.py +++ b/graphblas/core/operator/monoid.py @@ -19,10 +19,9 @@ ) from ...exceptions import check_status_carg from .. import ffi, lib -from ..expr import InfixExprBase from ..utils import libget -from .base import OpBase, ParameterizedUdf, TypedOpBase, _call_op, _hasop -from .binary import BinaryOp, ParameterizedBinaryOp +from .base import OpBase, ParameterizedUdf, TypedOpBase, _hasop +from .binary import BinaryOp, ParameterizedBinaryOp, TypedBuiltinBinaryOp ffi_new = ffi.new @@ -36,25 +35,6 @@ def __init__(self, parent, name, type_, return_type, gb_obj, gb_name): super().__init__(parent, name, type_, return_type, gb_obj, gb_name) self._identity = None - def __call__(self, left, right=None, *, left_default=None, right_default=None): - if left_default is not None or right_default is not None: - if ( - left_default is None - or right_default is None - or right is not None - or not isinstance(left, InfixExprBase) - or left.method_name != "ewise_add" - ): - raise TypeError( - "Specifying `left_default` or `right_default` keyword arguments implies " - "performing `ewise_union` operation with infix notation.\n" - "There is only one valid way to do this:\n\n" - f">>> {self}(x | y, left_default=0, right_default=0)\n\nwhere x and y " - "are Vectors or Matrices, and left_default and right_default are scalars." 
- ) - return left.left.ewise_union(left.right, self, left_default, right_default) - return _call_op(self, left, right) - @property def identity(self): if self._identity is None: @@ -84,6 +64,8 @@ def is_idempotent(self): """True if ``monoid(x, x) == x`` for any x.""" return self.parent.is_idempotent + __call__ = TypedBuiltinBinaryOp.__call__ + class TypedUserMonoid(TypedOpBase): __slots__ = "binaryop", "identity" diff --git a/graphblas/core/scalar.py b/graphblas/core/scalar.py index 8a95e1d71..b822bd58a 100644 --- a/graphblas/core/scalar.py +++ b/graphblas/core/scalar.py @@ -30,12 +30,12 @@ def _scalar_index(name): return self -def _s_union_s(updater, left, right, left_default, right_default, op, dtype): +def _s_union_s(updater, left, right, left_default, right_default, op): opts = updater.opts - new_left = left.dup(dtype, clear=True) + new_left = left.dup(op.type, clear=True) new_left(**opts) << binary.second(right, left_default) new_left(**opts) << binary.first(left | new_left) - new_right = right.dup(dtype, clear=True) + new_right = right.dup(op.type2, clear=True) new_right(**opts) << binary.second(left, right_default) new_right(**opts) << binary.first(right | new_right) updater << op(new_left & new_right) @@ -742,7 +742,8 @@ def ewise_union(self, other, op, left_default, right_default): c << binary.div(a | b, left_default=1, right_default=1) """ method_name = "ewise_union" - dtype = self.dtype if self.dtype._is_udt else None + right_dtype = self.dtype + dtype = right_dtype if right_dtype._is_udt else None if type(other) is not Scalar: try: other = Scalar.from_value(other, dtype, is_cscalar=False, name="") @@ -755,6 +756,13 @@ def ewise_union(self, other, op, left_default, right_default): extra_message="Literal scalars also accepted.", op=op, ) + else: + other = _as_scalar(other, dtype, is_cscalar=False) # pragma: is_grbscalar + + temp_op = get_typed_op(op, self.dtype, other.dtype, kind="binary") + + left_dtype = temp_op.type + dtype = left_dtype if left_dtype._is_udt else None if type(left_default) is not Scalar: try: left = Scalar.from_value( @@ -771,6 +779,8 @@ def ewise_union(self, other, op, left_default, right_default): ) else: left = _as_scalar(left_default, dtype, is_cscalar=False) # pragma: is_grbscalar + right_dtype = temp_op.type2 + dtype = right_dtype if right_dtype._is_udt else None if type(right_default) is not Scalar: try: right = Scalar.from_value( @@ -787,9 +797,15 @@ def ewise_union(self, other, op, left_default, right_default): ) else: right = _as_scalar(right_default, dtype, is_cscalar=False) # pragma: is_grbscalar - defaults_dtype = unify(left.dtype, right.dtype) - args_dtype = unify(self.dtype, other.dtype) - op = get_typed_op(op, defaults_dtype, args_dtype, kind="binary") + + op1 = get_typed_op(op, self.dtype, right.dtype, kind="binary") + op2 = get_typed_op(op, left.dtype, other.dtype, kind="binary") + if op1 is not op2: + left_dtype = unify(op1.type, op2.type, is_right_scalar=True) + right_dtype = unify(op1.type2, op2.type2, is_left_scalar=True) + op = get_typed_op(op, left_dtype, right_dtype, kind="binary") + else: + op = op1 self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") if op.opclass == "Monoid": op = op.binaryop @@ -805,11 +821,10 @@ def ewise_union(self, other, op, left_default, right_default): scalar_as_vector=True, ) else: - dtype = unify(defaults_dtype, args_dtype) expr = ScalarExpression( method_name, None, - [self, left, other, right, _s_union_s, (self, other, left, right, op, dtype)], + [self, left, other, right, 
_s_union_s, (self, other, left, right, op)], op=op, expr_repr=expr_repr, is_cscalar=False, diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py index cd5b992ba..9d19d80da 100644 --- a/graphblas/core/vector.py +++ b/graphblas/core/vector.py @@ -61,13 +61,13 @@ def _v_union_m(updater, left, right, left_default, right_default, op): updater << temp.ewise_union(right, op, left_default=left_default, right_default=right_default) -def _v_union_v(updater, left, right, left_default, right_default, op, dtype): +def _v_union_v(updater, left, right, left_default, right_default, op): mask = updater.kwargs.get("mask") opts = updater.opts - new_left = left.dup(dtype, clear=True) + new_left = left.dup(op.type, clear=True) new_left(mask=mask, **opts) << binary.second(right, left_default) new_left(mask=mask, **opts) << binary.first(left | new_left) - new_right = right.dup(dtype, clear=True) + new_right = right.dup(op.type2, clear=True) new_right(mask=mask, **opts) << binary.second(left, right_default) new_right(mask=mask, **opts) << binary.first(right | new_right) updater << op(new_left & new_right) @@ -1177,7 +1177,10 @@ def ewise_union(self, other, op, left_default, right_default): other = self._expect_type( other, (Vector, Matrix, TransposedMatrix), within=method_name, argname="other", op=op ) - dtype = self.dtype if self.dtype._is_udt else None + temp_op = get_typed_op(op, self.dtype, other.dtype, kind="binary") + + left_dtype = temp_op.type + dtype = left_dtype if left_dtype._is_udt else None if type(left_default) is not Scalar: try: left = Scalar.from_value( @@ -1194,6 +1197,8 @@ def ewise_union(self, other, op, left_default, right_default): ) else: left = _as_scalar(left_default, dtype, is_cscalar=False) # pragma: is_grbscalar + right_dtype = temp_op.type2 + dtype = right_dtype if right_dtype._is_udt else None if type(right_default) is not Scalar: try: right = Scalar.from_value( @@ -1210,12 +1215,19 @@ def ewise_union(self, other, op, left_default, right_default): ) else: right = _as_scalar(right_default, dtype, is_cscalar=False) # pragma: is_grbscalar - scalar_dtype = unify(left.dtype, right.dtype) - nonscalar_dtype = unify(self.dtype, other.dtype) - op = get_typed_op(op, scalar_dtype, nonscalar_dtype, is_left_scalar=True, kind="binary") + + op1 = get_typed_op(op, self.dtype, right.dtype, kind="binary") + op2 = get_typed_op(op, left.dtype, other.dtype, kind="binary") + if op1 is not op2: + left_dtype = unify(op1.type, op2.type, is_right_scalar=True) + right_dtype = unify(op1.type2, op2.type2, is_left_scalar=True) + op = get_typed_op(op, left_dtype, right_dtype, kind="binary") + else: + op = op1 self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") if op.opclass == "Monoid": op = op.binaryop + expr_repr = "{0.name}.{method_name}({2.name}, {op}, {1._expr_name}, {3._expr_name})" if other.ndim == 2: # Broadcast columnwise from the left @@ -1243,11 +1255,10 @@ def ewise_union(self, other, op, left_default, right_default): expr_repr=expr_repr, ) else: - dtype = unify(scalar_dtype, nonscalar_dtype, is_left_scalar=True) expr = VectorExpression( method_name, None, - [self, left, other, right, _v_union_v, (self, other, left, right, op, dtype)], + [self, left, other, right, _v_union_v, (self, other, left, right, op)], expr_repr=expr_repr, size=self._size, op=op, diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index e08f96b32..3f66e46ef 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -2827,7 +2827,10 @@ def 
test_auto(A, v): "__and__", "__or__", # "kronecker", + "__rand__", + "__ror__", ]: + # print(type(expr).__name__, method) val1 = getattr(expected, method)(expected).new() val2 = getattr(expected, method)(expr) val3 = getattr(expr, method)(expected) @@ -3138,6 +3141,10 @@ def test_ss_reshape(A): def test_autocompute_argument_messages(A, v): with pytest.raises(TypeError, match="autocompute"): A.ewise_mult(A & A) + with pytest.raises(TypeError, match="autocompute"): + A.ewise_mult(binary.plus(A & A)) + with pytest.raises(TypeError, match="autocompute"): + A.ewise_mult(A + A) with pytest.raises(TypeError, match="autocompute"): A.mxv(A @ v) diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index 2571f288b..b66bc96c9 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -1579,6 +1579,7 @@ def test_auto(v): "__rand__", "__ror__", ]: + # print(type(expr).__name__, method) val1 = getattr(expected, method)(expected).new() val2 = getattr(expected, method)(expr) val3 = getattr(expr, method)(expected) diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 7c09bc168..d197f2af2 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -6,9 +6,9 @@ conda search 'flake8-bugbear[channel=conda-forge]>=23.9.16' conda search 'flake8-simplify[channel=conda-forge]>=0.21.0' conda search 'numpy[channel=conda-forge]>=1.26.0' -conda search 'pandas[channel=conda-forge]>=2.1.1' +conda search 'pandas[channel=conda-forge]>=2.1.2' conda search 'scipy[channel=conda-forge]>=1.11.3' -conda search 'networkx[channel=conda-forge]>=3.2' +conda search 'networkx[channel=conda-forge]>=3.2.1' conda search 'awkward[channel=conda-forge]>=2.4.6' conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.4' From c6d1e3113f4712fd217b15deee2722d3ffc82dad Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sat, 4 Nov 2023 11:39:36 -0500 Subject: [PATCH 43/66] Add `semiring(A @ B @ C)` that applies semiring to both matmuls (#501) * Add `semiring(A @ B @ C)` that applies semiring to both matmuls * Also support e.g. 
`binaryop(x | y | z)` and `monoid(x & y & z)` --- graphblas/core/base.py | 16 +- graphblas/core/infix.py | 72 +++++ graphblas/core/matrix.py | 192 ++++++++++--- graphblas/core/operator/__init__.py | 1 + graphblas/core/operator/base.py | 4 +- graphblas/core/operator/binary.py | 4 +- graphblas/core/operator/utils.py | 25 ++ graphblas/core/scalar.py | 48 ++++ graphblas/core/vector.py | 181 ++++++++++-- graphblas/tests/test_infix.py | 414 +++++++++++++++++++++++++++- graphblas/tests/test_matrix.py | 27 +- graphblas/tests/test_scalar.py | 4 +- graphblas/tests/test_vector.py | 33 ++- 13 files changed, 933 insertions(+), 88 deletions(-) diff --git a/graphblas/core/base.py b/graphblas/core/base.py index 42a4de9a1..5658e99c1 100644 --- a/graphblas/core/base.py +++ b/graphblas/core/base.py @@ -263,23 +263,31 @@ def __call__( ) def __or__(self, other): - from .infix import _ewise_infix_expr + from .infix import _ewise_infix_expr, _ewise_mult_expr_types + if isinstance(other, _ewise_mult_expr_types): + raise TypeError("XXX") return _ewise_infix_expr(self, other, method="ewise_add", within="__or__") def __ror__(self, other): - from .infix import _ewise_infix_expr + from .infix import _ewise_infix_expr, _ewise_mult_expr_types + if isinstance(other, _ewise_mult_expr_types): + raise TypeError("XXX") return _ewise_infix_expr(other, self, method="ewise_add", within="__ror__") def __and__(self, other): - from .infix import _ewise_infix_expr + from .infix import _ewise_add_expr_types, _ewise_infix_expr + if isinstance(other, _ewise_add_expr_types): + raise TypeError("XXX") return _ewise_infix_expr(self, other, method="ewise_mult", within="__and__") def __rand__(self, other): - from .infix import _ewise_infix_expr + from .infix import _ewise_add_expr_types, _ewise_infix_expr + if isinstance(other, _ewise_add_expr_types): + raise TypeError("XXX") return _ewise_infix_expr(other, self, method="ewise_mult", within="__rand__") def __matmul__(self, other): diff --git a/graphblas/core/infix.py b/graphblas/core/infix.py index 09b6a6811..51714633c 100644 --- a/graphblas/core/infix.py +++ b/graphblas/core/infix.py @@ -126,6 +126,19 @@ class ScalarEwiseAddExpr(ScalarInfixExpr): _to_expr = _ewise_add_to_expr + # Allow e.g. `plus(x | y | z)` + __or__ = Scalar.__or__ + __ror__ = Scalar.__ror__ + _ewise_add = Scalar._ewise_add + _ewise_union = Scalar._ewise_union + + # Don't allow e.g. `plus(x | y & z)` + def __and__(self, other): + raise TypeError("XXX") + + def __rand__(self, other): + raise TypeError("XXX") + class ScalarEwiseMultExpr(ScalarInfixExpr): __slots__ = () @@ -135,6 +148,18 @@ class ScalarEwiseMultExpr(ScalarInfixExpr): _to_expr = _ewise_mult_to_expr + # Allow e.g. `plus(x & y & z)` + __and__ = Scalar.__and__ + __rand__ = Scalar.__rand__ + _ewise_mult = Scalar._ewise_mult + + # Don't allow e.g. `plus(x | y & z)` + def __or__(self, other): + raise TypeError("XXX") + + def __ror__(self, other): + raise TypeError("XXX") + class ScalarMatMulExpr(ScalarInfixExpr): __slots__ = () @@ -239,6 +264,15 @@ class VectorEwiseAddExpr(VectorInfixExpr): _to_expr = _ewise_add_to_expr + # Allow e.g. `plus(x | y | z)` + __or__ = Vector.__or__ + __ror__ = Vector.__ror__ + _ewise_add = Vector._ewise_add + _ewise_union = Vector._ewise_union + # Don't allow e.g. 
`plus(x | y & z)` + __and__ = ScalarEwiseAddExpr.__and__ # raises + __rand__ = ScalarEwiseAddExpr.__rand__ # raises + class VectorEwiseMultExpr(VectorInfixExpr): __slots__ = () @@ -248,6 +282,14 @@ class VectorEwiseMultExpr(VectorInfixExpr): _to_expr = _ewise_mult_to_expr + # Allow e.g. `plus(x & y & z)` + __and__ = Vector.__and__ + __rand__ = Vector.__rand__ + _ewise_mult = Vector._ewise_mult + # Don't allow e.g. `plus(x | y & z)` + __or__ = ScalarEwiseMultExpr.__or__ # raises + __ror__ = ScalarEwiseMultExpr.__ror__ # raises + class VectorMatMulExpr(VectorInfixExpr): __slots__ = "method_name" @@ -259,6 +301,11 @@ def __init__(self, left, right, *, method_name, size): self.method_name = method_name self._size = size + __matmul__ = Vector.__matmul__ + __rmatmul__ = Vector.__rmatmul__ + _inner = Vector._inner + _vxm = Vector._vxm + utils._output_types[VectorEwiseAddExpr] = Vector utils._output_types[VectorEwiseMultExpr] = Vector @@ -376,6 +423,15 @@ class MatrixEwiseAddExpr(MatrixInfixExpr): _to_expr = _ewise_add_to_expr + # Allow e.g. `plus(x | y | z)` + __or__ = Matrix.__or__ + __ror__ = Matrix.__ror__ + _ewise_add = Matrix._ewise_add + _ewise_union = Matrix._ewise_union + # Don't allow e.g. `plus(x | y & z)` + __and__ = VectorEwiseAddExpr.__and__ # raises + __rand__ = VectorEwiseAddExpr.__rand__ # raises + class MatrixEwiseMultExpr(MatrixInfixExpr): __slots__ = () @@ -385,6 +441,14 @@ class MatrixEwiseMultExpr(MatrixInfixExpr): _to_expr = _ewise_mult_to_expr + # Allow e.g. `plus(x & y & z)` + __and__ = Matrix.__and__ + __rand__ = Matrix.__rand__ + _ewise_mult = Matrix._ewise_mult + # Don't allow e.g. `plus(x | y & z)` + __or__ = VectorEwiseMultExpr.__or__ # raises + __ror__ = VectorEwiseMultExpr.__ror__ # raises + class MatrixMatMulExpr(MatrixInfixExpr): __slots__ = () @@ -397,6 +461,11 @@ def __init__(self, left, right, *, nrows, ncols): self._nrows = nrows self._ncols = ncols + __matmul__ = Matrix.__matmul__ + __rmatmul__ = Matrix.__rmatmul__ + _mxm = Matrix._mxm + _mxv = Matrix._mxv + utils._output_types[MatrixEwiseAddExpr] = Matrix utils._output_types[MatrixEwiseMultExpr] = Matrix @@ -514,5 +583,8 @@ def _matmul_infix_expr(left, right, *, within): ) +_ewise_add_expr_types = (MatrixEwiseAddExpr, VectorEwiseAddExpr, ScalarEwiseAddExpr) +_ewise_mult_expr_types = (MatrixEwiseMultExpr, VectorEwiseMultExpr, ScalarEwiseMultExpr) + # Import infixmethods, which has side effects from . import infixmethods # noqa: E402, F401 isort:skip diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 5e1a76720..34789d68d 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -10,9 +10,16 @@ from . 
import _supports_udfs, automethods, ffi, lib, utils from .base import BaseExpression, BaseType, _check_mask, call from .descriptor import lookup as descriptor_lookup -from .expr import _ALL_INDICES, AmbiguousAssignOrExtract, IndexerResolver, Updater +from .expr import _ALL_INDICES, AmbiguousAssignOrExtract, IndexerResolver, InfixExprBase, Updater from .mask import Mask, StructuralMask, ValueMask -from .operator import UNKNOWN_OPCLASS, find_opclass, get_semiring, get_typed_op, op_from_string +from .operator import ( + UNKNOWN_OPCLASS, + _get_typed_op_from_exprs, + find_opclass, + get_semiring, + get_typed_op, + op_from_string, +) from .scalar import ( _COMPLETE, _MATERIALIZE, @@ -1938,17 +1945,39 @@ def ewise_add(self, other, op=monoid.plus): # Functional syntax C << monoid.max(A | B) """ + return self._ewise_add(other, op) + + def _ewise_add(self, other, op=monoid.plus, is_infix=False): method_name = "ewise_add" - other = self._expect_type( - other, - (Matrix, TransposedMatrix, Vector), - within=method_name, - argname="other", - op=op, - ) - op = get_typed_op(op, self.dtype, other.dtype, kind="binary") - # Per the spec, op may be a semiring, but this is weird, so don't. - self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") + if is_infix: + from .infix import MatrixEwiseAddExpr, VectorEwiseAddExpr + + other = self._expect_type( + other, + (Matrix, TransposedMatrix, Vector, MatrixEwiseAddExpr, VectorEwiseAddExpr), + within=method_name, + argname="other", + op=op, + ) + op = _get_typed_op_from_exprs(op, self, other, kind="binary") + # Per the spec, op may be a semiring, but this is weird, so don't. + self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") + if isinstance(self, MatrixEwiseAddExpr): + self = op(self).new() + if isinstance(other, InfixExprBase): + other = op(other).new() + else: + other = self._expect_type( + other, + (Matrix, TransposedMatrix, Vector), + within=method_name, + argname="other", + op=op, + ) + op = get_typed_op(op, self.dtype, other.dtype, kind="binary") + # Per the spec, op may be a semiring, but this is weird, so don't. + self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") + if other.ndim == 1: # Broadcast rowwise from the right if self._ncols != other._size: @@ -2006,13 +2035,39 @@ def ewise_mult(self, other, op=binary.times): # Functional syntax C << binary.gt(A & B) """ + return self._ewise_mult(other, op) + + def _ewise_mult(self, other, op=binary.times, is_infix=False): method_name = "ewise_mult" - other = self._expect_type( - other, (Matrix, TransposedMatrix, Vector), within=method_name, argname="other", op=op - ) - op = get_typed_op(op, self.dtype, other.dtype, kind="binary") - # Per the spec, op may be a semiring, but this is weird, so don't. - self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") + if is_infix: + from .infix import MatrixEwiseMultExpr, VectorEwiseMultExpr + + other = self._expect_type( + other, + (Matrix, TransposedMatrix, Vector, MatrixEwiseMultExpr, VectorEwiseMultExpr), + within=method_name, + argname="other", + op=op, + ) + op = _get_typed_op_from_exprs(op, self, other, kind="binary") + # Per the spec, op may be a semiring, but this is weird, so don't. 
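
# --- Illustration (not part of the patch): the `is_infix` path above is what
# lets one op distribute over chained infix notation, so that, e.g.,
# `binary.times(A & B & A)` applies times across both intersections.
# A minimal sketch under that assumption:
import graphblas as gb
from graphblas import binary

A = gb.Matrix.from_coo([0, 0], [0, 1], [1, 2], nrows=2, ncols=2)
B = gb.Matrix.from_coo([0, 1], [1, 0], [10, 20], nrows=2, ncols=2)
result = binary.times(A & B & A).new()  # times over the three-way intersection
assert result.nvals == 1  # only [0, 1] is present in both A and B
assert result[0, 1].new() == 40  # 2 * 10 * 2
# --- end illustration ---
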
+ self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") + if isinstance(self, MatrixEwiseMultExpr): + self = op(self).new() + if isinstance(other, InfixExprBase): + other = op(other).new() + else: + other = self._expect_type( + other, + (Matrix, TransposedMatrix, Vector), + within=method_name, + argname="other", + op=op, + ) + op = get_typed_op(op, self.dtype, other.dtype, kind="binary") + # Per the spec, op may be a semiring, but this is weird, so don't. + self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") + if other.ndim == 1: # Broadcast rowwise from the right if self._ncols != other._size: @@ -2074,11 +2129,30 @@ def ewise_union(self, other, op, left_default, right_default): # Functional syntax C << binary.div(A | B, left_default=1, right_default=1) """ + return self._ewise_union(other, op, left_default, right_default) + + def _ewise_union(self, other, op, left_default, right_default, is_infix=False): method_name = "ewise_union" - other = self._expect_type( - other, (Matrix, TransposedMatrix, Vector), within=method_name, argname="other", op=op - ) - temp_op = get_typed_op(op, self.dtype, other.dtype, kind="binary") + if is_infix: + from .infix import MatrixEwiseAddExpr, VectorEwiseAddExpr + + other = self._expect_type( + other, + (Matrix, TransposedMatrix, Vector, MatrixEwiseAddExpr, VectorEwiseAddExpr), + within=method_name, + argname="other", + op=op, + ) + temp_op = _get_typed_op_from_exprs(op, self, other, kind="binary") + else: + other = self._expect_type( + other, + (Matrix, TransposedMatrix, Vector), + within=method_name, + argname="other", + op=op, + ) + temp_op = get_typed_op(op, self.dtype, other.dtype, kind="binary") left_dtype = temp_op.type dtype = left_dtype if left_dtype._is_udt else None @@ -2117,8 +2191,12 @@ def ewise_union(self, other, op, left_default, right_default): else: right = _as_scalar(right_default, dtype, is_cscalar=False) # pragma: is_grbscalar - op1 = get_typed_op(op, self.dtype, right.dtype, kind="binary") - op2 = get_typed_op(op, left.dtype, other.dtype, kind="binary") + if is_infix: + op1 = _get_typed_op_from_exprs(op, self, right, kind="binary") + op2 = _get_typed_op_from_exprs(op, left, other, kind="binary") + else: + op1 = get_typed_op(op, self.dtype, right.dtype, kind="binary") + op2 = get_typed_op(op, left.dtype, other.dtype, kind="binary") if op1 is not op2: left_dtype = unify(op1.type, op2.type, is_right_scalar=True) right_dtype = unify(op1.type2, op2.type2, is_left_scalar=True) @@ -2129,6 +2207,12 @@ def ewise_union(self, other, op, left_default, right_default): if op.opclass == "Monoid": op = op.binaryop + if is_infix: + if isinstance(self, MatrixEwiseAddExpr): + self = op(self, left_default=left, right_default=right).new() + if isinstance(other, InfixExprBase): + other = op(other, left_default=left, right_default=right).new() + expr_repr = "{0.name}.{method_name}({2.name}, {op}, {1._expr_name}, {3._expr_name})" if other.ndim == 1: # Broadcast rowwise from the right @@ -2198,10 +2282,27 @@ def mxv(self, other, op=semiring.plus_times): # Functional syntax C << semiring.min_plus(A @ v) """ + return self._mxv(other, op) + + def _mxv(self, other, op=semiring.plus_times, is_infix=False): method_name = "mxv" - other = self._expect_type(other, Vector, within=method_name, argname="other", op=op) - op = get_typed_op(op, self.dtype, other.dtype, kind="semiring") - self._expect_op(op, "Semiring", within=method_name, argname="op") + if is_infix: + from .infix import MatrixMatMulExpr, VectorMatMulExpr + + 
other = self._expect_type( + other, (Vector, VectorMatMulExpr), within=method_name, argname="other", op=op + ) + op = _get_typed_op_from_exprs(op, self, other, kind="semiring") + self._expect_op(op, "Semiring", within=method_name, argname="op") + if isinstance(self, MatrixMatMulExpr): + self = op(self).new() + if isinstance(other, VectorMatMulExpr): + other = op(other).new() + else: + other = self._expect_type(other, Vector, within=method_name, argname="other", op=op) + op = get_typed_op(op, self.dtype, other.dtype, kind="semiring") + self._expect_op(op, "Semiring", within=method_name, argname="op") + expr = VectorExpression( method_name, "GrB_mxv", @@ -2241,12 +2342,33 @@ def mxm(self, other, op=semiring.plus_times): # Functional syntax C << semiring.min_plus(A @ B) """ + return self._mxm(other, op) + + def _mxm(self, other, op=semiring.plus_times, is_infix=False): method_name = "mxm" - other = self._expect_type( - other, (Matrix, TransposedMatrix), within=method_name, argname="other", op=op - ) - op = get_typed_op(op, self.dtype, other.dtype, kind="semiring") - self._expect_op(op, "Semiring", within=method_name, argname="op") + if is_infix: + from .infix import MatrixMatMulExpr + + other = self._expect_type( + other, + (Matrix, TransposedMatrix, MatrixMatMulExpr), + within=method_name, + argname="other", + op=op, + ) + op = _get_typed_op_from_exprs(op, self, other, kind="semiring") + self._expect_op(op, "Semiring", within=method_name, argname="op") + if isinstance(self, MatrixMatMulExpr): + self = op(self).new() + if isinstance(other, MatrixMatMulExpr): + other = op(other).new() + else: + other = self._expect_type( + other, (Matrix, TransposedMatrix), within=method_name, argname="other", op=op + ) + op = get_typed_op(op, self.dtype, other.dtype, kind="semiring") + self._expect_op(op, "Semiring", within=method_name, argname="op") + expr = MatrixExpression( method_name, "GrB_mxm", @@ -3862,6 +3984,12 @@ def to_dicts(self, order="rowwise"): reposition = Matrix.reposition power = Matrix.power + _ewise_add = Matrix._ewise_add + _ewise_mult = Matrix._ewise_mult + _ewise_union = Matrix._ewise_union + _mxv = Matrix._mxv + _mxm = Matrix._mxm + # Operator sugar __or__ = Matrix.__or__ __ror__ = Matrix.__ror__ diff --git a/graphblas/core/operator/__init__.py b/graphblas/core/operator/__init__.py index 509e84a04..d59c835b3 100644 --- a/graphblas/core/operator/__init__.py +++ b/graphblas/core/operator/__init__.py @@ -6,6 +6,7 @@ from .semiring import ParameterizedSemiring, Semiring from .unary import ParameterizedUnaryOp, UnaryOp from .utils import ( + _get_typed_op_from_exprs, aggregator_from_string, binary_from_string, get_semiring, diff --git a/graphblas/core/operator/base.py b/graphblas/core/operator/base.py index d66aa2f4a..59482b47d 100644 --- a/graphblas/core/operator/base.py +++ b/graphblas/core/operator/base.py @@ -111,7 +111,9 @@ def _call_op(op, left, right=None, thunk=None, **kwargs): if right is None and thunk is None: if isinstance(left, InfixExprBase): # op(A & B), op(A | B), op(A @ B) - return getattr(left.left, left.method_name)(left.right, op, **kwargs) + return getattr(left.left, f"_{left.method_name}")( + left.right, op, is_infix=True, **kwargs + ) if find_opclass(op)[1] == "Semiring": raise TypeError( f"Bad type when calling {op!r}. 
Got type: {type(left)}.\n" diff --git a/graphblas/core/operator/binary.py b/graphblas/core/operator/binary.py index 676ed0970..278ee3183 100644 --- a/graphblas/core/operator/binary.py +++ b/graphblas/core/operator/binary.py @@ -94,7 +94,9 @@ def __call__(self, left, right=None, *, left_default=None, right_default=None): f">>> {self}(x | y, left_default=0, right_default=0)\n\nwhere x and y " "are Vectors or Matrices, and left_default and right_default are scalars." ) - return left.left.ewise_union(left.right, self, left_default, right_default) + return left.left._ewise_union( + left.right, self, left_default, right_default, is_infix=True + ) return _call_op(self, left, right) @property diff --git a/graphblas/core/operator/utils.py b/graphblas/core/operator/utils.py index 00df31db8..cd0b82d3c 100644 --- a/graphblas/core/operator/utils.py +++ b/graphblas/core/operator/utils.py @@ -2,6 +2,7 @@ from ... import backend, binary, config, indexunary, monoid, op, select, semiring, unary from ...dtypes import UINT64, lookup_dtype, unify +from ..expr import InfixExprBase from .base import ( _SS_OPERATORS, OpBase, @@ -132,6 +133,30 @@ def get_typed_op(op, dtype, dtype2=None, *, is_left_scalar=False, is_right_scala raise TypeError(f"Unable to get typed operator from object with type {type(op)}") +def _get_typed_op_from_exprs(op, left, right, *, kind=None): + if isinstance(left, InfixExprBase): + left_op = _get_typed_op_from_exprs(op, left.left, left.right, kind=kind) + left_dtype = left_op.type + else: + left_op = None + left_dtype = left.dtype + if isinstance(right, InfixExprBase): + right_op = _get_typed_op_from_exprs(op, right.left, right.right, kind=kind) + if right_op is left_op: + return right_op + right_dtype = right_op.type2 + else: + right_dtype = right.dtype + return get_typed_op( + op, + left_dtype, + right_dtype, + is_left_scalar=left._is_scalar, + is_right_scalar=right._is_scalar, + kind=kind, + ) + + def get_semiring(monoid, binaryop, name=None): """Get or create a Semiring object from a monoid and binaryop. diff --git a/graphblas/core/scalar.py b/graphblas/core/scalar.py index b822bd58a..9cdf3043e 100644 --- a/graphblas/core/scalar.py +++ b/graphblas/core/scalar.py @@ -629,7 +629,23 @@ def ewise_add(self, other, op=monoid.plus): # Functional syntax c << monoid.max(a | b) """ + return self._ewise_add(other, op) + + def _ewise_add(self, other, op=monoid.plus, is_infix=False): method_name = "ewise_add" + if is_infix: + from .infix import ScalarEwiseAddExpr + + # This is a little different than how we handle ewise_add for Vector and + # Matrix where we are super-careful to handle dtypes well to support UDTs. + # For Scalar, we're going to let dtypes in expressions resolve themselves. + # Scalars are more challenging, because they may be literal scalars. + # Also, we have not yet resolved `op` here, so errors may be different. + if isinstance(self, ScalarEwiseAddExpr): + self = op(self).new() + if isinstance(other, ScalarEwiseAddExpr): + other = op(other).new() + if type(other) is not Scalar: dtype = self.dtype if self.dtype._is_udt else None try: @@ -683,7 +699,23 @@ def ewise_mult(self, other, op=binary.times): # Functional syntax c << binary.gt(a & b) """ + return self._ewise_mult(other, op) + + def _ewise_mult(self, other, op=binary.times, is_infix=False): method_name = "ewise_mult" + if is_infix: + from .infix import ScalarEwiseMultExpr + + # This is a little different than how we handle ewise_mult for Vector and + # Matrix where we are super-careful to handle dtypes well to support UDTs. 
+ # For Scalar, we're going to let dtypes in expressions resolve themselves. + # Scalars are more challenging, because they may be literal scalars. + # Also, we have not yet resolved `op` here, so errors may be different. + if isinstance(self, ScalarEwiseMultExpr): + self = op(self).new() + if isinstance(other, ScalarEwiseMultExpr): + other = op(other).new() + if type(other) is not Scalar: dtype = self.dtype if self.dtype._is_udt else None try: @@ -741,7 +773,23 @@ def ewise_union(self, other, op, left_default, right_default): # Functional syntax c << binary.div(a | b, left_default=1, right_default=1) """ + return self._ewise_union(other, op, left_default, right_default) + + def _ewise_union(self, other, op, left_default, right_default, is_infix=False): method_name = "ewise_union" + if is_infix: + from .infix import ScalarEwiseAddExpr + + # This is a little different than how we handle ewise_union for Vector and + # Matrix where we are super-careful to handle dtypes well to support UDTs. + # For Scalar, we're going to let dtypes in expressions resolve themselves. + # Scalars are more challenging, because they may be literal scalars. + # Also, we have not yet resolved `op` here, so errors may be different. + if isinstance(self, ScalarEwiseAddExpr): + self = op(self, left_default=left_default, right_default=right_default).new() + if isinstance(other, ScalarEwiseAddExpr): + other = op(other, left_default=left_default, right_default=right_default).new() + right_dtype = self.dtype dtype = right_dtype if right_dtype._is_udt else None if type(other) is not Scalar: diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py index 9d19d80da..feb95ed02 100644 --- a/graphblas/core/vector.py +++ b/graphblas/core/vector.py @@ -9,9 +9,16 @@ from . import _supports_udfs, automethods, ffi, lib, utils from .base import BaseExpression, BaseType, _check_mask, call from .descriptor import lookup as descriptor_lookup -from .expr import _ALL_INDICES, AmbiguousAssignOrExtract, IndexerResolver, Updater +from .expr import _ALL_INDICES, AmbiguousAssignOrExtract, IndexerResolver, InfixExprBase, Updater from .mask import Mask, StructuralMask, ValueMask -from .operator import UNKNOWN_OPCLASS, find_opclass, get_semiring, get_typed_op, op_from_string +from .operator import ( + UNKNOWN_OPCLASS, + _get_typed_op_from_exprs, + find_opclass, + get_semiring, + get_typed_op, + op_from_string, +) from .scalar import ( _COMPLETE, _MATERIALIZE, @@ -1038,15 +1045,41 @@ def ewise_add(self, other, op=monoid.plus): # Functional syntax w << monoid.max(u | v) """ + return self._ewise_add(other, op) + + def _ewise_add(self, other, op=monoid.plus, is_infix=False): from .matrix import Matrix, MatrixExpression, TransposedMatrix method_name = "ewise_add" - other = self._expect_type( - other, (Vector, Matrix, TransposedMatrix), within=method_name, argname="other", op=op - ) - op = get_typed_op(op, self.dtype, other.dtype, kind="binary") - # Per the spec, op may be a semiring, but this is weird, so don't. - self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") + if is_infix: + from .infix import MatrixEwiseAddExpr, VectorEwiseAddExpr + + other = self._expect_type( + other, + (Vector, Matrix, TransposedMatrix, MatrixEwiseAddExpr, VectorEwiseAddExpr), + within=method_name, + argname="other", + op=op, + ) + op = _get_typed_op_from_exprs(op, self, other, kind="binary") + # Per the spec, op may be a semiring, but this is weird, so don't. 
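
# --- Illustration (not part of the patch): chained vector unions work the
# same way, mirroring test_multi_infix_vector later in this patch.
# A minimal sketch:
import graphblas as gb
from graphblas import monoid

v1 = gb.Vector.from_coo([0, 1], [1, 2], size=3)  # 1 2 .
v2 = gb.Vector.from_coo([1, 2], [1, 2], size=3)  # . 1 2
v3 = gb.Vector.from_coo([2, 0], [1, 2], size=3)  # 2 . 1
result = monoid.min(v1 | v2 | v3).new()  # elementwise min over the union
assert result.isequal(gb.Vector.from_scalar(1, size=3))
# --- end illustration ---
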
+ self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") + if isinstance(self, VectorEwiseAddExpr): + self = op(self).new() + if isinstance(other, InfixExprBase): + other = op(other).new() + else: + other = self._expect_type( + other, + (Vector, Matrix, TransposedMatrix), + within=method_name, + argname="other", + op=op, + ) + op = get_typed_op(op, self.dtype, other.dtype, kind="binary") + # Per the spec, op may be a semiring, but this is weird, so don't. + self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") + if other.ndim == 2: # Broadcast columnwise from the left if other._nrows != self._size: @@ -1103,15 +1136,40 @@ def ewise_mult(self, other, op=binary.times): # Functional syntax w << binary.gt(u & v) """ + return self._ewise_mult(other, op) + + def _ewise_mult(self, other, op=binary.times, is_infix=False): from .matrix import Matrix, MatrixExpression, TransposedMatrix method_name = "ewise_mult" - other = self._expect_type( - other, (Vector, Matrix, TransposedMatrix), within=method_name, argname="other", op=op - ) - op = get_typed_op(op, self.dtype, other.dtype, kind="binary") - # Per the spec, op may be a semiring, but this is weird, so don't. - self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") + if is_infix: + from .infix import MatrixEwiseMultExpr, VectorEwiseMultExpr + + other = self._expect_type( + other, + (Vector, Matrix, TransposedMatrix, MatrixEwiseMultExpr, VectorEwiseMultExpr), + within=method_name, + argname="other", + op=op, + ) + op = _get_typed_op_from_exprs(op, self, other, kind="binary") + # Per the spec, op may be a semiring, but this is weird, so don't. + self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") + if isinstance(self, VectorEwiseMultExpr): + self = op(self).new() + if isinstance(other, InfixExprBase): + other = op(other).new() + else: + other = self._expect_type( + other, + (Vector, Matrix, TransposedMatrix), + within=method_name, + argname="other", + op=op, + ) + op = get_typed_op(op, self.dtype, other.dtype, kind="binary") + # Per the spec, op may be a semiring, but this is weird, so don't. 
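
# --- Illustration (not part of the patch): intersections chain the same way;
# only indices present in every operand survive, again mirroring
# test_multi_infix_vector later in this patch. A minimal sketch:
import graphblas as gb
from graphblas import monoid

v1 = gb.Vector.from_coo([0, 1], [1, 2], size=3)  # 1 2 .
v2 = gb.Vector.from_coo([1, 2], [1, 2], size=3)  # . 1 2
result = monoid.min(v1 & v2 & v1).new()  # only index 1 is in all three
assert result.isequal(gb.Vector.from_coo([1], [1], size=3))
# --- end illustration ---
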
+ self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op") if other.ndim == 2: # Broadcast columnwise from the left if other._nrows != self._size: @@ -1171,13 +1229,32 @@ def ewise_union(self, other, op, left_default, right_default): # Functional syntax w << binary.div(u | v, left_default=1, right_default=1) """ + return self._ewise_union(other, op, left_default, right_default) + + def _ewise_union(self, other, op, left_default, right_default, is_infix=False): from .matrix import Matrix, MatrixExpression, TransposedMatrix method_name = "ewise_union" - other = self._expect_type( - other, (Vector, Matrix, TransposedMatrix), within=method_name, argname="other", op=op - ) - temp_op = get_typed_op(op, self.dtype, other.dtype, kind="binary") + if is_infix: + from .infix import MatrixEwiseAddExpr, VectorEwiseAddExpr + + other = self._expect_type( + other, + (Vector, Matrix, TransposedMatrix, MatrixEwiseAddExpr, VectorEwiseAddExpr), + within=method_name, + argname="other", + op=op, + ) + temp_op = _get_typed_op_from_exprs(op, self, other, kind="binary") + else: + other = self._expect_type( + other, + (Vector, Matrix, TransposedMatrix), + within=method_name, + argname="other", + op=op, + ) + temp_op = get_typed_op(op, self.dtype, other.dtype, kind="binary") left_dtype = temp_op.type dtype = left_dtype if left_dtype._is_udt else None @@ -1216,8 +1293,12 @@ def ewise_union(self, other, op, left_default, right_default): else: right = _as_scalar(right_default, dtype, is_cscalar=False) # pragma: is_grbscalar - op1 = get_typed_op(op, self.dtype, right.dtype, kind="binary") - op2 = get_typed_op(op, left.dtype, other.dtype, kind="binary") + if is_infix: + op1 = _get_typed_op_from_exprs(op, self, right, kind="binary") + op2 = _get_typed_op_from_exprs(op, left, other, kind="binary") + else: + op1 = get_typed_op(op, self.dtype, right.dtype, kind="binary") + op2 = get_typed_op(op, left.dtype, other.dtype, kind="binary") if op1 is not op2: left_dtype = unify(op1.type, op2.type, is_right_scalar=True) right_dtype = unify(op1.type2, op2.type2, is_left_scalar=True) @@ -1228,6 +1309,12 @@ def ewise_union(self, other, op, left_default, right_default): if op.opclass == "Monoid": op = op.binaryop + if is_infix: + if isinstance(self, VectorEwiseAddExpr): + self = op(self, left_default=left, right_default=right).new() + if isinstance(other, InfixExprBase): + other = op(other, left_default=left, right_default=right).new() + expr_repr = "{0.name}.{method_name}({2.name}, {op}, {1._expr_name}, {3._expr_name})" if other.ndim == 2: # Broadcast columnwise from the left @@ -1296,14 +1383,35 @@ def vxm(self, other, op=semiring.plus_times): # Functional syntax C << semiring.min_plus(v @ A) """ + return self._vxm(other, op) + + def _vxm(self, other, op=semiring.plus_times, is_infix=False): from .matrix import Matrix, TransposedMatrix method_name = "vxm" - other = self._expect_type( - other, (Matrix, TransposedMatrix), within=method_name, argname="other", op=op - ) - op = get_typed_op(op, self.dtype, other.dtype, kind="semiring") - self._expect_op(op, "Semiring", within=method_name, argname="op") + if is_infix: + from .infix import MatrixMatMulExpr, VectorMatMulExpr + + other = self._expect_type( + other, + (Matrix, TransposedMatrix, MatrixMatMulExpr), + within=method_name, + argname="other", + op=op, + ) + op = _get_typed_op_from_exprs(op, self, other, kind="semiring") + self._expect_op(op, "Semiring", within=method_name, argname="op") + if isinstance(self, VectorMatMulExpr): + self = op(self).new() + if 
isinstance(other, MatrixMatMulExpr): + other = op(other).new() + else: + other = self._expect_type( + other, (Matrix, TransposedMatrix), within=method_name, argname="other", op=op + ) + op = get_typed_op(op, self.dtype, other.dtype, kind="semiring") + self._expect_op(op, "Semiring", within=method_name, argname="op") + expr = VectorExpression( method_name, "GrB_vxm", @@ -1645,10 +1753,27 @@ def inner(self, other, op=semiring.plus_times): `Matrix Multiplication <../user_guide/operations.html#matrix-multiply>`__ family of functions. """ + return self._inner(other, op) + + def _inner(self, other, op=semiring.plus_times, is_infix=False): method_name = "inner" - other = self._expect_type(other, Vector, within=method_name, argname="other", op=op) - op = get_typed_op(op, self.dtype, other.dtype, kind="semiring") - self._expect_op(op, "Semiring", within=method_name, argname="op") + if is_infix: + from .infix import VectorMatMulExpr + + other = self._expect_type( + other, (Vector, VectorMatMulExpr), within=method_name, argname="other", op=op + ) + op = _get_typed_op_from_exprs(op, self, other, kind="semiring") + self._expect_op(op, "Semiring", within=method_name, argname="op") + if isinstance(self, VectorMatMulExpr): + self = op(self).new() + if isinstance(other, VectorMatMulExpr): + other = op(other).new() + else: + other = self._expect_type(other, Vector, within=method_name, argname="other", op=op) + op = get_typed_op(op, self.dtype, other.dtype, kind="semiring") + self._expect_op(op, "Semiring", within=method_name, argname="op") + expr = ScalarExpression( method_name, "GrB_vxm", diff --git a/graphblas/tests/test_infix.py b/graphblas/tests/test_infix.py index 72e1c8a42..e688086b9 100644 --- a/graphblas/tests/test_infix.py +++ b/graphblas/tests/test_infix.py @@ -1,6 +1,6 @@ import pytest -from graphblas import monoid, op +from graphblas import binary, monoid, op from graphblas.exceptions import DimensionMismatch from .conftest import autocompute @@ -367,3 +367,415 @@ def test_infix_expr_value_types(): expr._value = None assert expr._value is None assert expr._expr._value is None + + +def test_multi_infix_vector(): + D0 = Vector.from_scalar(0, 3).diag() + v1 = Vector.from_coo([0, 1], [1, 2], size=3) # 1 2 . + v2 = Vector.from_coo([1, 2], [1, 2], size=3) # . 1 2 + v3 = Vector.from_coo([2, 0], [1, 2], size=3) # 2 . 
1 + # ewise_add + result = binary.plus((v1 | v2) | v3).new() + expected = Vector.from_scalar(3, size=3) + assert result.isequal(expected) + result = binary.plus(v1 | (v2 | v3)).new() + assert result.isequal(expected) + result = monoid.min(v1 | v2 | v3).new() + expected = Vector.from_scalar(1, size=3) + assert result.isequal(expected) + # ewise_mult + result = monoid.max((v1 & v2) & v3).new() + expected = Vector(int, size=3) + assert result.isequal(expected) + result = monoid.max(v1 & (v2 & v3)).new() + assert result.isequal(expected) + result = monoid.min((v1 & v2) & v1).new() + expected = Vector.from_coo([1], [1], size=3) + assert result.isequal(expected) + # ewise_union + result = binary.plus((v1 | v2) | v3, left_default=10, right_default=10).new() + expected = Vector.from_scalar(13, size=3) + assert result.isequal(expected) + result = binary.plus((v1 | v2) | v3, left_default=10, right_default=10.0).new() + expected = Vector.from_scalar(13.0, size=3) + assert result.isequal(expected) + result = binary.plus(v1 | (v2 | v3), left_default=10, right_default=10).new() + assert result.isequal(expected) + # inner + assert op.plus_plus(v1 @ v1).new().value == 6 + assert op.plus_plus(v1 @ (v1 @ D0)).new().value == 6 + assert op.plus_plus((D0 @ v1) @ v1).new().value == 6 + # matrix-vector ewise_add + result = binary.plus((D0 | v1) | v2).new() + expected = binary.plus(binary.plus(D0 | v1).new() | v2).new() + assert result.isequal(expected) + result = binary.plus(D0 | (v1 | v2)).new() + assert result.isequal(expected) + result = binary.plus((v1 | v2) | D0).new() + assert result.isequal(expected.T) + result = binary.plus(v1 | (v2 | D0)).new() + assert result.isequal(expected.T) + # matrix-vector ewise_mult + result = binary.plus((D0 & v1) & v2).new() + expected = binary.plus(binary.plus(D0 & v1).new() & v2).new() + assert result.isequal(expected) + assert result.nvals > 0 + result = binary.plus(D0 & (v1 & v2)).new() + assert result.isequal(expected) + result = binary.plus((v1 & v2) & D0).new() + assert result.isequal(expected.T) + result = binary.plus(v1 & (v2 & D0)).new() + assert result.isequal(expected.T) + # matrix-vector ewise_union + kwargs = {"left_default": 10, "right_default": 20} + result = binary.plus((D0 | v1) | v2, **kwargs).new() + expected = binary.plus(binary.plus(D0 | v1, **kwargs).new() | v2, **kwargs).new() + assert result.isequal(expected) + result = binary.plus(D0 | (v1 | v2), **kwargs).new() + expected = binary.plus(D0 | binary.plus(v1 | v2, **kwargs).new(), **kwargs).new() + assert result.isequal(expected) + result = binary.plus((v1 | v2) | D0, **kwargs).new() + expected = binary.plus(binary.plus(v1 | v2, **kwargs).new() | D0, **kwargs).new() + assert result.isequal(expected) + result = binary.plus(v1 | (v2 | D0), **kwargs).new() + expected = binary.plus(v1 | binary.plus(v2 | D0, **kwargs).new(), **kwargs).new() + assert result.isequal(expected) + # vxm, mxv + result = op.plus_plus((D0 @ v1) @ D0).new() + assert result.isequal(v1) + result = op.plus_plus(D0 @ (v1 @ D0)).new() + assert result.isequal(v1) + result = op.plus_plus(v1 @ (D0 @ D0)).new() + assert result.isequal(v1) + result = op.plus_plus((D0 @ D0) @ v1).new() + assert result.isequal(v1) + result = op.plus_plus((v1 @ D0) @ D0).new() + assert result.isequal(v1) + result = op.plus_plus(D0 @ (D0 @ v1)).new() + assert result.isequal(v1) + + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2) | v3 + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2).__ror__(v3) + with pytest.raises(TypeError, 
match="XXX"): # TODO + (v1 & v2) | (v2 & v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2) | (v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + v1 | (v2 & v3) + with pytest.raises(TypeError, match="XXX"): # TODO + v1.__ror__(v2 & v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2) | (v2 & v3) + + with pytest.raises(TypeError, match="XXX"): # TODO + v1 & (v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + v1.__rand__(v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2) & (v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2) & (v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2) & v3 + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2).__rand__(v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2) & (v2 & v3) + + # We differentiate between infix and methods + with pytest.raises(TypeError, match="to automatically compute"): + v1.ewise_add(v2 & v3) + with pytest.raises(TypeError, match="Automatic computation"): + (v1 & v2).ewise_add(v3) + with pytest.raises(TypeError, match="to automatically compute"): + v1.ewise_union(v2 & v3, binary.plus, left_default=1, right_default=1) + with pytest.raises(TypeError, match="Automatic computation"): + (v1 & v2).ewise_union(v3, binary.plus, left_default=1, right_default=1) + with pytest.raises(TypeError, match="to automatically compute"): + v1.ewise_mult(v2 | v3) + with pytest.raises(TypeError, match="Automatic computation"): + (v1 | v2).ewise_mult(v3) + + +@autocompute +def test_multi_infix_vector_auto(): + v1 = Vector.from_coo([0, 1], [1, 2], size=3) # 1 2 . + v2 = Vector.from_coo([1, 2], [1, 2], size=3) # . 1 2 + v3 = Vector.from_coo([2, 0], [1, 2], size=3) # 2 . 1 + # We differentiate between infix and methods + with pytest.raises(TypeError, match="only valid for BOOL"): + v1.ewise_add(v2 & v3) + with pytest.raises(TypeError, match="only valid for BOOL"): + (v1 & v2).ewise_add(v3) + with pytest.raises(TypeError, match="only valid for BOOL"): + v1.ewise_union(v2 & v3, binary.plus, left_default=1, right_default=1) + with pytest.raises(TypeError, match="only valid for BOOL"): + (v1 & v2).ewise_union(v3, binary.plus, left_default=1, right_default=1) + with pytest.raises(TypeError, match="only valid for BOOL"): + v1.ewise_mult(v2 | v3) + with pytest.raises(TypeError, match="only valid for BOOL"): + (v1 | v2).ewise_mult(v3) + + +def test_multi_infix_matrix(): + # Adapted from test_multi_infix_vector + D0 = Vector.from_scalar(0, 3).diag() + v1 = Matrix.from_coo([0, 1], [0, 0], [1, 2], nrows=3) # 1 2 . + v2 = Matrix.from_coo([1, 2], [0, 0], [1, 2], nrows=3) # . 1 2 + v3 = Matrix.from_coo([2, 0], [0, 0], [1, 2], nrows=3) # 2 . 
1 + # ewise_add + result = binary.plus((v1 | v2) | v3).new() + expected = Matrix.from_scalar(3, 3, 1) + assert result.isequal(expected) + result = binary.plus(v1 | (v2 | v3)).new() + assert result.isequal(expected) + result = monoid.min(v1 | v2 | v3).new() + expected = Matrix.from_scalar(1, 3, 1) + assert result.isequal(expected) + result = binary.plus(v1 | v1 | v1 | v1 | v1).new() + expected = (5 * v1).new() + assert result.isequal(expected) + # ewise_mult + result = monoid.max((v1 & v2) & v3).new() + expected = Matrix(int, 3, 1) + assert result.isequal(expected) + result = monoid.max(v1 & (v2 & v3)).new() + assert result.isequal(expected) + result = monoid.min((v1 & v2) & v1).new() + expected = Matrix.from_coo([1], [0], [1], nrows=3) + assert result.isequal(expected) + result = binary.plus(v1 & v1 & v1 & v1 & v1).new() + expected = (5 * v1).new() + assert result.isequal(expected) + # ewise_union + result = binary.plus((v1 | v2) | v3, left_default=10, right_default=10).new() + expected = Matrix.from_scalar(13, 3, 1) + assert result.isequal(expected) + result = binary.plus((v1 | v2) | v3, left_default=10, right_default=10.0).new() + expected = Matrix.from_scalar(13.0, 3, 1) + assert result.isequal(expected) + result = binary.plus(v1 | (v2 | v3), left_default=10, right_default=10).new() + assert result.isequal(expected) + # mxm + assert op.plus_plus(v1.T @ v1).new()[0, 0].new().value == 6 + assert op.plus_plus(v1 @ (v1.T @ D0)).new()[0, 0].new().value == 2 + assert op.plus_plus((v1.T @ D0) @ v1).new()[0, 0].new().value == 6 + assert op.plus_plus(D0 @ D0 @ D0 @ D0 @ D0).new().isequal(D0) + + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2) | v3 + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2).__ror__(v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2) | (v2 & v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2) | (v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + v1 | (v2 & v3) + with pytest.raises(TypeError, match="XXX"): # TODO + v1.__ror__(v2 & v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2) | (v2 & v3) + + with pytest.raises(TypeError, match="XXX"): # TODO + v1 & (v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + v1.__rand__(v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2) & (v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2) & (v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2) & v3 + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2).__rand__(v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2) & (v2 & v3) + + # We differentiate between infix and methods + with pytest.raises(TypeError, match="to automatically compute"): + v1.ewise_add(v2 & v3) + with pytest.raises(TypeError, match="Automatic computation"): + (v1 & v2).ewise_add(v3) + with pytest.raises(TypeError, match="to automatically compute"): + v1.ewise_union(v2 & v3, binary.plus, left_default=1, right_default=1) + with pytest.raises(TypeError, match="Automatic computation"): + (v1 & v2).ewise_union(v3, binary.plus, left_default=1, right_default=1) + with pytest.raises(TypeError, match="to automatically compute"): + v1.ewise_mult(v2 | v3) + with pytest.raises(TypeError, match="Automatic computation"): + (v1 | v2).ewise_mult(v3) + + +@autocompute +def test_multi_infix_matrix_auto(): + v1 = Matrix.from_coo([0, 1], [0, 0], [1, 2], nrows=3) # 1 2 . + v2 = Matrix.from_coo([1, 2], [0, 0], [1, 2], nrows=3) # . 
1 2 + v3 = Matrix.from_coo([2, 0], [0, 0], [1, 2], nrows=3) # 2 . 1 + # We differentiate between infix and methods + with pytest.raises(TypeError, match="only valid for BOOL"): + v1.ewise_add(v2 & v3) + with pytest.raises(TypeError, match="only valid for BOOL"): + (v1 & v2).ewise_add(v3) + with pytest.raises(TypeError, match="only valid for BOOL"): + v1.ewise_union(v2 & v3, binary.plus, left_default=1, right_default=1) + with pytest.raises(TypeError, match="only valid for BOOL"): + (v1 & v2).ewise_union(v3, binary.plus, left_default=1, right_default=1) + with pytest.raises(TypeError, match="only valid for BOOL"): + v1.ewise_mult(v2 | v3) + with pytest.raises(TypeError, match="only valid for BOOL"): + (v1 | v2).ewise_mult(v3) + + +def test_multi_infix_scalar(): + # Adapted from test_multi_infix_vector + v1 = Scalar.from_value(1) + v2 = Scalar.from_value(2) + v3 = Scalar(int) + # ewise_add + result = binary.plus((v1 | v2) | v3).new() + expected = 3 + assert result.isequal(expected) + result = binary.plus((1 | v2) | v3).new() + assert result.isequal(expected) + result = binary.plus((1 | v2) | 0).new() + assert result.isequal(expected) + result = binary.plus((v1 | 2) | v3).new() + assert result.isequal(expected) + result = binary.plus((v1 | 2) | 0).new() + assert result.isequal(expected) + result = binary.plus((v1 | v2) | 0).new() + assert result.isequal(expected) + + result = binary.plus(v1 | (v2 | v3)).new() + assert result.isequal(expected) + result = binary.plus(1 | (v2 | v3)).new() + assert result.isequal(expected) + result = binary.plus(1 | (2 | v3)).new() + assert result.isequal(expected) + result = binary.plus(1 | (v2 | 0)).new() + assert result.isequal(expected) + result = binary.plus(v1 | (2 | v3)).new() + assert result.isequal(expected) + result = binary.plus(v1 | (v2 | 0)).new() + assert result.isequal(expected) + + result = monoid.min(v1 | v2 | v3).new() + expected = 1 + assert result.isequal(expected) + # ewise_mult + result = monoid.max((v1 & v2) & v3).new() + expected = None + assert result.isequal(expected) + result = monoid.max(v1 & (v2 & v3)).new() + assert result.isequal(expected) + result = monoid.min((v1 & v2) & v1).new() + expected = 1 + assert result.isequal(expected) + + result = monoid.min((1 & v2) & v1).new() + assert result.isequal(expected) + result = monoid.min((1 & v2) & 1).new() + assert result.isequal(expected) + result = monoid.min((v1 & 2) & v1).new() + assert result.isequal(expected) + result = monoid.min((v1 & 2) & 1).new() + assert result.isequal(expected) + result = monoid.min((v1 & v2) & 1).new() + assert result.isequal(expected) + + result = monoid.min(1 & (v2 & v1)).new() + assert result.isequal(expected) + result = monoid.min(1 & (2 & v1)).new() + assert result.isequal(expected) + result = monoid.min(1 & (v2 & 1)).new() + assert result.isequal(expected) + result = monoid.min(v1 & (2 & v1)).new() + assert result.isequal(expected) + result = monoid.min(v1 & (v2 & 1)).new() + assert result.isequal(expected) + + # ewise_union + result = binary.plus((v1 | v2) | v3, left_default=10, right_default=10).new() + expected = 13 + assert result.isequal(expected) + result = binary.plus((1 | v2) | v3, left_default=10, right_default=10).new() + assert result.isequal(expected) + result = binary.plus((v1 | 2) | v3, left_default=10, right_default=10).new() + assert result.isequal(expected) + result = binary.plus((v1 | v2) | v3, left_default=10, right_default=10.0).new() + assert result.isequal(expected) + result = binary.plus(v1 | (v2 | v3), left_default=10, 
right_default=10).new() + assert result.isequal(expected) + result = binary.plus(1 | (v2 | v3), left_default=10, right_default=10).new() + assert result.isequal(expected) + result = binary.plus(1 | (2 | v3), left_default=10, right_default=10).new() + assert result.isequal(expected) + result = binary.plus(v1 | (2 | v3), left_default=10, right_default=10).new() + assert result.isequal(expected) + + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2) | v3 + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2).__ror__(v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2) | (v2 & v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2) | (v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + v1 | (v2 & v3) + with pytest.raises(TypeError, match="XXX"): # TODO + v1.__ror__(v2 & v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2) | (v2 & v3) + + with pytest.raises(TypeError, match="XXX"): # TODO + v1 & (v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + v1.__rand__(v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2) & (v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 & v2) & (v2 | v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2) & v3 + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2).__rand__(v3) + with pytest.raises(TypeError, match="XXX"): # TODO + (v1 | v2) & (v2 & v3) + + # We differentiate between infix and methods + with pytest.raises(TypeError, match="to automatically compute"): + v1.ewise_add(v2 & v3) + with pytest.raises(TypeError, match="Automatic computation"): + (v1 & v2).ewise_add(v3) + with pytest.raises(TypeError, match="to automatically compute"): + v1.ewise_union(v2 & v3, binary.plus, left_default=1, right_default=1) + with pytest.raises(TypeError, match="Automatic computation"): + (v1 & v2).ewise_union(v3, binary.plus, left_default=1, right_default=1) + with pytest.raises(TypeError, match="to automatically compute"): + v1.ewise_mult(v2 | v3) + with pytest.raises(TypeError, match="Automatic computation"): + (v1 | v2).ewise_mult(v3) + + +@autocompute +def test_multi_infix_scalar_auto(): + v1 = Scalar.from_value(1) + v2 = Scalar.from_value(2) + v3 = Scalar(int) + # We differentiate between infix and methods + with pytest.raises(TypeError, match="only valid for BOOL"): + v1.ewise_add(v2 & v3) + with pytest.raises(TypeError, match="only valid for BOOL"): + (v1 & v2).ewise_add(v3) + with pytest.raises(TypeError, match="only valid for BOOL"): + v1.ewise_union(v2 & v3, binary.plus, left_default=1, right_default=1) + with pytest.raises(TypeError, match="only valid for BOOL"): + (v1 & v2).ewise_union(v3, binary.plus, left_default=1, right_default=1) + with pytest.raises(TypeError, match="only valid for BOOL"): + v1.ewise_mult(v2 | v3) + with pytest.raises(TypeError, match="only valid for BOOL"): + (v1 | v2).ewise_mult(v3) diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index 3f66e46ef..c716c97a9 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -2805,6 +2805,8 @@ def test_ss_nbytes(A): @autocompute def test_auto(A, v): + from graphblas.core.infix import MatrixEwiseMultExpr + expected = binary.land[bool](A & A).new() B = A.dup(dtype=bool) for expr in [(B & B), binary.land[bool](A & A)]: @@ -2832,12 +2834,21 @@ def test_auto(A, v): ]: # print(type(expr).__name__, method) val1 = getattr(expected, method)(expected).new() - val2 = getattr(expected, method)(expr) - val3 = 
getattr(expr, method)(expected) - val4 = getattr(expr, method)(expr) - assert val1.isequal(val2) - assert val1.isequal(val3) - assert val1.isequal(val4) + if method in {"__or__", "__ror__"} and type(expr) is MatrixEwiseMultExpr: + # Doing e.g. `plus(A & B | C)` isn't allowed--make user be explicit + with pytest.raises(TypeError): + val2 = getattr(expected, method)(expr) + with pytest.raises(TypeError): + val3 = getattr(expr, method)(expected) + with pytest.raises(TypeError): + val4 = getattr(expr, method)(expr) + else: + val2 = getattr(expected, method)(expr) + assert val1.isequal(val2) + val3 = getattr(expr, method)(expected) + assert val1.isequal(val3) + val4 = getattr(expr, method)(expr) + assert val1.isequal(val4) for method in ["reduce_rowwise", "reduce_columnwise", "reduce_scalar"]: s1 = getattr(expected, method)(monoid.lor).new() s2 = getattr(expr, method)(monoid.lor) @@ -2946,7 +2957,7 @@ def test_expr_is_like_matrix(A): "setdiag", "update", } - ignore = {"__sizeof__"} + ignore = {"__sizeof__", "_ewise_add", "_ewise_mult", "_ewise_union", "_mxm", "_mxv"} assert attrs - expr_attrs - ignore == expected, ( "If you see this message, you probably added a method to Matrix. You may need to " "add an entry to `matrix` or `matrix_vector` set in `graphblas.core.automethods` " @@ -3011,7 +3022,7 @@ def test_index_expr_is_like_matrix(A): "resize", "setdiag", } - ignore = {"__sizeof__"} + ignore = {"__sizeof__", "_ewise_add", "_ewise_mult", "_ewise_union", "_mxm", "_mxv"} assert attrs - expr_attrs - ignore == expected, ( "If you see this message, you probably added a method to Matrix. You may need to " "add an entry to `matrix` or `matrix_vector` set in `graphblas.core.automethods` " diff --git a/graphblas/tests/test_scalar.py b/graphblas/tests/test_scalar.py index ba9903169..aeb19e170 100644 --- a/graphblas/tests/test_scalar.py +++ b/graphblas/tests/test_scalar.py @@ -360,7 +360,7 @@ def test_expr_is_like_scalar(s): } if s.is_cscalar: expected.add("_empty") - ignore = {"__sizeof__"} + ignore = {"__sizeof__", "_ewise_add", "_ewise_mult", "_ewise_union"} assert attrs - expr_attrs - ignore == expected, ( "If you see this message, you probably added a method to Scalar. You may need to " "add an entry to `scalar` set in `graphblas.core.automethods` " @@ -402,7 +402,7 @@ def test_index_expr_is_like_scalar(s): } if s.is_cscalar: expected.add("_empty") - ignore = {"__sizeof__"} + ignore = {"__sizeof__", "_ewise_add", "_ewise_mult", "_ewise_union"} assert attrs - expr_attrs - ignore == expected, ( "If you see this message, you probably added a method to Scalar. 
You may need to " "add an entry to `scalar` set in `graphblas.core.automethods` " diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index b66bc96c9..1c9a8d38c 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -1532,6 +1532,8 @@ def test_outer(v): @autocompute def test_auto(v): + from graphblas.core.infix import VectorEwiseMultExpr + v = v.dup(dtype=bool) expected = binary.land(v & v).new() assert 0 not in expected @@ -1581,15 +1583,24 @@ def test_auto(v): ]: # print(type(expr).__name__, method) val1 = getattr(expected, method)(expected).new() - val2 = getattr(expected, method)(expr) - val3 = getattr(expr, method)(expected) - val4 = getattr(expr, method)(expr) - assert val1.isequal(val2) - assert val1.isequal(val3) - assert val1.isequal(val4) - assert val1.isequal(val2.new()) - assert val1.isequal(val3.new()) - assert val1.isequal(val4.new()) + if method in {"__or__", "__ror__"} and type(expr) is VectorEwiseMultExpr: + # Doing e.g. `plus(x & y | z)` isn't allowed--make user be explicit + with pytest.raises(TypeError): + val2 = getattr(expected, method)(expr) + with pytest.raises(TypeError): + val3 = getattr(expr, method)(expected) + with pytest.raises(TypeError): + val4 = getattr(expr, method)(expr) + else: + val2 = getattr(expected, method)(expr) + assert val1.isequal(val2) + assert val1.isequal(val2.new()) + val3 = getattr(expr, method)(expected) + assert val1.isequal(val3) + assert val1.isequal(val3.new()) + val4 = getattr(expr, method)(expr) + assert val1.isequal(val4) + assert val1.isequal(val4.new()) s1 = expected.reduce(monoid.lor).new() s2 = expr.reduce(monoid.lor) assert s1.isequal(s2.new()) @@ -1653,7 +1664,7 @@ def test_expr_is_like_vector(v): "resize", "update", } - ignore = {"__sizeof__"} + ignore = {"__sizeof__", "_ewise_add", "_ewise_mult", "_ewise_union", "_inner", "_vxm"} assert attrs - expr_attrs - ignore == expected, ( "If you see this message, you probably added a method to Vector. You may need to " "add an entry to `vector` or `matrix_vector` set in `graphblas.core.automethods` " @@ -1702,7 +1713,7 @@ def test_index_expr_is_like_vector(v): "from_values", "resize", } - ignore = {"__sizeof__"} + ignore = {"__sizeof__", "_ewise_add", "_ewise_mult", "_ewise_union", "_inner", "_vxm"} assert attrs - expr_attrs - ignore == expected, ( "If you see this message, you probably added a method to Vector. 
You may need to " "add an entry to `vector` or `matrix_vector` set in `graphblas.core.automethods` " From 1ecf3334182e5aa15604b94cf966d7a9c603095c Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 5 Nov 2023 19:28:06 -0600 Subject: [PATCH 44/66] Add `gb.MAX_SIZE`, which is `GrB_INDEX_MAX + 1` (#519) --- graphblas/__init__.py | 5 +++++ graphblas/tests/test_core.py | 4 ++++ scripts/test_imports.sh | 2 +- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/graphblas/__init__.py b/graphblas/__init__.py index a9895cb6a..63110eeeb 100644 --- a/graphblas/__init__.py +++ b/graphblas/__init__.py @@ -39,6 +39,7 @@ def get_config(): backend = None _init_params = None _SPECIAL_ATTRS = { + "MAX_SIZE", # The maximum size of Vector and Matrix dimensions (GrB_INDEX_MAX + 1) "Matrix", "Recorder", "Scalar", @@ -205,6 +206,10 @@ def _load(name): if name in {"Matrix", "Vector", "Scalar", "Recorder"}: module = _import_module(f".core.{name.lower()}", __name__) globals()[name] = getattr(module, name) + elif name == "MAX_SIZE": + from .core import lib + + globals()[name] = lib.GrB_INDEX_MAX + 1 else: # Everything else is a module globals()[name] = _import_module(f".{name}", __name__) diff --git a/graphblas/tests/test_core.py b/graphblas/tests/test_core.py index 003affc6c..3586eb4a8 100644 --- a/graphblas/tests/test_core.py +++ b/graphblas/tests/test_core.py @@ -90,3 +90,7 @@ def test_packages(): assert ( pkgs == pkgs2 ), "If there are extra items on the left, add them to pyproject.toml:tool.setuptools.packages" + + +def test_index_max(): + assert gb.MAX_SIZE == 2**60 # True for all current backends diff --git a/scripts/test_imports.sh b/scripts/test_imports.sh index cc989ef06..6ce88c83e 100755 --- a/scripts/test_imports.sh +++ b/scripts/test_imports.sh @@ -13,7 +13,7 @@ if ! python -c "from graphblas.select import tril" ; then exit 1 ; fi if ! python -c "from graphblas.semiring import plus_times" ; then exit 1 ; fi if ! python -c "from graphblas.unary import exp" ; then exit 1 ; fi if ! (for attr in Matrix Scalar Vector Recorder agg binary dtypes exceptions \ - init io monoid op select semiring tests unary ss viz + init io monoid op select semiring tests unary ss viz MAX_SIZE do echo python -c \"from graphblas import $attr\" if ! python -c "from graphblas import $attr" then exit 1 From 303c5a1abec06cb05b0d9dc1bb84295a37b6b2e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Dec 2023 15:02:48 -0600 Subject: [PATCH 45/66] Bump conda-incubator/setup-miniconda from 2 to 3 (#521) Bumps [conda-incubator/setup-miniconda](https://github.com/conda-incubator/setup-miniconda) from 2 to 3. - [Release notes](https://github.com/conda-incubator/setup-miniconda/releases) - [Changelog](https://github.com/conda-incubator/setup-miniconda/blob/main/CHANGELOG.md) - [Commits](https://github.com/conda-incubator/setup-miniconda/compare/v2...v3) --- updated-dependencies: - dependency-name: conda-incubator/setup-miniconda dependency-type: direct:production update-type: version-update:semver-major ... 
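# ---- editor's aside: illustrative sketch, not part of the patch ----
# Patch 44 above lazily exposes `gb.MAX_SIZE` as `GrB_INDEX_MAX + 1`.
# A sketch of guarding a requested dimension with it; the helper and the
# sizes below are hypothetical:
import graphblas as gb

assert gb.MAX_SIZE == 2**60  # true for all current backends, per the new test


def check_size(n):
    # Reject dimensions the backend cannot represent
    if n >= gb.MAX_SIZE:
        raise ValueError(f"size {n} exceeds gb.MAX_SIZE ({gb.MAX_SIZE})")


check_size(1_000_000)  # fine; e.g. 2**61 would raise
# ---- end aside ----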
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test_and_build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 56d13557f..492b9e62a 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -135,7 +135,7 @@ jobs: 1 1 - name: Setup mamba - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 id: setup_mamba continue-on-error: true with: @@ -148,7 +148,7 @@ jobs: activate-environment: graphblas auto-activate-base: false - name: Setup conda - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 id: setup_conda if: steps.setup_mamba.outcome == 'failure' continue-on-error: false From 34f5c40fd6bb6df12c9776e24154b0bf79de6d4a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Dec 2023 15:02:58 -0600 Subject: [PATCH 46/66] Bump pypa/gh-action-pypi-publish from 1.8.10 to 1.8.11 (#522) Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.8.10 to 1.8.11. - [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases) - [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/v1.8.10...v1.8.11) --- updated-dependencies: - dependency-name: pypa/gh-action-pypi-publish dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/publish_pypi.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index 45a2b7880..28ecd13d8 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -35,7 +35,7 @@ jobs: - name: Check with twine run: python -m twine check --strict dist/* - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@v1.8.10 + uses: pypa/gh-action-pypi-publish@v1.8.11 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} From 8a800327e71e08029192b6a964281a986e7c84a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Dec 2023 14:05:44 -0600 Subject: [PATCH 47/66] Bump actions/setup-python from 4 to 5 (#523) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 4 to 5. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/imports.yml | 2 +- .github/workflows/lint.yml | 2 +- .github/workflows/publish_pypi.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/imports.yml b/.github/workflows/imports.yml index 18be6256a..0116f615d 100644 --- a/.github/workflows/imports.yml +++ b/.github/workflows/imports.yml @@ -49,7 +49,7 @@ jobs: # os: ["ubuntu-latest", "macos-latest", "windows-latest"] steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ needs.rngs.outputs.pyver }} # python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index e0945022c..97bb856f6 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: "3.10" - uses: pre-commit/action@v3.0.0 diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index 28ecd13d8..406f7c269 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -18,7 +18,7 @@ jobs: with: fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install build dependencies From 0bfcb6665706d7169095ae6b2f8d3bd5a7165db4 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Tue, 12 Dec 2023 14:06:41 -0600 Subject: [PATCH 48/66] Support `A.power(0)` (#518) --- .pre-commit-config.yaml | 4 ++-- graphblas/core/matrix.py | 22 ++++++++++++++++++---- graphblas/tests/test_matrix.py | 18 +++++++++++++----- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3766e2e7c..97bf22889 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.3 + rev: v0.1.4 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.3 + rev: v0.1.4 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 34789d68d..1ea24f479 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -101,6 +101,10 @@ def _reposition(updater, indices, chunk): def _power(updater, A, n, op): opts = updater.opts + if n == 0: + v = Vector.from_scalar(op.binaryop.monoid.identity, A._nrows, A.dtype, name="v_diag") + updater << v.diag(name="M_diag") + return if n == 1: updater << A return @@ -2895,7 +2899,11 @@ def power(self, n, op=semiring.plus_times): Parameters ---------- n : int - The exponent must be a positive integer. + The exponent must be a nonnegative integer. If n=0, the result will be a diagonal + matrix with values equal to the identity of the semiring's binary operator. + For example, ``plus_times`` will have diagonal values of 1, which is the + identity of ``times``. The binary operator must be associated with a monoid + when n=0 so the identity can be determined; otherwise, ValueError is raised. 
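# ---- editor's aside: illustrative sketch, not part of the patch ----
# What the new n=0 case below means in practice: A.power(0) behaves like
# A**0 == I, a diagonal matrix holding the identity of the semiring's binary
# operator. The small matrix here is a hypothetical example:
from graphblas import Matrix, Vector

A = Matrix.from_coo([0, 1], [1, 0], [2, 3], nrows=2, ncols=2)
result = A.power(0).new()  # default semiring.plus_times: diagonal of 1s
assert result.isequal(Vector.from_scalar(1, 2, A.dtype).diag())
# ---- end aside ----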
op : :class:`~graphblas.core.operator.Semiring` Semiring used in the computation @@ -2923,11 +2931,17 @@ def power(self, n, op=semiring.plus_times): if self._nrows != self._ncols: raise DimensionMismatch(f"power only works for square Matrix; shape is {self.shape}") if (N := maybe_integral(n)) is None: - raise TypeError(f"n must be a positive integer; got bad type: {type(n)}") - if N <= 0: - raise ValueError(f"n must be a positive integer; got: {N}") + raise TypeError(f"n must be a nonnegative integer; got bad type: {type(n)}") + if N < 0: + raise ValueError(f"n must be a nonnegative integer; got: {N}") op = get_typed_op(op, self.dtype, kind="semiring") self._expect_op(op, "Semiring", within=method_name, argname="op") + if N == 0 and op.binaryop.monoid is None: + raise ValueError( + f"Binary operator of {op} semiring does not have a monoid with an identity. " + "When n=0, the result is a diagonal matrix with values equal to the " + "identity of the binaryop, so the binaryop must be associated with a monoid." + ) return MatrixExpression( "power", None, diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index c716c97a9..233fc9a9b 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -4402,14 +4402,22 @@ def test_power(A): result = A.power(i, semiring.min_plus).new() assert result.isequal(expected) expected << semiring.min_plus(A @ expected) + # n == 0 + result = A.power(0).new() + expected = Vector.from_scalar(1, A.nrows, A.dtype).diag() + assert result.isequal(expected) + result = A.power(0, semiring.plus_min).new() + identity = semiring.plus_min[A.dtype].binaryop.monoid.identity + assert identity != 1 + expected = Vector.from_scalar(identity, A.nrows, A.dtype).diag() + assert result.isequal(expected) # Exceptional - with pytest.raises(TypeError, match="must be a positive integer"): + with pytest.raises(TypeError, match="must be a nonnegative integer"): A.power(1.5) - with pytest.raises(ValueError, match="must be a positive integer"): + with pytest.raises(ValueError, match="must be a nonnegative integer"): A.power(-1) - with pytest.raises(ValueError, match="must be a positive integer"): - # Not implemented yet... could create identity matrix - A.power(0) + with pytest.raises(ValueError, match="binaryop must be associated with a monoid"): + A.power(0, semiring.min_first) B = A[:2, :3].new() with pytest.raises(DimensionMismatch): B.power(2) From 6ec05d10d8fd3b6d1170091c6219227d43b2308e Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 13 Dec 2023 06:20:13 -0600 Subject: [PATCH 49/66] Fix the return type of e.g. 
`agg.count` to be INT64 by default (#524) This should fix https://github.com/python-graphblas/graphblas-algorithms/issues/82 --- .pre-commit-config.yaml | 12 ++++++------ graphblas/core/operator/utils.py | 3 +++ graphblas/tests/test_scalar.py | 2 +- graphblas/tests/test_vector.py | 15 +++++++++++++++ scripts/check_versions.sh | 2 +- 5 files changed, 26 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 97bf22889..b1d264509 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -46,7 +46,7 @@ repos: # We can probably remove `isort` if we come to trust `ruff --fix`, # but we'll need to figure out the configuration to do this in `ruff` - repo: https://github.com/pycqa/isort - rev: 5.12.0 + rev: 5.13.1 hooks: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it @@ -61,12 +61,12 @@ repos: - id: auto-walrus args: [--line-length, "100"] - repo: https://github.com/psf/black - rev: 23.10.1 + rev: 23.12.0 hooks: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.4 + rev: v0.1.7 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -79,7 +79,7 @@ repos: additional_dependencies: &flake8_dependencies # These versions need updated manually - flake8==6.1.0 - - flake8-bugbear==23.9.16 + - flake8-bugbear==23.12.2 - flake8-simplify==0.21.0 - repo: https://github.com/asottile/yesqa rev: v1.5.0 @@ -94,11 +94,11 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.4 + rev: v0.1.7 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint - rev: v0.8.1 + rev: v0.9.1 hooks: - id: sphinx-lint args: [--enable, all, "--disable=line-too-long,leaked-markup"] diff --git a/graphblas/core/operator/utils.py b/graphblas/core/operator/utils.py index cd0b82d3c..543df793e 100644 --- a/graphblas/core/operator/utils.py +++ b/graphblas/core/operator/utils.py @@ -75,6 +75,9 @@ def get_typed_op(op, dtype, dtype2=None, *, is_left_scalar=False, is_right_scala from .agg import Aggregator, TypedAggregator if isinstance(op, Aggregator): + # agg._any_dtype basically serves the same purpose as op._custom_dtype + if op._any_dtype is not None and op._any_dtype is not True: + return op[op._any_dtype] return op[dtype] if isinstance(op, TypedAggregator): return op diff --git a/graphblas/tests/test_scalar.py b/graphblas/tests/test_scalar.py index aeb19e170..3c7bffa9a 100644 --- a/graphblas/tests/test_scalar.py +++ b/graphblas/tests/test_scalar.py @@ -250,7 +250,7 @@ def test_update(s): def test_not_hashable(s): with pytest.raises(TypeError, match="unhashable type"): - {s} + _ = {s} with pytest.raises(TypeError, match="unhashable type"): hash(s) diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index 1c9a8d38c..8a2cd0824 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -948,6 +948,21 @@ def test_reduce_agg(v): assert s.is_empty +def test_reduce_agg_count_is_int64(v): + """Aggregators that count should default to INT64 return dtype.""" + assert v.dtype == dtypes.INT64 + res = v.reduce(agg.count).new() + assert res.dtype == dtypes.INT64 + assert res == 4 + res = v.dup(dtypes.INT8).reduce(agg.count).new() + assert res.dtype == dtypes.INT64 + assert res == 4 + # Allow return dtype to be specified + res = v.dup(dtypes.INT8).reduce(agg.count[dtypes.INT16]).new() + assert res.dtype == dtypes.INT16 + assert res == 4 + + @pytest.mark.skipif("not suitesparse") 
def test_reduce_agg_argminmax(v): assert v.reduce(agg.ss.argmin).new() == 6 diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index d197f2af2..db786b190 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -3,7 +3,7 @@ # Use, adjust, copy/paste, etc. as necessary to answer your questions. # This may be helpful when updating dependency versions in CI. # Tip: add `--json` for more information. -conda search 'flake8-bugbear[channel=conda-forge]>=23.9.16' +conda search 'flake8-bugbear[channel=conda-forge]>=23.12.2' conda search 'flake8-simplify[channel=conda-forge]>=0.21.0' conda search 'numpy[channel=conda-forge]>=1.26.0' conda search 'pandas[channel=conda-forge]>=2.1.2' From 5fcdbf667807c7fd6f5a300ee539388b3c87a893 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 13 Dec 2023 08:14:48 -0600 Subject: [PATCH 50/66] Update dependency versions (#525) --- .github/workflows/test_and_build.yml | 12 ++++++------ scripts/check_versions.sh | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 492b9e62a..4b9035cc3 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -175,22 +175,22 @@ jobs: npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", "=1.26", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') else # Python 3.12 npver=$(python -c 'import random ; print(random.choice(["=1.26", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=2.1", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=2.4", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=2.4", "=2.5", ""]))') fi # But there may be edge cases of 
incompatibility we need to handle (more handled below) if [[ ${pdver} == "=2.1" && ${npver} == "=1.21" ]]; then @@ -236,11 +236,11 @@ jobs: spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') fi elif [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then - numbaver=$(python -c 'import random ; print(random.choice(["=0.57", ""]))') + numbaver=$(python -c 'import random ; print(random.choice(["=0.57", "=0.58", ""]))') elif [[ ${npver} == "=1.21" ]] ; then numbaver=$(python -c 'import random ; print(random.choice(["=0.55", "=0.56", "=0.57", ""]))') else - numbaver=$(python -c 'import random ; print(random.choice(["=0.56", "=0.57", ""]))') + numbaver=$(python -c 'import random ; print(random.choice(["=0.56", "=0.57", "=0.58", ""]))') fi fmm=fast_matrix_market${fmmver} awkward=awkward${akver} diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index db786b190..958bf2210 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -5,13 +5,13 @@ # Tip: add `--json` for more information. conda search 'flake8-bugbear[channel=conda-forge]>=23.12.2' conda search 'flake8-simplify[channel=conda-forge]>=0.21.0' -conda search 'numpy[channel=conda-forge]>=1.26.0' -conda search 'pandas[channel=conda-forge]>=2.1.2' -conda search 'scipy[channel=conda-forge]>=1.11.3' +conda search 'numpy[channel=conda-forge]>=1.26.2' +conda search 'pandas[channel=conda-forge]>=2.1.4' +conda search 'scipy[channel=conda-forge]>=1.11.4' conda search 'networkx[channel=conda-forge]>=3.2.1' -conda search 'awkward[channel=conda-forge]>=2.4.6' +conda search 'awkward[channel=conda-forge]>=2.5.0' conda search 'sparse[channel=conda-forge]>=0.14.0' -conda search 'fast_matrix_market[channel=conda-forge]>=1.7.4' -conda search 'numba[channel=conda-forge]>=0.57.1' +conda search 'fast_matrix_market[channel=conda-forge]>=1.7.5' +conda search 'numba[channel=conda-forge]>=0.58.1' conda search 'pyyaml[channel=conda-forge]>=6.0.1' # conda search 'python[channel=conda-forge]>=3.9 *pypy*' From e673d603ebf637286d7b8bb5e9028d0c2adbb13a Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 13 Dec 2023 12:00:46 -0600 Subject: [PATCH 51/66] Fix link to test status badge (#526) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 42ed0d41e..de942f88e 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/python-graphblas)](https://pypi.python.org/pypi/python-graphblas/) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/python-graphblas/python-graphblas/blob/main/LICENSE)
-[![Tests](https://github.com/python-graphblas/python-graphblas/workflows/Tests/badge.svg?branch=main)](https://github.com/python-graphblas/python-graphblas/actions) +[![Tests](https://github.com/python-graphblas/python-graphblas/actions/workflows/test_and_build.yml/badge.svg?branch=main)](https://github.com/python-graphblas/python-graphblas/actions) [![Docs](https://readthedocs.org/projects/python-graphblas/badge/?version=latest)](https://python-graphblas.readthedocs.io/en/latest/) [![Coverage](https://codecov.io/gh/python-graphblas/python-graphblas/graph/badge.svg?token=D7HHLDPQ2Q)](https://codecov.io/gh/python-graphblas/python-graphblas) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7328791.svg)](https://doi.org/10.5281/zenodo.7328791) From ee5f4e757996a4b881a67c94d7c4ba752dffb298 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 17 Dec 2023 06:56:59 -0600 Subject: [PATCH 52/66] NetworkX 3.2 doesn't support SciPy 1.8 (#530) --- .github/workflows/test_and_build.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 4b9035cc3..7c7a7691b 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -196,6 +196,9 @@ jobs: if [[ ${pdver} == "=2.1" && ${npver} == "=1.21" ]]; then pdver="=2.0" fi + if [[ ${nxver} == "=3.2" && ${spver} == "=1.8" ]]; then + spver="=1.9" + fi if [[ ${{ steps.sourcetype.outputs.selected }} == "source" || ${{ steps.sourcetype.outputs.selected }} == "upstream" ]]; then # TODO: there are currently issues with some numpy versions when # installing python-suitesparse-grphblas from source or upstream. From 0149cdefb03b9e63d3f502abff2dd1cb5b93ddcf Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 17 Dec 2023 11:31:18 -0600 Subject: [PATCH 53/66] Remove deprecated `gb.io.from_numpy` (and `to_numpy`) (#528) * Remove deprecated `gb.io.from_numpy` (and `to_numpy`) These have been deprecated since 2023-02-27, so could be removed after 2023-10-27 per our policy. --- .github/workflows/test_and_build.yml | 12 +--- docs/api_reference/io.rst | 13 ++-- docs/user_guide/collections.rst | 2 +- docs/user_guide/io.rst | 2 + graphblas/io/__init__.py | 1 - graphblas/io/_numpy.py | 104 --------------------------- graphblas/tests/test_io.py | 11 --- 7 files changed, 14 insertions(+), 131 deletions(-) delete mode 100644 graphblas/io/_numpy.py diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 7c7a7691b..190c1840e 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -173,12 +173,12 @@ jobs: # Randomly choosing versions of dependencies based on Python version works surprisingly well... 
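# ---- editor's aside: illustrative sketch, not part of the patch ----
# The workflow steps below pick dependency pins at random per CI run with
# one-liners such as `python -c 'import random ; print(random.choice([...]))'`.
# The same idea in plain Python; the candidate list here is hypothetical:
import random

candidates = ["=1.24", "=1.25", "=1.26", ""]  # "" means unpinned (latest)
npver = random.choice(candidates)
print(f"numpy{npver}")  # e.g. "numpy=1.25", later handed to the install step
# ---- end aside ----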
if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]]; then @@ -196,9 +196,6 @@ jobs: if [[ ${pdver} == "=2.1" && ${npver} == "=1.21" ]]; then pdver="=2.0" fi - if [[ ${nxver} == "=3.2" && ${spver} == "=1.8" ]]; then - spver="=1.9" - fi if [[ ${{ steps.sourcetype.outputs.selected }} == "source" || ${{ steps.sourcetype.outputs.selected }} == "upstream" ]]; then # TODO: there are currently issues with some numpy versions when # installing python-suitesparse-grphblas from source or upstream. @@ -230,14 +227,11 @@ jobs: fi if [[ ${npver} == "=1.26" ]] ; then numbaver="" - if [[ ${spver} == "=1.8" || ${spver} == "=1.9" ]] ; then + if [[ ${spver} == "=1.9" ]] ; then spver=$(python -c 'import random ; print(random.choice(["=1.10", "=1.11", ""]))') fi elif [[ ${npver} == "=1.25" ]] ; then numbaver="" - if [[ ${spver} == "=1.8" ]] ; then - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') - fi elif [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then numbaver=$(python -c 'import random ; print(random.choice(["=0.57", "=0.58", ""]))') elif [[ ${npver} == "=1.21" ]] ; then diff --git a/docs/api_reference/io.rst b/docs/api_reference/io.rst index cd6057a31..1cfc98516 100644 --- a/docs/api_reference/io.rst +++ b/docs/api_reference/io.rst @@ -10,15 +10,18 @@ These methods require `networkx `_ to be installed. .. autofunction:: graphblas.io.to_networkx -Numpy +NumPy ~~~~~ -These methods require `scipy `_ to be installed, as some -of the scipy.sparse machinery is used during the conversion process. +These methods convert to and from dense arrays. For more, see :ref:`IO in the user guide `. -.. autofunction:: graphblas.io.from_numpy +.. automethod:: graphblas.core.matrix.Matrix.from_dense -.. autofunction:: graphblas.io.to_numpy +.. automethod:: graphblas.core.matrix.Matrix.to_dense + +.. automethod:: graphblas.core.vector.Vector.from_dense + +.. 
automethod:: graphblas.core.vector.Vector.to_dense Scipy Sparse ~~~~~~~~~~~~ diff --git a/docs/user_guide/collections.rst b/docs/user_guide/collections.rst index 2ce759bf4..de7469c6d 100644 --- a/docs/user_guide/collections.rst +++ b/docs/user_guide/collections.rst @@ -145,7 +145,7 @@ The shape and dtype remain unchanged, but the collection will be fully sparse (i to_coo ~~~~~~ -To go from a collection back to the index and values, ``.to_coo()`` can be called. Numpy arrays +To go from a collection back to the index and values, ``.to_coo()`` can be called. NumPy arrays will be returned in a tuple. .. code-block:: python diff --git a/docs/user_guide/io.rst b/docs/user_guide/io.rst index ecb4c0862..f27b40bd3 100644 --- a/docs/user_guide/io.rst +++ b/docs/user_guide/io.rst @@ -4,6 +4,8 @@ Input/Output There are several ways to get data into and out of python-graphblas. +.. _from-to-values: + From/To Values -------------- diff --git a/graphblas/io/__init__.py b/graphblas/io/__init__.py index b21b20963..a1b71db40 100644 --- a/graphblas/io/__init__.py +++ b/graphblas/io/__init__.py @@ -1,6 +1,5 @@ from ._awkward import from_awkward, to_awkward from ._matrixmarket import mmread, mmwrite from ._networkx import from_networkx, to_networkx -from ._numpy import from_numpy, to_numpy # deprecated from ._scipy import from_scipy_sparse, to_scipy_sparse from ._sparse import from_pydata_sparse, to_pydata_sparse diff --git a/graphblas/io/_numpy.py b/graphblas/io/_numpy.py deleted file mode 100644 index 954d28df7..000000000 --- a/graphblas/io/_numpy.py +++ /dev/null @@ -1,104 +0,0 @@ -from warnings import warn - -from ..core.utils import output_type -from ..core.vector import Vector -from ..dtypes import lookup_dtype -from ..exceptions import GraphblasException -from ._scipy import from_scipy_sparse, to_scipy_sparse - - -def from_numpy(m): # pragma: no cover (deprecated) - """Create a sparse Vector or Matrix from a dense numpy array. - - .. deprecated:: 2023.2.0 - ``from_numpy`` will be removed in a future release. - Use ``Vector.from_dense`` or ``Matrix.from_dense`` instead. - Will be removed in version 2023.10.0 or later - - A value of 0 is considered as "missing". - - - m.ndim == 1 returns a ``Vector`` - - m.ndim == 2 returns a ``Matrix`` - - m.ndim > 2 raises an error - - dtype is inferred from m.dtype - - Parameters - ---------- - m : np.ndarray - Input array - - See Also - -------- - Matrix.from_dense - Vector.from_dense - from_scipy_sparse - - Returns - ------- - Vector or Matrix - """ - warn( - "`graphblas.io.from_numpy` is deprecated; " - "use `Matrix.from_dense` and `Vector.from_dense` instead.", - DeprecationWarning, - stacklevel=2, - ) - if m.ndim > 2: - raise GraphblasException("m.ndim must be <= 2") - - try: - from scipy.sparse import coo_array, csr_array - except ImportError: # pragma: no cover (import) - raise ImportError("scipy is required to import from numpy") from None - - if m.ndim == 1: - A = csr_array(m) - _, size = A.shape - dtype = lookup_dtype(m.dtype) - return Vector.from_coo(A.indices, A.data, size=size, dtype=dtype) - A = coo_array(m) - return from_scipy_sparse(A) - - -def to_numpy(m): # pragma: no cover (deprecated) - """Create a dense numpy array from a sparse Vector or Matrix. - - .. deprecated:: 2023.2.0 - ``to_numpy`` will be removed in a future release. - Use ``Vector.to_dense`` or ``Matrix.to_dense`` instead. - Will be removed in version 2023.10.0 or later - - Missing values will become 0 in the output. 
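# ---- editor's aside: illustrative sketch, not part of the patch ----
# Migration path for the helpers being deleted here, assuming the
# `missing_value=` and `fill_value=` keywords of the replacement methods:
import numpy as np
from graphblas import Vector

a = np.array([0.0, 2.0, 4.1])
v = Vector.from_dense(a, missing_value=0)  # replaces gb.io.from_numpy(a)
b = v.to_dense(fill_value=0)  # replaces gb.io.to_numpy(v)
np.testing.assert_array_equal(a, b)
# ---- end aside ----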
- - numpy dtype will match the GraphBLAS dtype - - Parameters - ---------- - m : Vector or Matrix - GraphBLAS Vector or Matrix - - See Also - -------- - to_scipy_sparse - Matrix.to_dense - Vector.to_dense - - Returns - ------- - np.ndarray - """ - warn( - "`graphblas.io.to_numpy` is deprecated; " - "use `Matrix.to_dense` and `Vector.to_dense` instead.", - DeprecationWarning, - stacklevel=2, - ) - try: - import scipy # noqa: F401 - except ImportError: # pragma: no cover (import) - raise ImportError("scipy is required to export to numpy") from None - if output_type(m) is Vector: - return to_scipy_sparse(m).toarray()[0] - sparse = to_scipy_sparse(m, "coo") - return sparse.toarray() diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py index 6ad92a950..109c90a2c 100644 --- a/graphblas/tests/test_io.py +++ b/graphblas/tests/test_io.py @@ -38,17 +38,6 @@ suitesparse = gb.backend == "suitesparse" -@pytest.mark.skipif("not ss") -def test_deprecated(): - a = np.array([0.0, 2.0, 4.1]) - with pytest.warns(DeprecationWarning): - v = gb.io.from_numpy(a) - assert v.isequal(gb.Vector.from_coo([1, 2], [2.0, 4.1]), check_dtype=True) - with pytest.warns(DeprecationWarning): - a2 = gb.io.to_numpy(v) - np.testing.assert_array_equal(a, a2) - - @pytest.mark.skipif("not ss") def test_vector_to_from_numpy(): a = np.array([0.0, 2.0, 4.1]) From 919f41c91769a5e44a6eee36a08426036da8d7ac Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 17 Dec 2023 11:31:38 -0600 Subject: [PATCH 54/66] Remove deprecated `gb.core.agg` (#527) * Remove deprecated `gb.core.agg` This was deprecated on 2023-03-31, so we can remove it after 2023-11-30 per our deprecation policy. --- .flake8 | 1 - .github/workflows/test_and_build.yml | 2 +- .pre-commit-config.yaml | 6 +++--- graphblas/agg/__init__.py | 2 +- graphblas/core/agg.py | 17 ----------------- graphblas/tests/test_op.py | 2 -- pyproject.toml | 1 - scripts/check_versions.sh | 2 +- 8 files changed, 6 insertions(+), 27 deletions(-) delete mode 100644 graphblas/core/agg.py diff --git a/.flake8 b/.flake8 index 80124c9e8..0dede3f1d 100644 --- a/.flake8 +++ b/.flake8 @@ -12,6 +12,5 @@ extend-ignore = per-file-ignores = scripts/create_pickle.py:F403,F405, graphblas/tests/*.py:T201, - graphblas/core/agg.py:F401,F403, graphblas/core/ss/matrix.py:SIM113, graphblas/**/__init__.py:F401, diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 190c1840e..29c6d4a5a 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -389,7 +389,7 @@ jobs: echo "from graphblas.agg import count" > script.py coverage run -a script.py echo "from graphblas import agg" > script.py # Does this still cover? 
- echo "from graphblas.core import agg" >> script.py + echo "from graphblas.core.operator import agg" >> script.py coverage run -a script.py # Tests lazy loading of lib, ffi, and NULL in gb.core echo "from graphblas.core import base" > script.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b1d264509..bc97547cc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -46,7 +46,7 @@ repos: # We can probably remove `isort` if we come to trust `ruff --fix`, # but we'll need to figure out the configuration to do this in `ruff` - repo: https://github.com/pycqa/isort - rev: 5.13.1 + rev: 5.13.2 hooks: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.7 + rev: v0.1.8 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.7 + rev: v0.1.8 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/graphblas/agg/__init__.py b/graphblas/agg/__init__.py index 9f6ead0b5..725c11aab 100644 --- a/graphblas/agg/__init__.py +++ b/graphblas/agg/__init__.py @@ -73,7 +73,7 @@ # - bxnor monoid: even bits # - bnor monoid: odd bits """ -# All items are dynamically added by classes in core/agg.py +# All items are dynamically added by classes in core/operator/agg.py # This module acts as a container of Aggregator instances _deprecated = {} diff --git a/graphblas/core/agg.py b/graphblas/core/agg.py deleted file mode 100644 index 23848d3b9..000000000 --- a/graphblas/core/agg.py +++ /dev/null @@ -1,17 +0,0 @@ -"""graphblas.core.agg namespace is deprecated; please use graphblas.core.operator.agg instead. - -.. deprecated:: 2023.3.0 -``graphblas.core.agg`` will be removed in a future release. -Use ``graphblas.core.operator.agg`` instead. -Will be removed in version 2023.11.0 or later. 
-
-"""
-import warnings
-
-from .operator.agg import *  # pylint: disable=wildcard-import,unused-wildcard-import
-
-warnings.warn(
-    "graphblas.core.agg namespace is deprecated; please use graphblas.core.operator.agg instead.",
-    DeprecationWarning,
-    stacklevel=1,
-)
diff --git a/graphblas/tests/test_op.py b/graphblas/tests/test_op.py
index c7d1ce97c..41fae80ae 100644
--- a/graphblas/tests/test_op.py
+++ b/graphblas/tests/test_op.py
@@ -1450,8 +1450,6 @@ def test_deprecated():
         gb.op.secondj
     with pytest.warns(DeprecationWarning, match="please use"):
         gb.agg.argmin
-    with pytest.warns(DeprecationWarning, match="please use"):
-        import graphblas.core.agg  # noqa: F401


 @pytest.mark.slow
diff --git a/pyproject.toml b/pyproject.toml
index 04ef28645..3bd4a4310 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -375,7 +375,6 @@ ignore = [
 ]

 [tool.ruff.per-file-ignores]
-"graphblas/core/agg.py" = ["F401", "F403"]  # Deprecated
 "graphblas/core/operator/base.py" = ["S102"]  # exec is used for UDF
 "graphblas/core/ss/matrix.py" = ["NPY002"]  # numba doesn't support rng generator yet
 "graphblas/core/ss/vector.py" = ["NPY002"]  # numba doesn't support rng generator yet
diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh
index 958bf2210..75d6283f0 100755
--- a/scripts/check_versions.sh
+++ b/scripts/check_versions.sh
@@ -9,7 +9,7 @@ conda search 'numpy[channel=conda-forge]>=1.26.2'
 conda search 'pandas[channel=conda-forge]>=2.1.4'
 conda search 'scipy[channel=conda-forge]>=1.11.4'
 conda search 'networkx[channel=conda-forge]>=3.2.1'
-conda search 'awkward[channel=conda-forge]>=2.5.0'
+conda search 'awkward[channel=conda-forge]>=2.5.1'
 conda search 'sparse[channel=conda-forge]>=0.14.0'
 conda search 'fast_matrix_market[channel=conda-forge]>=1.7.5'
 conda search 'numba[channel=conda-forge]>=0.58.1'

From a9598c56d109d973d2615d9695e1920b21266a98 Mon Sep 17 00:00:00 2001
From: Erik Welch
Date: Sun, 17 Dec 2023 12:00:50 -0600
Subject: [PATCH 55/66] Remove deprecated `to_values` and `from_values`. (#529)

These have been deprecated since 2022-11-16, so it's been over a year.
We wanted to give these a longer deprecation cycle to allow people to
switch to `to_coo` and `from_coo`, but I think it's time to clean up.
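For anyone still migrating: this is a pure rename, and the arguments are
unchanged (the removed methods simply forwarded to the `*_coo` ones).
A minimal sketch of the before/after; the example data is made up:

```python
import graphblas as gb

# Matrix: `from_values`/`to_values` -> `from_coo`/`to_coo`
A = gb.Matrix.from_coo([0, 1], [1, 0], [10, 20], nrows=2, ncols=2)  # was Matrix.from_values(...)
rows, cols, vals = A.to_coo()  # was A.to_values()

# Vector: same rename
v = gb.Vector.from_coo([1, 3], [2.0, 4.0], size=5)  # was Vector.from_values(...)
indices, values = v.to_coo()  # was v.to_values()
```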
--- graphblas/core/automethods.py | 5 -- graphblas/core/infix.py | 2 - graphblas/core/matrix.py | 101 --------------------------------- graphblas/core/vector.py | 70 ----------------------- graphblas/tests/test_matrix.py | 11 ---- graphblas/tests/test_vector.py | 9 --- 6 files changed, 198 deletions(-) diff --git a/graphblas/core/automethods.py b/graphblas/core/automethods.py index 0a2aa208a..31b349280 100644 --- a/graphblas/core/automethods.py +++ b/graphblas/core/automethods.py @@ -281,10 +281,6 @@ def to_edgelist(self): return self._get_value("to_edgelist") -def to_values(self): - return self._get_value("to_values") - - def value(self): return self._get_value("value") @@ -398,7 +394,6 @@ def _main(): "ss", "to_coo", "to_dense", - "to_values", } vector = { "_as_matrix", diff --git a/graphblas/core/infix.py b/graphblas/core/infix.py index 51714633c..2c1014fe5 100644 --- a/graphblas/core/infix.py +++ b/graphblas/core/infix.py @@ -236,7 +236,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): to_coo = wrapdoc(Vector.to_coo)(property(automethods.to_coo)) to_dense = wrapdoc(Vector.to_dense)(property(automethods.to_dense)) to_dict = wrapdoc(Vector.to_dict)(property(automethods.to_dict)) - to_values = wrapdoc(Vector.to_values)(property(automethods.to_values)) vxm = wrapdoc(Vector.vxm)(property(automethods.vxm)) wait = wrapdoc(Vector.wait)(property(automethods.wait)) # These raise exceptions @@ -396,7 +395,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): to_dense = wrapdoc(Matrix.to_dense)(property(automethods.to_dense)) to_dicts = wrapdoc(Matrix.to_dicts)(property(automethods.to_dicts)) to_edgelist = wrapdoc(Matrix.to_edgelist)(property(automethods.to_edgelist)) - to_values = wrapdoc(Matrix.to_values)(property(automethods.to_values)) wait = wrapdoc(Matrix.wait)(property(automethods.wait)) # These raise exceptions __array__ = Matrix.__array__ diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 1ea24f479..16483c2a1 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -1,5 +1,4 @@ import itertools -import warnings from collections.abc import Sequence import numpy as np @@ -515,42 +514,6 @@ def resize(self, nrows, ncols): self._nrows = nrows.value self._ncols = ncols.value - def to_values(self, dtype=None, *, rows=True, columns=True, values=True, sort=True): - """Extract the indices and values as a 3-tuple of numpy arrays - corresponding to the COO format of the Matrix. - - .. deprecated:: 2022.11.0 - ``Matrix.to_values`` will be removed in a future release. - Use ``Matrix.to_coo`` instead. Will be removed in version 2023.9.0 or later - - Parameters - ---------- - dtype : - Requested dtype for the output values array. - rows : bool, default=True - Whether to return rows; will return ``None`` for rows if ``False`` - columns : bool, default=True - Whether to return columns; will return ``None`` for columns if ``False`` - values : bool, default=True - Whether to return values; will return ``None`` for values if ``False`` - sort : bool, default=True - Whether to require sorted indices. - If internally stored rowwise, the sorting will be first by rows, then by column. - If internally stored columnwise, the sorting will be first by column, then by row. 
- - Returns - ------- - np.ndarray[dtype=uint64] : Rows - np.ndarray[dtype=uint64] : Columns - np.ndarray : Values - """ - warnings.warn( - "`Matrix.to_values(...)` is deprecated; please use `Matrix.to_coo(...)` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self.to_coo(dtype, rows=rows, columns=columns, values=values, sort=sort) - def to_coo(self, dtype=None, *, rows=True, columns=True, values=True, sort=True): """Extract the indices and values as a 3-tuple of numpy arrays corresponding to the COO format of the Matrix. @@ -837,61 +800,6 @@ def get(self, row, col, default=None): "Indices should get a single element, which will be extracted as a Python scalar." ) - @classmethod - def from_values( - cls, - rows, - columns, - values, - dtype=None, - *, - nrows=None, - ncols=None, - dup_op=None, - name=None, - ): - """Create a new Matrix from row and column indices and values. - - .. deprecated:: 2022.11.0 - ``Matrix.from_values`` will be removed in a future release. - Use ``Matrix.from_coo`` instead. Will be removed in version 2023.9.0 or later - - Parameters - ---------- - rows : list or np.ndarray - Row indices. - columns : list or np.ndarray - Column indices. - values : list or np.ndarray or scalar - List of values. If a scalar is provided, all values will be set to this single value. - dtype : - Data type of the Matrix. If not provided, the values will be inspected - to choose an appropriate dtype. - nrows : int, optional - Number of rows in the Matrix. If not provided, ``nrows`` is computed - from the maximum row index found in ``rows``. - ncols : int, optional - Number of columns in the Matrix. If not provided, ``ncols`` is computed - from the maximum column index found in ``columns``. - dup_op : :class:`~graphblas.core.operator.BinaryOp`, optional - Function used to combine values if duplicate indices are found. - Leaving ``dup_op=None`` will raise an error if duplicates are found. - name : str, optional - Name to give the Matrix. 
- - Returns - ------- - Matrix - """ - warnings.warn( - "`Matrix.from_values(...)` is deprecated; please use `Matrix.from_coo(...)` instead.", - DeprecationWarning, - stacklevel=2, - ) - return cls.from_coo( - rows, columns, values, dtype, nrows=nrows, ncols=ncols, dup_op=dup_op, name=name - ) - @classmethod def from_coo( cls, @@ -3751,7 +3659,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): to_dense = wrapdoc(Matrix.to_dense)(property(automethods.to_dense)) to_dicts = wrapdoc(Matrix.to_dicts)(property(automethods.to_dicts)) to_edgelist = wrapdoc(Matrix.to_edgelist)(property(automethods.to_edgelist)) - to_values = wrapdoc(Matrix.to_values)(property(automethods.to_values)) wait = wrapdoc(Matrix.wait)(property(automethods.wait)) # These raise exceptions __array__ = Matrix.__array__ @@ -3852,7 +3759,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): to_dense = wrapdoc(Matrix.to_dense)(property(automethods.to_dense)) to_dicts = wrapdoc(Matrix.to_dicts)(property(automethods.to_dicts)) to_edgelist = wrapdoc(Matrix.to_edgelist)(property(automethods.to_edgelist)) - to_values = wrapdoc(Matrix.to_values)(property(automethods.to_values)) wait = wrapdoc(Matrix.wait)(property(automethods.wait)) # These raise exceptions __array__ = Matrix.__array__ @@ -3927,13 +3833,6 @@ def to_coo(self, dtype=None, *, rows=True, columns=True, values=True, sort=True) ) return cols, rows, vals - @wrapdoc(Matrix.to_values) - def to_values(self, dtype=None, *, rows=True, columns=True, values=True, sort=True): - rows, cols, vals = self._matrix.to_values( - dtype, rows=rows, columns=columns, values=values, sort=sort - ) - return cols, rows, vals - @wrapdoc(Matrix.diag) def diag(self, k=0, dtype=None, *, name=None, **opts): return self._matrix.diag(-k, dtype, name=name, **opts) diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py index feb95ed02..a631cc4af 100644 --- a/graphblas/core/vector.py +++ b/graphblas/core/vector.py @@ -1,5 +1,4 @@ import itertools -import warnings import numpy as np @@ -456,36 +455,6 @@ def resize(self, size): call("GrB_Vector_resize", [self, size]) self._size = size.value - def to_values(self, dtype=None, *, indices=True, values=True, sort=True): - """Extract the indices and values as a 2-tuple of numpy arrays. - - .. deprecated:: 2022.11.0 - ``Vector.to_values`` will be removed in a future release. - Use ``Vector.to_coo`` instead. Will be removed in version 2023.9.0 or later - - Parameters - ---------- - dtype : - Requested dtype for the output values array. - indices :bool, default=True - Whether to return indices; will return ``None`` for indices if ``False`` - values : bool, default=True - Whether to return values; will return ``None`` for values if ``False`` - sort : bool, default=True - Whether to require sorted indices. - - Returns - ------- - np.ndarray[dtype=uint64] : Indices - np.ndarray : Values - """ - warnings.warn( - "`Vector.to_values(...)` is deprecated; please use `Vector.to_coo(...)` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self.to_coo(dtype, indices=indices, values=values, sort=sort) - def to_coo(self, dtype=None, *, indices=True, values=True, sort=True): """Extract the indices and values as a 2-tuple of numpy arrays. @@ -697,43 +666,6 @@ def get(self, index, default=None): "A single index should be given, and the result will be a Python scalar." 
) - @classmethod - def from_values(cls, indices, values, dtype=None, *, size=None, dup_op=None, name=None): - """Create a new Vector from indices and values. - - .. deprecated:: 2022.11.0 - ``Vector.from_values`` will be removed in a future release. - Use ``Vector.from_coo`` instead. Will be removed in version 2023.9.0 or later - - Parameters - ---------- - indices : list or np.ndarray - Vector indices. - values : list or np.ndarray or scalar - List of values. If a scalar is provided, all values will be set to this single value. - dtype : - Data type of the Vector. If not provided, the values will be inspected - to choose an appropriate dtype. - size : int, optional - Size of the Vector. If not provided, ``size`` is computed from - the maximum index found in ``indices``. - dup_op : BinaryOp, optional - Function used to combine values if duplicate indices are found. - Leaving ``dup_op=None`` will raise an error if duplicates are found. - name : str, optional - Name to give the Vector. - - Returns - ------- - Vector - """ - warnings.warn( - "`Vector.from_values(...)` is deprecated; please use `Vector.from_coo(...)` instead.", - DeprecationWarning, - stacklevel=2, - ) - return cls.from_coo(indices, values, dtype, size=size, dup_op=dup_op, name=name) - @classmethod def from_coo(cls, indices, values=1.0, dtype=None, *, size=None, dup_op=None, name=None): """Create a new Vector from indices and values. @@ -2271,7 +2203,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): to_coo = wrapdoc(Vector.to_coo)(property(automethods.to_coo)) to_dense = wrapdoc(Vector.to_dense)(property(automethods.to_dense)) to_dict = wrapdoc(Vector.to_dict)(property(automethods.to_dict)) - to_values = wrapdoc(Vector.to_values)(property(automethods.to_values)) vxm = wrapdoc(Vector.vxm)(property(automethods.vxm)) wait = wrapdoc(Vector.wait)(property(automethods.wait)) # These raise exceptions @@ -2359,7 +2290,6 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): to_coo = wrapdoc(Vector.to_coo)(property(automethods.to_coo)) to_dense = wrapdoc(Vector.to_dense)(property(automethods.to_dense)) to_dict = wrapdoc(Vector.to_dict)(property(automethods.to_dict)) - to_values = wrapdoc(Vector.to_values)(property(automethods.to_values)) vxm = wrapdoc(Vector.vxm)(property(automethods.vxm)) wait = wrapdoc(Vector.wait)(property(automethods.wait)) # These raise exceptions diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index 233fc9a9b..06e4ee868 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -2952,7 +2952,6 @@ def test_expr_is_like_matrix(A): "from_dicts", "from_edgelist", "from_scalar", - "from_values", "resize", "setdiag", "update", @@ -3018,7 +3017,6 @@ def test_index_expr_is_like_matrix(A): "from_dicts", "from_edgelist", "from_scalar", - "from_values", "resize", "setdiag", } @@ -3557,15 +3555,6 @@ def compare(A, expected, isequal=True, **kwargs): A.ss.compactify("bad_how") -def test_deprecated(A): - with pytest.warns(DeprecationWarning): - A.to_values() - with pytest.warns(DeprecationWarning): - A.T.to_values() - with pytest.warns(DeprecationWarning): - A.from_values([1], [2], [3]) - - def test_ndim(A): assert A.ndim == 2 assert A.ewise_mult(A).ndim == 2 diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index 8a2cd0824..77f608969 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -1675,7 +1675,6 @@ def test_expr_is_like_vector(v): "from_dict", "from_pairs", 
"from_scalar", - "from_values", "resize", "update", } @@ -1725,7 +1724,6 @@ def test_index_expr_is_like_vector(v): "from_dict", "from_pairs", "from_scalar", - "from_values", "resize", } ignore = {"__sizeof__", "_ewise_add", "_ewise_mult", "_ewise_union", "_inner", "_vxm"} @@ -2012,13 +2010,6 @@ def test_ss_split(v): assert x2.name == "split_1" -def test_deprecated(v): - with pytest.warns(DeprecationWarning): - v.to_values() - with pytest.warns(DeprecationWarning): - Vector.from_values([1], [2]) - - def test_ndim(A, v): assert v.ndim == 1 assert v.ewise_mult(v).ndim == 1 From a4e3bf6cb09a146651b306df90704f29567d2384 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 2 Jan 2024 05:34:29 -0600 Subject: [PATCH 56/66] chore: update pre-commit hooks (#533) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/psf/black: 23.12.0 → 23.12.1](https://github.com/psf/black/compare/23.12.0...23.12.1) - [github.com/astral-sh/ruff-pre-commit: v0.1.8 → v0.1.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.8...v0.1.9) - [github.com/astral-sh/ruff-pre-commit: v0.1.8 → v0.1.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.8...v0.1.9) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bc97547cc..67600553b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -61,12 +61,12 @@ repos: - id: auto-walrus args: [--line-length, "100"] - repo: https://github.com/psf/black - rev: 23.12.0 + rev: 23.12.1 hooks: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.8 + rev: v0.1.9 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.8 + rev: v0.1.9 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint From 522b696e157a0fb00b63a6a846913e075106a1d5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Jan 2024 06:07:12 -0600 Subject: [PATCH 57/66] Bump actions/upload-artifact from 3 to 4 (#531) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3 to 4. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/publish_pypi.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index 406f7c269..366d01e97 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -27,7 +27,7 @@ jobs: python -m pip install build twine - name: Build wheel and sdist run: python -m build --sdist --wheel - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: releases path: dist From 8f36d463468e880fd6c1fc67576161cfa69f058d Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Mon, 5 Feb 2024 22:00:04 -0600 Subject: [PATCH 58/66] Update to numba 0.59 and other version updates (#536) --- .github/workflows/test_and_build.yml | 55 +++++++------ .pre-commit-config.yaml | 14 ++-- graphblas/agg/__init__.py | 1 + graphblas/binary/numpy.py | 1 + graphblas/core/automethods.py | 1 + graphblas/core/dtypes.py | 3 +- graphblas/core/expr.py | 8 +- graphblas/core/matrix.py | 45 +++++++++- graphblas/core/operator/base.py | 3 +- graphblas/core/operator/binary.py | 2 + graphblas/core/operator/indexunary.py | 2 + graphblas/core/operator/monoid.py | 2 + graphblas/core/operator/select.py | 2 + graphblas/core/operator/semiring.py | 2 + graphblas/core/operator/unary.py | 2 + graphblas/core/operator/utils.py | 1 + graphblas/core/scalar.py | 11 +++ graphblas/core/ss/binary.py | 1 + graphblas/core/ss/descriptor.py | 1 + graphblas/core/ss/indexunary.py | 1 + graphblas/core/ss/matrix.py | 113 ++++++++++++-------------- graphblas/core/ss/select.py | 1 + graphblas/core/ss/unary.py | 1 + graphblas/core/ss/vector.py | 56 ++++++------- graphblas/core/utils.py | 4 +- graphblas/core/vector.py | 29 +++++++ graphblas/io/_awkward.py | 1 + graphblas/io/_matrixmarket.py | 2 + graphblas/io/_networkx.py | 2 + graphblas/io/_scipy.py | 1 + graphblas/io/_sparse.py | 1 + graphblas/monoid/numpy.py | 1 + graphblas/select/__init__.py | 15 +--- graphblas/semiring/numpy.py | 1 + graphblas/ss/_core.py | 6 +- graphblas/tests/conftest.py | 16 ++-- graphblas/tests/test_descriptor.py | 3 +- graphblas/tests/test_dtype.py | 2 +- graphblas/tests/test_infix.py | 2 +- graphblas/unary/numpy.py | 1 + graphblas/viz.py | 2 + pyproject.toml | 29 ++++--- scripts/check_versions.sh | 16 ++-- 43 files changed, 290 insertions(+), 173 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 29c6d4a5a..7086d8779 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -167,35 +167,38 @@ jobs: # First let's randomly get versions of dependencies to install. # Consider removing old versions when they become problematic or very old (>=2 years). nxver=$(python -c 'import random ; print(random.choice(["=2.7", "=2.8", "=3.0", "=3.1", "=3.2", ""]))') - yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') - sparsever=$(python -c 'import random ; print(random.choice(["=0.13", "=0.14", ""]))') - fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') + sparsever=$(python -c 'import random ; print(random.choice(["=0.13", "=0.14", "=0.15", ""]))') # Randomly choosing versions of dependencies based on Python version works surprisingly well... 
if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", "=2.1", "=2.2", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') + fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') + yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", "=2.1", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", "=2.1", "=2.2", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') + fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') + yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", "=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", "=2.2", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') + fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') + yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') else # Python 3.12 npver=$(python -c 'import random ; print(random.choice(["=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.11", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=2.1", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.11", "=1.12", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=2.1", "=2.2", ""]))') akver=$(python -c 'import random ; print(random.choice(["=2.4", "=2.5", ""]))') + fmmver=$(python -c 'import random ; print(random.choice(["=1.7", ""]))') + yamlver=$(python -c 'import random ; 
print(random.choice(["=6.0", ""]))') fi # But there may be edge cases of incompatibility we need to handle (more handled below) - if [[ ${pdver} == "=2.1" && ${npver} == "=1.21" ]]; then - pdver="=2.0" - fi if [[ ${{ steps.sourcetype.outputs.selected }} == "source" || ${{ steps.sourcetype.outputs.selected }} == "upstream" ]]; then # TODO: there are currently issues with some numpy versions when # installing python-suitesparse-grphblas from source or upstream. @@ -226,28 +229,32 @@ jobs: psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0", ""]))') fi if [[ ${npver} == "=1.26" ]] ; then - numbaver="" + numbaver=$(python -c 'import random ; print(random.choice(["=0.58", "=0.59", ""]))') if [[ ${spver} == "=1.9" ]] ; then spver=$(python -c 'import random ; print(random.choice(["=1.10", "=1.11", ""]))') fi elif [[ ${npver} == "=1.25" ]] ; then - numbaver="" + numbaver=$(python -c 'import random ; print(random.choice(["=0.58", "=0.59", ""]))') elif [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then - numbaver=$(python -c 'import random ; print(random.choice(["=0.57", "=0.58", ""]))') + numbaver=$(python -c 'import random ; print(random.choice(["=0.57", "=0.58", "=0.59", ""]))') elif [[ ${npver} == "=1.21" ]] ; then numbaver=$(python -c 'import random ; print(random.choice(["=0.55", "=0.56", "=0.57", ""]))') else - numbaver=$(python -c 'import random ; print(random.choice(["=0.56", "=0.57", "=0.58", ""]))') + numbaver=$(python -c 'import random ; print(random.choice(["=0.56", "=0.57", "=0.58", "=0.59", ""]))') + fi + # Only numba 0.59 support Python 3.12 + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]] ; then + numbaver=$(python -c 'import random ; print(random.choice(["=0.59", ""]))') fi fmm=fast_matrix_market${fmmver} awkward=awkward${akver} if [[ ${{ contains(steps.pyver.outputs.selected, 'pypy') || - startsWith(steps.pyver.outputs.selected, '3.12') }} == true || + startsWith(steps.pyver.outputs.selected, '3.13') }} == true || ( ${{ matrix.slowtask != 'notebooks'}} == true && ( ( ${{ matrix.os == 'windows-latest' }} == true && $(python -c 'import random ; print(random.random() < .2)') == True ) || ( ${{ matrix.os == 'windows-latest' }} == false && $(python -c 'import random ; print(random.random() < .4)') == True ))) ]] then - # Some packages aren't available for pypy or Python 3.12; randomly otherwise (if not running notebooks) + # Some packages aren't available for pypy or Python 3.13; randomly otherwise (if not running notebooks) echo "skipping numba" numba="" numbaver=NA @@ -264,7 +271,7 @@ jobs: pdver="" yamlver="" fi - elif [[ ${npver} == "=1.25" || ${npver} == "=1.26" ]] ; then + elif [[ ${npver} == "=2.0" ]] ; then # Don't install numba for unsupported versions of numpy numba="" numbaver=NA @@ -421,7 +428,7 @@ jobs: coverage xml coverage report --show-missing - name: codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 - name: Notebooks Execution check if: matrix.slowtask == 'notebooks' run: | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 67600553b..fa563b639 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -33,7 +33,7 @@ repos: - id: name-tests-test args: ["--pytest-test-first"] - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.15 + rev: v0.16 hooks: - id: validate-pyproject name: Validate pyproject.toml @@ 
-61,25 +61,25 @@ repos: - id: auto-walrus args: [--line-length, "100"] - repo: https://github.com/psf/black - rev: 23.12.1 + rev: 24.1.1 hooks: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.9 + rev: v0.2.1 hooks: - id: ruff args: [--fix-only, --show-fixes] # Let's keep `flake8` even though `ruff` does much of the same. # `flake8-bugbear` and `flake8-simplify` have caught things missed by `ruff`. - repo: https://github.com/PyCQA/flake8 - rev: 6.1.0 + rev: 7.0.0 hooks: - id: flake8 additional_dependencies: &flake8_dependencies # These versions need updated manually - - flake8==6.1.0 - - flake8-bugbear==23.12.2 + - flake8==7.0.0 + - flake8-bugbear==24.1.17 - flake8-simplify==0.21.0 - repo: https://github.com/asottile/yesqa rev: v1.5.0 @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.9 + rev: v0.2.1 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/graphblas/agg/__init__.py b/graphblas/agg/__init__.py index 725c11aab..da7c13591 100644 --- a/graphblas/agg/__init__.py +++ b/graphblas/agg/__init__.py @@ -73,6 +73,7 @@ # - bxnor monoid: even bits # - bnor monoid: odd bits """ + # All items are dynamically added by classes in core/operator/agg.py # This module acts as a container of Aggregator instances _deprecated = {} diff --git a/graphblas/binary/numpy.py b/graphblas/binary/numpy.py index 7c03977e4..bb22d0b07 100644 --- a/graphblas/binary/numpy.py +++ b/graphblas/binary/numpy.py @@ -5,6 +5,7 @@ https://numba.readthedocs.io/en/stable/reference/numpysupported.html#math-operations """ + import numpy as _np from .. import _STANDARD_OPERATOR_NAMES diff --git a/graphblas/core/automethods.py b/graphblas/core/automethods.py index 31b349280..600a6e139 100644 --- a/graphblas/core/automethods.py +++ b/graphblas/core/automethods.py @@ -7,6 +7,7 @@ $ python -m graphblas.core.automethods """ + from .. import config diff --git a/graphblas/core/dtypes.py b/graphblas/core/dtypes.py index d7a83c99b..28ce60d03 100644 --- a/graphblas/core/dtypes.py +++ b/graphblas/core/dtypes.py @@ -375,8 +375,7 @@ def lookup_dtype(key, value=None): def unify(type1, type2, *, is_left_scalar=False, is_right_scalar=False): - """ - Returns a type that can hold both type1 and type2. + """Returns a type that can hold both type1 and type2. For example: unify(INT32, INT64) -> INT64 diff --git a/graphblas/core/expr.py b/graphblas/core/expr.py index d803939a5..efec2db5f 100644 --- a/graphblas/core/expr.py +++ b/graphblas/core/expr.py @@ -147,13 +147,13 @@ def py_indices(self): return self.indices[0]._py_index() def parse_indices(self, indices, shape): - """ - Returns + """Returns ------- [(rows, rowsize), (cols, colsize)] for Matrix [(idx, idx_size)] for Vector Within each tuple, if the index is of type int, the size will be None + """ if len(shape) == 1: if type(indices) is tuple: @@ -312,8 +312,8 @@ def update(self, expr, **opts): Updater(self.parent, opts=opts)._setitem(self.resolved_indexes, expr, is_submask=False) def new(self, dtype=None, *, mask=None, input_mask=None, name=None, **opts): - """ - Force extraction of the indexes into a new object + """Force extraction of the indexes into a new object. + dtype and mask are the only controllable parameters. 
""" if input_mask is not None: diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 16483c2a1..359477d4c 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -177,6 +177,7 @@ class Matrix(BaseType): Number of columns. name : str, optional Name to give the Matrix. This will be displayed in the ``__repr__``. + """ __slots__ = "_nrows", "_ncols", "_parent", "ss" @@ -296,6 +297,7 @@ def __delitem__(self, keys, **opts): Examples -------- >>> del M[1, 5] + """ del Updater(self, opts=opts)[keys] @@ -310,6 +312,7 @@ def __getitem__(self, keys): .. code-block:: python subM = M[[1, 3, 5], :].new() + """ resolved_indexes = IndexerResolver(self, keys) shape = resolved_indexes.shape @@ -331,6 +334,7 @@ def __setitem__(self, keys, expr, **opts): .. code-block:: python M[0, 0:3] = 17 + """ Updater(self, opts=opts)[keys] = expr @@ -342,6 +346,7 @@ def __contains__(self, index): .. code-block:: python (10, 15) in M + """ extractor = self[index] if not extractor._is_scalar: @@ -381,6 +386,7 @@ def isequal(self, other, *, check_dtype=False, **opts): See Also -------- :meth:`isclose` : For equality check of floating point dtypes + """ other = self._expect_type( other, (Matrix, TransposedMatrix), within="isequal", argname="other" @@ -427,6 +433,7 @@ def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False, **opts ------- bool Whether all values of the Matrix are close to the values in ``other``. + """ other = self._expect_type( other, (Matrix, TransposedMatrix), within="isclose", argname="other" @@ -544,6 +551,7 @@ def to_coo(self, dtype=None, *, rows=True, columns=True, values=True, sort=True) np.ndarray[dtype=uint64] : Rows np.ndarray[dtype=uint64] : Columns np.ndarray : Values + """ if sort and backend == "suitesparse": self.wait() # sort in SS @@ -610,6 +618,7 @@ def to_edgelist(self, dtype=None, *, values=True, sort=True): ------- np.ndarray[dtype=uint64] : Edgelist np.ndarray : Values + """ rows, columns, values = self.to_coo(dtype, values=values, sort=sort) return (np.column_stack([rows, columns]), values) @@ -690,6 +699,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): Returns ------- Matrix + """ if dtype is not None or mask is not None or clear: if dtype is None: @@ -721,6 +731,7 @@ def diag(self, k=0, dtype=None, *, name=None, **opts): Returns ------- :class:`~graphblas.Vector` + """ if backend == "suitesparse": from ..ss._core import diag @@ -764,6 +775,7 @@ def wait(self, how="materialize"): Use wait to force completion of the Matrix. Has no effect in `blocking mode <../user_guide/init.html#graphblas-modes>`__. 
+ """ how = how.lower() if how == "materialize": @@ -790,6 +802,7 @@ def get(self, row, col, default=None): Returns ------- Python scalar + """ expr = self[row, col] if expr._is_scalar: @@ -847,6 +860,7 @@ def from_coo( Returns ------- Matrix + """ rows = ints_to_numpy_buffer(rows, np.uint64, name="row indices") columns = ints_to_numpy_buffer(columns, np.uint64, name="column indices") @@ -926,6 +940,7 @@ def from_edgelist( Returns ------- Matrix + """ edgelist_values = None if isinstance(edgelist, np.ndarray): @@ -1095,6 +1110,7 @@ def from_csr( to_csr Matrix.ss.import_csr io.from_scipy_sparse + """ return cls._from_csx(_CSR_FORMAT, indptr, col_indices, values, dtype, ncols, nrows, name) @@ -1142,6 +1158,7 @@ def from_csc( to_csc Matrix.ss.import_csc io.from_scipy_sparse + """ return cls._from_csx(_CSC_FORMAT, indptr, row_indices, values, dtype, nrows, ncols, name) @@ -1202,6 +1219,7 @@ def from_dcsr( to_dcsr Matrix.ss.import_hypercsr io.from_scipy_sparse + """ if backend == "suitesparse": return cls.ss.import_hypercsr( @@ -1286,6 +1304,7 @@ def from_dcsc( to_dcsc Matrix.ss.import_hypercsc io.from_scipy_sparse + """ if backend == "suitesparse": return cls.ss.import_hypercsc( @@ -1347,6 +1366,7 @@ def from_scalar(cls, value, nrows, ncols, dtype=None, *, name=None, **opts): Returns ------- Matrix + """ if type(value) is not Scalar: try: @@ -1400,6 +1420,7 @@ def from_dense(cls, values, missing_value=None, *, dtype=None, name=None, **opts Returns ------- Matrix + """ values, dtype = values_to_numpy_buffer(values, dtype, subarray_after=2) if values.ndim == 0: @@ -1459,6 +1480,7 @@ def to_dense(self, fill_value=None, dtype=None, **opts): Returns ------- np.ndarray + """ max_nvals = self._nrows * self._ncols if fill_value is None or self._nvals == max_nvals: @@ -1534,6 +1556,7 @@ def from_dicts( Returns ------- Matrix + """ order = get_order(order) if isinstance(nested_dicts, Sequence): @@ -1643,6 +1666,7 @@ def to_csr(self, dtype=None, *, sort=True): from_csr Matrix.ss.export io.to_scipy_sparse + """ if backend == "suitesparse": info = self.ss.export("csr", sort=sort) @@ -1674,6 +1698,7 @@ def to_csc(self, dtype=None, *, sort=True): from_csc Matrix.ss.export io.to_scipy_sparse + """ if backend == "suitesparse": info = self.ss.export("csc", sort=sort) @@ -1708,6 +1733,7 @@ def to_dcsr(self, dtype=None, *, sort=True): from_dcsc Matrix.ss.export io.to_scipy_sparse + """ if backend == "suitesparse": info = self.ss.export("hypercsr", sort=sort) @@ -1750,6 +1776,7 @@ def to_dcsc(self, dtype=None, *, sort=True): from_dcsc Matrix.ss.export io.to_scipy_sparse + """ if backend == "suitesparse": info = self.ss.export("hypercsc", sort=sort) @@ -1787,6 +1814,7 @@ def to_dicts(self, order="rowwise"): Returns ------- dict + """ order = get_order(order) if order == "rowwise": @@ -1856,6 +1884,7 @@ def ewise_add(self, other, op=monoid.plus): # Functional syntax C << monoid.max(A | B) + """ return self._ewise_add(other, op) @@ -1946,6 +1975,7 @@ def ewise_mult(self, other, op=binary.times): # Functional syntax C << binary.gt(A & B) + """ return self._ewise_mult(other, op) @@ -2040,6 +2070,7 @@ def ewise_union(self, other, op, left_default, right_default): # Functional syntax C << binary.div(A | B, left_default=1, right_default=1) + """ return self._ewise_union(other, op, left_default, right_default) @@ -2193,6 +2224,7 @@ def mxv(self, other, op=semiring.plus_times): # Functional syntax C << semiring.min_plus(A @ v) + """ return self._mxv(other, op) @@ -2253,6 +2285,7 @@ def mxm(self, other, 
op=semiring.plus_times): # Functional syntax C << semiring.min_plus(A @ B) + """ return self._mxm(other, op) @@ -2317,6 +2350,7 @@ def kronecker(self, other, op=binary.times): .. code-block:: python C << A.kronecker(B, op=binary.times) + """ method_name = "kronecker" other = self._expect_type( @@ -2373,6 +2407,7 @@ def apply(self, op, right=None, *, left=None): # Functional syntax C << op.abs(A) + """ method_name = "apply" extra_message = ( @@ -2521,6 +2556,7 @@ def select(self, op, thunk=None): # Functional syntax C << select.value(A >= 1) + """ method_name = "select" if isinstance(op, str): @@ -2615,6 +2651,7 @@ def reduce_rowwise(self, op=monoid.plus): .. code-block:: python w << A.reduce_rowwise(monoid.plus) + """ method_name = "reduce_rowwise" op = get_typed_op(op, self.dtype, kind="binary|aggregator") @@ -2652,6 +2689,7 @@ def reduce_columnwise(self, op=monoid.plus): .. code-block:: python w << A.reduce_columnwise(monoid.plus) + """ method_name = "reduce_columnwise" op = get_typed_op(op, self.dtype, kind="binary|aggregator") @@ -2670,8 +2708,7 @@ def reduce_columnwise(self, op=monoid.plus): ) def reduce_scalar(self, op=monoid.plus, *, allow_empty=True): - """ - Reduce all values in the Matrix into a single value using ``op``. + """Reduce all values in the Matrix into a single value using ``op``. See the `Reduce <../user_guide/operations.html#reduce>`__ section in the User Guide for more details. @@ -2693,6 +2730,7 @@ def reduce_scalar(self, op=monoid.plus, *, allow_empty=True): .. code-block:: python total << A.reduce_scalar(monoid.plus) + """ method_name = "reduce_scalar" op = get_typed_op(op, self.dtype, kind="binary|aggregator") @@ -2753,6 +2791,7 @@ def reposition(self, row_offset, column_offset, *, nrows=None, ncols=None): .. code-block:: python C = A.reposition(1, 2).new() + """ if nrows is None: nrows = self._nrows @@ -2834,6 +2873,7 @@ def power(self, n, op=semiring.plus_times): C = A.dup() for i in range(1, 4): C << A @ C + """ method_name = "power" if self._nrows != self._ncols: @@ -2878,6 +2918,7 @@ def setdiag(self, values, k=0, *, mask=None, accum=None, **opts): If it is Matrix Mask, then only the diagonal is used as the mask. accum : Monoid or BinaryOp, optional Operator to use to combine existing diagonal values and new values. + """ if (K := maybe_integral(k)) is None: raise TypeError(f"k must be an integer; got bad type: {type(k)}") diff --git a/graphblas/core/operator/base.py b/graphblas/core/operator/base.py index 59482b47d..4e19fbe96 100644 --- a/graphblas/core/operator/base.py +++ b/graphblas/core/operator/base.py @@ -405,7 +405,8 @@ def _find(cls, funcname): @classmethod def _initialize(cls, include_in_ops=True): - """ + """Initialize operators for this operator type. + include_in_ops determines whether the operators are included in the ``gb.ops`` namespace in addition to the defined module. 
""" diff --git a/graphblas/core/operator/binary.py b/graphblas/core/operator/binary.py index 278ee3183..3ee089fe4 100644 --- a/graphblas/core/operator/binary.py +++ b/graphblas/core/operator/binary.py @@ -663,6 +663,7 @@ def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=Fals Returns ------- BinaryOp or ParameterizedBinaryOp + """ cls._check_supports_udf("register_anonymous") if parameterized: @@ -725,6 +726,7 @@ def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=Fal >>> return x == y or abs(x - y) <= max(rel_tol * max(abs(x), abs(y)), abs_tol) >>> return inner >>> gb.binary.register_new("user_isclose", user_isclose, parameterized=True) + """ cls._check_supports_udf("register_new") module, funcname = cls._remove_nesting(name) diff --git a/graphblas/core/operator/indexunary.py b/graphblas/core/operator/indexunary.py index b6fc74e91..6fdacbcc1 100644 --- a/graphblas/core/operator/indexunary.py +++ b/graphblas/core/operator/indexunary.py @@ -285,6 +285,7 @@ def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=Fals Returns ------- return IndexUnaryOp or ParameterizedIndexUnaryOp + """ cls._check_supports_udf("register_anonymous") if parameterized: @@ -340,6 +341,7 @@ def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=Fal >>> gb.indexunary.register_new("row_mod", lambda x, i, j, thunk: i % max(thunk, 2)) >>> dir(gb.indexunary) [..., 'row_mod', ...] + """ cls._check_supports_udf("register_new") module, funcname = cls._remove_nesting(name) diff --git a/graphblas/core/operator/monoid.py b/graphblas/core/operator/monoid.py index 21d2b7cac..e3f218a90 100644 --- a/graphblas/core/operator/monoid.py +++ b/graphblas/core/operator/monoid.py @@ -270,6 +270,7 @@ def register_anonymous(cls, binaryop, identity, name=None, *, is_idempotent=Fals Returns ------- Monoid or ParameterizedMonoid + """ if type(binaryop) is ParameterizedBinaryOp: return ParameterizedMonoid( @@ -309,6 +310,7 @@ def register_new(cls, name, binaryop, identity, *, is_idempotent=False, lazy=Fal >>> gb.core.operator.Monoid.register_new("max_zero", gb.binary.max_zero, 0) >>> dir(gb.monoid) [..., 'max_zero', ...] + """ module, funcname = cls._remove_nesting(name) if lazy: diff --git a/graphblas/core/operator/select.py b/graphblas/core/operator/select.py index 4dd65ef16..6de4fa89a 100644 --- a/graphblas/core/operator/select.py +++ b/graphblas/core/operator/select.py @@ -208,6 +208,7 @@ def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=Fals Returns ------- SelectOp or ParameterizedSelectOp + """ cls._check_supports_udf("register_anonymous") if parameterized: @@ -264,6 +265,7 @@ def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=Fal >>> gb.select.register_new("upper_left_triangle", lambda x, i, j, thunk: i + j <= thunk) >>> dir(gb.select) [..., 'upper_left_triangle', ...] 
+ """ cls._check_supports_udf("register_new") iop = IndexUnaryOp.register_new( diff --git a/graphblas/core/operator/semiring.py b/graphblas/core/operator/semiring.py index d367461f6..a8d18f1bf 100644 --- a/graphblas/core/operator/semiring.py +++ b/graphblas/core/operator/semiring.py @@ -287,6 +287,7 @@ def register_anonymous(cls, monoid, binaryop, name=None): Returns ------- Semiring or ParameterizedSemiring + """ if type(monoid) is ParameterizedMonoid or type(binaryop) is ParameterizedBinaryOp: return ParameterizedSemiring(name, monoid, binaryop, anonymous=True) @@ -318,6 +319,7 @@ def register_new(cls, name, monoid, binaryop, *, lazy=False): >>> gb.core.operator.Semiring.register_new("max_max", gb.monoid.max, gb.binary.max) >>> dir(gb.semiring) [..., 'max_max', ...] + """ module, funcname = cls._remove_nesting(name) if lazy: diff --git a/graphblas/core/operator/unary.py b/graphblas/core/operator/unary.py index 7484f74d9..26e0ca61c 100644 --- a/graphblas/core/operator/unary.py +++ b/graphblas/core/operator/unary.py @@ -304,6 +304,7 @@ def register_anonymous(cls, func, name=None, *, parameterized=False, is_udt=Fals Returns ------- UnaryOp or ParameterizedUnaryOp + """ cls._check_supports_udf("register_anonymous") if parameterized: @@ -349,6 +350,7 @@ def register_new(cls, name, func, *, parameterized=False, is_udt=False, lazy=Fal >>> gb.core.operator.UnaryOp.register_new("plus_one", lambda x: x + 1) >>> dir(gb.unary) [..., 'plus_one', ...] + """ cls._check_supports_udf("register_new") module, funcname = cls._remove_nesting(name) diff --git a/graphblas/core/operator/utils.py b/graphblas/core/operator/utils.py index 543df793e..1442a9b5e 100644 --- a/graphblas/core/operator/utils.py +++ b/graphblas/core/operator/utils.py @@ -170,6 +170,7 @@ def get_semiring(monoid, binaryop, name=None): semiring.register_anonymous semiring.register_new semiring.from_string + """ monoid, opclass = find_opclass(monoid) switched = False diff --git a/graphblas/core/scalar.py b/graphblas/core/scalar.py index 9cdf3043e..7e759e5d0 100644 --- a/graphblas/core/scalar.py +++ b/graphblas/core/scalar.py @@ -53,6 +53,7 @@ class Scalar(BaseType): with a proper GrB_Scalar object. name : str, optional Name to give the Scalar. This will be displayed in the ``__repr__``. + """ __slots__ = "_empty", "_is_cscalar" @@ -196,6 +197,7 @@ def isequal(self, other, *, check_dtype=False): See Also -------- :meth:`isclose` : For equality check of floating point dtypes + """ if type(other) is not Scalar: if other is None: @@ -245,6 +247,7 @@ def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False): Returns ------- bool + """ if type(other) is not Scalar: if other is None: @@ -428,6 +431,7 @@ def dup(self, dtype=None, *, clear=False, is_cscalar=None, name=None): Returns ------- Scalar + """ if is_cscalar is None: is_cscalar = self._is_cscalar @@ -473,6 +477,7 @@ def wait(self, how="materialize"): Use wait to force completion of the Scalar. Has no effect in `blocking mode <../user_guide/init.html#graphblas-modes>`__. 
+ """ how = how.lower() if how == "materialize": @@ -496,6 +501,7 @@ def get(self, default=None): Returns ------- Python scalar + """ return default if self._is_empty else self.value @@ -519,6 +525,7 @@ def from_value(cls, value, dtype=None, *, is_cscalar=False, name=None): Returns ------- Scalar + """ typ = output_type(value) if dtype is None: @@ -628,6 +635,7 @@ def ewise_add(self, other, op=monoid.plus): # Functional syntax c << monoid.max(a | b) + """ return self._ewise_add(other, op) @@ -698,6 +706,7 @@ def ewise_mult(self, other, op=binary.times): # Functional syntax c << binary.gt(a & b) + """ return self._ewise_mult(other, op) @@ -772,6 +781,7 @@ def ewise_union(self, other, op, left_default, right_default): # Functional syntax c << binary.div(a | b, left_default=1, right_default=1) + """ return self._ewise_union(other, op, left_default, right_default) @@ -917,6 +927,7 @@ def apply(self, op, right=None, *, left=None): # Functional syntax b << op.abs(a) + """ expr = self._as_vector().apply(op, right, left=left) return ScalarExpression( diff --git a/graphblas/core/ss/binary.py b/graphblas/core/ss/binary.py index 6965aeaf1..d53608818 100644 --- a/graphblas/core/ss/binary.py +++ b/graphblas/core/ss/binary.py @@ -71,6 +71,7 @@ def register_new(name, jit_c_definition, left_type, right_type, ret_type): gb.binary.register_new gb.binary.register_anonymous gb.unary.ss.register_new + """ if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( diff --git a/graphblas/core/ss/descriptor.py b/graphblas/core/ss/descriptor.py index 52c43b95d..781661b7b 100644 --- a/graphblas/core/ss/descriptor.py +++ b/graphblas/core/ss/descriptor.py @@ -157,6 +157,7 @@ def get_descriptor(**opts): Returns ------- Descriptor or None + """ if not opts or all(val is False or val is None for val in opts.values()): return diff --git a/graphblas/core/ss/indexunary.py b/graphblas/core/ss/indexunary.py index d5f709526..b60837acf 100644 --- a/graphblas/core/ss/indexunary.py +++ b/graphblas/core/ss/indexunary.py @@ -70,6 +70,7 @@ def register_new(name, jit_c_definition, input_type, thunk_type, ret_type): gb.indexunary.register_new gb.indexunary.register_anonymous gb.select.ss.register_new + """ if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( diff --git a/graphblas/core/ss/matrix.py b/graphblas/core/ss/matrix.py index 64914cf02..0489cb5d6 100644 --- a/graphblas/core/ss/matrix.py +++ b/graphblas/core/ss/matrix.py @@ -250,8 +250,7 @@ def orientation(self): return "rowwise" def build_diag(self, vector, k=0, **opts): - """ - GxB_Matrix_diag. + """GxB_Matrix_diag. Construct a diagonal Matrix from the given vector. Existing entries in the Matrix are discarded. @@ -279,8 +278,7 @@ def build_diag(self, vector, k=0, **opts): ) def split(self, chunks, *, name=None, **opts): - """ - GxB_Matrix_split. + """GxB_Matrix_split. Split a Matrix into a 2D array of sub-matrices according to ``chunks``. @@ -302,6 +300,7 @@ def split(self, chunks, *, name=None, **opts): -------- Matrix.ss.concat graphblas.ss.concat + """ from ..matrix import Matrix @@ -361,8 +360,7 @@ def _concat(self, tiles, m, n, opts): ) def concat(self, tiles, **opts): - """ - GxB_Matrix_concat. + """GxB_Matrix_concat. Concatenate a 2D list of Matrix objects into the current Matrix. Any existing values in the current Matrix will be discarded. 
@@ -376,13 +374,13 @@ def concat(self, tiles, **opts): -------- Matrix.ss.split graphblas.ss.concat + """ tiles, m, n, is_matrix = _concat_mn(tiles, is_matrix=True) self._concat(tiles, m, n, opts) def build_scalar(self, rows, columns, value): - """ - GxB_Matrix_build_Scalar. + """GxB_Matrix_build_Scalar. Like ``build``, but uses a scalar for all the values. @@ -390,6 +388,7 @@ def build_scalar(self, rows, columns, value): -------- Matrix.build Matrix.from_coo + """ rows = ints_to_numpy_buffer(rows, np.uint64, name="row indices") columns = ints_to_numpy_buffer(columns, np.uint64, name="column indices") @@ -536,8 +535,7 @@ def iteritems(self, seek=0): lib.GxB_Iterator_free(it_ptr) def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **opts): - """ - GxB_Matrix_export_xxx. + """GxB_Matrix_export_xxx. Parameters ---------- @@ -718,6 +716,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** >>> pieces = A.ss.export() >>> A2 = Matrix.ss.import_any(**pieces) + """ return self._export( format, @@ -729,8 +728,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** ) def unpack(self, format=None, *, sort=False, raw=False, **opts): - """ - GxB_Matrix_unpack_xxx. + """GxB_Matrix_unpack_xxx. ``unpack`` is like ``export``, except that the Matrix remains valid but empty. ``pack_*`` methods are the opposite of ``unpack``. @@ -1179,8 +1177,7 @@ def import_csr( name=None, **opts, ): - """ - GxB_Matrix_import_CSR. + """GxB_Matrix_import_CSR. Create a new Matrix from standard CSR format. @@ -1220,6 +1217,7 @@ def import_csr( Returns ------- Matrix + """ return cls._import_csr( nrows=nrows, @@ -1256,8 +1254,7 @@ def pack_csr( name=None, **opts, ): - """ - GxB_Matrix_pack_CSR. + """GxB_Matrix_pack_CSR. ``pack_csr`` is like ``import_csr`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("csr")`` @@ -1369,8 +1366,7 @@ def import_csc( name=None, **opts, ): - """ - GxB_Matrix_import_CSC. + """GxB_Matrix_import_CSC. Create a new Matrix from standard CSC format. @@ -1410,6 +1406,7 @@ def import_csc( Returns ------- Matrix + """ return cls._import_csc( nrows=nrows, @@ -1446,8 +1443,7 @@ def pack_csc( name=None, **opts, ): - """ - GxB_Matrix_pack_CSC. + """GxB_Matrix_pack_CSC. ``pack_csc`` is like ``import_csc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("csc")`` @@ -1561,8 +1557,7 @@ def import_hypercsr( name=None, **opts, ): - """ - GxB_Matrix_import_HyperCSR. + """GxB_Matrix_import_HyperCSR. Create a new Matrix from standard HyperCSR format. @@ -1606,6 +1601,7 @@ def import_hypercsr( Returns ------- Matrix + """ return cls._import_hypercsr( nrows=nrows, @@ -1646,8 +1642,7 @@ def pack_hypercsr( name=None, **opts, ): - """ - GxB_Matrix_pack_HyperCSR. + """GxB_Matrix_pack_HyperCSR. ``pack_hypercsr`` is like ``import_hypercsr`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("hypercsr")`` @@ -1785,8 +1780,7 @@ def import_hypercsc( name=None, **opts, ): - """ - GxB_Matrix_import_HyperCSC. + """GxB_Matrix_import_HyperCSC. Create a new Matrix from standard HyperCSC format. @@ -1830,6 +1824,7 @@ def import_hypercsc( Returns ------- Matrix + """ return cls._import_hypercsc( nrows=nrows, @@ -1870,8 +1865,7 @@ def pack_hypercsc( name=None, **opts, ): - """ - GxB_Matrix_pack_HyperCSC. + """GxB_Matrix_pack_HyperCSC. ``pack_hypercsc`` is like ``import_hypercsc`` except it "packs" data into an existing Matrix. 
This is the opposite of ``unpack("hypercsc")`` @@ -2006,8 +2000,7 @@ def import_bitmapr( name=None, **opts, ): - """ - GxB_Matrix_import_BitmapR. + """GxB_Matrix_import_BitmapR. Create a new Matrix from values and bitmap (as mask) arrays. @@ -2053,6 +2046,7 @@ def import_bitmapr( Returns ------- Matrix + """ return cls._import_bitmapr( bitmap=bitmap, @@ -2087,8 +2081,7 @@ def pack_bitmapr( name=None, **opts, ): - """ - GxB_Matrix_pack_BitmapR. + """GxB_Matrix_pack_BitmapR. ``pack_bitmapr`` is like ``import_bitmapr`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("bitmapr")`` @@ -2199,8 +2192,7 @@ def import_bitmapc( name=None, **opts, ): - """ - GxB_Matrix_import_BitmapC. + """GxB_Matrix_import_BitmapC. Create a new Matrix from values and bitmap (as mask) arrays. @@ -2246,6 +2238,7 @@ def import_bitmapc( Returns ------- Matrix + """ return cls._import_bitmapc( bitmap=bitmap, @@ -2280,8 +2273,7 @@ def pack_bitmapc( name=None, **opts, ): - """ - GxB_Matrix_pack_BitmapC. + """GxB_Matrix_pack_BitmapC. ``pack_bitmapc`` is like ``import_bitmapc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("bitmapc")`` @@ -2390,8 +2382,7 @@ def import_fullr( name=None, **opts, ): - """ - GxB_Matrix_import_FullR. + """GxB_Matrix_import_FullR. Create a new Matrix from values. @@ -2432,6 +2423,7 @@ def import_fullr( Returns ------- Matrix + """ return cls._import_fullr( values=values, @@ -2462,8 +2454,7 @@ def pack_fullr( name=None, **opts, ): - """ - GxB_Matrix_pack_FullR. + """GxB_Matrix_pack_FullR. ``pack_fullr`` is like ``import_fullr`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("fullr")`` @@ -2549,8 +2540,7 @@ def import_fullc( name=None, **opts, ): - """ - GxB_Matrix_import_FullC. + """GxB_Matrix_import_FullC. Create a new Matrix from values. @@ -2591,6 +2581,7 @@ def import_fullc( Returns ------- Matrix + """ return cls._import_fullc( values=values, @@ -2621,8 +2612,7 @@ def pack_fullc( name=None, **opts, ): - """ - GxB_Matrix_pack_FullC. + """GxB_Matrix_pack_FullC. ``pack_fullc`` is like ``import_fullc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("fullc")`` @@ -2711,8 +2701,7 @@ def import_coo( name=None, **opts, ): - """ - GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar. + """GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar. Create a new Matrix from indices and values in coordinate format. @@ -2746,6 +2735,7 @@ def import_coo( Returns ------- Matrix + """ return cls._import_coo( rows=rows, @@ -2784,8 +2774,7 @@ def pack_coo( name=None, **opts, ): - """ - GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar. + """GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar. ``pack_coo`` is like ``import_coo`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("coo")`` @@ -2897,8 +2886,7 @@ def import_coor( name=None, **opts, ): - """ - GxB_Matrix_import_CSR. + """GxB_Matrix_import_CSR. Create a new Matrix from indices and values in coordinate format. Rows must be sorted. @@ -2942,6 +2930,7 @@ def import_coor( Returns ------- Matrix + """ return cls._import_coor( rows=rows, @@ -2980,8 +2969,7 @@ def pack_coor( name=None, **opts, ): - """ - GxB_Matrix_pack_CSR. + """GxB_Matrix_pack_CSR. ``pack_coor`` is like ``import_coor`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("coor")`` @@ -3066,8 +3054,7 @@ def import_cooc( name=None, **opts, ): - """ - GxB_Matrix_import_CSC. + """GxB_Matrix_import_CSC. 
Create a new Matrix from indices and values in coordinate format. Rows must be sorted. @@ -3111,6 +3098,7 @@ def import_cooc( Returns ------- Matrix + """ return cls._import_cooc( rows=rows, @@ -3149,8 +3137,7 @@ def pack_cooc( name=None, **opts, ): - """ - GxB_Matrix_pack_CSC. + """GxB_Matrix_pack_CSC. ``pack_cooc`` is like ``import_cooc`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("cooc")`` @@ -3251,8 +3238,7 @@ def import_any( nvals=None, # optional **opts, ): - """ - GxB_Matrix_import_xxx. + """GxB_Matrix_import_xxx. Dispatch to appropriate import method inferred from inputs. See the other import functions and ``Matrix.ss.export`` for details. @@ -3280,6 +3266,7 @@ def import_any( >>> pieces = A.ss.export() >>> A2 = Matrix.ss.import_any(**pieces) + """ return cls._import_any( values=values, @@ -3349,8 +3336,7 @@ def pack_any( name=None, **opts, ): - """ - GxB_Matrix_pack_xxx. + """GxB_Matrix_pack_xxx. ``pack_any`` is like ``import_any`` except it "packs" data into an existing Matrix. This is the opposite of ``unpack()`` @@ -3707,6 +3693,7 @@ def scan(self, op=monoid.plus, order="rowwise", *, name=None, **opts): Returns ------- Matrix + """ order = get_order(order) parent = self._parent @@ -3735,6 +3722,7 @@ def flatten(self, order="rowwise", *, name=None, **opts): See Also -------- Vector.ss.reshape : copy a Vector to a Matrix. + """ rv = self.reshape(-1, 1, order=order, name=name, **opts) return rv._as_vector() @@ -3771,6 +3759,7 @@ def reshape(self, nrows, ncols=None, order="rowwise", *, inplace=False, name=Non -------- Matrix.ss.flatten : flatten a Matrix into a Vector. Vector.ss.reshape : copy a Vector to a Matrix. + """ from ..matrix import Matrix @@ -3825,6 +3814,7 @@ def selectk(self, how, k, order="rowwise", *, name=None): The number of elements to choose from each row **THIS API IS EXPERIMENTAL AND MAY CHANGE** + """ # TODO: largest, smallest, random_weighted order = get_order(order) @@ -4021,6 +4011,7 @@ def sort(self, op=binary.lt, order="rowwise", *, values=True, permutation=True, See Also -------- Matrix.ss.compactify + """ from ..matrix import Matrix @@ -4082,6 +4073,7 @@ def serialize(self, compression="default", level=None, **opts): This method is intended to support all serialization options from SuiteSparse:GraphBLAS. *Warning*: Behavior of serializing UDTs is experimental and may change in a future release. + """ desc = get_descriptor(compression=compression, compression_level=level, **opts) blob_handle = ffi_new("void**") @@ -4121,6 +4113,7 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts): nthreads : int, optional The maximum number of threads to use when deserializing. None, 0 or negative nthreads means to use the default number of threads. 
+ """ if isinstance(data, np.ndarray): data = ints_to_numpy_buffer(data, np.uint8) diff --git a/graphblas/core/ss/select.py b/graphblas/core/ss/select.py index ff12f80fa..3ba135eee 100644 --- a/graphblas/core/ss/select.py +++ b/graphblas/core/ss/select.py @@ -66,6 +66,7 @@ def register_new(name, jit_c_definition, input_type, thunk_type): gb.select.register_new gb.select.register_anonymous gb.indexunary.ss.register_new + """ if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( diff --git a/graphblas/core/ss/unary.py b/graphblas/core/ss/unary.py index 5a5c63632..0b7ced3c8 100644 --- a/graphblas/core/ss/unary.py +++ b/graphblas/core/ss/unary.py @@ -63,6 +63,7 @@ def register_new(name, jit_c_definition, input_type, ret_type): gb.unary.register_new gb.unary.register_anonymous gb.binary.ss.register_new + """ if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( diff --git a/graphblas/core/ss/vector.py b/graphblas/core/ss/vector.py index a8bff4ee5..d1f7a5bcb 100644 --- a/graphblas/core/ss/vector.py +++ b/graphblas/core/ss/vector.py @@ -145,8 +145,7 @@ def format(self): return format def build_diag(self, matrix, k=0, **opts): - """ - GxB_Vector_diag. + """GxB_Vector_diag. Extract a diagonal from a Matrix or TransposedMatrix into a Vector. Existing entries in the Vector are discarded. @@ -183,8 +182,7 @@ def build_diag(self, matrix, k=0, **opts): ) def split(self, chunks, *, name=None, **opts): - """ - GxB_Matrix_split. + """GxB_Matrix_split. Split a Vector into a 1D array of sub-vectors according to ``chunks``. @@ -202,6 +200,7 @@ def split(self, chunks, *, name=None, **opts): -------- Vector.ss.concat graphblas.ss.concat + """ from ..vector import Vector @@ -249,8 +248,7 @@ def _concat(self, tiles, m, opts): ) def concat(self, tiles, **opts): - """ - GxB_Matrix_concat. + """GxB_Matrix_concat. Concatenate a 1D list of Vector objects into the current Vector. Any existing values in the current Vector will be discarded. @@ -262,13 +260,13 @@ def concat(self, tiles, **opts): -------- Vector.ss.split graphblas.ss.concat + """ tiles, m, n, is_matrix = _concat_mn(tiles, is_matrix=False) self._concat(tiles, m, opts) def build_scalar(self, indices, value): - """ - GxB_Vector_build_Scalar. + """GxB_Vector_build_Scalar. Like ``build``, but uses a scalar for all the values. @@ -276,6 +274,7 @@ def build_scalar(self, indices, value): -------- Vector.build Vector.from_coo + """ indices = ints_to_numpy_buffer(indices, np.uint64, name="indices") scalar = _as_scalar(value, self._parent.dtype, is_cscalar=False) # pragma: is_grbscalar @@ -410,8 +409,7 @@ def iteritems(self, seek=0): lib.GxB_Iterator_free(it_ptr) def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **opts): - """ - GxB_Vextor_export_xxx. + """GxB_Vextor_export_xxx. Parameters ---------- @@ -468,6 +466,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** >>> pieces = v.ss.export() >>> v2 = Vector.ss.import_any(**pieces) + """ return self._export( format=format, @@ -479,8 +478,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** ) def unpack(self, format=None, *, sort=False, raw=False, **opts): - """ - GxB_Vector_unpack_xxx. + """GxB_Vector_unpack_xxx. ``unpack`` is like ``export``, except that the Vector remains valid but empty. ``pack_*`` methods are the opposite of ``unpack``. @@ -655,8 +653,7 @@ def import_any( nvals=None, # optional **opts, ): - """ - GxB_Vector_import_xxx. + """GxB_Vector_import_xxx. 
Dispatch to appropriate import method inferred from inputs. See the other import functions and ``Vector.ss.export`` for details. @@ -679,6 +676,7 @@ def import_any( >>> pieces = v.ss.export() >>> v2 = Vector.ss.import_any(**pieces) + """ return cls._import_any( values=values, @@ -722,8 +720,7 @@ def pack_any( name=None, **opts, ): - """ - GxB_Vector_pack_xxx. + """GxB_Vector_pack_xxx. ``pack_any`` is like ``import_any`` except it "packs" data into an existing Vector. This is the opposite of ``unpack()`` @@ -844,8 +841,7 @@ def import_sparse( name=None, **opts, ): - """ - GxB_Vector_import_CSC. + """GxB_Vector_import_CSC. Create a new Vector from sparse input. @@ -886,6 +882,7 @@ def import_sparse( Returns ------- Vector + """ return cls._import_sparse( size=size, @@ -920,8 +917,7 @@ def pack_sparse( name=None, **opts, ): - """ - GxB_Vector_pack_CSC. + """GxB_Vector_pack_CSC. ``pack_sparse`` is like ``import_sparse`` except it "packs" data into an existing Vector. This is the opposite of ``unpack("sparse")`` @@ -1029,8 +1025,7 @@ def import_bitmap( name=None, **opts, ): - """ - GxB_Vector_import_Bitmap. + """GxB_Vector_import_Bitmap. Create a new Vector from values and bitmap (as mask) arrays. @@ -1071,6 +1066,7 @@ def import_bitmap( Returns ------- Vector + """ return cls._import_bitmap( bitmap=bitmap, @@ -1103,8 +1099,7 @@ def pack_bitmap( name=None, **opts, ): - """ - GxB_Vector_pack_Bitmap. + """GxB_Vector_pack_Bitmap. ``pack_bitmap`` is like ``import_bitmap`` except it "packs" data into an existing Vector. This is the opposite of ``unpack("bitmap")`` @@ -1214,8 +1209,7 @@ def import_full( name=None, **opts, ): - """ - GxB_Vector_import_Full. + """GxB_Vector_import_Full. Create a new Vector from values. @@ -1252,6 +1246,7 @@ def import_full( Returns ------- Vector + """ return cls._import_full( values=values, @@ -1280,8 +1275,7 @@ def pack_full( name=None, **opts, ): - """ - GxB_Vector_pack_Full. + """GxB_Vector_pack_Full. ``pack_full`` is like ``import_full`` except it "packs" data into an existing Vector. This is the opposite of ``unpack("full")`` @@ -1371,6 +1365,7 @@ def scan(self, op=monoid.plus, *, name=None, **opts): Returns ------- Scalar + """ return prefix_scan(self._parent, op, name=name, within="scan", **opts) @@ -1401,6 +1396,7 @@ def reshape(self, nrows, ncols=None, order="rowwise", *, name=None, **opts): See Also -------- Matrix.ss.flatten : flatten a Matrix into a Vector. + """ return self._parent._as_matrix().ss.reshape(nrows, ncols, order, name=name, **opts) @@ -1420,6 +1416,7 @@ def selectk(self, how, k, *, name=None): The number of elements to choose **THIS API IS EXPERIMENTAL AND MAY CHANGE** + """ how = how.lower() if k < 0: @@ -1588,6 +1585,7 @@ def sort(self, op=binary.lt, *, values=True, permutation=True, **opts): See Also -------- Vector.ss.compactify + """ from ..vector import Vector @@ -1648,6 +1646,7 @@ def serialize(self, compression="default", level=None, **opts): This method is intended to support all serialization options from SuiteSparse:GraphBLAS. *Warning*: Behavior of serializing UDTs is experimental and may change in a future release. + """ desc = get_descriptor(compression=compression, compression_level=level, **opts) blob_handle = ffi_new("void**") @@ -1687,6 +1686,7 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts): nthreads : int, optional The maximum number of threads to use when deserializing. None, 0 or negative nthreads means to use the default number of threads. 
+ """ if isinstance(data, np.ndarray): data = ints_to_numpy_buffer(data, np.uint8) diff --git a/graphblas/core/utils.py b/graphblas/core/utils.py index 42fcf0685..184272124 100644 --- a/graphblas/core/utils.py +++ b/graphblas/core/utils.py @@ -43,7 +43,7 @@ def inner(func_wo_doc): object: object, type: type, } -_output_types.update((k, k) for k in np.cast) +_output_types.update((k, k) for k in set(np.sctypeDict.values())) def output_type(val): @@ -86,6 +86,7 @@ def values_to_numpy_buffer( ------- np.ndarray dtype + """ if dtype is not None: dtype = lookup_dtype(dtype) @@ -183,6 +184,7 @@ def normalize_chunks(chunks, shape): [(10,), (5, 15)] >>> normalize_chunks((5, (5, None)), shape) [(5, 5), (5, 15)] + """ if isinstance(chunks, (list, tuple)): pass diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py index a631cc4af..863d186ec 100644 --- a/graphblas/core/vector.py +++ b/graphblas/core/vector.py @@ -149,6 +149,7 @@ class Vector(BaseType): Size of the Vector. name : str, optional Name to give the Vector. This will be displayed in the ``__repr__``. + """ __slots__ = "_size", "_parent", "ss" @@ -265,6 +266,7 @@ def __delitem__(self, keys, **opts): Examples -------- >>> del v[1:-1] + """ del Updater(self, opts=opts)[keys] @@ -279,6 +281,7 @@ def __getitem__(self, keys): .. code-block:: python sub_v = v[[1, 3, 5]].new() + """ resolved_indexes = IndexerResolver(self, keys) shape = resolved_indexes.shape @@ -298,6 +301,7 @@ def __setitem__(self, keys, expr, **opts): # This makes a dense iso-value vector v[:] = 1 + """ Updater(self, opts=opts)[keys] = expr @@ -310,6 +314,7 @@ def __contains__(self, index): # Check if v[15] is non-empty 15 in v + """ extractor = self[index] if not extractor._is_scalar: @@ -349,6 +354,7 @@ def isequal(self, other, *, check_dtype=False, **opts): See Also -------- :meth:`isclose` : For equality check of floating point dtypes + """ other = self._expect_type(other, Vector, within="isequal", argname="other") if check_dtype and self.dtype != other.dtype: @@ -391,6 +397,7 @@ def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False, **opts Returns ------- bool + """ other = self._expect_type(other, Vector, within="isclose", argname="other") if check_dtype and self.dtype != other.dtype: @@ -479,6 +486,7 @@ def to_coo(self, dtype=None, *, indices=True, values=True, sort=True): ------- np.ndarray[dtype=uint64] : Indices np.ndarray : Values + """ if sort and backend == "suitesparse": self.wait() # sort in SS @@ -578,6 +586,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): Returns ------- Vector + """ if dtype is not None or mask is not None or clear: if dtype is None: @@ -608,6 +617,7 @@ def diag(self, k=0, *, name=None): Returns ------- :class:`~graphblas.Matrix` + """ from .matrix import Matrix @@ -632,6 +642,7 @@ def wait(self, how="materialize"): Use wait to force completion of the Vector. Has no effect in `blocking mode <../user_guide/init.html#graphblas-modes>`__. 
+ """ how = how.lower() if how == "materialize": @@ -656,6 +667,7 @@ def get(self, index, default=None): Returns ------- Python scalar + """ expr = self[index] if expr._is_scalar: @@ -698,6 +710,7 @@ def from_coo(cls, indices, values=1.0, dtype=None, *, size=None, dup_op=None, na Returns ------- Vector + """ indices = ints_to_numpy_buffer(indices, np.uint64, name="indices") values, dtype = values_to_numpy_buffer(values, dtype, subarray_after=1) @@ -755,6 +768,7 @@ def from_pairs(cls, pairs, dtype=None, *, size=None, dup_op=None, name=None): Returns ------- Vector + """ if isinstance(pairs, np.ndarray): raise TypeError("pairs as NumPy array is not supported; use `Vector.from_coo` instead") @@ -806,6 +820,7 @@ def from_scalar(cls, value, size, dtype=None, *, name=None, **opts): Returns ------- Vector + """ if type(value) is not Scalar: try: @@ -858,6 +873,7 @@ def from_dense(cls, values, missing_value=None, *, dtype=None, name=None, **opts Returns ------- Vector + """ values, dtype = values_to_numpy_buffer(values, dtype, subarray_after=1) if values.ndim == 0: @@ -906,6 +922,7 @@ def to_dense(self, fill_value=None, dtype=None, **opts): Returns ------- np.ndarray + """ if fill_value is None or self._nvals == self._size: if self._nvals != self._size: @@ -976,6 +993,7 @@ def ewise_add(self, other, op=monoid.plus): # Functional syntax w << monoid.max(u | v) + """ return self._ewise_add(other, op) @@ -1067,6 +1085,7 @@ def ewise_mult(self, other, op=binary.times): # Functional syntax w << binary.gt(u & v) + """ return self._ewise_mult(other, op) @@ -1160,6 +1179,7 @@ def ewise_union(self, other, op, left_default, right_default): # Functional syntax w << binary.div(u | v, left_default=1, right_default=1) + """ return self._ewise_union(other, op, left_default, right_default) @@ -1314,6 +1334,7 @@ def vxm(self, other, op=semiring.plus_times): # Functional syntax C << semiring.min_plus(v @ A) + """ return self._vxm(other, op) @@ -1393,6 +1414,7 @@ def apply(self, op, right=None, *, left=None): # Functional syntax w << op.abs(v) + """ method_name = "apply" extra_message = ( @@ -1538,6 +1560,7 @@ def select(self, op, thunk=None): # Functional syntax w << select.value(v >= 1) + """ method_name = "select" if isinstance(op, str): @@ -1632,6 +1655,7 @@ def reduce(self, op=monoid.plus, *, allow_empty=True): .. code-block:: python total << v.reduce(monoid.plus) + """ method_name = "reduce" op = get_typed_op(op, self.dtype, kind="binary|aggregator") @@ -1684,6 +1708,7 @@ def inner(self, other, op=semiring.plus_times): *Note*: This is not a standard GraphBLAS function, but fits with other functions in the `Matrix Multiplication <../user_guide/operations.html#matrix-multiply>`__ family of functions. + """ return self._inner(other, op) @@ -1739,6 +1764,7 @@ def outer(self, other, op=binary.times): C << v.outer(w, op=binary.times) *Note*: This is not a standard GraphBLAS function. + """ from .matrix import MatrixExpression @@ -1787,6 +1813,7 @@ def reposition(self, offset, *, size=None): .. 
code-block:: python w = v.reposition(20).new() + """ if size is None: size = self._size @@ -2047,6 +2074,7 @@ def from_dict(cls, d, dtype=None, *, size=None, name=None): Returns ------- Vector + """ indices = np.fromiter(d.keys(), np.uint64) if dtype is None: @@ -2074,6 +2102,7 @@ def to_dict(self): Returns ------- dict + """ indices, values = self.to_coo(sort=False) return dict(zip(indices.tolist(), values.tolist())) diff --git a/graphblas/io/_awkward.py b/graphblas/io/_awkward.py index 6c476817f..b30984251 100644 --- a/graphblas/io/_awkward.py +++ b/graphblas/io/_awkward.py @@ -154,6 +154,7 @@ def from_awkward(A, *, name=None): function. If attempting to convert an arbitrary `awkward-array`, make sure that the top-level attributes and parameters contain the expected values. + """ params = A.layout.parameters if missing := {"format", "shape"} - params.keys(): diff --git a/graphblas/io/_matrixmarket.py b/graphblas/io/_matrixmarket.py index 558605328..8cf8738a3 100644 --- a/graphblas/io/_matrixmarket.py +++ b/graphblas/io/_matrixmarket.py @@ -32,6 +32,7 @@ def mmread(source, engine="auto", *, dup_op=None, name=None, **kwargs): Returns ------- :class:`~graphblas.Matrix` + """ try: # scipy is currently needed for *all* engines @@ -95,6 +96,7 @@ def mmwrite( Number of digits to write for real or complex values symmetry : str, optional {"general", "symmetric", "skew-symmetric", "hermitian"} + """ try: # scipy is currently needed for *all* engines diff --git a/graphblas/io/_networkx.py b/graphblas/io/_networkx.py index 2324a11c2..dab04c82d 100644 --- a/graphblas/io/_networkx.py +++ b/graphblas/io/_networkx.py @@ -21,6 +21,7 @@ def from_networkx(G, nodelist=None, dtype=None, weight="weight", name=None): Returns ------- :class:`~graphblas.Matrix` + """ import networkx as nx @@ -45,6 +46,7 @@ def to_networkx(m, edge_attribute="weight"): Returns ------- nx.DiGraph + """ import networkx as nx diff --git a/graphblas/io/_scipy.py b/graphblas/io/_scipy.py index 1eaa691dd..228432eed 100644 --- a/graphblas/io/_scipy.py +++ b/graphblas/io/_scipy.py @@ -22,6 +22,7 @@ def from_scipy_sparse(A, *, dup_op=None, name=None): Returns ------- :class:`~graphblas.Matrix` + """ nrows, ncols = A.shape dtype = lookup_dtype(A.dtype) diff --git a/graphblas/io/_sparse.py b/graphblas/io/_sparse.py index 2bbdad2e6..c0d4beabb 100644 --- a/graphblas/io/_sparse.py +++ b/graphblas/io/_sparse.py @@ -23,6 +23,7 @@ def from_pydata_sparse(s, *, dup_op=None, name=None): ------- :class:`~graphblas.Vector` :class:`~graphblas.Matrix` + """ try: import sparse diff --git a/graphblas/monoid/numpy.py b/graphblas/monoid/numpy.py index 5f6895e5d..b9ff2b502 100644 --- a/graphblas/monoid/numpy.py +++ b/graphblas/monoid/numpy.py @@ -5,6 +5,7 @@ https://numba.readthedocs.io/en/stable/reference/numpysupported.html#math-operations """ + import numpy as _np from .. import _STANDARD_OPERATOR_NAMES diff --git a/graphblas/select/__init__.py b/graphblas/select/__init__.py index aaf8e12d0..b55766ff8 100644 --- a/graphblas/select/__init__.py +++ b/graphblas/select/__init__.py @@ -88,9 +88,7 @@ def _match_expr(parent, expr): def value(expr): - """ - An advanced select method which allows for easily expressing - value comparison logic. + """An advanced select method for easily expressing value comparison logic. Example usage: >>> gb.select.value(A > 0) @@ -102,9 +100,7 @@ def value(expr): def row(expr): - """ - An advanced select method which allows for easily expressing - Matrix row index comparison logic. 
+ """An advanced select method for easily expressing Matrix row index comparison logic. Example usage: >>> gb.select.row(A <= 5) @@ -116,9 +112,7 @@ def row(expr): def column(expr): - """ - An advanced select method which allows for easily expressing - Matrix column index comparison logic. + """An advanced select method for easily expressing Matrix column index comparison logic. Example usage: >>> gb.select.column(A <= 5) @@ -130,8 +124,7 @@ def column(expr): def index(expr): - """ - An advanced select method which allows for easily expressing + """An advanced select method which allows for easily expressing Vector index comparison logic. Example usage: diff --git a/graphblas/semiring/numpy.py b/graphblas/semiring/numpy.py index 97b90874b..10a680ea0 100644 --- a/graphblas/semiring/numpy.py +++ b/graphblas/semiring/numpy.py @@ -5,6 +5,7 @@ https://numba.readthedocs.io/en/stable/reference/numpysupported.html#math-operations """ + import itertools as _itertools from .. import _STANDARD_OPERATOR_NAMES diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py index 29a67e08b..b42ea72b4 100644 --- a/graphblas/ss/_core.py +++ b/graphblas/ss/_core.py @@ -22,8 +22,7 @@ class _graphblas_ss: def diag(x, k=0, dtype=None, *, name=None, **opts): - """ - GxB_Matrix_diag, GxB_Vector_diag. + """GxB_Matrix_diag, GxB_Vector_diag. Extract a diagonal Vector from a Matrix, or construct a diagonal Matrix from a Vector. Unlike ``Matrix.diag`` and ``Vector.diag``, this function @@ -71,8 +70,7 @@ def diag(x, k=0, dtype=None, *, name=None, **opts): def concat(tiles, dtype=None, *, name=None, **opts): - """ - GxB_Matrix_concat. + """GxB_Matrix_concat. Concatenate a 2D list of Matrix objects into a new Matrix, or a 1D list of Vector objects into a new Vector. To concatenate into existing objects, diff --git a/graphblas/tests/conftest.py b/graphblas/tests/conftest.py index ce9e6488f..a3acb3a94 100644 --- a/graphblas/tests/conftest.py +++ b/graphblas/tests/conftest.py @@ -68,9 +68,11 @@ def save_records(): for key in dir(gb.semiring) if key != "ss" and isinstance( - getattr(gb.semiring, key) - if key not in gb.semiring._deprecated - else gb.semiring._deprecated[key], + ( + getattr(gb.semiring, key) + if key not in gb.semiring._deprecated + else gb.semiring._deprecated[key] + ), (gb.core.operator.Semiring, gb.core.operator.ParameterizedSemiring), ) ) @@ -79,9 +81,11 @@ def save_records(): for key in dir(gb.binary) if key != "ss" and isinstance( - getattr(gb.binary, key) - if key not in gb.binary._deprecated - else gb.binary._deprecated[key], + ( + getattr(gb.binary, key) + if key not in gb.binary._deprecated + else gb.binary._deprecated[key] + ), (gb.core.operator.BinaryOp, gb.core.operator.ParameterizedBinaryOp), ) ) diff --git a/graphblas/tests/test_descriptor.py b/graphblas/tests/test_descriptor.py index 9209a8055..6ec9df36a 100644 --- a/graphblas/tests/test_descriptor.py +++ b/graphblas/tests/test_descriptor.py @@ -2,8 +2,7 @@ def test_caching(): - """ - Test that building a descriptor is actually caching rather than building + """Test that building a descriptor is actually caching rather than building a new object for each call. 
""" tocr = descriptor.lookup( diff --git a/graphblas/tests/test_dtype.py b/graphblas/tests/test_dtype.py index 5797dda10..3bd65f2b4 100644 --- a/graphblas/tests/test_dtype.py +++ b/graphblas/tests/test_dtype.py @@ -241,7 +241,7 @@ def test_dtype_to_from_string(): def test_has_complex(): - """Only SuiteSparse has complex (with Windows support in Python after v7.4.3.1)""" + """Only SuiteSparse has complex (with Windows support in Python after v7.4.3.1).""" if not suitesparse: assert not dtypes._supports_complex return diff --git a/graphblas/tests/test_infix.py b/graphblas/tests/test_infix.py index e688086b9..601f282a7 100644 --- a/graphblas/tests/test_infix.py +++ b/graphblas/tests/test_infix.py @@ -346,7 +346,7 @@ def test_inplace_infix(s1, v1, v2, A1, A2): @autocompute def test_infix_expr_value_types(): - """Test bug where `infix_expr._value` was used as MatrixExpression or Matrix""" + """Test bug where `infix_expr._value` was used as MatrixExpression or Matrix.""" from graphblas.core.matrix import MatrixExpression A = Matrix(int, 3, 3) diff --git a/graphblas/unary/numpy.py b/graphblas/unary/numpy.py index 9b742d8bc..0c36565ec 100644 --- a/graphblas/unary/numpy.py +++ b/graphblas/unary/numpy.py @@ -5,6 +5,7 @@ https://numba.readthedocs.io/en/stable/reference/numpysupported.html#math-operations """ + import numpy as _np from .. import _STANDARD_OPERATOR_NAMES diff --git a/graphblas/viz.py b/graphblas/viz.py index f0367e119..b6d5f6ba7 100644 --- a/graphblas/viz.py +++ b/graphblas/viz.py @@ -79,6 +79,7 @@ def spy(M, *, centered=False, show=True, figure=None, axes=None, figsize=None, * See Also -------- datashade + """ mpl, plt, ss = _get_imports(["mpl", "plt", "ss"], "spy") A = to_scipy_sparse(M, "coo") @@ -129,6 +130,7 @@ def datashade(M, agg="count", *, width=None, height=None, opts_kwargs=None, **kw See Also -------- spy + """ np, pd, bk, hv, hp, ds = _get_imports(["np", "pd", "bk", "hv", "hp", "ds"], "datashade") if "df" not in kwargs: diff --git a/pyproject.toml b/pyproject.toml index 3bd4a4310..e9ce9da86 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,13 +58,13 @@ classifiers = [ "Topic :: Software Development :: Libraries :: Python Modules", ] dependencies = [ - "numpy >=1.21", + "numpy >=1.22", "donfig >=0.6", "pyyaml >=5.4", # These won't be installed by default after 2024.3.0 # Use e.g. 
"python-graphblas[suitesparse]" or "python-graphblas[default]" instead "suitesparse-graphblas >=7.4.0.0, <9", - "numba >=0.55; python_version<'3.12'", # make optional where numba is not supported + "numba >=0.55; python_version<'3.13'", # make optional where numba is not supported ] [project.urls] @@ -97,9 +97,9 @@ repr = [ ] io = [ "python-graphblas[networkx,scipy]", - "python-graphblas[numba]; python_version<'3.12'", + "python-graphblas[numba]; python_version<'3.13'", "awkward >=1.9", - "sparse >=0.13; python_version<'3.12'", # make optional, b/c sparse needs numba + "sparse >=0.13; python_version<'3.13'", # make optional, b/c sparse needs numba "fast-matrix-market >=1.4.5", ] viz = [ @@ -119,11 +119,11 @@ test = [ ] default = [ "python-graphblas[suitesparse,pandas,scipy]", - "python-graphblas[numba]; python_version<'3.12'", # make optional where numba is not supported + "python-graphblas[numba]; python_version<'3.13'", # make optional where numba is not supported ] all = [ "python-graphblas[default,io,viz,test]", - "python-graphblas[datashade]; python_version<'3.12'", # make optional, b/c datashade needs numba + "python-graphblas[datashade]; python_version<'3.13'", # make optional, b/c datashade needs numba ] [tool.setuptools] @@ -211,6 +211,9 @@ filterwarnings = [ # Python 3.12 introduced this deprecation, which is triggered by pandas 2.1.1 "ignore:datetime.datetime.utcfromtimestamp:DeprecationWarning:dateutil", + + # Pandas 2.2 warns that pyarrow will become a required dependency in pandas 3.0 + "ignore:\\nPyarrow will become a required dependency of pandas:DeprecationWarning:", ] [tool.coverage.run] @@ -239,6 +242,7 @@ ignore-words-list = "coo,ba" # https://github.com/charliermarsh/ruff/ line-length = 100 target-version = "py39" +[tool.ruff.lint] unfixable = [ "F841" # unused-variable (Note: can leave useless expression) ] @@ -308,23 +312,26 @@ ignore = [ "D103", # Missing docstring in public function "D104", # Missing docstring in public package "D105", # Missing docstring in magic method + "D107", # Missing docstring in `__init__` # "D107", # Missing docstring in `__init__` "D205", # 1 blank line required between summary line and description "D401", # First line of docstring should be in imperative mood: + "D417", # D417 Missing argument description in the docstring for ...: ... # "D417", # Missing argument description in the docstring: "PLE0605", # Invalid format for `__all__`, must be `tuple` or `list` (Note: broken in v0.0.237) # Maybe consider # "SIM300", # Yoda conditions are discouraged, use ... instead (Note: we're not this picky) # "SIM401", # Use dict.get ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer) + "B904", # Use `raise from` to specify exception cause (Note: sometimes okay to raise original exception) "TRY004", # Prefer `TypeError` exception for invalid type (Note: good advice, but not worth the nuisance) - "TRY200", # Use `raise from` to specify exception cause (Note: sometimes okay to raise original exception) "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` (Note: no annotations yet) "PERF401", # Use a list comprehension to create a transformed list (Note: poorly implemented atm) # Intentionally ignored "COM812", # Trailing comma missing "D203", # 1 blank line required before class docstring (Note: conflicts with D211, which is preferred) + "D213", # (Note: conflicts with D212, which is preferred) "D400", # First line should end with a period (Note: prefer D415, which also allows "?" 
and "!") "N801", # Class name ... should use CapWords convention (Note:we have a few exceptions to this) "N802", # Function name ... should be lowercase @@ -374,7 +381,7 @@ ignore = [ "PD", # pandas-vet (Intended for scripts that use pandas, not libraries) ] -[tool.ruff.per-file-ignores] +[tool.ruff.lint.per-file-ignores] "graphblas/core/operator/base.py" = ["S102"] # exec is used for UDF "graphblas/core/ss/matrix.py" = ["NPY002"] # numba doesn't support rng generator yet "graphblas/core/ss/vector.py" = ["NPY002"] # numba doesn't support rng generator yet @@ -389,14 +396,14 @@ ignore = [ "docs/*.py" = ["INP001"] # Not a package -[tool.ruff.flake8-builtins] +[tool.ruff.lint.flake8-builtins] builtins-ignorelist = ["copyright", "format", "min", "max"] -[tool.ruff.flake8-pytest-style] +[tool.ruff.lint.flake8-pytest-style] fixture-parentheses = false mark-parentheses = false -[tool.ruff.pydocstyle] +[tool.lint.ruff.pydocstyle] convention = "numpy" [tool.pylint.messages_control] diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 75d6283f0..59fb59d5f 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -3,15 +3,15 @@ # Use, adjust, copy/paste, etc. as necessary to answer your questions. # This may be helpful when updating dependency versions in CI. # Tip: add `--json` for more information. -conda search 'flake8-bugbear[channel=conda-forge]>=23.12.2' +conda search 'flake8-bugbear[channel=conda-forge]>=24.1.17' conda search 'flake8-simplify[channel=conda-forge]>=0.21.0' -conda search 'numpy[channel=conda-forge]>=1.26.2' -conda search 'pandas[channel=conda-forge]>=2.1.4' -conda search 'scipy[channel=conda-forge]>=1.11.4' +conda search 'numpy[channel=conda-forge]>=1.26.3' +conda search 'pandas[channel=conda-forge]>=2.2.0' +conda search 'scipy[channel=conda-forge]>=1.12.0' conda search 'networkx[channel=conda-forge]>=3.2.1' -conda search 'awkward[channel=conda-forge]>=2.5.1' -conda search 'sparse[channel=conda-forge]>=0.14.0' -conda search 'fast_matrix_market[channel=conda-forge]>=1.7.5' -conda search 'numba[channel=conda-forge]>=0.58.1' +conda search 'awkward[channel=conda-forge]>=2.5.2' +conda search 'sparse[channel=conda-forge]>=0.15.1' +conda search 'fast_matrix_market[channel=conda-forge]>=1.7.6' +conda search 'numba[channel=conda-forge]>=0.59.0' conda search 'pyyaml[channel=conda-forge]>=6.0.1' # conda search 'python[channel=conda-forge]>=3.9 *pypy*' From a621468d021b3e484fd749eed43525af67cb444b Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 18 Feb 2024 12:39:32 -0600 Subject: [PATCH 59/66] Adopt SPEC 0 (#537) --- .github/workflows/debug.yml | 2 +- .github/workflows/imports.yml | 4 +--- .github/workflows/publish_pypi.yml | 2 +- .github/workflows/test_and_build.yml | 23 ++++++----------------- .pre-commit-config.yaml | 2 +- docs/getting_started/faq.rst | 5 ++--- graphblas/core/formatting.py | 2 +- graphblas/core/matrix.py | 7 ++++--- graphblas/core/ss/matrix.py | 2 +- graphblas/core/ss/vector.py | 2 +- graphblas/core/utils.py | 2 +- graphblas/core/vector.py | 4 ++-- graphblas/io/_networkx.py | 6 ++++-- graphblas/tests/test_io.py | 2 +- graphblas/tests/test_matrix.py | 14 ++++++++------ graphblas/tests/test_vector.py | 2 +- pyproject.toml | 25 +++++++++++++------------ scripts/check_versions.sh | 2 +- 18 files changed, 50 insertions(+), 58 deletions(-) diff --git a/.github/workflows/debug.yml b/.github/workflows/debug.yml index c9dc231fe..64d4bc12b 100644 --- a/.github/workflows/debug.yml +++ b/.github/workflows/debug.yml @@ -15,7 +15,7 @@ jobs: 
strategy: fail-fast: false matrix: - pyver: [3.9] + pyver: [3.10] testopts: - "--blocking" # - "--non-blocking --record --runslow" diff --git a/.github/workflows/imports.yml b/.github/workflows/imports.yml index 0116f615d..b9e9d4406 100644 --- a/.github/workflows/imports.yml +++ b/.github/workflows/imports.yml @@ -30,7 +30,6 @@ jobs: id: pyver with: contents: | - 3.9 3.10 3.11 3.12 @@ -38,14 +37,13 @@ jobs: 1 1 1 - 1 test_imports: needs: rngs runs-on: ${{ needs.rngs.outputs.os }} # runs-on: ${{ matrix.os }} # strategy: # matrix: - # python-version: ["3.9", "3.10", "3.11", "3.12"] + # python-version: ["3.10", "3.11", "3.12"] # os: ["ubuntu-latest", "macos-latest", "windows-latest"] steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index 366d01e97..b01d2a502 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -20,7 +20,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.9" + python-version: "3.10" - name: Install build dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 7086d8779..6c55a0eca 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -105,11 +105,10 @@ jobs: uses: ddradar/choose-random-action@v2.0.2 id: pyver with: - # We should support major Python versions for at least 36-42 months + # We should support major Python versions for at least 36 months as per SPEC 0 # We may be able to support pypy if anybody asks for it # 3.9.16 0_73_pypy contents: | - 3.9 3.10 3.11 3.12 @@ -117,7 +116,6 @@ jobs: 1 1 1 - 1 - name: RNG for source of python-suitesparse-graphblas uses: ddradar/choose-random-action@v2.0.2 id: sourcetype @@ -166,20 +164,13 @@ jobs: # # First let's randomly get versions of dependencies to install. # Consider removing old versions when they become problematic or very old (>=2 years). - nxver=$(python -c 'import random ; print(random.choice(["=2.7", "=2.8", "=3.0", "=3.1", "=3.2", ""]))') - sparsever=$(python -c 'import random ; print(random.choice(["=0.13", "=0.14", "=0.15", ""]))') + nxver=$(python -c 'import random ; print(random.choice(["=2.8", "=3.0", "=3.1", "=3.2", ""]))') + sparsever=$(python -c 'import random ; print(random.choice(["=0.14", "=0.15", ""]))') # Randomly choosing versions of dependencies based on Python version works surprisingly well... 
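Each `*ver` variable below holds a conda-style version pin chosen at random, where the empty string means the package is left unpinned. A minimal standalone sketch of the pattern (the package and version strings here are illustrative, not taken from the workflow):

```python
import random

# Choose a conda-style pin at random; "" leaves the package unpinned.
npver = random.choice(["=1.24", "=1.25", "=1.26", ""])
print(f"numpy{npver}")  # e.g. "numpy=1.25", or simply "numpy" when unpinned
```
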
- if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", "=2.1", "=2.2", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') - fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') - yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') - elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then + npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", "=1.26", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", "=2.1", "=2.2", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", "=2.2", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') @@ -237,8 +228,6 @@ jobs: numbaver=$(python -c 'import random ; print(random.choice(["=0.58", "=0.59", ""]))') elif [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then numbaver=$(python -c 'import random ; print(random.choice(["=0.57", "=0.58", "=0.59", ""]))') - elif [[ ${npver} == "=1.21" ]] ; then - numbaver=$(python -c 'import random ; print(random.choice(["=0.55", "=0.56", "=0.57", ""]))') else numbaver=$(python -c 'import random ; print(random.choice(["=0.56", "=0.57", "=0.58", "=0.59", ""]))') fi diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fa563b639..12e5dd865 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -54,7 +54,7 @@ repos: rev: v3.15.0 hooks: - id: pyupgrade - args: [--py39-plus] + args: [--py310-plus] - repo: https://github.com/MarcoGorelli/auto-walrus rev: v0.2.2 hooks: diff --git a/docs/getting_started/faq.rst b/docs/getting_started/faq.rst index 1e60a1bd4..2609e7929 100644 --- a/docs/getting_started/faq.rst +++ b/docs/getting_started/faq.rst @@ -101,11 +101,10 @@ Bugs are not considered deprecations and may be fixed immediately. What is the version support policy? +++++++++++++++++++++++++++++++++++ -Each major Python version will be supported for at least 36 to 42 months. +Each major Python version will be supported for at least 36 months. Major dependencies such as NumPy should be supported for at least 24 months. -This is motivated by these guidelines: +We aim to follow SPEC 0: - - https://numpy.org/neps/nep-0029-deprecation_policy.html - https://scientific-python.org/specs/spec-0000/ ``python-graphblas`` itself follows a "single trunk" versioning strategy. 
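With the minimum Python raised to 3.10 above, the hunks that follow can rely on two 3.10-only stdlib additions: `zip(..., strict=True)` (PEP 618) and `itertools.pairwise`. A minimal sketch of their behavior (a standalone illustration, not code from this patch):

```python
from itertools import pairwise

# strict=True turns silent truncation into an explicit error on length mismatch:
rows, cols = [0, 1, 2], [1, 2]
try:
    list(zip(rows, cols, strict=True))
except ValueError as exc:
    print(exc)  # zip() argument 2 is shorter than argument 1

# pairwise(seq) yields overlapping pairs, replacing zip(seq[:-1], seq[1:]):
assert list(pairwise([0, 4, 7])) == [(0, 4), (4, 7)]
```
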
diff --git a/graphblas/core/formatting.py b/graphblas/core/formatting.py index aefb87f94..0b6252101 100644 --- a/graphblas/core/formatting.py +++ b/graphblas/core/formatting.py @@ -630,7 +630,7 @@ def create_header(type_name, keys, vals, *, lower_border=False, name="", quote=T name = f'"{name}"' key_text = [] val_text = [] - for key, val in zip(keys, vals): + for key, val in zip(keys, vals, strict=True): width = max(len(key), len(val)) + 2 key_text.append(key.rjust(width)) val_text.append(val.rjust(width)) diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 359477d4c..e28e92a65 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -360,7 +360,7 @@ def __contains__(self, index): def __iter__(self): """Iterate over (row, col) indices which are present in the matrix.""" rows, columns, _ = self.to_coo(values=False) - return zip(rows.flat, columns.flat) + return zip(rows.flat, columns.flat, strict=True) def __sizeof__(self): if backend == "suitesparse": @@ -961,7 +961,7 @@ def from_edgelist( rows = edgelist[:, 0] cols = edgelist[:, 1] else: - unzipped = list(zip(*edgelist)) + unzipped = list(zip(*edgelist, strict=True)) if len(unzipped) == 2: rows, cols = unzipped elif len(unzipped) == 3: @@ -1826,10 +1826,11 @@ def to_dicts(self, order="rowwise"): cols = cols.tolist() values = values.tolist() return { - row: dict(zip(cols[start:stop], values[start:stop])) + row: dict(zip(cols[start:stop], values[start:stop], strict=True)) for row, (start, stop) in zip( compressed_rows.tolist(), np.lib.stride_tricks.sliding_window_view(indptr, 2).tolist(), + strict=True, ) } # Alternative diff --git a/graphblas/core/ss/matrix.py b/graphblas/core/ss/matrix.py index 0489cb5d6..0a08c50e2 100644 --- a/graphblas/core/ss/matrix.py +++ b/graphblas/core/ss/matrix.py @@ -58,7 +58,7 @@ def head(matrix, n=10, dtype=None, *, sort=False): dtype = matrix.dtype else: dtype = lookup_dtype(dtype) - rows, cols, vals = zip(*itertools.islice(matrix.ss.iteritems(), n)) + rows, cols, vals = zip(*itertools.islice(matrix.ss.iteritems(), n), strict=True) return np.array(rows, np.uint64), np.array(cols, np.uint64), np.array(vals, dtype.np_type) diff --git a/graphblas/core/ss/vector.py b/graphblas/core/ss/vector.py index d1f7a5bcb..a21d54de9 100644 --- a/graphblas/core/ss/vector.py +++ b/graphblas/core/ss/vector.py @@ -43,7 +43,7 @@ def head(vector, n=10, dtype=None, *, sort=False): dtype = vector.dtype else: dtype = lookup_dtype(dtype) - indices, vals = zip(*itertools.islice(vector.ss.iteritems(), n)) + indices, vals = zip(*itertools.islice(vector.ss.iteritems(), n), strict=True) return np.array(indices, np.uint64), np.array(vals, dtype.np_type) diff --git a/graphblas/core/utils.py b/graphblas/core/utils.py index 184272124..6e91edd1b 100644 --- a/graphblas/core/utils.py +++ b/graphblas/core/utils.py @@ -202,7 +202,7 @@ def normalize_chunks(chunks, shape): f"chunks argument must be of length {len(shape)} (one for each dimension of a {typ})" ) chunksizes = [] - for size, chunk in zip(shape, chunks): + for size, chunk in zip(shape, chunks, strict=True): if chunk is None: cur_chunks = [size] elif (c := maybe_integral(chunk)) is not None: diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py index 863d186ec..8bac4198e 100644 --- a/graphblas/core/vector.py +++ b/graphblas/core/vector.py @@ -772,7 +772,7 @@ def from_pairs(cls, pairs, dtype=None, *, size=None, dup_op=None, name=None): """ if isinstance(pairs, np.ndarray): raise TypeError("pairs as NumPy array is not supported; use 
`Vector.from_coo` instead") - unzipped = list(zip(*pairs)) + unzipped = list(zip(*pairs, strict=True)) if len(unzipped) == 2: indices, values = unzipped elif not unzipped: @@ -2105,7 +2105,7 @@ def to_dict(self): """ indices, values = self.to_coo(sort=False) - return dict(zip(indices.tolist(), values.tolist())) + return dict(zip(indices.tolist(), values.tolist(), strict=True)) if backend == "suitesparse": diff --git a/graphblas/io/_networkx.py b/graphblas/io/_networkx.py index dab04c82d..8cf84e576 100644 --- a/graphblas/io/_networkx.py +++ b/graphblas/io/_networkx.py @@ -55,7 +55,9 @@ def to_networkx(m, edge_attribute="weight"): cols = cols.tolist() G = nx.DiGraph() if edge_attribute is None: - G.add_edges_from(zip(rows, cols)) + G.add_edges_from(zip(rows, cols, strict=True)) else: - G.add_weighted_edges_from(zip(rows, cols, vals.tolist()), weight=edge_attribute) + G.add_weighted_edges_from( + zip(rows, cols, vals.tolist(), strict=True), weight=edge_attribute + ) return G diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py index 109c90a2c..7e786f0da 100644 --- a/graphblas/tests/test_io.py +++ b/graphblas/tests/test_io.py @@ -146,7 +146,7 @@ def test_matrix_to_from_networkx(): M = gb.io.from_networkx(G, nodelist=range(7)) if suitesparse: assert M.ss.is_iso - rows, cols = zip(*edges) + rows, cols = zip(*edges, strict=True) expected = gb.Matrix.from_coo(rows, cols, 1) assert expected.isequal(M) # Test empty diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index 06e4ee868..63561930b 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -2603,12 +2603,14 @@ def test_iter(A): zip( [3, 0, 3, 5, 6, 0, 6, 1, 6, 2, 4, 1], [0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6], + strict=True, ) ) assert set(A.T) == set( zip( [0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6], [3, 0, 3, 5, 6, 0, 6, 1, 6, 2, 4, 1], + strict=True, ) ) @@ -2731,8 +2733,8 @@ def test_ss_split(A): for results in [A.ss.split([4, 3]), A.ss.split([[4, None], 3], name="split")]: row_boundaries = [0, 4, 7] col_boundaries = [0, 3, 6, 7] - for i, (i1, i2) in enumerate(zip(row_boundaries[:-1], row_boundaries[1:])): - for j, (j1, j2) in enumerate(zip(col_boundaries[:-1], col_boundaries[1:])): + for i, (i1, i2) in enumerate(itertools.pairwise(row_boundaries)): + for j, (j1, j2) in enumerate(itertools.pairwise(col_boundaries)): expected = A[i1:i2, j1:j2].new() assert expected.isequal(results[i][j]) with pytest.raises(DimensionMismatch): @@ -3068,7 +3070,7 @@ def test_ss_flatten(A): [3, 2, 3, 1, 5, 3, 7, 8, 3, 1, 7, 4], ] # row-wise - indices = [row * A.ncols + col for row, col in zip(data[0], data[1])] + indices = [row * A.ncols + col for row, col in zip(data[0], data[1], strict=True)] expected = Vector.from_coo(indices, data[2], size=A.nrows * A.ncols) for fmt in ["csr", "hypercsr", "bitmapr"]: B = Matrix.ss.import_any(**A.ss.export(format=fmt)) @@ -3087,7 +3089,7 @@ def test_ss_flatten(A): assert C.isequal(B) # column-wise - indices = [col * A.nrows + row for row, col in zip(data[0], data[1])] + indices = [col * A.nrows + row for row, col in zip(data[0], data[1], strict=True)] expected = Vector.from_coo(indices, data[2], size=A.nrows * A.ncols) for fmt in ["csc", "hypercsc", "bitmapc"]: B = Matrix.ss.import_any(**A.ss.export(format=fmt)) @@ -3626,9 +3628,9 @@ def test_ss_iteration(A): assert not list(B.ss.itervalues()) assert not list(B.ss.iteritems()) rows, columns, values = A.to_coo() - assert sorted(zip(rows, columns)) == sorted(A.ss.iterkeys()) + assert sorted(zip(rows, columns, 
strict=True)) == sorted(A.ss.iterkeys()) assert sorted(values) == sorted(A.ss.itervalues()) - assert sorted(zip(rows, columns, values)) == sorted(A.ss.iteritems()) + assert sorted(zip(rows, columns, values, strict=True)) == sorted(A.ss.iteritems()) N = rows.size A = Matrix.ss.import_bitmapr(**A.ss.export("bitmapr")) diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index 77f608969..df1f5c86e 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -2270,7 +2270,7 @@ def test_ss_iteration(v): # This is what I would expect assert sorted(indices) == sorted(v.ss.iterkeys()) assert sorted(values) == sorted(v.ss.itervalues()) - assert sorted(zip(indices, values)) == sorted(v.ss.iteritems()) + assert sorted(zip(indices, values, strict=True)) == sorted(v.ss.iteritems()) N = indices.size v = Vector.ss.import_bitmap(**v.ss.export("bitmap")) diff --git a/pyproject.toml b/pyproject.toml index e9ce9da86..a3447b751 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ name = "python-graphblas" dynamic = ["version"] description = "Python library for GraphBLAS: high-performance sparse linear algebra for scalable graph analytics" readme = "README.md" -requires-python = ">=3.9" +requires-python = ">=3.10" license = {file = "LICENSE"} authors = [ {name = "Erik Welch", email = "erik.n.welch@gmail.com"}, @@ -44,7 +44,6 @@ classifiers = [ "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", @@ -58,7 +57,7 @@ classifiers = [ "Topic :: Software Development :: Libraries :: Python Modules", ] dependencies = [ - "numpy >=1.22", + "numpy >=1.23", "donfig >=0.6", "pyyaml >=5.4", # These won't be installed by default after 2024.3.0 @@ -84,7 +83,7 @@ numba = [ "numba >=0.55", ] pandas = [ - "pandas >=1.2", + "pandas >=1.5", ] scipy = [ "scipy >=1.9", @@ -99,17 +98,17 @@ io = [ "python-graphblas[networkx,scipy]", "python-graphblas[numba]; python_version<'3.13'", "awkward >=1.9", - "sparse >=0.13; python_version<'3.13'", # make optional, b/c sparse needs numba + "sparse >=0.14; python_version<'3.13'", # make optional, b/c sparse needs numba "fast-matrix-market >=1.4.5", ] viz = [ "python-graphblas[networkx,scipy]", - "matplotlib >=3.5", + "matplotlib >=3.6", ] datashade = [ # datashade requires numba "python-graphblas[numba,pandas,scipy]", - "datashader >=0.12", - "hvplot >=0.7", + "datashader >=0.14", + "hvplot >=0.8", ] test = [ "python-graphblas[suitesparse,pandas,scipy]", @@ -157,7 +156,7 @@ dirty_template = "{tag}+{ccount}.g{sha}.dirty" [tool.black] line-length = 100 -target-version = ["py39", "py310", "py311", "py312"] +target-version = ["py310", "py311", "py312"] [tool.isort] sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] @@ -241,10 +240,11 @@ ignore-words-list = "coo,ba" [tool.ruff] # https://github.com/charliermarsh/ruff/ line-length = 100 -target-version = "py39" +target-version = "py310" [tool.ruff.lint] unfixable = [ - "F841" # unused-variable (Note: can leave useless expression) + "F841", # unused-variable (Note: can leave useless expression) + "B905", # zip-without-explicit-strict (Note: prefer `zip(x, y, strict=True)`) ] select = [ # Have we enabled too many checks that they'll become a nuisance? We'll see... @@ -360,6 +360,7 @@ ignore = [ "SIM105", # Use contextlib.suppress(...) 
instead of try-except-pass (Note: try-except-pass is much faster) "SIM108", # Use ternary operator ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer) "TRY003", # Avoid specifying long messages outside the exception class (Note: why?) + "UP038", # Use `X | Y` in `isinstance` call instead of `(X, Y)` (Note: using `|` is slower atm) # Ignored categories "C90", # mccabe (Too strict, but maybe we should make things less complex) @@ -409,7 +410,7 @@ convention = "numpy" [tool.pylint.messages_control] # To run a single check, do: pylint graphblas --disable E,W,R,C,I --enable assignment-from-no-return max-line-length = 100 -py-version = "3.9" +py-version = "3.10" enable = ["I"] disable = [ # Error diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 59fb59d5f..893f09539 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -14,4 +14,4 @@ conda search 'sparse[channel=conda-forge]>=0.15.1' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.6' conda search 'numba[channel=conda-forge]>=0.59.0' conda search 'pyyaml[channel=conda-forge]>=6.0.1' -# conda search 'python[channel=conda-forge]>=3.9 *pypy*' +# conda search 'python[channel=conda-forge]>=3.10 *pypy*' From 27b23e414a9a73daa9c3dda9698e227562d9a18c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 18 Feb 2024 12:40:58 -0600 Subject: [PATCH 60/66] Bump pre-commit/action from 3.0.0 to 3.0.1 (#538) Bumps [pre-commit/action](https://github.com/pre-commit/action) from 3.0.0 to 3.0.1. - [Release notes](https://github.com/pre-commit/action/releases) - [Commits](https://github.com/pre-commit/action/compare/v3.0.0...v3.0.1) --- updated-dependencies: - dependency-name: pre-commit/action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 97bb856f6..d0182dd0c 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -20,4 +20,4 @@ jobs: - uses: actions/setup-python@v5 with: python-version: "3.10" - - uses: pre-commit/action@v3.0.0 + - uses: pre-commit/action@v3.0.1 From 3c389f8434de146c8391a8b7c46f075eb304463a Mon Sep 17 00:00:00 2001 From: Sophia Lockton <69818937+slockton24@users.noreply.github.com> Date: Wed, 23 Oct 2024 06:30:10 -0400 Subject: [PATCH 61/66] Update README.md (#548) Include quotes when installing with pip --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index de942f88e..96908989c 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ $ conda install -c conda-forge python-graphblas ``` or pip: ``` -$ pip install python-graphblas[default] +$ pip install 'python-graphblas[default]' ``` This will also install the [SuiteSparse:GraphBLAS](https://github.com/DrTimothyAldenDavis/GraphBLAS) compiled C library. We currently support the [GraphBLAS C API 2.0 specification](https://graphblas.org/docs/GraphBLAS_API_C_v2.0.0.pdf). From cf73b37d95c5497fe4b7d9bf17da82d9ed808f7b Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Mon, 17 Feb 2025 09:40:04 -0600 Subject: [PATCH 62/66] Update to support latest versions, including NumPy 2 (#546) Support latest versions: Python 3.13, numpy 2, numba 0.61, SS:GB 9.3.1, etc. 
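One concrete piece of the NumPy 2 support mentioned above: NumPy 2.0 removed long-deprecated aliases such as `np.cast`, which an earlier patch in this series already replaced in `graphblas/core/utils.py`. A minimal runnable sketch of the version-agnostic idiom mirrored from that hunk (the `np.float64` check is only an illustrative addition):

```python
import numpy as np

# np.sctypeDict maps type names and chars to scalar types; it exists on both
# NumPy 1.x and 2.x, unlike the np.cast mapping that NumPy 2.0 removed.
scalar_types = set(np.sctypeDict.values())
assert np.float64 in scalar_types
```
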
Also, add pre-commit hooks: prettier, taplo, actionlint, check-jsonschema, yamllint, zizmore, meta --------- Co-authored-by: Jim Kitchen --- .github/dependabot.yml | 6 +- .github/workflows/debug.yml | 3 +- .github/workflows/imports.yml | 11 +- .github/workflows/lint.yml | 3 + .github/workflows/publish_pypi.yml | 5 +- .github/workflows/test_and_build.yml | 193 +++++-- .github/zizmor.yml | 16 + .pre-commit-config.yaml | 92 ++-- .yamllint.yaml | 6 + CODE_OF_CONDUCT.md | 14 +- README.md | 39 ++ binder/environment.yml | 18 +- docs/_static/custom.css | 54 +- docs/_static/matrix.css | 118 ++--- docs/env.yml | 40 +- docs/user_guide/operations.rst | 2 +- environment.yml | 195 ++++---- graphblas/core/base.py | 2 +- graphblas/core/dtypes.py | 19 +- graphblas/core/infix.py | 1 + graphblas/core/matrix.py | 4 + graphblas/core/operator/base.py | 3 +- graphblas/core/scalar.py | 2 +- graphblas/core/ss/__init__.py | 4 +- graphblas/core/ss/config.py | 2 +- graphblas/core/ss/matrix.py | 39 +- graphblas/core/ss/vector.py | 33 +- graphblas/core/utils.py | 18 +- graphblas/exceptions.py | 14 +- graphblas/tests/conftest.py | 8 + graphblas/tests/test_dtype.py | 4 + graphblas/tests/test_matrix.py | 9 +- graphblas/tests/test_numpyops.py | 10 + graphblas/tests/test_scalar.py | 6 +- graphblas/tests/test_ssjit.py | 52 +- graphblas/tests/test_vector.py | 19 +- pyproject.toml | 719 ++++++++++++++------------- scripts/check_versions.sh | 18 +- 38 files changed, 1092 insertions(+), 709 deletions(-) create mode 100644 .github/zizmor.yml create mode 100644 .yamllint.yaml diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b18fd2935..5ace4600a 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,6 +1,6 @@ version: 2 updates: - - package-ecosystem: 'github-actions' - directory: '/' + - package-ecosystem: "github-actions" + directory: "/" schedule: - interval: 'weekly' + interval: "weekly" diff --git a/.github/workflows/debug.yml b/.github/workflows/debug.yml index 64d4bc12b..6c2b202b1 100644 --- a/.github/workflows/debug.yml +++ b/.github/workflows/debug.yml @@ -5,7 +5,7 @@ on: workflow_dispatch: inputs: debug_enabled: - description: 'Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)' + description: "Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)" required: false default: false @@ -29,6 +29,7 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false - name: Setup conda env run: | source "$CONDA/etc/profile.d/conda.sh" diff --git a/.github/workflows/imports.yml b/.github/workflows/imports.yml index b9e9d4406..e24d0d4db 100644 --- a/.github/workflows/imports.yml +++ b/.github/workflows/imports.yml @@ -14,7 +14,7 @@ jobs: pyver: ${{ steps.pyver.outputs.selected }} steps: - name: RNG for os - uses: ddradar/choose-random-action@v2.0.2 + uses: ddradar/choose-random-action@v3.0.0 id: os with: contents: | @@ -26,27 +26,32 @@ jobs: 1 1 - name: RNG for Python version - uses: ddradar/choose-random-action@v2.0.2 + uses: ddradar/choose-random-action@v3.0.0 id: pyver with: contents: | 3.10 3.11 3.12 + 3.13 weights: | 1 1 1 + 1 test_imports: needs: rngs runs-on: ${{ needs.rngs.outputs.os }} # runs-on: ${{ matrix.os }} # strategy: # matrix: - # python-version: ["3.10", "3.11", "3.12"] + # python-version: ["3.10", "3.11", "3.12", "3.13"] # os: ["ubuntu-latest", "macos-latest", "windows-latest"] steps: - uses: actions/checkout@v4 + with: + fetch-depth: 0 + 
persist-credentials: false - uses: actions/setup-python@v5 with: python-version: ${{ needs.rngs.outputs.pyver }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index d0182dd0c..655a576e5 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -17,6 +17,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + with: + fetch-depth: 0 + persist-credentials: false - uses: actions/setup-python@v5 with: python-version: "3.10" diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index b01d2a502..a9ad0be8c 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -3,7 +3,7 @@ name: Publish to PyPI on: push: tags: - - '20*' + - "20*" jobs: build_and_deploy: @@ -17,6 +17,7 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false - name: Set up Python uses: actions/setup-python@v5 with: @@ -35,7 +36,7 @@ jobs: - name: Check with twine run: python -m twine check --strict dist/* - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@v1.8.11 + uses: pypa/gh-action-pypi-publish@v1.9.0 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 6c55a0eca..7a8f06900 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -50,7 +50,7 @@ jobs: backend: ${{ steps.backend.outputs.selected }} steps: - name: RNG for mapnumpy - uses: ddradar/choose-random-action@v2.0.2 + uses: ddradar/choose-random-action@v3.0.0 id: mapnumpy with: contents: | @@ -64,7 +64,7 @@ jobs: 1 1 - name: RNG for backend - uses: ddradar/choose-random-action@v2.0.2 + uses: ddradar/choose-random-action@v3.0.0 id: backend with: contents: | @@ -84,14 +84,15 @@ jobs: run: shell: bash -l {0} strategy: - # To "stress test" in CI, set `fail-fast` to `false` and perhaps add more items to `matrix.slowtask` - fail-fast: true + # To "stress test" in CI, set `fail-fast` to `false` and use `repeat` in matrix below + fail-fast: false # The build matrix is [os]x[slowtask] and then randomly chooses [pyver] and [sourcetype]. # This should ensure we'll have full code coverage (i.e., no chance of getting unlucky), # since we need to run all slow tests on Windows and non-Windows OSes. matrix: os: ["ubuntu-latest", "macos-latest", "windows-latest"] slowtask: ["pytest_normal", "pytest_bizarro", "notebooks"] + # repeat: [1, 2, 3] # For stress testing env: # Wheels on OS X come with an OpenMP that conflicts with OpenMP from conda-forge. # Setting this is a workaround. 
@@ -101,8 +102,9 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false - name: RNG for Python version - uses: ddradar/choose-random-action@v2.0.2 + uses: ddradar/choose-random-action@v3.0.0 id: pyver with: # We should support major Python versions for at least 36 months as per SPEC 0 @@ -112,12 +114,14 @@ jobs: 3.10 3.11 3.12 + 3.13 weights: | 1 1 1 + 1 - name: RNG for source of python-suitesparse-graphblas - uses: ddradar/choose-random-action@v2.0.2 + uses: ddradar/choose-random-action@v3.0.0 id: sourcetype with: # Weights must be natural numbers, so set weights to very large to skip one @@ -132,28 +136,14 @@ jobs: 1 1 1 - - name: Setup mamba - uses: conda-incubator/setup-miniconda@v3 - id: setup_mamba - continue-on-error: true - with: - miniforge-variant: Mambaforge - miniforge-version: latest - use-mamba: true - python-version: ${{ steps.pyver.outputs.selected }} - channels: conda-forge,${{ contains(steps.pyver.outputs.selected, 'pypy') && 'defaults' || 'nodefaults' }} - channel-priority: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'flexible' || 'strict' }} - activate-environment: graphblas - auto-activate-base: false - name: Setup conda uses: conda-incubator/setup-miniconda@v3 id: setup_conda - if: steps.setup_mamba.outcome == 'failure' - continue-on-error: false with: auto-update-conda: true python-version: ${{ steps.pyver.outputs.selected }} - channels: conda-forge,${{ contains(steps.pyver.outputs.selected, 'pypy') && 'defaults' || 'nodefaults' }} + channels: conda-forge${{ contains(steps.pyver.outputs.selected, 'pypy') && ',defaults' || '' }} + conda-remove-defaults: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'false' || 'true' }} channel-priority: ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'flexible' || 'strict' }} activate-environment: graphblas auto-activate-base: false @@ -164,81 +154,154 @@ jobs: # # First let's randomly get versions of dependencies to install. # Consider removing old versions when they become problematic or very old (>=2 years). - nxver=$(python -c 'import random ; print(random.choice(["=2.8", "=3.0", "=3.1", "=3.2", ""]))') - sparsever=$(python -c 'import random ; print(random.choice(["=0.14", "=0.15", ""]))') + # Randomly choosing versions of dependencies based on Python version works surprisingly well... 
if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", "=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", ""]))') + nxver=$(python -c 'import random ; print(random.choice(["=2.8", "=3.0", "=3.1", "=3.2", "=3.3", "=3.4", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.24", "=1.25", "=1.26", "=2.0", "=2.1", "=2.2", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", "=1.13", "=1.14", "=1.15", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", "=2.2", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", "=2.6", "=2.7", ""]))') fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') + sparsever=$(python -c 'import random ; print(random.choice(["=0.14", "=0.15", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", "=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", ""]))') + nxver=$(python -c 'import random ; print(random.choice(["=2.8", "=3.0", "=3.1", "=3.2", "=3.3", "=3.4", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.24", "=1.25", "=1.26", "=2.0", "=2.1", "=2.2", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", "=1.13", "=1.14", "=1.15", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", "=2.2", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", "=2.6", "=2.7", ""]))') fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') - else # Python 3.12 - npver=$(python -c 'import random ; print(random.choice(["=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.11", "=1.12", ""]))') + sparsever=$(python -c 'import random ; print(random.choice(["=0.14", "=0.15", ""]))') + elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]]; then + nxver=$(python -c 'import random ; print(random.choice(["=3.2", "=3.3", "=3.4", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=1.26", "=2.0", "=2.1", "=2.2", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.11", "=1.12", "=1.13", "=1.14", "=1.15", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=2.1", "=2.2", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=2.4", "=2.5", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=2.4", "=2.5", "=2.6", "=2.7", ""]))') fmmver=$(python -c 'import random ; print(random.choice(["=1.7", ""]))') yamlver=$(python -c 'import random ; print(random.choice(["=6.0", ""]))') + sparsever=$(python -c 'import random ; 
print(random.choice(["=0.14", "=0.15", ""]))') + else # Python 3.13 + nxver=$(python -c 'import random ; print(random.choice(["=3.4", ""]))') + npver=$(python -c 'import random ; print(random.choice(["=2.1", "=2.2", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.14", "=1.15", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=2.7", ""]))') + fmmver=NA # Not yet supported + yamlver=$(python -c 'import random ; print(random.choice(["=6.0", ""]))') + sparsever=NA # Not yet supported fi + # But there may be edge cases of incompatibility we need to handle (more handled below) - if [[ ${{ steps.sourcetype.outputs.selected }} == "source" || ${{ steps.sourcetype.outputs.selected }} == "upstream" ]]; then + if [[ ${{ steps.sourcetype.outputs.selected }} == "source" ]]; then # TODO: there are currently issues with some numpy versions when - # installing python-suitesparse-grphblas from source or upstream. + # installing python-suitesparse-graphblas from source. npver="" spver="" pdver="" fi + # We can have a tight coupling with python-suitesparse-graphblas. # That is, we don't need to support versions of it that are two years old. # But, it's still useful for us to test with different versions! psg="" if [[ ${{ steps.sourcetype.outputs.selected}} == "upstream" ]] ; then + # Upstream needs to build with numpy 2 psgver="" + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]]; then + npver=$(python -c 'import random ; print(random.choice(["=2.1", "=2.2", ""]))') + else + npver=$(python -c 'import random ; print(random.choice(["=2.0", "=2.1", "=2.2", ""]))') + fi + elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]] ; then + if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", ""]))') + psg=python-suitesparse-graphblas${psgver} + else + psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", ""]))') + fi elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]] ; then if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=8.2.0.1", "=8.2.1.0", ""]))') + if [[ $npver == =1.* ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["=8.2.0.1", "=8.2.1.0"]))') + else + psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", ""]))') + fi psg=python-suitesparse-graphblas${psgver} else - psgver=$(python -c 'import random ; print(random.choice(["==8.2.0.1", "==8.2.1.0", ""]))') + if [[ $npver == =1.* ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["==8.2.0.1", "==8.2.1.0"]))') + else + psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", ""]))') + fi fi + # python-suitesparse-graphblas support is the same for Python 3.10 and 3.11 elif [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", "=8.2.0.1", "=8.2.1.0", ""]))') + if [[ $npver == =1.* ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", "=8.2.0.1", "=8.2.1.0"]))') + else + psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", ""]))') + fi psg=python-suitesparse-graphblas${psgver} elif
[[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0", ""]))') + if [[ $npver == =1.* ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0"]))') + else + psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", ""]))') + fi elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions - psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0", ""]))') + if [[ $npver == =1.* ]] ; then + psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0"]))') + else + psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", ""]))') + fi fi + + # Numba is tightly coupled to numpy versions if [[ ${npver} == "=1.26" ]] ; then - numbaver=$(python -c 'import random ; print(random.choice(["=0.58", "=0.59", ""]))') + numbaver=$(python -c 'import random ; print(random.choice(["=0.58", "=0.59", "=0.60", "=0.61", ""]))') if [[ ${spver} == "=1.9" ]] ; then spver=$(python -c 'import random ; print(random.choice(["=1.10", "=1.11", ""]))') fi elif [[ ${npver} == "=1.25" ]] ; then - numbaver=$(python -c 'import random ; print(random.choice(["=0.58", "=0.59", ""]))') + numbaver=$(python -c 'import random ; print(random.choice(["=0.58", "=0.59", "=0.60", "=0.61", ""]))') elif [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then - numbaver=$(python -c 'import random ; print(random.choice(["=0.57", "=0.58", "=0.59", ""]))') + numbaver=$(python -c 'import random ; print(random.choice(["=0.57", "=0.58", "=0.59", "=0.60", "=0.61", ""]))') else - numbaver=$(python -c 'import random ; print(random.choice(["=0.56", "=0.57", "=0.58", "=0.59", ""]))') + numbaver="" fi - # Only numba 0.59 support Python 3.12 + # Only numba >=0.59 supports Python 3.12 if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]] ; then - numbaver=$(python -c 'import random ; print(random.choice(["=0.59", ""]))') + numbaver=$(python -c 'import random ; print(random.choice(["=0.59", "=0.60", "=0.61", ""]))') + fi + + # Handle NumPy 2 + if [[ $npver != =1.* ]] ; then + # Only pandas >=2.2.2 supports NumPy 2 + pdver=$(python -c 'import random ; print(random.choice(["=2.2", ""]))') + + # Only awkward >=2.6.3 supports NumPy 2 + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]] ; then + akver=$(python -c 'import random ; print(random.choice(["=2.7", ""]))') + else + akver=$(python -c 'import random ; print(random.choice(["=2.6", "=2.7", ""]))') + fi + + # Only scipy >=1.13 supports NumPy 2 + if [[ $spver == "=1.9" || $spver == "=1.10" || $spver == "=1.11" || $spver == "=1.12" ]] ; then + spver="=1.13" + fi fi + fmm=fast_matrix_market${fmmver} awkward=awkward${akver} + + # Don't install numba and sparse for some versions if [[ ${{ contains(steps.pyver.outputs.selected, 'pypy') || - startsWith(steps.pyver.outputs.selected, '3.13') }} == true || + startsWith(steps.pyver.outputs.selected, '3.14') }} == true || ( ${{ matrix.slowtask != 'notebooks'}} == true && ( ( ${{ matrix.os == 'windows-latest' }} == true && $(python -c 'import random ; print(random.random() < .2)') == True ) || ( ${{ matrix.os ==
'windows-latest' }} == false && $(python -c 'import random ; print(random.random() < .4)') == True ))) ]] @@ -260,7 +323,7 @@ jobs: pdver="" yamlver="" fi - elif [[ ${npver} == "=2.0" ]] ; then + elif [[ ${npver} == =2.* ]] ; then # Don't install numba for unsupported versions of numpy numba="" numbaver=NA @@ -270,18 +333,34 @@ jobs: numba=numba${numbaver} sparse=sparse${sparsever} fi + + # sparse does not yet support Python 3.13 + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]] ; then + sparse="" + sparsever=NA + fi + # fast_matrix_market does not yet support Python 3.13 or osx-arm64 + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true || + ${{ matrix.os == 'macos-latest' }} == true ]] + then + fmm="" + fmmver=NA + fi + echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psg${psgver}" set -x # echo on - $(command -v mamba || command -v conda) install packaging pytest coverage pytest-randomly cffi donfig tomli c-compiler make \ + $(command -v mamba || command -v conda) install -c nodefaults \ + packaging pytest coverage pytest-randomly cffi donfig tomli c-compiler make \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ ${{ matrix.slowtask == 'notebooks' && 'matplotlib nbconvert jupyter "ipython>=7" drawsvg' || '' }} \ ${{ steps.sourcetype.outputs.selected == 'upstream' && 'cython' || '' }} \ - ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4"' || '' }} \ + ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4,<9.4"' || '' }} \ ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} \ - ${{ matrix.os == 'windows-latest' && 'cmake' || 'm4' }} + ${{ matrix.os == 'windows-latest' && 'cmake' || 'm4' }} \ + # ${{ matrix.os != 'windows-latest' && 'pytest-forked' || '' }} # to investigate crashes - name: Build extension module run: | if [[ ${{ steps.sourcetype.outputs.selected }} == "wheel" ]]; then @@ -307,7 +386,11 @@ jobs: # Don't use our conftest.py ; allow `test_print_jit_config` to fail if it doesn't exist (cd .. 
pytest --pyargs suitesparse_graphblas -s -k test_print_jit_config || true - pytest -v --pyargs suitesparse_graphblas) + pytest -v --pyargs suitesparse_graphblas || true) + - name: Print platform and sysconfig variables + run: | + python -c "import platform ; print(platform.uname())" + python -c "import pprint, sysconfig ; pprint.pprint(sysconfig.get_config_vars())" - name: Unit tests run: | A=${{ needs.rngs.outputs.mapnumpy == 'A' || '' }} ; B=${{ needs.rngs.outputs.mapnumpy == 'B' || '' }} @@ -336,6 +419,8 @@ jobs: if [[ $H && $bizarro ]] ; then if [[ $macos ]] ; then echo " $suitesparse" ; elif [[ $windows ]] ; then echo " $vanilla" ; fi ; fi) echo ${args} set -x # echo on + # pytest ${{ matrix.os != 'windows-latest' && '--forked' || '' }} \ # to investigate crashes + # --color=yes --randomly -v -s ${args} \ coverage run -m pytest --color=yes --randomly -v ${args} \ ${{ matrix.slowtask == 'pytest_normal' && '--runslow' || '' }} - name: Unit tests (bizarro scalars) @@ -372,6 +457,8 @@ jobs: if [[ $H && $bizarro ]] ; then if [[ $macos ]] ; then echo " $vanilla" ; elif [[ $windows ]] ; then echo " $suitesparse" ; fi ; fi) echo ${args} set -x # echo on + # pytest ${{ matrix.os != 'windows-latest' && '--forked' || '' }} \ # to investigate crashes + # --color=yes --randomly -v -s ${args} \ coverage run -a -m pytest --color=yes --randomly -v ${args} \ ${{ matrix.slowtask == 'pytest_bizarro' && '--runslow' || '' }} git checkout . # Undo changes to scalar default diff --git a/.github/zizmor.yml b/.github/zizmor.yml new file mode 100644 index 000000000..61f32c2e0 --- /dev/null +++ b/.github/zizmor.yml @@ -0,0 +1,16 @@ +rules: + use-trusted-publishing: + # TODO: we should update to use trusted publishing + ignore: + - publish_pypi.yml + excessive-permissions: + # It is probably good practice to use narrow permissions + ignore: + - debug.yml + - imports.yml + - publish_pypi.yml + - test_and_build.yml + template-injection: + # We use templates pretty heavily + ignore: + - test_and_build.yml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 12e5dd865..43e28b8fe 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,12 +11,12 @@ ci: autoupdate_commit_msg: "chore: update pre-commit hooks" autofix_commit_msg: "style: pre-commit fixes" skip: [pylint, no-commit-to-branch] -fail_fast: true +fail_fast: false default_language_version: - python: python3 + python: python3 repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v5.0.0 hooks: - id: check-added-large-files - id: check-case-conflict @@ -25,6 +25,10 @@ repos: - id: check-ast - id: check-toml - id: check-yaml + - id: check-executables-have-shebangs + - id: check-vcs-permalinks + - id: destroyed-symlinks + - id: detect-private-key - id: debug-statements - id: end-of-file-fixer exclude_types: [svg] @@ -33,72 +37,68 @@ repos: - id: name-tests-test args: ["--pytest-test-first"] - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.16 + rev: v0.23 hooks: - id: validate-pyproject name: Validate pyproject.toml # I don't yet trust ruff to do what autoflake does - repo: https://github.com/PyCQA/autoflake - rev: v2.2.1 + rev: v2.3.1 hooks: - id: autoflake args: [--in-place] # We can probably remove `isort` if we come to trust `ruff --fix`, # but we'll need to figure out the configuration to do this in `ruff` - repo: https://github.com/pycqa/isort - rev: 5.13.2 + rev: 6.0.0 hooks: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: 
https://github.com/asottile/pyupgrade - rev: v3.15.0 + rev: v3.19.1 hooks: - id: pyupgrade args: [--py310-plus] - repo: https://github.com/MarcoGorelli/auto-walrus - rev: v0.2.2 + rev: 0.3.4 hooks: - id: auto-walrus args: [--line-length, "100"] - repo: https://github.com/psf/black - rev: 24.1.1 + rev: 25.1.0 hooks: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.2.1 + rev: v0.9.6 hooks: - id: ruff args: [--fix-only, --show-fixes] # Let's keep `flake8` even though `ruff` does much of the same. # `flake8-bugbear` and `flake8-simplify` have caught things missed by `ruff`. - repo: https://github.com/PyCQA/flake8 - rev: 7.0.0 + rev: 7.1.2 hooks: - id: flake8 - additional_dependencies: &flake8_dependencies - # These versions need updated manually - - flake8==7.0.0 - - flake8-bugbear==24.1.17 - - flake8-simplify==0.21.0 - - repo: https://github.com/asottile/yesqa - rev: v1.5.0 - hooks: - - id: yesqa - additional_dependencies: *flake8_dependencies + args: ["--config=.flake8"] + additional_dependencies: + &flake8_dependencies # These versions need updated manually + - flake8==7.1.2 + - flake8-bugbear==24.12.12 + - flake8-simplify==0.21.0 - repo: https://github.com/codespell-project/codespell - rev: v2.2.6 + rev: v2.4.1 hooks: - id: codespell types_or: [python, rst, markdown] additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.2.1 + rev: v0.9.6 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint - rev: v0.9.1 + rev: v1.0.0 hooks: - id: sphinx-lint args: [--enable, all, "--disable=line-too-long,leaked-markup"] @@ -110,9 +110,39 @@ repos: - id: pyroma args: [-n, "10", .] - repo: https://github.com/shellcheck-py/shellcheck-py - rev: "v0.9.0.6" + rev: "v0.10.0.1" hooks: - - id: shellcheck + - id: shellcheck + - repo: https://github.com/rbubley/mirrors-prettier + rev: v3.5.1 + hooks: + - id: prettier + - repo: https://github.com/ComPWA/taplo-pre-commit + rev: v0.9.3 + hooks: + - id: taplo-format + - repo: https://github.com/rhysd/actionlint + rev: v1.7.7 + hooks: + - id: actionlint + - repo: https://github.com/python-jsonschema/check-jsonschema + rev: 0.31.1 + hooks: + - id: check-dependabot + - id: check-github-workflows + - id: check-readthedocs + - repo: https://github.com/adrienverge/yamllint + rev: v1.35.1 + hooks: + - id: yamllint + - repo: https://github.com/woodruffw/zizmor-pre-commit + rev: v1.3.1 + hooks: + - id: zizmor + - repo: meta + hooks: + - id: check-hooks-apply + - id: check-useless-excludes - repo: local hooks: # Add `--hook-stage manual` to pre-commit command to run (very slow) @@ -126,9 +156,9 @@ repos: args: [graphblas/] pass_filenames: false - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v5.0.0 hooks: - - id: no-commit-to-branch # no commit directly to main + - id: no-commit-to-branch # no commit directly to main # # Maybe: # @@ -145,8 +175,10 @@ repos: # additional_dependencies: [tomli] # # - repo: https://github.com/PyCQA/bandit -# rev: 1.7.4 +# rev: 1.8.2 # hooks: # - id: bandit +# args: ["-c", "pyproject.toml"] +# additional_dependencies: ["bandit[toml]"] # -# blacken-docs, blackdoc prettier, mypy, pydocstringformatter, velin, flynt, yamllint +# blacken-docs, blackdoc, mypy, pydocstringformatter, velin, flynt diff --git a/.yamllint.yaml b/.yamllint.yaml new file mode 100644 index 000000000..54e656293 --- /dev/null +++ b/.yamllint.yaml @@ -0,0 +1,6 @@ +--- +extends: default +rules: + document-start: disable + 
line-length: disable + truthy: disable diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 814c8052a..eebd2c372 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -13,13 +13,13 @@ educational level, family status, culture, or political belief. Examples of unacceptable behavior by participants include: -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, such as physical or electronic +- The use of sexualized language or imagery +- Personal attacks +- Trolling or insulting/derogatory comments +- Public or private harassment +- Publishing other's private information, such as physical or electronic addresses, without explicit permission -* Other unethical or unprofessional conduct +- Other unethical or unprofessional conduct Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions @@ -52,7 +52,7 @@ that is deemed necessary and appropriate to the circumstances. Maintainers are obligated to maintain confidentiality with regard to the reporter of an incident. -This Code of Conduct is adapted from the [Numba Code of Conduct][numba], which is based on the [Contributor Covenant][homepage], +This Code of Conduct is adapted from the [Numba Code of Conduct][numba], which is based on the [Contributor Covenant][homepage], version 1.3.0, available at [https://contributor-covenant.org/version/1/3/0/][version], and the [Swift Code of Conduct][swift]. diff --git a/README.md b/README.md index 96908989c..1080314c7 100644 --- a/README.md +++ b/README.md @@ -35,14 +35,19 @@ For algorithms, see

## Install + Install the latest version of Python-graphblas via conda: + ``` $ conda install -c conda-forge python-graphblas ``` + or pip: + ``` $ pip install 'python-graphblas[default]' ``` + This will also install the [SuiteSparse:GraphBLAS](https://github.com/DrTimothyAldenDavis/GraphBLAS) compiled C library. We currently support the [GraphBLAS C API 2.0 specification](https://graphblas.org/docs/GraphBLAS_API_C_v2.0.0.pdf). @@ -57,6 +62,7 @@ The following are not required by python-graphblas, but may be needed for certai - `fast-matrix-market` - for faster read/write of Matrix Market files with `gb.io.mmread` and `gb.io.mmwrite`. ## Description + Currently works with [SuiteSparse:GraphBLAS](https://github.com/DrTimothyAldenDavis/GraphBLAS), but the goal is to make it work with all implementations of the GraphBLAS spec. The approach taken with this library is to follow the C-API 2.0 specification as closely as possible while making improvements @@ -70,10 +76,12 @@ with how Python handles assignment, so instead we (ab)use the left-shift `<<` no assignment. This opens up all kinds of nice possibilities. This is an example of how the mapping works: + ```C // C call GrB_Matrix_mxm(M, mask, GrB_PLUS_INT64, GrB_MIN_PLUS_INT64, A, B, NULL) ``` + ```python # Python call M(mask.V, accum=binary.plus) << A.mxm(B, semiring.min_plus) @@ -91,10 +99,12 @@ is a much better approach, even if it doesn't feel very Pythonic. Descriptor flags are set on the appropriate elements to keep logic close to what it affects. Here is the same call with descriptor bits set. `ttcsr` indicates transpose the first and second matrices, complement the structure of the mask, and do a replacement on the output. + ```C // C call GrB_Matrix_mxm(M, mask, GrB_PLUS_INT64, GrB_MIN_PLUS_INT64, A, B, desc.ttcsr) ``` + ```python # Python call M(~mask.S, accum=binary.plus, replace=True) << A.T.mxm(B.T, semiring.min_plus) @@ -104,16 +114,20 @@ The objects receiving the flag operations (A.T, ~mask, etc) are also delayed obj do no computation, allowing the correct descriptor bits to be set in a single GraphBLAS call. **If no mask or accumulator is used, the call looks like this**: + ```python M << A.mxm(B, semiring.min_plus) ``` + The use of `<<` to indicate updating is actually just syntactic sugar for a real `.update()` method. 
The above expression could be written as: + ```python M.update(A.mxm(B, semiring.min_plus)) ``` ## Operations + ```python M(mask, accum) << A.mxm(B, semiring) # mxm w(mask, accum) << A.mxv(v, semiring) # mxv @@ -123,14 +137,18 @@ M(mask, accum) << A.ewise_mult(B, binaryop) # eWiseMult M(mask, accum) << A.kronecker(B, binaryop) # kronecker M(mask, accum) << A.T # transpose ``` + ## Extract + ```python M(mask, accum) << A[rows, cols] # rows and cols are a list or a slice w(mask, accum) << A[rows, col_index] # extract column w(mask, accum) << A[row_index, cols] # extract row s = A[row_index, col_index].value # extract single element ``` + ## Assign + ```python M(mask, accum)[rows, cols] << A # rows and cols are a list or a slice M(mask, accum)[rows, col_index] << v # assign column @@ -140,31 +158,42 @@ M[row_index, col_index] << s # assign scalar to single element # (mask and accum not allowed) del M[row_index, col_index] # remove single element ``` + ## Apply + ```python M(mask, accum) << A.apply(unaryop) M(mask, accum) << A.apply(binaryop, left=s) # bind-first M(mask, accum) << A.apply(binaryop, right=s) # bind-second ``` + ## Reduce + ```python v(mask, accum) << A.reduce_rowwise(op) # reduce row-wise v(mask, accum) << A.reduce_columnwise(op) # reduce column-wise s(accum) << A.reduce_scalar(op) s(accum) << v.reduce(op) ``` + ## Creating new Vectors / Matrices + ```python A = Matrix.new(dtype, num_rows, num_cols) # new_type B = A.dup() # dup A = Matrix.from_coo([row_indices], [col_indices], [values]) # build ``` + ## New from delayed + Delayed objects can be used to create a new object using the `.new()` method. + ```python C = A.mxm(B, semiring).new() ``` + ## Properties + ```python size = v.size # size nrows = M.nrows # nrows @@ -172,10 +201,13 @@ ncols = M.ncols # ncols nvals = M.nvals # nvals rindices, cindices, vals = M.to_coo() # extractTuples ``` + ## Initialization + There is a mechanism to initialize `graphblas` with a context prior to use. This allows for setting the backend to use as well as the blocking/non-blocking mode. If the context is not initialized, a default initialization will be performed automatically. + ```python import graphblas as gb @@ -186,10 +218,13 @@ gb.init("suitesparse", blocking=True) from graphblas import binary, semiring from graphblas import Matrix, Vector, Scalar ``` + ## Performant User Defined Functions + Python-graphblas requires `numba` which enables compiling user-defined Python functions to native C for use in GraphBLAS. Example customized UnaryOp: + ```python from graphblas import unary @@ -204,9 +239,11 @@ v = Vector.from_coo([0, 1, 3], [1, 2, 3]) w = v.apply(unary.force_odd).new() w # indexes=[0, 1, 3], values=[1, 3, 3] ``` + Similar methods exist for BinaryOp, Monoid, and Semiring. ## Relation to other network analysis libraries + Python-graphblas aims to provide an efficient and consistent expression of graph operations using linear algebra. This allows the development of high-performance implementations of existing and new graph algorithms @@ -223,7 +260,9 @@ other libraries, `graphblas.io` contains multiple connectors, see the following section.
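The UnaryOp example in the README hunk above ends by noting that similar methods exist for BinaryOp, Monoid, and Semiring. As a quick illustration, here is a hedged sketch of the BinaryOp analog; it assumes `binary.register_new` mirrors `unary.register_new`, and the `absdiff` function and values are invented for the example:

```python
# Sketch: registering a user-defined BinaryOp, by analogy with the
# unary.register_new example above. `absdiff` is a hypothetical op.
from graphblas import Vector, binary

def absdiff(x, y):
    return abs(x - y)

binary.register_new("absdiff", absdiff)

v = Vector.from_coo([0, 1, 3], [1, 2, 3])
w = Vector.from_coo([0, 1, 3], [3, 3, 3])
r = v.ewise_mult(w, binary.absdiff).new()
r  # indexes=[0, 1, 3], values=[2, 1, 0]
```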
## Import/Export connectors to the Python ecosystem + `graphblas.io` contains functions for converting to and from: + ```python import graphblas as gb diff --git a/binder/environment.yml b/binder/environment.yml index 11cd98e0c..9548f2126 100644 --- a/binder/environment.yml +++ b/binder/environment.yml @@ -1,12 +1,12 @@ name: graphblas channels: - - conda-forge + - conda-forge dependencies: - - python=3.11 - - python-graphblas - - matplotlib - - networkx - - pandas - - scipy - - drawsvg - - cairosvg + - python=3.11 + - python-graphblas + - matplotlib + - networkx + - pandas + - scipy + - drawsvg + - cairosvg diff --git a/docs/_static/custom.css b/docs/_static/custom.css index 1b14402cd..f7dd59b74 100644 --- a/docs/_static/custom.css +++ b/docs/_static/custom.css @@ -1,78 +1,78 @@ - /* Main Page Stylings */ .intro-card { - background-color: var(--pst-color-background); - margin-bottom: 30px; + background-color: var(--pst-color-background); + margin-bottom: 30px; } .intro-card:hover { - box-shadow: 0.2rem 0.5rem 1rem var(--pst-color-link) !important; + box-shadow: 0.2rem 0.5rem 1rem var(--pst-color-link) !important; } .intro-card .card-header { - background-color: inherit; + background-color: inherit; } .intro-card .card-header .card-text { - font-weight: bold; + font-weight: bold; } .intro-card .card-body { - margin-top: 0; + margin-top: 0; } .intro-card .card-body .card-text:first-child { - margin-bottom: 0; + margin-bottom: 0; } .shadow { - box-shadow: 0.2rem 0.5rem 1rem var(--pst-color-text-muted) !important; + box-shadow: 0.2rem 0.5rem 1rem var(--pst-color-text-muted) !important; } .table { - font-size: smaller; - width: inherit; + font-size: smaller; + width: inherit; } -.table td, .table th { - padding: 0 .75rem; +.table td, +.table th { + padding: 0 0.75rem; } .table.inline { - display: inline-table; - margin-right: 30px; + display: inline-table; + margin-right: 30px; } p.rubric { - border-bottom: none; + border-bottom: none; } button.navbar-btn.rounded-circle { - padding: 0.25rem; + padding: 0.25rem; } button.navbar-btn.search-button { - color: var(--pst-color-text-muted); - padding: 0; + color: var(--pst-color-text-muted); + padding: 0; } -button.navbar-btn:hover -{ - color: var(--pst-color-primary); +button.navbar-btn:hover { + color: var(--pst-color-primary); } button.theme-switch-button { - font-size: calc(var(--pst-font-size-icon) - .1rem); - border: none; + font-size: calc(var(--pst-font-size-icon) - 0.1rem); + border: none; } button span.theme-switch:hover { - color: var(--pst-color-primary); + color: var(--pst-color-primary); } /* Styling for Jupyter Notebook ReST Exports */ -.dataframe tbody th, .dataframe tbody td { - padding: 10px; +.dataframe tbody th, +.dataframe tbody td { + padding: 10px; } diff --git a/docs/_static/matrix.css b/docs/_static/matrix.css index 5700ea3fc..1937178e5 100644 --- a/docs/_static/matrix.css +++ b/docs/_static/matrix.css @@ -1,104 +1,104 @@ /* Based on the stylesheet used by matrepr (https://github.com/alugowski/matrepr) and modified for sphinx */ -table.matrix { - border-collapse: collapse; - border: 0px; +table.matrix { + border-collapse: collapse; + border: 0px; } /* Disable a horizintal line from the default stylesheet */ .table.matrix > :not(caption) > * > * { - border-bottom-width: 0px; + border-bottom-width: 0px; } /* row indices */ table.matrix > tbody tr th { - font-size: smaller; - font-weight: bolder; - vertical-align: middle; - text-align: right; + font-size: smaller; + font-weight: bolder; + vertical-align: middle; + text-align: 
right; } /* row indices are often made bold in the source data; here make them match the boldness of the th column label style*/ table.matrix strong { - font-weight: bold; + font-weight: bold; } /* column indices */ table.matrix > thead tr th { - font-size: smaller; - font-weight: bolder; - vertical-align: middle; - text-align: center; + font-size: smaller; + font-weight: bolder; + vertical-align: middle; + text-align: center; } /* cells */ table.matrix > tbody tr td { - vertical-align: middle; - text-align: center; - position: relative; + vertical-align: middle; + text-align: center; + position: relative; } /* left border */ table.matrix > tbody tr td:first-of-type { - border-left: solid 2px var(--pst-color-text-base); + border-left: solid 2px var(--pst-color-text-base); } /* right border */ table.matrix > tbody tr td:last-of-type { - border-right: solid 2px var(--pst-color-text-base); + border-right: solid 2px var(--pst-color-text-base); } /* prevents empty cells from collapsing, especially empty rows */ table.matrix > tbody tr td:empty::before { - /* basicaly fills empty cells with   */ - content: "\00a0\00a0\00a0"; - visibility: hidden; + /* basicaly fills empty cells with   */ + content: "\00a0\00a0\00a0"; + visibility: hidden; } table.matrix > tbody tr td:empty::after { - content: "\00a0\00a0\00a0"; - visibility: hidden; + content: "\00a0\00a0\00a0"; + visibility: hidden; } /* matrix bracket ticks */ table.matrix > tbody > tr:first-child > td:first-of-type::before { - content: ""; - width: 4px; - position: absolute; - top: 0; - bottom: 0; - visibility: visible; - left: 0; - right: auto; - border-top: solid 2px var(--pst-color-text-base); + content: ""; + width: 4px; + position: absolute; + top: 0; + bottom: 0; + visibility: visible; + left: 0; + right: auto; + border-top: solid 2px var(--pst-color-text-base); } table.matrix > tbody > tr:last-child > td:first-of-type::before { - content: ""; - width: 4px; - position: absolute; - top: 0; - bottom: 0; - visibility: visible; - left: 0; - right: auto; - border-bottom: solid 2px var(--pst-color-text-base); + content: ""; + width: 4px; + position: absolute; + top: 0; + bottom: 0; + visibility: visible; + left: 0; + right: auto; + border-bottom: solid 2px var(--pst-color-text-base); } table.matrix > tbody > tr:first-child > td:last-of-type::after { - content: ""; - width: 4px; - position: absolute; - top: 0; - bottom: 0; - visibility: visible; - left: auto; - right: 0; - border-top: solid 2px var(--pst-color-text-base); + content: ""; + width: 4px; + position: absolute; + top: 0; + bottom: 0; + visibility: visible; + left: auto; + right: 0; + border-top: solid 2px var(--pst-color-text-base); } table.matrix > tbody > tr:last-child > td:last-of-type::after { - content: ""; - width: 4px; - position: absolute; - top: 0; - bottom: 0; - visibility: visible; - left: auto; - right: 0; - border-bottom: solid 2px var(--pst-color-text-base); + content: ""; + width: 4px; + position: absolute; + top: 0; + bottom: 0; + visibility: visible; + left: auto; + right: 0; + border-bottom: solid 2px var(--pst-color-text-base); } diff --git a/docs/env.yml b/docs/env.yml index c0c4c8999..78a50afbe 100644 --- a/docs/env.yml +++ b/docs/env.yml @@ -1,23 +1,23 @@ name: python-graphblas-docs channels: - - conda-forge - - nodefaults + - conda-forge + - nodefaults dependencies: - - python=3.10 - - pip - # python-graphblas dependencies - - donfig - - numba - - python-suitesparse-graphblas>=7.4.0.0 - - pyyaml - # extra dependencies - - matplotlib - - networkx - - pandas - - 
scipy>=1.7.0 - # docs dependencies - - commonmark # For RTD - - nbsphinx - - numpydoc - - pydata-sphinx-theme=0.13.1 - - sphinx-panels=0.6.0 + - python=3.10 + - pip + # python-graphblas dependencies + - donfig + - numba + - python-suitesparse-graphblas>=7.4.0.0 + - pyyaml + # extra dependencies + - matplotlib + - networkx + - pandas + - scipy>=1.7.0 + # docs dependencies + - commonmark # For RTD + - nbsphinx + - numpydoc + - pydata-sphinx-theme=0.13.1 + - sphinx-panels=0.6.0 diff --git a/docs/user_guide/operations.rst b/docs/user_guide/operations.rst index 3f710dc23..18d0352d7 100644 --- a/docs/user_guide/operations.rst +++ b/docs/user_guide/operations.rst @@ -8,7 +8,7 @@ Matrix Multiply The GraphBLAS spec contains three methods for matrix multiplication, depending on whether the inputs are Matrix or Vector. - - **mxm** -- Matrix-Matrix multplications + - **mxm** -- Matrix-Matrix multiplication - **mxv** -- Matrix-Vector multiplication - **vxm** -- Vector-Matrix multiplication diff --git a/environment.yml b/environment.yml index 1863d4006..2bae0b76e 100644 --- a/environment.yml +++ b/environment.yml @@ -11,103 +11,100 @@ # It is okay to comment out sections below that you don't need such as viz or building docs. name: graphblas-dev channels: - - conda-forge - - nodefaults # Only install packages from conda-forge for faster solving + - conda-forge + - nodefaults # Only install packages from conda-forge for faster solving dependencies: - - python - - donfig - - numba - - python-suitesparse-graphblas - - pyyaml - # For repr - - pandas - # For I/O - - awkward - - fast_matrix_market - - networkx - - scipy - - sparse - # For viz - - datashader - - hvplot - - matplotlib - # For linting - - pre-commit - # For testing - - packaging - - pytest-cov - - tomli - # For debugging - - icecream - - ipykernel - - ipython - # For type annotations - - mypy - # For building docs - - nbsphinx - - numpydoc - - pydata-sphinx-theme - - sphinx-panels - # For building logo - - drawsvg - - cairosvg - # EXTRA (optional; uncomment as desired) - # - autoflake - # - black - # - black-jupyter - # - build - # - codespell - # - commonmark - # - cython - # - cytoolz - # - distributed - # - flake8 - # - flake8-bugbear - # - flake8-comprehensions - # - flake8-print - # - flake8-quotes - # - flake8-simplify - # - gcc - # - gh - # - git - # - graph-tool - # - xorg-libxcursor # for graph-tool - # - grayskull - # - h5py - # - hiveplot - # - igraph - # - ipycytoscape - # - isort - # - jupyter - # - jupyterlab - # - line_profiler - # - lxml - # - make - # - memory_profiler - # - nbqa - # - netcdf4 - # - networkit - # - nxviz - # - pycodestyle - # - pydot - # - pygraphviz - # - pylint - # - pytest-runner - # - pytest-xdist - # - python-graphviz - # - python-igraph - # - python-louvain - # - pyupgrade - # - rich - # - ruff - # - scalene - # - scikit-network - # - setuptools-git-versioning - # - snakeviz - # - sphinx-lint - # - sympy - # - tuna - # - twine - # - vim - # - yesqa - # - zarr + - python + - donfig + - numba + - python-suitesparse-graphblas + - pyyaml + # For repr + - pandas + # For I/O + - awkward + - networkx + - scipy + - sparse + # For viz + - datashader + - hvplot + - matplotlib + # For linting + - pre-commit + # For testing + - packaging + - pytest-cov + - tomli + # For debugging + - icecream + - ipykernel + - ipython + # For type annotations + - mypy + # For building docs + - nbsphinx + - numpydoc + - pydata-sphinx-theme + - sphinx-panels + # For building logo + - drawsvg + - cairosvg + # EXTRA (optional; uncomment 
as desired) + # - autoflake + # - black + # - black-jupyter + # - codespell + # - commonmark + # - cython + # - cytoolz + # - distributed + # - flake8 + # - flake8-bugbear + # - flake8-comprehensions + # - flake8-print + # - flake8-quotes + # - flake8-simplify + # - gcc + # - gh + # - git + # - graph-tool + # - xorg-libxcursor # for graph-tool + # - grayskull + # - h5py + # - hiveplot + # - igraph + # - ipycytoscape + # - isort + # - jupyter + # - jupyterlab + # - line_profiler + # - lxml + # - make + # - memory_profiler + # - nbqa + # - netcdf4 + # - networkit + # - nxviz + # - pycodestyle + # - pydot + # - pygraphviz + # - pylint + # - pytest-runner + # - pytest-xdist + # - python-graphviz + # - python-igraph + # - python-louvain + # - pyupgrade + # - rich + # - ruff + # - scalene + # - scikit-network + # - setuptools-git-versioning + # - snakeviz + # - sphinx-lint + # - sympy + # - tuna + # - twine + # - vim + # - zarr diff --git a/graphblas/core/base.py b/graphblas/core/base.py index 5658e99c1..24a49ba1a 100644 --- a/graphblas/core/base.py +++ b/graphblas/core/base.py @@ -513,7 +513,7 @@ def _name_html(self): _expect_op = _expect_op # Don't let non-scalars be coerced to numpy arrays - def __array__(self, dtype=None): + def __array__(self, dtype=None, *, copy=None): raise TypeError( f"{type(self).__name__} can't be directly converted to a numpy array; " f"perhaps use `{self.name}.to_coo()` method instead." diff --git a/graphblas/core/dtypes.py b/graphblas/core/dtypes.py index 28ce60d03..2d4178b14 100644 --- a/graphblas/core/dtypes.py +++ b/graphblas/core/dtypes.py @@ -1,4 +1,5 @@ import warnings +from ast import literal_eval import numpy as np from numpy import promote_types, result_type @@ -97,7 +98,7 @@ def register_anonymous(dtype, name=None): # Allow dtypes such as `"INT64[3, 4]"` for convenience base_dtype, shape = dtype.split("[", 1) base_dtype = lookup_dtype(base_dtype) - shape = np.lib.format.safe_eval(f"[{shape}") + shape = literal_eval(f"[{shape}") dtype = np.dtype((base_dtype.np_type, shape)) else: raise @@ -115,7 +116,17 @@ def register_anonymous(dtype, name=None): from ..exceptions import check_status_carg gb_obj = ffi.new("GrB_Type*") - if backend == "suitesparse": + + if hasattr(lib, "GrB_Type_set_String"): + # We name this so that we can serialize and deserialize UDTs + # We don't yet have C definitions + np_repr = _dtype_to_string(dtype) + status = lib.GrB_Type_new(gb_obj, dtype.itemsize) + check_status_carg(status, "Type", gb_obj[0]) + val_obj = ffi.new("char[]", np_repr.encode()) + status = lib.GrB_Type_set_String(gb_obj[0], val_obj, lib.GrB_NAME) + elif backend == "suitesparse": + # For SuiteSparse < 9 # We name this so that we can serialize and deserialize UDTs # We don't yet have C definitions np_repr = _dtype_to_string(dtype).encode() @@ -429,7 +440,7 @@ def _dtype_to_string(dtype): np_type = dtype.np_type s = str(np_type) try: - if np.dtype(np.lib.format.safe_eval(s)) == np_type: # pragma: no branch (safety) + if np.dtype(literal_eval(s)) == np_type: # pragma: no branch (safety) return s except Exception: pass @@ -448,5 +459,5 @@ def _string_to_dtype(s): return lookup_dtype(s) except Exception: pass - np_type = np.dtype(np.lib.format.safe_eval(s)) + np_type = np.dtype(literal_eval(s)) return lookup_dtype(np_type) diff --git a/graphblas/core/infix.py b/graphblas/core/infix.py index 2c1014fe5..24c109639 100644 --- a/graphblas/core/infix.py +++ b/graphblas/core/infix.py @@ -316,6 +316,7 @@ class MatrixInfixExpr(InfixExprBase): ndim = 2 output_type = MatrixExpression 
_is_transposed = False + __networkx_backend__ = "graphblas" __networkx_plugin__ = "graphblas" def __init__(self, left, right): diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index e28e92a65..bf20cc953 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -184,6 +184,7 @@ class Matrix(BaseType): ndim = 2 _is_transposed = False _name_counter = itertools.count() + __networkx_backend__ = "graphblas" __networkx_plugin__ = "graphblas" def __new__(cls, dtype=FP64, nrows=0, ncols=0, *, name=None): @@ -3583,6 +3584,7 @@ class MatrixExpression(BaseExpression): ndim = 2 output_type = Matrix _is_transposed = False + __networkx_backend__ = "graphblas" __networkx_plugin__ = "graphblas" def __init__( @@ -3724,6 +3726,7 @@ class MatrixIndexExpr(AmbiguousAssignOrExtract): ndim = 2 output_type = Matrix _is_transposed = False + __networkx_backend__ = "graphblas" __networkx_plugin__ = "graphblas" def __init__(self, parent, resolved_indexes, nrows, ncols): @@ -3824,6 +3827,7 @@ class TransposedMatrix: ndim = 2 _is_scalar = False _is_transposed = True + __networkx_backend__ = "graphblas" __networkx_plugin__ = "graphblas" def __init__(self, matrix): diff --git a/graphblas/core/operator/base.py b/graphblas/core/operator/base.py index 4e19fbe96..97b2c9fbd 100644 --- a/graphblas/core/operator/base.py +++ b/graphblas/core/operator/base.py @@ -251,8 +251,7 @@ def __init__(self, parent, name, type_, return_type, gb_obj, gb_name, dtype2=Non def __repr__(self): classname = self.opclass.lower() - if classname.endswith("op"): - classname = classname[:-2] + classname = classname.removesuffix("op") dtype2 = "" if self._type2 is None else f", {self._type2.name}" return f"{classname}.{self.name}[{self.type.name}{dtype2}]" diff --git a/graphblas/core/scalar.py b/graphblas/core/scalar.py index 7e759e5d0..25aef5743 100644 --- a/graphblas/core/scalar.py +++ b/graphblas/core/scalar.py @@ -165,7 +165,7 @@ def __index__(self): return self.__int__ raise AttributeError("Scalar object only has `__index__` for integral dtypes") - def __array__(self, dtype=None): + def __array__(self, dtype=None, *, copy=None): if dtype is None: dtype = self.dtype.np_type return np.array(self.value, dtype=dtype) diff --git a/graphblas/core/ss/__init__.py b/graphblas/core/ss/__init__.py index c2e83ddcc..10a6fed94 100644 --- a/graphblas/core/ss/__init__.py +++ b/graphblas/core/ss/__init__.py @@ -1,3 +1,5 @@ import suitesparse_graphblas as _ssgb -_IS_SSGB7 = _ssgb.__version__.split(".", 1)[0] == "7" +(version_major, version_minor, version_bug) = map(int, _ssgb.__version__.split(".")[:3]) + +_IS_SSGB7 = version_major == 7 diff --git a/graphblas/core/ss/config.py b/graphblas/core/ss/config.py index 20cf318e8..70a7dd196 100644 --- a/graphblas/core/ss/config.py +++ b/graphblas/core/ss/config.py @@ -99,7 +99,7 @@ def __getitem__(self, key): return {reverse_bitwise[val]} rv = set() for k, v in self._bitwise[key].items(): - if isinstance(k, str) and val & v and bin(v).count("1") == 1: + if isinstance(k, str) and val & v and (v).bit_count() == 1: rv.add(k) return rv if is_bool: diff --git a/graphblas/core/ss/matrix.py b/graphblas/core/ss/matrix.py index 0a08c50e2..509c56113 100644 --- a/graphblas/core/ss/matrix.py +++ b/graphblas/core/ss/matrix.py @@ -3650,8 +3650,10 @@ def _import_any( def unpack_hyperhash(self, *, compute=False, name=None, **opts): """Unpacks the hyper_hash of a hypersparse matrix if possible. - Will return None if the matrix is not hypersparse or if the hash is not computed. 
- Use ``compute=True`` to compute the hyper_hash if the input is hypersparse. + Will return None if the matrix is not hypersparse, if the hash is not computed, + or if the hash is not needed. Use ``compute=True`` to try to compute the hyper_hash + if the input is hypersparse. The hyper_hash is optional in SuiteSparse:GraphBLAS, + so it may not be computed even with ``compute=True``. Use ``pack_hyperhash`` to move a hyper_hash matrix that was previously unpacked back into a matrix. @@ -4079,6 +4081,21 @@ def serialize(self, compression="default", level=None, **opts): blob_handle = ffi_new("void**") blob_size_handle = ffi_new("GrB_Index*") parent = self._parent + if parent.dtype._is_udt and hasattr(lib, "GrB_Type_get_String"): + # Get the name from the dtype and set it to the name of the matrix so we can + # recreate the UDT. This is a bit hacky and we should restore the original name. + # First get the size of name. + dtype_size = ffi_new("size_t*") + status = lib.GrB_Type_get_SIZE(parent.dtype.gb_obj[0], dtype_size, lib.GrB_NAME) + check_status_carg(status, "Type", parent.dtype.gb_obj[0]) + # Then get the name + dtype_char = ffi_new(f"char[{dtype_size[0]}]") + status = lib.GrB_Type_get_String(parent.dtype.gb_obj[0], dtype_char, lib.GrB_NAME) + check_status_carg(status, "Type", parent.dtype.gb_obj[0]) + # Then set the name + status = lib.GrB_Matrix_set_String(parent._carg, dtype_char, lib.GrB_NAME) + check_status_carg(status, "Matrix", parent._carg) + check_status( lib.GxB_Matrix_serialize( blob_handle, @@ -4120,8 +4137,8 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts): else: data = np.frombuffer(data, np.uint8) data_obj = ffi.from_buffer("void*", data) - # Get the dtype name first if dtype is None: + # Get the dtype name first (for non-UDTs) cname = ffi_new(f"char[{lib.GxB_MAX_NAME_LEN}]") info = lib.GxB_deserialize_type_name( cname, @@ -4131,6 +4148,22 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts): if info != lib.GrB_SUCCESS: raise _error_code_lookup[info]("Matrix deserialize failed to get the dtype name") dtype_name = b"".join(itertools.takewhile(b"\x00".__ne__, cname)).decode() + if not dtype_name and hasattr(lib, "GxB_Serialized_get_String"): + # Handle UDTs. First get the size of name + dtype_size = ffi_new("size_t*") + info = lib.GxB_Serialized_get_SIZE(data_obj, dtype_size, lib.GrB_NAME, data.nbytes) + if info != lib.GrB_SUCCESS: + raise _error_code_lookup[info]( + "Matrix deserialize failed to get the size of name" + ) + # Then get the name + dtype_char = ffi_new(f"char[{dtype_size[0]}]") + info = lib.GxB_Serialized_get_String( + data_obj, dtype_char, lib.GrB_NAME, data.nbytes + ) + if info != lib.GrB_SUCCESS: + raise _error_code_lookup[info]("Matrix deserialize failed to get the name") + dtype_name = ffi.string(dtype_char).decode() dtype = _string_to_dtype(dtype_name) else: dtype = lookup_dtype(dtype) diff --git a/graphblas/core/ss/vector.py b/graphblas/core/ss/vector.py index a21d54de9..fdde7eb92 100644 --- a/graphblas/core/ss/vector.py +++ b/graphblas/core/ss/vector.py @@ -1652,6 +1652,21 @@ def serialize(self, compression="default", level=None, **opts): blob_handle = ffi_new("void**") blob_size_handle = ffi_new("GrB_Index*") parent = self._parent + if parent.dtype._is_udt and hasattr(lib, "GrB_Type_get_String"): + # Get the name from the dtype and set it to the name of the vector so we can + # recreate the UDT. This is a bit hacky and we should restore the original name. + # First get the size of name. 
+ dtype_size = ffi_new("size_t*") + status = lib.GrB_Type_get_SIZE(parent.dtype.gb_obj[0], dtype_size, lib.GrB_NAME) + check_status_carg(status, "Type", parent.dtype.gb_obj[0]) + # Then get the name + dtype_char = ffi_new(f"char[{dtype_size[0]}]") + status = lib.GrB_Type_get_String(parent.dtype.gb_obj[0], dtype_char, lib.GrB_NAME) + check_status_carg(status, "Type", parent.dtype.gb_obj[0]) + # Then set the name + status = lib.GrB_Vector_set_String(parent._carg, dtype_char, lib.GrB_NAME) + check_status_carg(status, "Vector", parent._carg) + check_status( lib.GxB_Vector_serialize( blob_handle, @@ -1694,7 +1709,7 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts): data = np.frombuffer(data, np.uint8) data_obj = ffi.from_buffer("void*", data) if dtype is None: - # Get the dtype name first + # Get the dtype name first (for non-UDTs) cname = ffi_new(f"char[{lib.GxB_MAX_NAME_LEN}]") info = lib.GxB_deserialize_type_name( cname, @@ -1704,6 +1719,22 @@ def deserialize(cls, data, dtype=None, *, name=None, **opts): if info != lib.GrB_SUCCESS: raise _error_code_lookup[info]("Vector deserialize failed to get the dtype name") dtype_name = b"".join(itertools.takewhile(b"\x00".__ne__, cname)).decode() + if not dtype_name and hasattr(lib, "GxB_Serialized_get_String"): + # Handle UDTs. First get the size of name + dtype_size = ffi_new("size_t*") + info = lib.GxB_Serialized_get_SIZE(data_obj, dtype_size, lib.GrB_NAME, data.nbytes) + if info != lib.GrB_SUCCESS: + raise _error_code_lookup[info]( + "Vector deserialize failed to get the size of name" + ) + # Then get the name + dtype_char = ffi_new(f"char[{dtype_size[0]}]") + info = lib.GxB_Serialized_get_String( + data_obj, dtype_char, lib.GrB_NAME, data.nbytes + ) + if info != lib.GrB_SUCCESS: + raise _error_code_lookup[info]("Vector deserialize failed to get the name") + dtype_name = ffi.string(dtype_char).decode() dtype = _string_to_dtype(dtype_name) else: dtype = lookup_dtype(dtype) diff --git a/graphblas/core/utils.py b/graphblas/core/utils.py index 6e91edd1b..e9a29b3a9 100644 --- a/graphblas/core/utils.py +++ b/graphblas/core/utils.py @@ -5,6 +5,8 @@ from ..dtypes import _INDEX, lookup_dtype from . 
import ffi, lib +_NP2 = np.__version__.startswith("2.") + def libget(name): """Helper to get items from GraphBLAS which might be GrB or GxB.""" @@ -60,7 +62,8 @@ def ints_to_numpy_buffer(array, dtype, *, name="array", copy=False, ownable=Fals and not np.issubdtype(array.dtype, np.bool_) ): raise ValueError(f"{name} must be integers, not {array.dtype.name}") - array = np.array(array, dtype, copy=copy, order=order) + # https://numpy.org/doc/stable/release/2.0.0-notes.html#new-copy-keyword-meaning-for-array-and-asarray-constructors + array = np.array(array, dtype, copy=copy or _NP2 and None, order=order) if ownable and (not array.flags.owndata or not array.flags.writeable): array = array.copy(order) return array @@ -90,10 +93,14 @@ def values_to_numpy_buffer( """ if dtype is not None: dtype = lookup_dtype(dtype) - array = np.array(array, _get_subdtype(dtype.np_type), copy=copy, order=order) + # https://numpy.org/doc/stable/release/2.0.0-notes.html#new-copy-keyword-meaning-for-array-and-asarray-constructors + array = np.array( + array, _get_subdtype(dtype.np_type), copy=copy or _NP2 and None, order=order + ) else: is_input_np = isinstance(array, np.ndarray) - array = np.array(array, copy=copy, order=order) + # https://numpy.org/doc/stable/release/2.0.0-notes.html#new-copy-keyword-meaning-for-array-and-asarray-constructors + array = np.array(array, copy=copy or _NP2 and None, order=order) if array.dtype.hasobject: raise ValueError("object dtype for values is not allowed") if not is_input_np and array.dtype == np.int32: # pragma: no cover @@ -312,7 +319,10 @@ def __init__(self, array=None, dtype=_INDEX, *, size=None, name=None): if size is not None: self.array = np.empty(size, dtype=dtype.np_type) else: - self.array = np.array(array, dtype=_get_subdtype(dtype.np_type), copy=False, order="C") + # https://numpy.org/doc/stable/release/2.0.0-notes.html#new-copy-keyword-meaning-for-array-and-asarray-constructors + self.array = np.array( + array, dtype=_get_subdtype(dtype.np_type), copy=_NP2 and None, order="C" + ) c_type = dtype.c_type if dtype._is_udt else f"{dtype.c_type}*" self._carg = ffi.cast(c_type, ffi.from_buffer(self.array)) self.dtype = dtype diff --git a/graphblas/exceptions.py b/graphblas/exceptions.py index e7f3b3a83..05cac988a 100644 --- a/graphblas/exceptions.py +++ b/graphblas/exceptions.py @@ -1,4 +1,3 @@ -from . 
import backend as _backend from .core import ffi as _ffi from .core import lib as _lib from .core.utils import _Pointer @@ -85,9 +84,14 @@ class NotImplementedException(GraphblasException): """ +# SuiteSparse errors +class JitError(GraphblasException): + """SuiteSparse:GraphBLAS error using JIT.""" + + # Our errors class UdfParseError(GraphblasException): - """Unable to parse the user-defined function.""" + """SuiteSparse:GraphBLAS unable to parse the user-defined function.""" _error_code_lookup = { @@ -112,8 +116,12 @@ class UdfParseError(GraphblasException): } GrB_SUCCESS = _lib.GrB_SUCCESS GrB_NO_VALUE = _lib.GrB_NO_VALUE -if _backend == "suitesparse": + +# SuiteSparse-specific errors +if hasattr(_lib, "GxB_EXHAUSTED"): _error_code_lookup[_lib.GxB_EXHAUSTED] = StopIteration +if hasattr(_lib, "GxB_JIT_ERROR"): # Added in 9.4 + _error_code_lookup[_lib.GxB_JIT_ERROR] = JitError def check_status(response_code, args): diff --git a/graphblas/tests/conftest.py b/graphblas/tests/conftest.py index a3acb3a94..964325e0d 100644 --- a/graphblas/tests/conftest.py +++ b/graphblas/tests/conftest.py @@ -3,6 +3,7 @@ import functools import itertools import platform +import sys from pathlib import Path import numpy as np @@ -156,3 +157,10 @@ def compute(x): def shouldhave(module, opname): """Whether an "operator" module should have the given operator.""" return supports_udfs or hasattr(module, opname) + + +def dprint(*args, **kwargs): # pragma: no cover (debug) + """Print to stderr for debugging purposes.""" + kwargs["file"] = sys.stderr + kwargs["flush"] = True + print(*args, **kwargs) diff --git a/graphblas/tests/test_dtype.py b/graphblas/tests/test_dtype.py index 3bd65f2b4..e2478fe7b 100644 --- a/graphblas/tests/test_dtype.py +++ b/graphblas/tests/test_dtype.py @@ -224,6 +224,10 @@ def test_record_dtype_from_dict(): def test_dtype_to_from_string(): types = [dtypes.BOOL, dtypes.FP64] for c in string.ascii_letters: + if c == "T": + # See NEP 55 about StringDtype "T". 
Notably, this doesn't work: + # >>> np.dtype(np.dtype("T").str) + continue try: dtype = np.dtype(c) types.append(dtype) diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index 63561930b..24f0e73d7 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -4074,10 +4074,11 @@ def test_ss_pack_hyperhash(A): Y = C.ss.unpack_hyperhash() Y = C.ss.unpack_hyperhash(compute=True) assert C.ss.unpack_hyperhash() is None - assert Y.nrows == C.nrows - C.ss.pack_hyperhash(Y) - assert Y.gb_obj[0] == gb.core.NULL - assert C.ss.unpack_hyperhash() is not None + if Y is not None: # hyperhash may or may not be computed + assert Y.nrows == C.nrows + C.ss.pack_hyperhash(Y) + assert Y.gb_obj[0] == gb.core.NULL + assert C.ss.unpack_hyperhash() is not None # May or may not be computed def test_to_dicts_from_dicts(A): diff --git a/graphblas/tests/test_numpyops.py b/graphblas/tests/test_numpyops.py index 25c52d7fd..999c6d5e0 100644 --- a/graphblas/tests/test_numpyops.py +++ b/graphblas/tests/test_numpyops.py @@ -5,6 +5,7 @@ import numpy as np import pytest +from packaging.version import parse import graphblas as gb import graphblas.binary.numpy as npbinary @@ -112,6 +113,15 @@ def test_npunary(): match(accum=gb.binary.lor) << gb_result.apply(npunary.isnan) compare = match.reduce(gb.monoid.land).new() if not compare: # pragma: no cover (debug) + import numba + + if ( + unary_name in {"sign"} + and np.__version__.startswith("2.") + and parse(numba.__version__) < parse("0.61.0") + ): + # numba <0.61.0 does not match numpy 2.0 + continue print(unary_name, gb_input.dtype) print(compute(gb_result)) print(np_result) diff --git a/graphblas/tests/test_scalar.py b/graphblas/tests/test_scalar.py index 3c7bffa9a..e93511914 100644 --- a/graphblas/tests/test_scalar.py +++ b/graphblas/tests/test_scalar.py @@ -50,7 +50,7 @@ def test_dup(s): s_empty = Scalar(dtypes.FP64) s_unempty = Scalar.from_value(0.0) if s_empty.is_cscalar: - # NumPy wraps around + # NumPy <2 wraps around; >=2 raises OverflowError uint_data = [ ("UINT8", 2**8 - 2), ("UINT16", 2**16 - 2), @@ -73,6 +73,10 @@ def test_dup(s): ("FP32", -2.5), *uint_data, ]: + if dtype.startswith("UINT") and s_empty.is_cscalar and not np.__version__.startswith("1."): + with pytest.raises(OverflowError, match="out of bounds for uint"): + s4.dup(dtype=dtype, name="s5") + continue s5 = s4.dup(dtype=dtype, name="s5") assert s5.dtype == dtype assert s5.value == val diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index 3c974c50d..4cea0b563 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -1,6 +1,8 @@ import os import pathlib +import platform import sys +import sysconfig import numpy as np import pytest @@ -26,11 +28,48 @@ @pytest.fixture(scope="module", autouse=True) def _setup_jit(): + """Set up the SuiteSparse:GraphBLAS JIT.""" + if _IS_SSGB7: + # SuiteSparse JIT was added in SSGB 8 + yield + return + + if not os.environ.get("GITHUB_ACTIONS"): + # Try to run the tests with defaults from sysconfig if not running in CI + prev = gb.ss.config["jit_c_control"] + cc = sysconfig.get_config_var("CC") + cflags = sysconfig.get_config_var("CFLAGS") + include = sysconfig.get_path("include") + libs = sysconfig.get_config_var("LIBS") + if not (cc is None or cflags is None or include is None or libs is None): + gb.ss.config["jit_c_control"] = "on" + gb.ss.config["jit_c_compiler_name"] = cc + gb.ss.config["jit_c_compiler_flags"] = f"{cflags} -I{include}" + gb.ss.config["jit_c_libraries"] = 
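[Editor's note] The new `OverflowError` branch in test_scalar.py tracks a NumPy 2 behavior change that is easy to reproduce in isolation (pure NumPy, no graphblas needed):

    import numpy as np

    try:
        # NumPy 1 wraps out-of-range integers (np.uint8(-2) -> 254);
        # NumPy 2 raises OverflowError("... out of bounds for uint8").
        wrapped = np.uint8(-2)
    except OverflowError:
        wrapped = np.uint8(2**8 - 2)  # the explicitly wrapped equivalent
    assert wrapped == 254
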
libs + else: + # Should we skip or try to run if sysconfig vars aren't set? + gb.ss.config["jit_c_control"] = "on" # "off" + try: + yield + finally: + gb.ss.config["jit_c_control"] = prev + return + + if ( + sys.platform == "darwin" + or sys.platform == "linux" + and "conda" not in gb.ss.config["jit_c_compiler_name"] + ): + # XXX TODO: tests for SuiteSparse JIT are not passing on linux when using wheels or on osx + # This should be understood and fixed! + gb.ss.config["jit_c_control"] = "off" + yield + return + # Configuration values below were obtained from the output of the JIT config # in CI, but with paths changed to use `{conda_prefix}` where appropriate. - if "CONDA_PREFIX" not in os.environ or _IS_SSGB7: - return conda_prefix = os.environ["CONDA_PREFIX"] + prev = gb.ss.config["jit_c_control"] gb.ss.config["jit_c_control"] = "on" if sys.platform == "linux": gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/x86_64-conda-linux-gnu-cc" @@ -59,7 +98,7 @@ def _setup_jit(): gb.ss.config["jit_c_compiler_flags"] = ( "-march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE " f"-fstack-protector-strong -O2 -pipe -isystem {conda_prefix}/include -DGBNCPUFEAT " - "-Wno-pointer-sign -O3 -DNDEBUG -fopenmp=libomp -fPIC -arch x86_64" + f"-Wno-pointer-sign -O3 -DNDEBUG -fopenmp=libomp -fPIC -arch {platform.machine()}" ) gb.ss.config["jit_c_linker_flags"] = ( "-Wl,-pie -Wl,-headerpad_max_install_names -Wl,-dead_strip_dylibs " @@ -72,6 +111,7 @@ def _setup_jit(): # This probably means we're testing a `python-suitesparse-graphblas` wheel # in a conda environment. This is not yet working. gb.ss.config["jit_c_control"] = "off" + yield return gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" @@ -86,6 +126,12 @@ def _setup_jit(): if not pathlib.Path(gb.ss.config["jit_c_compiler_name"]).exists(): # Can't use the JIT if we don't have a compiler! 
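[Editor's note] Stripped of the platform-specific compiler flags, the load-bearing shape of this fixture is save/`yield`/restore, so one test module's JIT settings cannot leak into another's. In isolation, with a plain dict standing in for `gb.ss.config`:

    import pytest

    config = {"jit_c_control": "off"}  # stand-in for gb.ss.config

    @pytest.fixture(scope="module", autouse=True)
    def _setup_jit():
        # Remember the previous setting; restore it however the tests exit.
        prev = config["jit_c_control"]
        config["jit_c_control"] = "on"
        try:
            yield
        finally:
            config["jit_c_control"] = prev
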
gb.ss.config["jit_c_control"] = "off" + yield + return + try: + yield + finally: + gb.ss.config["jit_c_control"] = prev @pytest.fixture diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index df1f5c86e..db80cdf71 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -29,6 +29,8 @@ suitesparse = backend == "suitesparse" +if suitesparse: + ss_version_major = gb.core.ss.version_major @pytest.fixture @@ -2205,7 +2207,10 @@ def test_udt(): long_dtype = np.dtype([("x", np.bool_), ("y" * 1000, np.float64)], align=True) if suitesparse: - with pytest.warns(UserWarning, match="too large"): + if ss_version_major < 9: + with pytest.warns(UserWarning, match="too large"): + long_udt = dtypes.register_anonymous(long_dtype) + else: long_udt = dtypes.register_anonymous(long_dtype) else: # UDTs don't currently have a name in vanilla GraphBLAS @@ -2216,13 +2221,19 @@ def test_udt(): if suitesparse: vv = Vector.ss.deserialize(v.ss.serialize(), dtype=long_udt) assert v.isequal(vv, check_dtype=True) - with pytest.raises(SyntaxError): - # The size of the UDT name is limited + if ss_version_major < 9: + with pytest.raises(SyntaxError): + # The size of the UDT name is limited + Vector.ss.deserialize(v.ss.serialize()) + else: Vector.ss.deserialize(v.ss.serialize()) # May be able to look up non-anonymous dtypes by name if their names are too long named_long_dtype = np.dtype([("x", np.bool_), ("y" * 1000, np.float64)], align=False) if suitesparse: - with pytest.warns(UserWarning, match="too large"): + if ss_version_major < 9: + with pytest.warns(UserWarning, match="too large"): + named_long_udt = dtypes.register_new("LongUDT", named_long_dtype) + else: named_long_udt = dtypes.register_new("LongUDT", named_long_dtype) else: named_long_udt = dtypes.register_new("LongUDT", named_long_dtype) diff --git a/pyproject.toml b/pyproject.toml index a3447b751..1bad95118 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,6 @@ [build-system] build-backend = "setuptools.build_meta" -requires = [ - "setuptools >=64", - "setuptools-git-versioning", -] +requires = ["setuptools >=64", "setuptools-git-versioning"] [project] name = "python-graphblas" @@ -11,59 +8,61 @@ dynamic = ["version"] description = "Python library for GraphBLAS: high-performance sparse linear algebra for scalable graph analytics" readme = "README.md" requires-python = ">=3.10" -license = {file = "LICENSE"} +license = { file = "LICENSE" } authors = [ - {name = "Erik Welch", email = "erik.n.welch@gmail.com"}, - {name = "Jim Kitchen"}, - {name = "Python-graphblas contributors"}, + { name = "Erik Welch", email = "erik.n.welch@gmail.com" }, + { name = "Jim Kitchen" }, + { name = "Python-graphblas contributors" }, ] maintainers = [ - {name = "Erik Welch", email = "erik.n.welch@gmail.com"}, - {name = "Jim Kitchen", email = "jim22k@gmail.com"}, - {name = "Sultan Orazbayev", email = "contact@econpoint.com"}, + { name = "Erik Welch", email = "erik.n.welch@gmail.com" }, + { name = "Jim Kitchen", email = "jim22k@gmail.com" }, + { name = "Sultan Orazbayev", email = "contact@econpoint.com" }, ] keywords = [ - "graphblas", - "graph", - "sparse", - "matrix", - "lagraph", - "suitesparse", - "Networks", - "Graph Theory", - "Mathematics", - "network", - "discrete mathematics", - "math", + "graphblas", + "graph", + "sparse", + "matrix", + "lagraph", + "suitesparse", + "Networks", + "Graph Theory", + "Mathematics", + "network", + "discrete mathematics", + "math", ] classifiers = [ - "Development Status :: 5 - 
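[Editor's note] The repeated `if ss_version_major < 9` blocks in test_vector.py all encode one idiom — expect the "too large" warning on old SuiteSparse, expect silence on new. A sketch of how it could be factored (hypothetical helper; `register` stands in for `dtypes.register_anonymous`):

    import contextlib
    import pytest

    def maybe_warns(ss_version_major):
        # SuiteSparse:GraphBLAS < 9 warned that long UDT names are "too large";
        # later versions lifted the limit, so no warning should be raised.
        if ss_version_major < 9:
            return pytest.warns(UserWarning, match="too large")
        return contextlib.nullcontext()

    # usage: with maybe_warns(ss_version_major): long_udt = register(long_dtype)
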
Production/Stable", - "License :: OSI Approved :: Apache Software License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: POSIX :: Linux", - "Operating System :: Microsoft :: Windows", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3 :: Only", - "Intended Audience :: Developers", - "Intended Audience :: Other Audience", - "Intended Audience :: Science/Research", - "Topic :: Scientific/Engineering", - "Topic :: Scientific/Engineering :: Information Analysis", - "Topic :: Scientific/Engineering :: Mathematics", - "Topic :: Software Development :: Libraries :: Python Modules", + "Development Status :: 5 - Production/Stable", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + "Operating System :: Microsoft :: Windows", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3 :: Only", + "Intended Audience :: Developers", + "Intended Audience :: Other Audience", + "Intended Audience :: Science/Research", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Information Analysis", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Software Development :: Libraries :: Python Modules", ] dependencies = [ - "numpy >=1.23", - "donfig >=0.6", - "pyyaml >=5.4", - # These won't be installed by default after 2024.3.0 - # Use e.g. "python-graphblas[suitesparse]" or "python-graphblas[default]" instead - "suitesparse-graphblas >=7.4.0.0, <9", - "numba >=0.55; python_version<'3.13'", # make optional where numba is not supported + "numpy >=1.23", + "donfig >=0.6", + "pyyaml >=5.4", + # These won't be installed by default after 2024.3.0 + # once pep-771 is supported: https://peps.python.org/pep-0771/ + # Use e.g. 
"python-graphblas[suitesparse]" or "python-graphblas[default]" instead + "suitesparse-graphblas >=7.4.0.0, <10", + "numba >=0.55; python_version<'3.14'", # make optional where numba is not supported ] [project.urls] @@ -73,56 +72,41 @@ repository = "https://github.com/python-graphblas/python-graphblas" changelog = "https://github.com/python-graphblas/python-graphblas/releases" [project.optional-dependencies] -suitesparse = [ - "suitesparse-graphblas >=7.4.0.0, <9", -] -networkx = [ - "networkx >=2.8", -] -numba = [ - "numba >=0.55", -] -pandas = [ - "pandas >=1.5", -] -scipy = [ - "scipy >=1.9", -] -suitesparse-udf = [ # udf requires numba - "python-graphblas[suitesparse,numba]", -] -repr = [ - "python-graphblas[pandas]", +suitesparse = ["suitesparse-graphblas >=7.4.0.0, <10"] +networkx = ["networkx >=2.8"] +numba = ["numba >=0.55"] +pandas = ["pandas >=1.5"] +scipy = ["scipy >=1.9"] +suitesparse-udf = [ # udf requires numba + "python-graphblas[suitesparse,numba]", ] +repr = ["python-graphblas[pandas]"] io = [ - "python-graphblas[networkx,scipy]", - "python-graphblas[numba]; python_version<'3.13'", - "awkward >=1.9", - "sparse >=0.14; python_version<'3.13'", # make optional, b/c sparse needs numba - "fast-matrix-market >=1.4.5", + "python-graphblas[networkx,scipy]", + "python-graphblas[numba]; python_version<'3.14'", + "awkward >=2.0", + "sparse >=0.14; python_version<'3.13'", # make optional, b/c sparse needs numba + "fast-matrix-market >=1.4.5; python_version<'3.13'", # py3.13 not supported yet ] -viz = [ - "python-graphblas[networkx,scipy]", - "matplotlib >=3.6", -] -datashade = [ # datashade requires numba - "python-graphblas[numba,pandas,scipy]", - "datashader >=0.14", - "hvplot >=0.8", +viz = ["python-graphblas[networkx,scipy]", "matplotlib >=3.6"] +datashade = [ # datashade requires numba + "python-graphblas[numba,pandas,scipy]", + "datashader >=0.14", + "hvplot >=0.8", ] test = [ - "python-graphblas[suitesparse,pandas,scipy]", - "packaging >=21", - "pytest >=6.2", - "tomli >=1", + "python-graphblas[suitesparse,pandas,scipy]", + "packaging >=21", + "pytest >=6.2", + "tomli >=1", ] default = [ - "python-graphblas[suitesparse,pandas,scipy]", - "python-graphblas[numba]; python_version<'3.13'", # make optional where numba is not supported + "python-graphblas[suitesparse,pandas,scipy]", + "python-graphblas[numba]; python_version<'3.14'", # make optional where numba is not supported ] all = [ - "python-graphblas[default,io,viz,test]", - "python-graphblas[datashade]; python_version<'3.13'", # make optional, b/c datashade needs numba + "python-graphblas[default,io,viz,test]", + "python-graphblas[datashade]; python_version<'3.14'", # make optional, b/c datashade needs numba ] [tool.setuptools] @@ -131,22 +115,22 @@ all = [ # $ find graphblas/ -name __init__.py -print | sort | sed -e 's/\/__init__.py//g' -e 's/\//./g' # $ python -c 'import tomli ; [print(x) for x in sorted(tomli.load(open("pyproject.toml", "rb"))["tool"]["setuptools"]["packages"])]' packages = [ - "graphblas", - "graphblas.agg", - "graphblas.binary", - "graphblas.core", - "graphblas.core.operator", - "graphblas.core.ss", - "graphblas.dtypes", - "graphblas.indexunary", - "graphblas.io", - "graphblas.monoid", - "graphblas.op", - "graphblas.semiring", - "graphblas.select", - "graphblas.ss", - "graphblas.tests", - "graphblas.unary", + "graphblas", + "graphblas.agg", + "graphblas.binary", + "graphblas.core", + "graphblas.core.operator", + "graphblas.core.ss", + "graphblas.dtypes", + "graphblas.indexunary", + "graphblas.io", + 
"graphblas.monoid", + "graphblas.op", + "graphblas.semiring", + "graphblas.select", + "graphblas.ss", + "graphblas.tests", + "graphblas.unary", ] [tool.setuptools-git-versioning] @@ -156,7 +140,7 @@ dirty_template = "{tag}+{ccount}.g{sha}.dirty" [tool.black] line-length = 100 -target-version = ["py310", "py311", "py312"] +target-version = ["py310", "py311", "py312", "py313"] [tool.isort] sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] @@ -170,56 +154,54 @@ line_length = 100 [tool.pytest.ini_options] minversion = "6.0" testpaths = "graphblas/tests" -xfail_strict = false # 2023-07-23: awkward and numpy 1.25 sometimes conflict +xfail_strict = false # 2023-07-23: awkward and numpy 1.25 sometimes conflict addopts = [ - "--strict-config", # Force error if config is mispelled - "--strict-markers", # Force error if marker is mispelled (must be defined in config) - "-ra", # Print summary of all fails/errors -] -markers = [ - "slow: Skipped unless --runslow passed", + "--strict-config", # Force error if config is mispelled + "--strict-markers", # Force error if marker is mispelled (must be defined in config) + "-ra", # Print summary of all fails/errors ] +markers = ["slow: Skipped unless --runslow passed"] log_cli_level = "info" filterwarnings = [ - # See: https://docs.python.org/3/library/warnings.html#describing-warning-filters - # and: https://docs.pytest.org/en/7.2.x/how-to/capture-warnings.html#controlling-warnings - "error", + # See: https://docs.python.org/3/library/warnings.html#describing-warning-filters + # and: https://docs.pytest.org/en/7.2.x/how-to/capture-warnings.html#controlling-warnings + "error", - # sparse 0.14.0 (2022-02-24) began raising this warning; it has been reported and fixed upstream. - "ignore:coords should be an ndarray. This will raise a ValueError:DeprecationWarning:sparse._coo.core", + # sparse 0.14.0 (2022-02-24) began raising this warning; it has been reported and fixed upstream. + "ignore:coords should be an ndarray. This will raise a ValueError:DeprecationWarning:sparse._coo.core", - # setuptools v67.3.0 deprecated `pkg_resources.declare_namespace` on 13 Feb 2023. See: - # https://setuptools.pypa.io/en/latest/history.html#v67-3-0 - # MAINT: check if this is still necessary in 2025 - "ignore:Deprecated call to `pkg_resources.declare_namespace:DeprecationWarning:pkg_resources", + # setuptools v67.3.0 deprecated `pkg_resources.declare_namespace` on 13 Feb 2023. See: + # https://setuptools.pypa.io/en/latest/history.html#v67-3-0 + # MAINT: check if this is still necessary in 2025 + "ignore:Deprecated call to `pkg_resources.declare_namespace:DeprecationWarning:pkg_resources", - # This deprecation warning was added in setuptools v67.5.0 (8 Mar 2023). See: - # https://setuptools.pypa.io/en/latest/history.html#v67-5-0 - "ignore:pkg_resources is deprecated as an API:DeprecationWarning:", + # This deprecation warning was added in setuptools v67.5.0 (8 Mar 2023). 
See: + # https://setuptools.pypa.io/en/latest/history.html#v67-5-0 + "ignore:pkg_resources is deprecated as an API:DeprecationWarning:", - # sre_parse deprecated in 3.11; this is triggered by awkward 0.10 - "ignore:module 'sre_parse' is deprecated:DeprecationWarning:", - "ignore:module 'sre_constants' is deprecated:DeprecationWarning:", + # sre_parse deprecated in 3.11; this is triggered by awkward 0.10 + "ignore:module 'sre_parse' is deprecated:DeprecationWarning:", + "ignore:module 'sre_constants' is deprecated:DeprecationWarning:", - # numpy 1.25.0 (2023-06-17) deprecated `np.find_common_type`; many other dependencies use it. - # See if we can remove this filter in 2025. - "ignore:np.find_common_type is deprecated:DeprecationWarning:", + # numpy 1.25.0 (2023-06-17) deprecated `np.find_common_type`; many other dependencies use it. + # See if we can remove this filter in 2025. + "ignore:np.find_common_type is deprecated:DeprecationWarning:", - # pypy gives this warning - "ignore:can't resolve package from __spec__ or __package__:ImportWarning:", + # pypy gives this warning + "ignore:can't resolve package from __spec__ or __package__:ImportWarning:", - # Python 3.12 introduced this deprecation, which is triggered by pandas 2.1.1 - "ignore:datetime.datetime.utcfromtimestamp:DeprecationWarning:dateutil", + # Python 3.12 introduced this deprecation, which is triggered by pandas 2.1.1 + "ignore:datetime.datetime.utcfromtimestamp:DeprecationWarning:dateutil", - # Pandas 2.2 warns that pyarrow will become a required dependency in pandas 3.0 - "ignore:\\nPyarrow will become a required dependency of pandas:DeprecationWarning:", + # Pandas 2.2 warns that pyarrow will become a required dependency in pandas 3.0 + "ignore:\\nPyarrow will become a required dependency of pandas:DeprecationWarning:", ] [tool.coverage.run] branch = true source = ["graphblas"] omit = [ - "graphblas/viz.py", # TODO: test and get coverage for viz.py + "graphblas/viz.py", # TODO: test and get coverage for viz.py ] [tool.coverage.report] @@ -229,9 +211,9 @@ fail_under = 0 skip_covered = true skip_empty = true exclude_lines = [ - "pragma: no cover", - "raise AssertionError", - "raise NotImplementedError", + "pragma: no cover", + "raise AssertionError", + "raise NotImplementedError", ] [tool.codespell] @@ -241,164 +223,189 @@ ignore-words-list = "coo,ba" # https://github.com/charliermarsh/ruff/ line-length = 100 target-version = "py310" + +[tool.ruff.format] +exclude = ["*.ipynb"] # Consider enabling auto-formatting of notebooks + [tool.ruff.lint] +exclude = ["*.ipynb"] # Consider enabling auto-formatting of notebooks unfixable = [ - "F841", # unused-variable (Note: can leave useless expression) - "B905", # zip-without-explicit-strict (Note: prefer `zip(x, y, strict=True)`) + "F841", # unused-variable (Note: can leave useless expression) + "B905", # zip-without-explicit-strict (Note: prefer `zip(x, y, strict=True)`) ] select = [ - # Have we enabled too many checks that they'll become a nuisance? We'll see... - "F", # pyflakes - "E", # pycodestyle Error - "W", # pycodestyle Warning - # "C90", # mccabe (Too strict, but maybe we should make things less complex) - # "I", # isort (Should we replace `isort` with this?) - "N", # pep8-naming - "D", # pydocstyle - "UP", # pyupgrade - "YTT", # flake8-2020 - # "ANN", # flake8-annotations (We don't use annotations yet) - "S", # bandit - # "BLE", # flake8-blind-except (Maybe consider) - # "FBT", # flake8-boolean-trap (Why?) 
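[Editor's note] Each `filterwarnings` entry above uses the `action:message:category:module` syntax of Python's `-W` option; at runtime the first two entries behave roughly like this (message and module are regular expressions matched from the start):

    import warnings

    warnings.simplefilter("error")  # escalate every warning to an exception...
    warnings.filterwarnings(        # ...then carve out targeted ignores
        "ignore",
        message="coords should be an ndarray. This will raise a ValueError",
        category=DeprecationWarning,
        module=r"sparse\._coo\.core",
    )
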
- "B", # flake8-bugbear - "A", # flake8-builtins - "COM", # flake8-commas - "C4", # flake8-comprehensions - "DTZ", # flake8-datetimez - "T10", # flake8-debugger - # "DJ", # flake8-django (We don't use django) - # "EM", # flake8-errmsg (Perhaps nicer, but too much work) - "EXE", # flake8-executable - "ISC", # flake8-implicit-str-concat - # "ICN", # flake8-import-conventions (Doesn't allow "_" prefix such as `_np`) - "G", # flake8-logging-format - "INP", # flake8-no-pep420 - "PIE", # flake8-pie - "T20", # flake8-print - # "PYI", # flake8-pyi (We don't have stub files yet) - "PT", # flake8-pytest-style - "Q", # flake8-quotes - "RSE", # flake8-raise - "RET", # flake8-return - # "SLF", # flake8-self (We can use our own private variables--sheesh!) - "SIM", # flake8-simplify - # "TID", # flake8-tidy-imports (Rely on isort and our own judgement) - # "TCH", # flake8-type-checking (Note: figure out type checking later) - # "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) - "PTH", # flake8-use-pathlib (Often better, but not always) - # "ERA", # eradicate (We like code in comments!) - # "PD", # pandas-vet (Intended for scripts that use pandas, not libraries) - "PGH", # pygrep-hooks - "PL", # pylint - "PLC", # pylint Convention - "PLE", # pylint Error - "PLR", # pylint Refactor - "PLW", # pylint Warning - "TRY", # tryceratops - "NPY", # NumPy-specific rules - "RUF", # ruff-specific rules - "ALL", # Try new categories by default (making the above list unnecessary) + # Have we enabled too many checks that they'll become a nuisance? We'll see... + "F", # pyflakes + "E", # pycodestyle Error + "W", # pycodestyle Warning + # "C90", # mccabe (Too strict, but maybe we should make things less complex) + # "I", # isort (Should we replace `isort` with this?) + "N", # pep8-naming + "D", # pydocstyle + "UP", # pyupgrade + "YTT", # flake8-2020 + # "ANN", # flake8-annotations (We don't use annotations yet) + "S", # bandit + # "BLE", # flake8-blind-except (Maybe consider) + # "FBT", # flake8-boolean-trap (Why?) + "B", # flake8-bugbear + "A", # flake8-builtins + "COM", # flake8-commas + "C4", # flake8-comprehensions + "DTZ", # flake8-datetimez + "T10", # flake8-debugger + # "DJ", # flake8-django (We don't use django) + # "EM", # flake8-errmsg (Perhaps nicer, but too much work) + "EXE", # flake8-executable + "ISC", # flake8-implicit-str-concat + # "ICN", # flake8-import-conventions (Doesn't allow "_" prefix such as `_np`) + "G", # flake8-logging-format + "INP", # flake8-no-pep420 + "PIE", # flake8-pie + "T20", # flake8-print + # "PYI", # flake8-pyi (We don't have stub files yet) + "PT", # flake8-pytest-style + "Q", # flake8-quotes + "RSE", # flake8-raise + "RET", # flake8-return + # "SLF", # flake8-self (We can use our own private variables--sheesh!) + "SIM", # flake8-simplify + # "TID", # flake8-tidy-imports (Rely on isort and our own judgement) + # "TCH", # flake8-type-checking (Note: figure out type checking later) + # "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) + "PTH", # flake8-use-pathlib (Often better, but not always) + # "ERA", # eradicate (We like code in comments!) 
+ # "PD", # pandas-vet (Intended for scripts that use pandas, not libraries) + "PGH", # pygrep-hooks + "PL", # pylint + "PLC", # pylint Convention + "PLE", # pylint Error + "PLR", # pylint Refactor + "PLW", # pylint Warning + "TRY", # tryceratops + "NPY", # NumPy-specific rules + "RUF", # ruff-specific rules + "ALL", # Try new categories by default (making the above list unnecessary) ] external = [ - # noqa codes that ruff doesn't know about: https://github.com/charliermarsh/ruff#external - "F811", + # noqa codes that ruff doesn't know about: https://github.com/charliermarsh/ruff#external + "F811", ] ignore = [ - # Would be nice to fix these - "D100", # Missing docstring in public module - "D101", # Missing docstring in public class - "D102", # Missing docstring in public method - "D103", # Missing docstring in public function - "D104", # Missing docstring in public package - "D105", # Missing docstring in magic method - "D107", # Missing docstring in `__init__` - # "D107", # Missing docstring in `__init__` - "D205", # 1 blank line required between summary line and description - "D401", # First line of docstring should be in imperative mood: - "D417", # D417 Missing argument description in the docstring for ...: ... - # "D417", # Missing argument description in the docstring: - "PLE0605", # Invalid format for `__all__`, must be `tuple` or `list` (Note: broken in v0.0.237) - - # Maybe consider - # "SIM300", # Yoda conditions are discouraged, use ... instead (Note: we're not this picky) - # "SIM401", # Use dict.get ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer) - "B904", # Use `raise from` to specify exception cause (Note: sometimes okay to raise original exception) - "TRY004", # Prefer `TypeError` exception for invalid type (Note: good advice, but not worth the nuisance) - "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` (Note: no annotations yet) - "PERF401", # Use a list comprehension to create a transformed list (Note: poorly implemented atm) - - # Intentionally ignored - "COM812", # Trailing comma missing - "D203", # 1 blank line required before class docstring (Note: conflicts with D211, which is preferred) - "D213", # (Note: conflicts with D212, which is preferred) - "D400", # First line should end with a period (Note: prefer D415, which also allows "?" and "!") - "N801", # Class name ... should use CapWords convention (Note:we have a few exceptions to this) - "N802", # Function name ... should be lowercase - "N803", # Argument name ... should be lowercase (Maybe okay--except in tests) - "N806", # Variable ... in function should be lowercase - "N807", # Function name should not start and end with `__` - "N818", # Exception name ... should be named with an Error suffix (Note: good advice) - "PERF203", # `try`-`except` within a loop incurs performance overhead (Note: too strict) - "PLC0205", # Class `__slots__` should be a non-string iterable (Note: string is fine) - "PLR0124", # Name compared with itself, consider replacing `x == x` (Note: too strict) - "PLR0911", # Too many return statements - "PLR0912", # Too many branches - "PLR0913", # Too many arguments to function call - "PLR0915", # Too many statements - "PLR2004", # Magic number used in comparison, consider replacing magic with a constant variable - "PLW0603", # Using the global statement to update ... is discouraged (Note: yeah, discouraged, but too strict) - "PLW2901", # Outer for loop variable ... 
overwritten by inner assignment target (Note: good advice, but too strict) - "RET502", # Do not implicitly `return None` in function able to return non-`None` value - "RET503", # Missing explicit `return` at the end of function able to return non-`None` value - "RET504", # Unnecessary variable assignment before `return` statement - "S110", # `try`-`except`-`pass` detected, consider logging the exception (Note: good advice, but we don't log) - "S112", # `try`-`except`-`continue` detected, consider logging the exception (Note: good advice, but we don't log) - "S603", # `subprocess` call: check for execution of untrusted input (Note: not important for us) - "S607", # Starting a process with a partial executable path (Note: not important for us) - "SIM102", # Use a single `if` statement instead of nested `if` statements (Note: often necessary) - "SIM105", # Use contextlib.suppress(...) instead of try-except-pass (Note: try-except-pass is much faster) - "SIM108", # Use ternary operator ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer) - "TRY003", # Avoid specifying long messages outside the exception class (Note: why?) - "UP038", # Use `X | Y` in `isinstance` call instead of `(X, Y)` (Note: using `|` is slower atm) - - # Ignored categories - "C90", # mccabe (Too strict, but maybe we should make things less complex) - "I", # isort (Should we replace `isort` with this?) - "ANN", # flake8-annotations (We don't use annotations yet) - "BLE", # flake8-blind-except (Maybe consider) - "FBT", # flake8-boolean-trap (Why?) - "DJ", # flake8-django (We don't use django) - "EM", # flake8-errmsg (Perhaps nicer, but too much work) - "ICN", # flake8-import-conventions (Doesn't allow "_" prefix such as `_np`) - "PYI", # flake8-pyi (We don't have stub files yet) - "SLF", # flake8-self (We can use our own private variables--sheesh!) - "TID", # flake8-tidy-imports (Rely on isort and our own judgement) - "TCH", # flake8-type-checking (Note: figure out type checking later) - "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) - "TD", # flake8-todos (Maybe okay to add some of these) - "FIX", # flake8-fixme (like flake8-todos) - "ERA", # eradicate (We like code in comments!) - "PD", # pandas-vet (Intended for scripts that use pandas, not libraries) + # Would be nice to fix these + "D100", # Missing docstring in public module + "D101", # Missing docstring in public class + "D102", # Missing docstring in public method + "D103", # Missing docstring in public function + "D104", # Missing docstring in public package + "D105", # Missing docstring in magic method + "D107", # Missing docstring in `__init__` + "D205", # 1 blank line required between summary line and description + "D401", # First line of docstring should be in imperative mood: + "D417", # D417 Missing argument description in the docstring for ...: ... + "PLE0605", # Invalid format for `__all__`, must be `tuple` or `list` (Note: broken in v0.0.237) + + # Maybe consider + # "SIM300", # Yoda conditions are discouraged, use ... instead (Note: we're not this picky) + # "SIM401", # Use dict.get ... 
instead of if-else-block (Note: if-else better for coverage and sometimes clearer) + "B904", # Use `raise from` to specify exception cause (Note: sometimes okay to raise original exception) + "TRY004", # Prefer `TypeError` exception for invalid type (Note: good advice, but not worth the nuisance) + "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` (Note: no annotations yet) + "RUF021", # parenthesize-chained-operators (Note: results don't look good yet) + "RUF023", # unsorted-dunder-slots (Note: maybe fine, but noisy changes) + "PERF401", # Use a list comprehension to create a transformed list (Note: poorly implemented atm) + + # Intentionally ignored + "COM812", # Trailing comma missing + "D203", # 1 blank line required before class docstring (Note: conflicts with D211, which is preferred) + "D213", # (Note: conflicts with D212, which is preferred) + "D400", # First line should end with a period (Note: prefer D415, which also allows "?" and "!") + "N801", # Class name ... should use CapWords convention (Note:we have a few exceptions to this) + "N802", # Function name ... should be lowercase + "N803", # Argument name ... should be lowercase (Maybe okay--except in tests) + "N806", # Variable ... in function should be lowercase + "N807", # Function name should not start and end with `__` + "N818", # Exception name ... should be named with an Error suffix (Note: good advice) + "PERF203", # `try`-`except` within a loop incurs performance overhead (Note: too strict) + "PLC0205", # Class `__slots__` should be a non-string iterable (Note: string is fine) + "PLR0124", # Name compared with itself, consider replacing `x == x` (Note: too strict) + "PLR0911", # Too many return statements + "PLR0912", # Too many branches + "PLR0913", # Too many arguments to function call + "PLR0915", # Too many statements + "PLR2004", # Magic number used in comparison, consider replacing magic with a constant variable + "PLW0603", # Using the global statement to update ... is discouraged (Note: yeah, discouraged, but too strict) + "PLW0642", # Reassigned `self` variable in instance method (Note: too strict for us) + "PLW2901", # Outer for loop variable ... overwritten by inner assignment target (Note: good advice, but too strict) + "RET502", # Do not implicitly `return None` in function able to return non-`None` value + "RET503", # Missing explicit `return` at the end of function able to return non-`None` value + "RET504", # Unnecessary variable assignment before `return` statement + "S110", # `try`-`except`-`pass` detected, consider logging the exception (Note: good advice, but we don't log) + "S112", # `try`-`except`-`continue` detected, consider logging the exception (Note: good advice, but we don't log) + "S603", # `subprocess` call: check for execution of untrusted input (Note: not important for us) + "S607", # Starting a process with a partial executable path (Note: not important for us) + "SIM102", # Use a single `if` statement instead of nested `if` statements (Note: often necessary) + "SIM105", # Use contextlib.suppress(...) instead of try-except-pass (Note: try-except-pass is much faster) + "SIM108", # Use ternary operator ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer) + "TRY003", # Avoid specifying long messages outside the exception class (Note: why?) 
+ "UP038", # Use `X | Y` in `isinstance` call instead of `(X, Y)` (Note: using `|` is slower atm) + + # Ignored categories + "C90", # mccabe (Too strict, but maybe we should make things less complex) + "I", # isort (Should we replace `isort` with this?) + "ANN", # flake8-annotations (We don't use annotations yet) + "BLE", # flake8-blind-except (Maybe consider) + "FBT", # flake8-boolean-trap (Why?) + "DJ", # flake8-django (We don't use django) + "EM", # flake8-errmsg (Perhaps nicer, but too much work) + "ICN", # flake8-import-conventions (Doesn't allow "_" prefix such as `_np`) + "PYI", # flake8-pyi (We don't have stub files yet) + "SLF", # flake8-self (We can use our own private variables--sheesh!) + "TID", # flake8-tidy-imports (Rely on isort and our own judgement) + "TCH", # flake8-type-checking (Note: figure out type checking later) + "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) + "TD", # flake8-todos (Maybe okay to add some of these) + "FIX", # flake8-fixme (like flake8-todos) + "ERA", # eradicate (We like code in comments!) + "PD", # pandas-vet (Intended for scripts that use pandas, not libraries) ] [tool.ruff.lint.per-file-ignores] -"graphblas/core/operator/base.py" = ["S102"] # exec is used for UDF -"graphblas/core/ss/matrix.py" = ["NPY002"] # numba doesn't support rng generator yet -"graphblas/core/ss/vector.py" = ["NPY002"] # numba doesn't support rng generator yet -"graphblas/core/utils.py" = ["PLE0302"] # `__set__` is used as a property -"graphblas/ss/_core.py" = ["N999"] # We want _core.py to be underscopre +"graphblas/core/operator/__init__.py" = ["A005"] +"graphblas/io/__init__.py" = ["A005"] # shadows a standard-library module +"graphblas/core/operator/base.py" = ["S102"] # exec is used for UDF +"graphblas/core/ss/matrix.py" = [ + "NPY002", # numba doesn't support rng generator yet + "PLR1730", +] +"graphblas/core/ss/vector.py" = [ + "NPY002", # numba doesn't support rng generator yet +] +"graphblas/core/utils.py" = ["PLE0302"] # `__set__` is used as a property +"graphblas/ss/_core.py" = ["N999"] # We want _core.py to be underscopre # Allow useless expressions, assert, pickle, RNG, print, no docstring, and yoda in tests -"graphblas/tests/*py" = ["B018", "S101", "S301", "S311", "T201", "D103", "D100", "SIM300"] -"graphblas/tests/test_formatting.py" = ["E501"] # Allow long lines -"graphblas/**/__init__.py" = ["F401"] # Allow unused imports (w/o defining `__all__`) -"scripts/*.py" = ["INP001"] # Not a package -"scripts/create_pickle.py" = ["F403", "F405"] # Allow `from foo import *` -"docs/*.py" = ["INP001"] # Not a package +"graphblas/tests/*py" = [ + "B018", + "S101", + "S301", + "S311", + "T201", + "D103", + "D100", + "SIM300", +] +"graphblas/tests/test_formatting.py" = ["E501"] # Allow long lines +"graphblas/**/__init__.py" = [ + "F401", # Allow unused imports (w/o defining `__all__`) +] +"scripts/*.py" = ["INP001"] # Not a package +"scripts/create_pickle.py" = ["F403", "F405"] # Allow `from foo import *` +"docs/*.py" = ["INP001"] # Not a package [tool.ruff.lint.flake8-builtins] builtins-ignorelist = ["copyright", "format", "min", "max"] +builtins-allowed-modules = ["select"] [tool.ruff.lint.flake8-pytest-style] fixture-parentheses = false @@ -407,80 +414,86 @@ mark-parentheses = false [tool.lint.ruff.pydocstyle] convention = "numpy" +[tool.bandit] +exclude_dirs = ["graphblas/tests", "scripts"] +skips = [ + "B110", # Try, Except, Pass detected. 
(Note: it would be nice to not have this pattern) +] + [tool.pylint.messages_control] # To run a single check, do: pylint graphblas --disable E,W,R,C,I --enable assignment-from-no-return max-line-length = 100 py-version = "3.10" enable = ["I"] disable = [ - # Error - "assignment-from-no-return", - - # Warning - "arguments-differ", - "arguments-out-of-order", - "expression-not-assigned", - "fixme", - "global-statement", - "non-parent-init-called", - "redefined-builtin", - "redefined-outer-name", - "super-init-not-called", - "unbalanced-tuple-unpacking", - "unnecessary-lambda", - "unspecified-encoding", - "unused-argument", - "unused-variable", - - # Refactor - "cyclic-import", - "duplicate-code", - "inconsistent-return-statements", - "too-few-public-methods", - - # Convention - "missing-class-docstring", - "missing-function-docstring", - "missing-module-docstring", - "too-many-lines", - - # Intentionally turned off - # error - "class-variable-slots-conflict", - "invalid-unary-operand-type", - "no-member", - "no-name-in-module", - "not-an-iterable", - "too-many-function-args", - "unexpected-keyword-arg", - # warning - "broad-except", - "pointless-statement", - "protected-access", - "undefined-loop-variable", - "unused-import", - # refactor - "comparison-with-itself", - "too-many-arguments", - "too-many-boolean-expressions", - "too-many-branches", - "too-many-instance-attributes", - "too-many-locals", - "too-many-nested-blocks", - "too-many-public-methods", - "too-many-return-statements", - "too-many-statements", - # convention - "import-outside-toplevel", - "invalid-name", - "line-too-long", - "singleton-comparison", - "single-string-used-for-slots", - "unidiomatic-typecheck", - "unnecessary-dunder-call", - "wrong-import-order", - "wrong-import-position", - # informative - "locally-disabled", - "suppressed-message", + # Error + "assignment-from-no-return", + + # Warning + "arguments-differ", + "arguments-out-of-order", + "expression-not-assigned", + "fixme", + "global-statement", + "non-parent-init-called", + "redefined-builtin", + "redefined-outer-name", + "super-init-not-called", + "unbalanced-tuple-unpacking", + "unnecessary-lambda", + "unspecified-encoding", + "unused-argument", + "unused-variable", + + # Refactor + "cyclic-import", + "duplicate-code", + "inconsistent-return-statements", + "too-few-public-methods", + + # Convention + "missing-class-docstring", + "missing-function-docstring", + "missing-module-docstring", + "too-many-lines", + + # Intentionally turned off + # error + "class-variable-slots-conflict", + "invalid-unary-operand-type", + "no-member", + "no-name-in-module", + "not-an-iterable", + "too-many-function-args", + "unexpected-keyword-arg", + # warning + "broad-except", + "pointless-statement", + "protected-access", + "undefined-loop-variable", + "unused-import", + # refactor + "comparison-with-itself", + "too-many-arguments", + "too-many-boolean-expressions", + "too-many-branches", + "too-many-instance-attributes", + "too-many-locals", + "too-many-nested-blocks", + "too-many-public-methods", + "too-many-return-statements", + "too-many-statements", + # convention + "import-outside-toplevel", + "invalid-name", + "line-too-long", + "singleton-comparison", + "single-string-used-for-slots", + "unidiomatic-typecheck", + "unnecessary-dunder-call", + "wrong-import-order", + "wrong-import-position", + # informative + "locally-disabled", + "suppressed-message", ] diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 893f09539..cd3451905 100755 --- 
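[Editor's note] The `pylint graphblas --disable E,W,R,C,I --enable assignment-from-no-return` recipe quoted in the comment above also works from a script; a small wrapper, assuming `pylint` is installed and the command is run from the repository root:

    import subprocess
    import sys

    # Run exactly one pylint check, per the comment in pyproject.toml.
    result = subprocess.run(
        [sys.executable, "-m", "pylint", "graphblas",
         "--disable", "E,W,R,C,I", "--enable", "assignment-from-no-return"],
        capture_output=True, text=True,
    )
    print(result.stdout)
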
a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -3,15 +3,15 @@ # Use, adjust, copy/paste, etc. as necessary to answer your questions. # This may be helpful when updating dependency versions in CI. # Tip: add `--json` for more information. -conda search 'flake8-bugbear[channel=conda-forge]>=24.1.17' +conda search 'flake8-bugbear[channel=conda-forge]>=24.12.12' conda search 'flake8-simplify[channel=conda-forge]>=0.21.0' -conda search 'numpy[channel=conda-forge]>=1.26.3' -conda search 'pandas[channel=conda-forge]>=2.2.0' -conda search 'scipy[channel=conda-forge]>=1.12.0' -conda search 'networkx[channel=conda-forge]>=3.2.1' -conda search 'awkward[channel=conda-forge]>=2.5.2' -conda search 'sparse[channel=conda-forge]>=0.15.1' +conda search 'numpy[channel=conda-forge]>=2.2.3' +conda search 'pandas[channel=conda-forge]>=2.2.3' +conda search 'scipy[channel=conda-forge]>=1.15.1' +conda search 'networkx[channel=conda-forge]>=3.4.2' +conda search 'awkward[channel=conda-forge]>=2.7.4' +conda search 'sparse[channel=conda-forge]>=0.15.5' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.6' -conda search 'numba[channel=conda-forge]>=0.59.0' -conda search 'pyyaml[channel=conda-forge]>=6.0.1' +conda search 'numba[channel=conda-forge]>=0.61.0' +conda search 'pyyaml[channel=conda-forge]>=6.0.2' # conda search 'python[channel=conda-forge]>=3.10 *pypy*' From 9bf2ae233c202e8f13a74ae80858b8631ad3b308 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Feb 2025 09:51:19 -0600 Subject: [PATCH 63/66] Bump codecov/codecov-action from 4 to 5 (#554) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4 to 5. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v4...v5) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test_and_build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 7a8f06900..bfc17834b 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -504,7 +504,7 @@ jobs: coverage xml coverage report --show-missing - name: codecov - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 - name: Notebooks Execution check if: matrix.slowtask == 'notebooks' run: | From a1e1904925a4e7a2eca12b903d0abcf772b4f8c2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Feb 2025 13:26:27 -0600 Subject: [PATCH 64/66] Bump pypa/gh-action-pypi-publish from 1.9.0 to 1.12.4 (#553) Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.9.0 to 1.12.4. - [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases) - [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/v1.9.0...v1.12.4) --- updated-dependencies: - dependency-name: pypa/gh-action-pypi-publish dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/publish_pypi.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index a9ad0be8c..32926c5c8 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -36,7 +36,7 @@ jobs: - name: Check with twine run: python -m twine check --strict dist/* - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@v1.9.0 + uses: pypa/gh-action-pypi-publish@v1.12.4 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} From 2a4891f9b1701afe3019f4d9cfcca3f9c7e505b8 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Mon, 17 Feb 2025 17:06:11 -0600 Subject: [PATCH 65/66] Fix a numpy 2 deprecation warning (dtype "a" code) (#556) --- graphblas/tests/test_dtype.py | 4 ++++ scripts/check_versions.sh | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/graphblas/tests/test_dtype.py b/graphblas/tests/test_dtype.py index e2478fe7b..ecbca707f 100644 --- a/graphblas/tests/test_dtype.py +++ b/graphblas/tests/test_dtype.py @@ -9,6 +9,7 @@ import graphblas as gb from graphblas import core, dtypes from graphblas.core import lib +from graphblas.core.utils import _NP2 from graphblas.dtypes import lookup_dtype suitesparse = gb.backend == "suitesparse" @@ -228,6 +229,9 @@ def test_dtype_to_from_string(): # See NEP 55 about StringDtype "T". Notably, this doesn't work: # >>> np.dtype(np.dtype("T").str) continue + if _NP2 and c == "a": + # Data type alias 'a' was deprecated in NumPy 2.0. Use the 'S' alias instead. + continue try: dtype = np.dtype(c) types.append(dtype) diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index cd3451905..5aa88e045 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -7,7 +7,7 @@ conda search 'flake8-bugbear[channel=conda-forge]>=24.12.12' conda search 'flake8-simplify[channel=conda-forge]>=0.21.0' conda search 'numpy[channel=conda-forge]>=2.2.3' conda search 'pandas[channel=conda-forge]>=2.2.3' -conda search 'scipy[channel=conda-forge]>=1.15.1' +conda search 'scipy[channel=conda-forge]>=1.15.2' conda search 'networkx[channel=conda-forge]>=3.4.2' conda search 'awkward[channel=conda-forge]>=2.7.4' conda search 'sparse[channel=conda-forge]>=0.15.5' From 22d42f615187a3a2c89b6400a898c5b72ef396a8 Mon Sep 17 00:00:00 2001 From: Jim Kitchen <2807270+jim22k@users.noreply.github.com> Date: Wed, 26 Feb 2025 10:02:04 -0600 Subject: [PATCH 66/66] Add 9.4.5.0 to tests (#557) --- .github/workflows/test_and_build.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index bfc17834b..af7525928 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -217,24 +217,24 @@ jobs: fi elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.13') }} == true ]] ; then if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", "=9.4.5.0", ""]))') psg=python-suitesparse-graphblas${psgver} else - psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", "==9.4.5.0", ""]))') fi elif [[ ${{ 
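[Editor's note] The skip added to test_dtype.py above is easy to verify standalone: NumPy 2.0 deprecated the `'a'` dtype alias in favor of `'S'`:

    import warnings

    import numpy as np

    with warnings.catch_warnings():
        warnings.simplefilter("error")
        np.dtype("S")  # fine on every NumPy version
        try:
            np.dtype("a")  # DeprecationWarning on NumPy >= 2.0
        except DeprecationWarning:
            print("Data type alias 'a' is deprecated; use 'S' instead")
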
startsWith(steps.pyver.outputs.selected, '3.12') }} == true ]] ; then if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then if [[ $npver == =1.* ]] ; then psgver=$(python -c 'import random ; print(random.choice(["=8.2.0.1", "=8.2.1.0"]))') else - psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", "=9.4.5.0", ""]))') fi psg=python-suitesparse-graphblas${psgver} else if [[ $npver == =1.* ]] ; then psgver=$(python -c 'import random ; print(random.choice(["==8.2.0.1", "==8.2.1.0"]))') else - psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", "==9.4.5.0", ""]))') fi fi # python-suitsparse-graphblas support is the same for Python 3.10 and 3.11 @@ -242,21 +242,21 @@ jobs: if [[ $npver == =1.* ]] ; then psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", "=8.2.0.1", "=8.2.1.0"]))') else - psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["=9.3.1.0", "=9.4.5.0", ""]))') fi psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then if [[ $npver == =1.* ]] ; then psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0"]))') else - psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", "==9.4.5.0", ""]))') fi elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions if [[ $npver == =1.* ]] ; then psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", "==8.2.0.1", "==8.2.1.0"]))') else - psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==9.3.1.0", "==9.4.5.0", ""]))') fi fi @@ -357,7 +357,7 @@ jobs: ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ ${{ matrix.slowtask == 'notebooks' && 'matplotlib nbconvert jupyter "ipython>=7" drawsvg' || '' }} \ ${{ steps.sourcetype.outputs.selected == 'upstream' && 'cython' || '' }} \ - ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4,<9.4"' || '' }} \ + ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4,<9.5"' || '' }} \ ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} \ ${{ matrix.os == 'windows-latest' && 'cmake' || 'm4' }} \ # ${{ matrix.os != 'windows-latest' && 'pytest-forked' || '' }} # to investigate crashes
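[Editor's note] A closing note on the pattern this workflow uses throughout: each CI run pins one of several `suitesparse-graphblas` builds (or none) via an inline `python -c`, so repeated runs sample the whole support matrix without a combinatorial job explosion. The one-liner is equivalent to:

    import random

    # "" means "no pin": the resolver then picks the newest compatible build.
    psgver = random.choice(["==9.3.1.0", "==9.4.5.0", ""])
    print(f"suitesparse-graphblas{psgver}")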