diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index d93b4c25c..4c1c0e312 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -430,7 +430,7 @@ jobs: id: coverageAttempt3 if: steps.coverageAttempt2.outcome == 'failure' # Continue even if it failed 3 times... (sheesh! use codecov instead) - continue-on-error: false + continue-on-error: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} COVERALLS_FLAG_NAME: ${{ matrix.os }}/${{ matrix.slowtask }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b8d767f05..fef625a70 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.278 + rev: v0.0.280 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -79,7 +79,7 @@ repos: additional_dependencies: &flake8_dependencies # These versions need updated manually - flake8==6.0.0 - - flake8-bugbear==23.6.5 + - flake8-bugbear==23.7.10 - flake8-simplify==0.20.0 - repo: https://github.com/asottile/yesqa rev: v1.5.0 @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.278 + rev: v0.0.280 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/graphblas/core/automethods.py b/graphblas/core/automethods.py index 937e331fd..0a2aa208a 100644 --- a/graphblas/core/automethods.py +++ b/graphblas/core/automethods.py @@ -213,6 +213,10 @@ def outer(self): return self._get_value("outer") +def power(self): + return self._get_value("power") + + def reduce(self): return self._get_value("reduce") @@ -410,6 +414,7 @@ def _main(): "kronecker", "mxm", "mxv", + "power", "reduce_columnwise", "reduce_rowwise", "reduce_scalar", diff --git a/graphblas/core/infix.py b/graphblas/core/infix.py index bd1d10a92..88fc52dbe 100644 --- a/graphblas/core/infix.py 
+++ b/graphblas/core/infix.py @@ -330,6 +330,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): mxv = wrapdoc(Matrix.mxv)(property(automethods.mxv)) name = wrapdoc(Matrix.name)(property(automethods.name)).setter(automethods._set_name) nvals = wrapdoc(Matrix.nvals)(property(automethods.nvals)) + power = wrapdoc(Matrix.power)(property(automethods.power)) reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(automethods.reduce_columnwise)) reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(automethods.reduce_rowwise)) reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(automethods.reduce_scalar)) diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 4696d8ead..d820ca424 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -28,6 +28,7 @@ class_property, get_order, ints_to_numpy_buffer, + maybe_integral, normalize_values, output_type, values_to_numpy_buffer, @@ -91,6 +92,68 @@ def _reposition(updater, indices, chunk): updater[indices] = chunk +def _power(updater, A, n, op): + opts = updater.opts + if n == 1: + updater << A + return + # Use repeated squaring: compute A^2, A^4, A^8, etc., and combine terms as needed. + # See `numpy.linalg.matrix_power` for a simpler implementation to understand how this works. + # We reuse `result` and `square` outputs, and use `square_expr` so masks can be applied. 
+ result = square = square_expr = None + n, bit = divmod(n, 2) + while True: + if bit != 0: + # Need to multiply `square_expr` or `A` into the result + if square_expr is not None: + # Need to evaluate `square_expr`; either into final result, or into `square` + if n == 0 and result is None: + # Handle `updater << A @ A` without an intermediate value + updater << square_expr + return + if square is None: + # Create `square = A @ A` + square = square_expr.new(name="Squares", **opts) + else: + # Compute `square << square @ square` + square(**opts) << square_expr + square_expr = None + if result is None: + # First time needing the intermediate result! + if square is None: + # Use `A` if possible to avoid unnecessary copying + # We will detect and handle `result is A` below + result = A + else: + # Copy square as intermediate result + result = square.dup(name="Power", **opts) + elif n == 0: + # All done! No more terms to compute + updater << op(result @ square) + return + elif result is A: + # Now we need to create a new matrix for the intermediate result + result = op(result @ square).new(name="Power", **opts) + else: + # Main branch: multiply `square` into `result` + result(**opts) << op(result @ square) + n, bit = divmod(n, 2) + if square_expr is not None: + # We need to perform another squaring, so evaluate current `square_expr` first + if square is None: + # Create `square` + square = square_expr.new(name="Squares", **opts) + else: + # Compute `square` + square(**opts) << square_expr + if square is None: + # First iteration! Create expression for first square + square_expr = op(A @ A) + else: + # Expression for repeated squaring + square_expr = op(square @ square) + + + class Matrix(BaseType): + """Create a new GraphBLAS Sparse Matrix. @@ -155,8 +218,6 @@ def _as_vector(self, *, name=None): This is SuiteSparse-specific and may change in the future. This does not copy the matrix.
""" - from .vector import Vector - if self._ncols != 1: raise ValueError( f"Matrix must have a single column (not {self._ncols}) to be cast to a Vector" @@ -2690,6 +2751,60 @@ def reposition(self, row_offset, column_offset, *, nrows=None, ncols=None): dtype=self.dtype, ) + def power(self, n, op=semiring.plus_times): + """Raise a square Matrix to the (positive integer) power ``n``. + + Matrix power is computed by repeated matrix squaring and matrix multiplication. + For a graph as an adjacency matrix, matrix power with default ``plus_times`` + semiring computes the number of walks connecting each pair of nodes. + The result can grow very quickly for large matrices and with larger ``n``. + + Parameters + ---------- + n : int + The exponent must be a positive integer. + op : :class:`~graphblas.core.operator.Semiring` + Semiring used in the computation + + Returns + ------- + MatrixExpression + + Examples + -------- + .. code-block:: python + + C << A.power(4, op=semiring.plus_times) + + # Is equivalent to: + tmp = (A @ A).new() + tmp << tmp @ tmp + C << tmp @ tmp + + # And is more efficient than the naive implementation: + C = A.dup() + for i in range(1, 4): + C << A @ C + """ + method_name = "power" + if self._nrows != self._ncols: + raise DimensionMismatch(f"power only works for square Matrix; shape is {self.shape}") + if (N := maybe_integral(n)) is None: + raise TypeError(f"n must be a positive integer; got bad type: {type(n)}") + if N <= 0: + raise ValueError(f"n must be a positive integer; got: {N}") + op = get_typed_op(op, self.dtype, kind="semiring") + self._expect_op(op, "Semiring", within=method_name, argname="op") + return MatrixExpression( + "power", + None, + [self, _power, (self, N, op)], # [*expr_args, func, args] + expr_repr=f"{{0.name}}.power({N}, op={op})", + nrows=self._nrows, + ncols=self._ncols, + dtype=self.dtype, + ) + ################################## # Extract and Assign index methods ################################## @@ -3358,6 +3473,7 @@ 
def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): mxv = wrapdoc(Matrix.mxv)(property(automethods.mxv)) name = wrapdoc(Matrix.name)(property(automethods.name)).setter(automethods._set_name) nvals = wrapdoc(Matrix.nvals)(property(automethods.nvals)) + power = wrapdoc(Matrix.power)(property(automethods.power)) reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(automethods.reduce_columnwise)) reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(automethods.reduce_rowwise)) reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(automethods.reduce_scalar)) @@ -3458,6 +3574,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): mxv = wrapdoc(Matrix.mxv)(property(automethods.mxv)) name = wrapdoc(Matrix.name)(property(automethods.name)).setter(automethods._set_name) nvals = wrapdoc(Matrix.nvals)(property(automethods.nvals)) + power = wrapdoc(Matrix.power)(property(automethods.power)) reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(automethods.reduce_columnwise)) reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(automethods.reduce_rowwise)) reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(automethods.reduce_scalar)) @@ -3619,6 +3736,7 @@ def to_dicts(self, order="rowwise"): reduce_columnwise = Matrix.reduce_columnwise reduce_scalar = Matrix.reduce_scalar reposition = Matrix.reposition + power = Matrix.power # Operator sugar __or__ = Matrix.__or__ diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index bc942bc49..80a66a524 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -4375,3 +4375,34 @@ def test_subarray_dtypes(): if suitesparse: Full2 = Matrix.ss.import_fullr(b2) assert Full1.isequal(Full2, check_dtype=True) + + +def test_power(A): + expected = A.dup() + for i in range(1, 50): + result = A.power(i).new() + assert result.isequal(expected) + expected << A @ expected + # Test transpose + expected = A.T.new() + for i in 
range(1, 10): + result = A.T.power(i).new() + assert result.isequal(expected) + expected << A.T @ expected + # Test other semiring + expected = A.dup() + for i in range(1, 10): + result = A.power(i, semiring.min_plus).new() + assert result.isequal(expected) + expected << semiring.min_plus(A @ expected) + # Exceptional + with pytest.raises(TypeError, match="must be a positive integer"): + A.power(1.5) + with pytest.raises(ValueError, match="must be a positive integer"): + A.power(-1) + with pytest.raises(ValueError, match="must be a positive integer"): + # Not implemented yet... could create identity matrix + A.power(0) + B = A[:2, :3].new() + with pytest.raises(DimensionMismatch): + B.power(2) diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index a1aabd183..e321d3e9b 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -999,10 +999,10 @@ def test_reduce_agg_firstlast_index(v): def test_reduce_agg_empty(): v = Vector("UINT8", size=3) - for _attr, aggr in vars(agg).items(): + for attr, aggr in vars(agg).items(): if not isinstance(aggr, agg.Aggregator): continue - s = v.reduce(aggr).new() + s = v.reduce(aggr).new(name=attr) assert compute(s.value) is None diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 263b1d8f7..ffa440c22 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -12,6 +12,6 @@ conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.2' conda search 'numba[channel=conda-forge]>=0.57.1' conda search 'pyyaml[channel=conda-forge]>=6.0' -conda search 'flake8-bugbear[channel=conda-forge]>=23.6.5' +conda search 'flake8-bugbear[channel=conda-forge]>=23.7.10' conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' # conda search 'python[channel=conda-forge]>=3.8 *pypy*'