Skip to content

Add matrix.power to compute e.g. A @ A @ A @ ... #483

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 8 commits on
Jul 26, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/test_and_build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -430,7 +430,7 @@ jobs:
id: coverageAttempt3
if: steps.coverageAttempt2.outcome == 'failure'
# Continue even if it failed 3 times... (sheesh! use codecov instead)
continue-on-error: false
continue-on-error: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_FLAG_NAME: ${{ matrix.os }}/${{ matrix.slowtask }}
Expand Down
6 changes: 3 additions & 3 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ repos:
- id: black
- id: black-jupyter
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.0.278
rev: v0.0.280
hooks:
- id: ruff
args: [--fix-only, --show-fixes]
Expand All @@ -79,7 +79,7 @@ repos:
additional_dependencies: &flake8_dependencies
# These versions need updated manually
- flake8==6.0.0
- flake8-bugbear==23.6.5
- flake8-bugbear==23.7.10
- flake8-simplify==0.20.0
- repo: https://github.com/asottile/yesqa
rev: v1.5.0
Expand All @@ -94,7 +94,7 @@ repos:
additional_dependencies: [tomli]
files: ^(graphblas|docs)/
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.0.278
rev: v0.0.280
hooks:
- id: ruff
- repo: https://github.com/sphinx-contrib/sphinx-lint
Expand Down
5 changes: 5 additions & 0 deletions graphblas/core/automethods.py
Original file line number Diff line number Diff line change
Expand Up @@ -213,6 +213,10 @@ def outer(self):
return self._get_value("outer")


def power(self):
    # Deferred accessor used by the automethods machinery: resolve the
    # wrapped object's `power` attribute lazily via `_get_value`.
    return self._get_value("power")


def reduce(self):
return self._get_value("reduce")

Expand Down Expand Up @@ -410,6 +414,7 @@ def _main():
"kronecker",
"mxm",
"mxv",
"power",
"reduce_columnwise",
"reduce_rowwise",
"reduce_scalar",
Expand Down
1 change: 1 addition & 0 deletions graphblas/core/infix.py
Original file line number Diff line number Diff line change
Expand Up @@ -330,6 +330,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
mxv = wrapdoc(Matrix.mxv)(property(automethods.mxv))
name = wrapdoc(Matrix.name)(property(automethods.name)).setter(automethods._set_name)
nvals = wrapdoc(Matrix.nvals)(property(automethods.nvals))
power = wrapdoc(Matrix.power)(property(automethods.power))
reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(automethods.reduce_columnwise))
reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(automethods.reduce_rowwise))
reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(automethods.reduce_scalar))
Expand Down
122 changes: 120 additions & 2 deletions graphblas/core/matrix.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
class_property,
get_order,
ints_to_numpy_buffer,
maybe_integral,
normalize_values,
output_type,
values_to_numpy_buffer,
Expand Down Expand Up @@ -91,6 +92,68 @@ def _reposition(updater, indices, chunk):
updater[indices] = chunk


def _power(updater, A, n, op):
    """Compute ``A @ A @ ... @ A`` (``n`` terms) into ``updater`` via repeated squaring.

    Parameters
    ----------
    updater : Updater
        Output target; the final product is assigned into it so masks and
        accumulators apply only to the last multiplication.
    A : Matrix
        Square input matrix (validated by ``Matrix.power`` before we get here).
    n : int
        Positive integer exponent (``n >= 1``; validated by the caller).
    op : Semiring
        Typed semiring used for every matrix multiplication.
    """
    opts = updater.opts
    if n == 1:
        # A**1 is just A; no multiplication needed.
        updater << A
        return
    # Use repeated squaring: compute A^2, A^4, A^8, etc., and combine terms as needed.
    # See `numpy.linalg.matrix_power` for a simpler implementation to understand how this works.
    # We reuse `result` and `square` outputs, and use `square_expr` so masks can be applied.
    result = square = square_expr = None
    # Consume the exponent one binary digit at a time (least-significant first).
    n, bit = divmod(n, 2)
    while True:
        if bit != 0:
            # Need to multiply `square_expr` or `A` into the result
            if square_expr is not None:
                # Need to evaluate `square_expr`; either into final result, or into `square`
                if n == 0 and result is None:
                    # Handle `updater << A @ A` without an intermediate value
                    updater << square_expr
                    return
                if square is None:
                    # Create `square = A @ A`
                    square = square_expr.new(name="Squares", **opts)
                else:
                    # Compute `square << square @ square`
                    square(**opts) << square_expr
                square_expr = None
            if result is None:
                # First time needing the intermediate result!
                if square is None:
                    # Use `A` if possible to avoid unnecessary copying
                    # We will detect and handle `result is A` below
                    result = A
                else:
                    # Copy square as intermediate result
                    result = square.dup(name="Power", **opts)
            elif n == 0:
                # All done! No more terms to compute
                updater << op(result @ square)
                return
            elif result is A:
                # Now we need to create a new matrix for the intermediate result
                result = op(result @ square).new(name="Power", **opts)
            else:
                # Main branch: multiply `square` into `result`
                result(**opts) << op(result @ square)
        n, bit = divmod(n, 2)
        if square_expr is not None:
            # We need to perform another squaring, so evaluate current `square_expr` first
            if square is None:
                # Create `square`
                square = square_expr.new(name="Squares", **opts)
            else:
                # Compute `square`
                # NOTE(review): unlike the evaluation above, `**opts` is not passed
                # here — presumably intentional (no mask applies to plain squaring);
                # confirm this asymmetry is deliberate.
                square << square_expr
        if square is None:
            # First iteration! Create expression for first square
            square_expr = op(A @ A)
        else:
            # Expression for repeated squaring
            square_expr = op(square @ square)


class Matrix(BaseType):
"""Create a new GraphBLAS Sparse Matrix.

Expand Down Expand Up @@ -155,8 +218,6 @@ def _as_vector(self, *, name=None):
This is SuiteSparse-specific and may change in the future.
This does not copy the matrix.
"""
from .vector import Vector

if self._ncols != 1:
raise ValueError(
f"Matrix must have a single column (not {self._ncols}) to be cast to a Vector"
Expand Down Expand Up @@ -2690,6 +2751,60 @@ def reposition(self, row_offset, column_offset, *, nrows=None, ncols=None):
dtype=self.dtype,
)

def power(self, n, op=semiring.plus_times):
    """Raise a square Matrix to the (positive integer) power ``n``.

    Matrix power is computed by repeated matrix squaring and matrix multiplication.
    For a graph as an adjacency matrix, matrix power with the default ``plus_times``
    semiring computes the number of walks connecting each pair of nodes.
    Note that the result can grow very quickly for large matrices and larger ``n``.

    Parameters
    ----------
    n : int
        The exponent; must be a positive integer.
    op : :class:`~graphblas.core.operator.Semiring`
        Semiring used in the computation.

    Returns
    -------
    MatrixExpression

    Raises
    ------
    DimensionMismatch
        If the Matrix is not square.
    TypeError
        If ``n`` is not an integer.
    ValueError
        If ``n`` is not positive (``n == 0`` is not supported).

    Examples
    --------
    .. code-block:: python

        C << A.power(4, op=semiring.plus_times)

        # Is equivalent to:
        tmp = (A @ A).new()
        tmp << tmp @ tmp
        C << tmp @ tmp

        # And is more efficient than the naive implementation:
        C = A.dup()
        for i in range(1, 4):
            C << A @ C
    """
    # Validate shape first, then exponent type, then exponent value, then op.
    if self._nrows != self._ncols:
        raise DimensionMismatch(f"power only works for square Matrix; shape is {self.shape}")
    exponent = maybe_integral(n)
    if exponent is None:
        raise TypeError(f"n must be a positive integer; got bad type: {type(n)}")
    if exponent <= 0:
        raise ValueError(f"n must be a positive integer; got: {exponent}")
    typed_op = get_typed_op(op, self.dtype, kind="semiring")
    self._expect_op(typed_op, "Semiring", within="power", argname="op")
    # Defer the actual computation to `_power` via a lazy expression.
    return MatrixExpression(
        "power",
        None,
        [self, _power, (self, exponent, typed_op)],  # [*expr_args, func, args]
        expr_repr=f"{{0.name}}.power({exponent}, op={typed_op})",
        nrows=self._nrows,
        ncols=self._ncols,
        dtype=self.dtype,
    )

##################################
# Extract and Assign index methods
##################################
Expand Down Expand Up @@ -3358,6 +3473,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
mxv = wrapdoc(Matrix.mxv)(property(automethods.mxv))
name = wrapdoc(Matrix.name)(property(automethods.name)).setter(automethods._set_name)
nvals = wrapdoc(Matrix.nvals)(property(automethods.nvals))
power = wrapdoc(Matrix.power)(property(automethods.power))
reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(automethods.reduce_columnwise))
reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(automethods.reduce_rowwise))
reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(automethods.reduce_scalar))
Expand Down Expand Up @@ -3458,6 +3574,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts):
mxv = wrapdoc(Matrix.mxv)(property(automethods.mxv))
name = wrapdoc(Matrix.name)(property(automethods.name)).setter(automethods._set_name)
nvals = wrapdoc(Matrix.nvals)(property(automethods.nvals))
power = wrapdoc(Matrix.power)(property(automethods.power))
reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(automethods.reduce_columnwise))
reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(automethods.reduce_rowwise))
reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(automethods.reduce_scalar))
Expand Down Expand Up @@ -3619,6 +3736,7 @@ def to_dicts(self, order="rowwise"):
reduce_columnwise = Matrix.reduce_columnwise
reduce_scalar = Matrix.reduce_scalar
reposition = Matrix.reposition
power = Matrix.power

# Operator sugar
__or__ = Matrix.__or__
Expand Down
31 changes: 31 additions & 0 deletions graphblas/tests/test_matrix.py
Original file line number Diff line number Diff line change
Expand Up @@ -4375,3 +4375,34 @@ def test_subarray_dtypes():
if suitesparse:
Full2 = Matrix.ss.import_fullr(b2)
assert Full1.isequal(Full2, check_dtype=True)


def test_power(A):
    """`Matrix.power` agrees with naive repeated multiplication and validates input."""
    # A.power(k) should equal A @ A @ ... @ A (k factors) under the default semiring.
    naive = A.dup()
    for exponent in range(1, 50):
        assert A.power(exponent).new().isequal(naive)
        naive << A @ naive
    # The same holds for the transposed matrix.
    naive = A.T.new()
    for exponent in range(1, 10):
        assert A.T.power(exponent).new().isequal(naive)
        naive << A.T @ naive
    # A non-default semiring is honored.
    naive = A.dup()
    for exponent in range(1, 10):
        assert A.power(exponent, semiring.min_plus).new().isequal(naive)
        naive << semiring.min_plus(A @ naive)
    # Exceptional cases
    with pytest.raises(TypeError, match="must be a positive integer"):
        A.power(1.5)
    for bad_exponent in (-1, 0):
        with pytest.raises(ValueError, match="must be a positive integer"):
            # n == 0 not implemented yet... could create identity matrix
            A.power(bad_exponent)
    rectangular = A[:2, :3].new()
    with pytest.raises(DimensionMismatch):
        rectangular.power(2)
4 changes: 2 additions & 2 deletions graphblas/tests/test_vector.py
Original file line number Diff line number Diff line change
Expand Up @@ -999,10 +999,10 @@ def test_reduce_agg_firstlast_index(v):

def test_reduce_agg_empty():
v = Vector("UINT8", size=3)
for _attr, aggr in vars(agg).items():
for attr, aggr in vars(agg).items():
if not isinstance(aggr, agg.Aggregator):
continue
s = v.reduce(aggr).new()
s = v.reduce(aggr).new(name=attr)
assert compute(s.value) is None


Expand Down
2 changes: 1 addition & 1 deletion scripts/check_versions.sh
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,6 @@ conda search 'sparse[channel=conda-forge]>=0.14.0'
conda search 'fast_matrix_market[channel=conda-forge]>=1.7.2'
conda search 'numba[channel=conda-forge]>=0.57.1'
conda search 'pyyaml[channel=conda-forge]>=6.0'
conda search 'flake8-bugbear[channel=conda-forge]>=23.6.5'
conda search 'flake8-bugbear[channel=conda-forge]>=23.7.10'
conda search 'flake8-simplify[channel=conda-forge]>=0.20.0'
# conda search 'python[channel=conda-forge]>=3.8 *pypy*'