diff --git a/.github/workflows/debug.yml b/.github/workflows/debug.yml index c9dc231fe..64d4bc12b 100644 --- a/.github/workflows/debug.yml +++ b/.github/workflows/debug.yml @@ -15,7 +15,7 @@ jobs: strategy: fail-fast: false matrix: - pyver: [3.9] + pyver: ["3.10"] testopts: - "--blocking" # - "--non-blocking --record --runslow" diff --git a/.github/workflows/imports.yml b/.github/workflows/imports.yml index 0116f615d..b9e9d4406 100644 --- a/.github/workflows/imports.yml +++ b/.github/workflows/imports.yml @@ -30,7 +30,6 @@ jobs: id: pyver with: contents: | - 3.9 3.10 3.11 3.12 @@ -38,14 +37,13 @@ jobs: 1 1 1 - 1 test_imports: needs: rngs runs-on: ${{ needs.rngs.outputs.os }} # runs-on: ${{ matrix.os }} # strategy: # matrix: - # python-version: ["3.9", "3.10", "3.11", "3.12"] + # python-version: ["3.10", "3.11", "3.12"] # os: ["ubuntu-latest", "macos-latest", "windows-latest"] steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index 366d01e97..b01d2a502 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -20,7 +20,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.9" + python-version: "3.10" - name: Install build dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 7086d8779..6c55a0eca 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -105,11 +105,10 @@ jobs: uses: ddradar/choose-random-action@v2.0.2 id: pyver with: - # We should support major Python versions for at least 36-42 months + # We should support major Python versions for at least 36 months as per SPEC 0 # We may be able to support pypy if anybody asks for it # 3.9.16 0_73_pypy contents: | - 3.9 3.10 3.11 3.12 @@ -117,7 +116,6 @@ jobs: 1 1 1 - 1 - name: RNG for source of python-suitesparse-graphblas uses: 
ddradar/choose-random-action@v2.0.2 id: sourcetype @@ -166,20 +164,13 @@ jobs: # # First let's randomly get versions of dependencies to install. # Consider removing old versions when they become problematic or very old (>=2 years). - nxver=$(python -c 'import random ; print(random.choice(["=2.7", "=2.8", "=3.0", "=3.1", "=3.2", ""]))') - sparsever=$(python -c 'import random ; print(random.choice(["=0.13", "=0.14", "=0.15", ""]))') + nxver=$(python -c 'import random ; print(random.choice(["=2.8", "=3.0", "=3.1", "=3.2", ""]))') + sparsever=$(python -c 'import random ; print(random.choice(["=0.14", "=0.15", ""]))') # Randomly choosing versions of dependencies based on Python version works surprisingly well... - if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", "=2.1", "=2.2", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') - fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') - yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') - elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then - npver=$(python -c 'import random ; print(random.choice(["=1.22", "=1.23", "=1.24", "=1.25", "=1.26", ""]))') + if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then + npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", "=1.26", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", "=1.12", ""]))') - pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", 
"=1.5", "=2.0", "=2.1", "=2.2", ""]))') + pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", "=2.1", "=2.2", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", "=2.4", "=2.5", ""]))') fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') @@ -237,8 +228,6 @@ jobs: numbaver=$(python -c 'import random ; print(random.choice(["=0.58", "=0.59", ""]))') elif [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then numbaver=$(python -c 'import random ; print(random.choice(["=0.57", "=0.58", "=0.59", ""]))') - elif [[ ${npver} == "=1.21" ]] ; then - numbaver=$(python -c 'import random ; print(random.choice(["=0.55", "=0.56", "=0.57", ""]))') else numbaver=$(python -c 'import random ; print(random.choice(["=0.56", "=0.57", "=0.58", "=0.59", ""]))') fi diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fa563b639..12e5dd865 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -54,7 +54,7 @@ repos: rev: v3.15.0 hooks: - id: pyupgrade - args: [--py39-plus] + args: [--py310-plus] - repo: https://github.com/MarcoGorelli/auto-walrus rev: v0.2.2 hooks: diff --git a/docs/getting_started/faq.rst b/docs/getting_started/faq.rst index 1e60a1bd4..2609e7929 100644 --- a/docs/getting_started/faq.rst +++ b/docs/getting_started/faq.rst @@ -101,11 +101,10 @@ Bugs are not considered deprecations and may be fixed immediately. What is the version support policy? +++++++++++++++++++++++++++++++++++ -Each major Python version will be supported for at least 36 to 42 months. +Each major Python version will be supported for at least 36. Major dependencies such as NumPy should be supported for at least 24 months. 
-This is motivated by these guidelines: +We aim to follow SPEC 0: -- https://numpy.org/neps/nep-0029-deprecation_policy.html - https://scientific-python.org/specs/spec-0000/ ``python-graphblas`` itself follows a "single trunk" versioning strategy. diff --git a/graphblas/core/formatting.py b/graphblas/core/formatting.py index aefb87f94..0b6252101 100644 --- a/graphblas/core/formatting.py +++ b/graphblas/core/formatting.py @@ -630,7 +630,7 @@ def create_header(type_name, keys, vals, *, lower_border=False, name="", quote=T name = f'"{name}"' key_text = [] val_text = [] - for key, val in zip(keys, vals): + for key, val in zip(keys, vals, strict=True): width = max(len(key), len(val)) + 2 key_text.append(key.rjust(width)) val_text.append(val.rjust(width)) diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 359477d4c..e28e92a65 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -360,7 +360,7 @@ def __contains__(self, index): def __iter__(self): """Iterate over (row, col) indices which are present in the matrix.""" rows, columns, _ = self.to_coo(values=False) - return zip(rows.flat, columns.flat) + return zip(rows.flat, columns.flat, strict=True) def __sizeof__(self): if backend == "suitesparse": @@ -961,7 +961,7 @@ def from_edgelist( rows = edgelist[:, 0] cols = edgelist[:, 1] else: - unzipped = list(zip(*edgelist)) + unzipped = list(zip(*edgelist, strict=True)) if len(unzipped) == 2: rows, cols = unzipped elif len(unzipped) == 3: @@ -1826,10 +1826,11 @@ def to_dicts(self, order="rowwise"): cols = cols.tolist() values = values.tolist() return { - row: dict(zip(cols[start:stop], values[start:stop])) + row: dict(zip(cols[start:stop], values[start:stop], strict=True)) for row, (start, stop) in zip( compressed_rows.tolist(), np.lib.stride_tricks.sliding_window_view(indptr, 2).tolist(), + strict=True, ) } # Alternative diff --git a/graphblas/core/ss/matrix.py b/graphblas/core/ss/matrix.py index 0489cb5d6..0a08c50e2 100644 --- 
a/graphblas/core/ss/matrix.py +++ b/graphblas/core/ss/matrix.py @@ -58,7 +58,7 @@ def head(matrix, n=10, dtype=None, *, sort=False): dtype = matrix.dtype else: dtype = lookup_dtype(dtype) - rows, cols, vals = zip(*itertools.islice(matrix.ss.iteritems(), n)) + rows, cols, vals = zip(*itertools.islice(matrix.ss.iteritems(), n), strict=True) return np.array(rows, np.uint64), np.array(cols, np.uint64), np.array(vals, dtype.np_type) diff --git a/graphblas/core/ss/vector.py b/graphblas/core/ss/vector.py index d1f7a5bcb..a21d54de9 100644 --- a/graphblas/core/ss/vector.py +++ b/graphblas/core/ss/vector.py @@ -43,7 +43,7 @@ def head(vector, n=10, dtype=None, *, sort=False): dtype = vector.dtype else: dtype = lookup_dtype(dtype) - indices, vals = zip(*itertools.islice(vector.ss.iteritems(), n)) + indices, vals = zip(*itertools.islice(vector.ss.iteritems(), n), strict=True) return np.array(indices, np.uint64), np.array(vals, dtype.np_type) diff --git a/graphblas/core/utils.py b/graphblas/core/utils.py index 184272124..6e91edd1b 100644 --- a/graphblas/core/utils.py +++ b/graphblas/core/utils.py @@ -202,7 +202,7 @@ def normalize_chunks(chunks, shape): f"chunks argument must be of length {len(shape)} (one for each dimension of a {typ})" ) chunksizes = [] - for size, chunk in zip(shape, chunks): + for size, chunk in zip(shape, chunks, strict=True): if chunk is None: cur_chunks = [size] elif (c := maybe_integral(chunk)) is not None: diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py index 863d186ec..8bac4198e 100644 --- a/graphblas/core/vector.py +++ b/graphblas/core/vector.py @@ -772,7 +772,7 @@ def from_pairs(cls, pairs, dtype=None, *, size=None, dup_op=None, name=None): """ if isinstance(pairs, np.ndarray): raise TypeError("pairs as NumPy array is not supported; use `Vector.from_coo` instead") - unzipped = list(zip(*pairs)) + unzipped = list(zip(*pairs, strict=True)) if len(unzipped) == 2: indices, values = unzipped elif not unzipped: @@ -2105,7 +2105,7 @@ def 
to_dict(self): """ indices, values = self.to_coo(sort=False) - return dict(zip(indices.tolist(), values.tolist())) + return dict(zip(indices.tolist(), values.tolist(), strict=True)) if backend == "suitesparse": diff --git a/graphblas/io/_networkx.py b/graphblas/io/_networkx.py index dab04c82d..8cf84e576 100644 --- a/graphblas/io/_networkx.py +++ b/graphblas/io/_networkx.py @@ -55,7 +55,9 @@ def to_networkx(m, edge_attribute="weight"): cols = cols.tolist() G = nx.DiGraph() if edge_attribute is None: - G.add_edges_from(zip(rows, cols)) + G.add_edges_from(zip(rows, cols, strict=True)) else: - G.add_weighted_edges_from(zip(rows, cols, vals.tolist()), weight=edge_attribute) + G.add_weighted_edges_from( + zip(rows, cols, vals.tolist(), strict=True), weight=edge_attribute + ) return G diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py index 109c90a2c..7e786f0da 100644 --- a/graphblas/tests/test_io.py +++ b/graphblas/tests/test_io.py @@ -146,7 +146,7 @@ def test_matrix_to_from_networkx(): M = gb.io.from_networkx(G, nodelist=range(7)) if suitesparse: assert M.ss.is_iso - rows, cols = zip(*edges) + rows, cols = zip(*edges, strict=True) expected = gb.Matrix.from_coo(rows, cols, 1) assert expected.isequal(M) # Test empty diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index 06e4ee868..63561930b 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -2603,12 +2603,14 @@ def test_iter(A): zip( [3, 0, 3, 5, 6, 0, 6, 1, 6, 2, 4, 1], [0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6], + strict=True, ) ) assert set(A.T) == set( zip( [0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6], [3, 0, 3, 5, 6, 0, 6, 1, 6, 2, 4, 1], + strict=True, ) ) @@ -2731,8 +2733,8 @@ def test_ss_split(A): for results in [A.ss.split([4, 3]), A.ss.split([[4, None], 3], name="split")]: row_boundaries = [0, 4, 7] col_boundaries = [0, 3, 6, 7] - for i, (i1, i2) in enumerate(zip(row_boundaries[:-1], row_boundaries[1:])): - for j, (j1, j2) in 
enumerate(zip(col_boundaries[:-1], col_boundaries[1:])): + for i, (i1, i2) in enumerate(itertools.pairwise(row_boundaries)): + for j, (j1, j2) in enumerate(itertools.pairwise(col_boundaries)): expected = A[i1:i2, j1:j2].new() assert expected.isequal(results[i][j]) with pytest.raises(DimensionMismatch): @@ -3068,7 +3070,7 @@ def test_ss_flatten(A): [3, 2, 3, 1, 5, 3, 7, 8, 3, 1, 7, 4], ] # row-wise - indices = [row * A.ncols + col for row, col in zip(data[0], data[1])] + indices = [row * A.ncols + col for row, col in zip(data[0], data[1], strict=True)] expected = Vector.from_coo(indices, data[2], size=A.nrows * A.ncols) for fmt in ["csr", "hypercsr", "bitmapr"]: B = Matrix.ss.import_any(**A.ss.export(format=fmt)) @@ -3087,7 +3089,7 @@ def test_ss_flatten(A): assert C.isequal(B) # column-wise - indices = [col * A.nrows + row for row, col in zip(data[0], data[1])] + indices = [col * A.nrows + row for row, col in zip(data[0], data[1], strict=True)] expected = Vector.from_coo(indices, data[2], size=A.nrows * A.ncols) for fmt in ["csc", "hypercsc", "bitmapc"]: B = Matrix.ss.import_any(**A.ss.export(format=fmt)) @@ -3626,9 +3628,9 @@ def test_ss_iteration(A): assert not list(B.ss.itervalues()) assert not list(B.ss.iteritems()) rows, columns, values = A.to_coo() - assert sorted(zip(rows, columns)) == sorted(A.ss.iterkeys()) + assert sorted(zip(rows, columns, strict=True)) == sorted(A.ss.iterkeys()) assert sorted(values) == sorted(A.ss.itervalues()) - assert sorted(zip(rows, columns, values)) == sorted(A.ss.iteritems()) + assert sorted(zip(rows, columns, values, strict=True)) == sorted(A.ss.iteritems()) N = rows.size A = Matrix.ss.import_bitmapr(**A.ss.export("bitmapr")) diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index 77f608969..df1f5c86e 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -2270,7 +2270,7 @@ def test_ss_iteration(v): # This is what I would expect assert sorted(indices) == 
sorted(v.ss.iterkeys()) assert sorted(values) == sorted(v.ss.itervalues()) - assert sorted(zip(indices, values)) == sorted(v.ss.iteritems()) + assert sorted(zip(indices, values, strict=True)) == sorted(v.ss.iteritems()) N = indices.size v = Vector.ss.import_bitmap(**v.ss.export("bitmap")) diff --git a/pyproject.toml b/pyproject.toml index e9ce9da86..a3447b751 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ name = "python-graphblas" dynamic = ["version"] description = "Python library for GraphBLAS: high-performance sparse linear algebra for scalable graph analytics" readme = "README.md" -requires-python = ">=3.9" +requires-python = ">=3.10" license = {file = "LICENSE"} authors = [ {name = "Erik Welch", email = "erik.n.welch@gmail.com"}, @@ -44,7 +44,6 @@ classifiers = [ "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", @@ -58,7 +57,7 @@ classifiers = [ "Topic :: Software Development :: Libraries :: Python Modules", ] dependencies = [ - "numpy >=1.22", + "numpy >=1.23", "donfig >=0.6", "pyyaml >=5.4", # These won't be installed by default after 2024.3.0 @@ -84,7 +83,7 @@ numba = [ "numba >=0.55", ] pandas = [ - "pandas >=1.2", + "pandas >=1.5", ] scipy = [ "scipy >=1.9", @@ -99,17 +98,17 @@ io = [ "python-graphblas[networkx,scipy]", "python-graphblas[numba]; python_version<'3.13'", "awkward >=1.9", - "sparse >=0.13; python_version<'3.13'", # make optional, b/c sparse needs numba + "sparse >=0.14; python_version<'3.13'", # make optional, b/c sparse needs numba "fast-matrix-market >=1.4.5", ] viz = [ "python-graphblas[networkx,scipy]", - "matplotlib >=3.5", + "matplotlib >=3.6", ] datashade = [ # datashade requires numba "python-graphblas[numba,pandas,scipy]", - "datashader >=0.12", - "hvplot >=0.7", + "datashader 
>=0.14", + "hvplot >=0.8", ] test = [ "python-graphblas[suitesparse,pandas,scipy]", @@ -157,7 +156,7 @@ dirty_template = "{tag}+{ccount}.g{sha}.dirty" [tool.black] line-length = 100 -target-version = ["py39", "py310", "py311", "py312"] +target-version = ["py310", "py311", "py312"] [tool.isort] sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] @@ -241,10 +240,11 @@ ignore-words-list = "coo,ba" [tool.ruff] # https://github.com/charliermarsh/ruff/ line-length = 100 -target-version = "py39" +target-version = "py310" [tool.ruff.lint] unfixable = [ - "F841" # unused-variable (Note: can leave useless expression) + "F841", # unused-variable (Note: can leave useless expression) + "B905", # zip-without-explicit-strict (Note: prefer `zip(x, y, strict=True)`) ] select = [ # Have we enabled too many checks that they'll become a nuisance? We'll see... @@ -360,6 +360,7 @@ ignore = [ "SIM105", # Use contextlib.suppress(...) instead of try-except-pass (Note: try-except-pass is much faster) "SIM108", # Use ternary operator ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer) "TRY003", # Avoid specifying long messages outside the exception class (Note: why?) 
+ "UP038", # Use `X | Y` in `isinstance` call instead of `(X, Y)` (Note: using `|` is slower atm) # Ignored categories "C90", # mccabe (Too strict, but maybe we should make things less complex) @@ -409,7 +410,7 @@ convention = "numpy" [tool.pylint.messages_control] # To run a single check, do: pylint graphblas --disable E,W,R,C,I --enable assignment-from-no-return max-line-length = 100 -py-version = "3.9" +py-version = "3.10" enable = ["I"] disable = [ # Error diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 59fb59d5f..893f09539 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -14,4 +14,4 @@ conda search 'sparse[channel=conda-forge]>=0.15.1' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.6' conda search 'numba[channel=conda-forge]>=0.59.0' conda search 'pyyaml[channel=conda-forge]>=6.0.1' -# conda search 'python[channel=conda-forge]>=3.9 *pypy*' +# conda search 'python[channel=conda-forge]>=3.10 *pypy*'