diff --git a/.github/workflows/numpy.yml b/.github/workflows/numpy.yml
index 7ec3b5d3..82420452 100644
--- a/.github/workflows/numpy.yml
+++ b/.github/workflows/numpy.yml
@@ -25,7 +25,7 @@ jobs:
env:
ARRAY_API_TESTS_MODULE: numpy.array_api
run: |
- # Skip test cases with known issues
+ # Skip testing functions with known issues
cat << EOF >> skips.txt
# copy not implemented
@@ -35,7 +35,7 @@ jobs:
# https://github.com/numpy/numpy/issues/20870
array_api_tests/test_data_type_functions.py::test_can_cast
# The return dtype for trace is not consistent in the spec
- # (https://github.com/data-apis/array-api/issues/202#issuecomment-952529197)
+ # https://github.com/data-apis/array-api/issues/202#issuecomment-952529197
array_api_tests/test_linalg.py::test_trace
# waiting on NumPy to allow/revert distinct NaNs for np.unique
# https://github.com/numpy/numpy/issues/20326#issuecomment-1012380448
diff --git a/README.md b/README.md
index f2ca381f..1d4ad770 100644
--- a/README.md
+++ b/README.md
@@ -50,47 +50,6 @@ a specific test case, which is useful when developing functions.
$ pytest array_api_tests/test_creation_functions.py::test_zeros
```
-## Releases
-
-The test suite has tagged releases on
-[GitHub](https://github.com/data-apis/array-api-tests/releases). If you run
-the test suite in your CI system, we recommend pinning against a release tag.
-
-We use [calender versioning](https://calver.org/) for the releases. You should
-expect that any version may be "breaking" compared to the previous one, in the
-sense that there may have been additional tests added which cause a previously
-passing library to fail.
-
-For now, the test suite is
-not installable as a Python package. You can use it by cloning the repo and
-running `pytest` as described above. If it would help you to be able to
-install it as a package, [please let us
-know](https://github.com/data-apis/array-api-tests/issues/85).
-
-*Test suite maintainer note:* to make a release of the test suite, make an
-annotated tag with the version:
-
-```
-git tag -a 2022.1
-```
-
-(for the message, just write something like "array-api-tests version 2022.1").
-Be sure to use the calver version number for the tag name. Versioneer will
-automatically set the version number of the `array_api_tests` package based on
-the git tag.
-
-Then push the tag to GitHub
-
-```
-git push --tags origin 2022.1
-```
-
-Finally go to the [tags page on
-GitHub](https://github.com/data-apis/array-api-tests/tags) and convert the tag
-into a release. If you want, you can add release notes to the release page on
-GitHub.
-
-
## What the test suite covers
We are interested in array libraries conforming to the
@@ -101,12 +60,13 @@ so as to not unexpectedly fail the suite.
### Primary tests
-Every function—including array object methods—has a respective test method. We
-use [Hypothesis](https://hypothesis.readthedocs.io/en/latest/) to generate a
-diverse set of valid inputs. This means array inputs will cover different dtypes
-and shapes, as well as contain interesting elements. These examples generate
-with interesting arrangements of non-array positional arguments and keyword
-arguments.
+Every function—including array object methods—has a respective test
+method<sup>1</sup>. We use
+[Hypothesis](https://hypothesis.readthedocs.io/en/latest/)
+to generate a diverse set of valid inputs. This means array inputs will cover
+different dtypes and shapes, as well as contain interesting elements. These
+examples are generated with interesting arrangements of non-array positional
+arguments and keyword arguments.
Each test case will cover the following areas if relevant:
@@ -147,7 +107,7 @@ of the functions and some miscellaneous things.
functions interact with them correctly.
Be aware that some aspects of the spec are impractical or impossible to actually
-test, so they are not covered in the suite
+test, so they are not covered in the suite.
## Interpreting errors
@@ -172,21 +132,99 @@ behaviour different from the spec, or test something that is not documented,
this is a bug—please [report such
issues](https://github.com/data-apis/array-api-tests/issues/) to us.
-## Configuration
+
+## Running on CI
+
+See our existing [GitHub Actions workflow for
+NumPy](https://github.com/data-apis/array-api-tests/blob/master/.github/workflows/numpy.yml)
+for an example of using the test suite on CI.
+
+### Releases
+
+We recommend pinning against a [release tag](https://github.com/data-apis/array-api-tests/releases)
+when running on CI.
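+
+In a GitHub Actions workflow, pinning might look something like the following
+sketch (the checkout action version and the tag name are illustrative, not
+prescriptive):
+
+```yaml
+  - name: Checkout the test suite
+    uses: actions/checkout@v2
+    with:
+      repository: data-apis/array-api-tests
+      ref: '2022.01.01'  # pin to a specific release tag
+```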
+
+We use [calendar versioning](https://calver.org/) for the releases. You should
+expect that any version may be "breaking" compared to the previous one, in that
+new tests (or improvements to existing tests) may cause a previously passing
+library to fail.
+
+### Configuration
+
+#### CI flag
+
+Use the `--ci` flag to run only the primary and special cases tests. You can
+ignore the other test cases as they are redundant for the purposes of checking
+compliance.
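+
+For example, a compliance-focused run might look something like this (the
+module name here is just illustrative):
+
+```
+$ ARRAY_API_TESTS_MODULE=numpy.array_api pytest --ci
+```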
+
+#### Data-dependent shapes
+
+Use the `--disable-data-dependent-shapes` flag to skip testing functions which have
+[data-dependent shapes](https://data-apis.org/array-api/latest/design_topics/data_dependent_output_shapes.html).
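+
+For example (the shorter `--disable-dds` spelling registered in `conftest.py`
+behaves the same):
+
+```
+$ pytest --disable-data-dependent-shapes
+```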
+
+#### Extensions
By default, tests for the optional Array API extensions such as
[`linalg`](https://data-apis.org/array-api/latest/extensions/linear_algebra_functions.html)
will be skipped if not present in the specified array module. You can purposely
skip testing extension(s) via the `--disable-extension` option.
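+
+For example, skipping the `linalg` extension tests might look like this (the
+extension name is just an example):
+
+```
+$ pytest --disable-extension linalg
+```
+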
+#### Skip test cases
+
+Test cases you want to skip can be specified in a `skips.txt` file in the root
+of this repository, e.g.:
+
+```
+# ./skips.txt
+# Line comments can be denoted with the hash symbol (#)
+
+# Skip specific test case, e.g. when argsort() does not respect relative order
+# https://github.com/numpy/numpy/issues/20778
+array_api_tests/test_sorting_functions.py::test_argsort
+
+# Skip specific test case parameter, e.g. you forgot to implement in-place adds
+array_api_tests/test_add[__iadd__(x1, x2)]
+array_api_tests/test_add[__iadd__(x, s)]
+
+# Skip module, e.g. when your set functions treat NaNs as non-distinct
+# https://github.com/numpy/numpy/issues/20326
+array_api_tests/test_set_functions.py
+```
+
+For GitHub Actions, you might like to keep everything in the workflow config
+instead of having a separate `skips.txt` file, e.g.:
+
+```yaml
+# ./.github/workflows/array_api.yml
+...
+ ...
+ - name: Run the test suite
+ env:
+ ARRAY_API_TESTS_MODULE: your.array.api.namespace
+ run: |
+ # Skip test cases with known issues
+ cat << EOF >> skips.txt
+
+ # Comments can still work here
+ array_api_tests/test_sorting_functions.py::test_argsort
+ array_api_tests/test_add[__iadd__(x1, x2)]
+ array_api_tests/test_add[__iadd__(x, s)]
+ array_api_tests/test_set_functions.py
+
+ EOF
+
+ pytest -v -rxXfE --ci
+```
+
+#### Max examples
+
The tests make heavy use
[Hypothesis](https://hypothesis.readthedocs.io/en/latest/). You can configure
how many examples are generated using the `--max-examples` flag, which defaults
to 100. Lower values can be useful for quick checks, and larger values should
-result in more rigorous runs. For example, `--max-examples 10000` may find bugs
-where default runs don't, but will take a much longer time.
+result in more rigorous runs. For example, `--max-examples 10_000` may find bugs
+where default runs don't, but will take much longer to run.
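+
+For instance, a quick smoke-test run might use a much smaller budget:
+
+```
+$ pytest --max-examples 20
+```
+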
-
## Contributing
@@ -200,7 +238,7 @@ many utilities that parralel NumPy's own test utils in the `*_helpers.py` files.
### Tools
-Hypothesis should always be used for the primary tests, and can be useful
+Hypothesis should almost always be used for the primary tests, and can be useful
elsewhere. Effort should be made so drawn arguments are labeled with their
respective names. For
[`st.data()`](https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.data),
@@ -231,6 +269,31 @@ where `path/to/array-api` is the path to a local clone of the [`array-api`
repo](https://github.com/data-apis/array-api/). Edit `generate_stubs.py` to make
changes to the generated files.
+
+### Release
+
+To make a release, first create an annotated tag with the version, e.g.:
+
+```
+git tag -a 2022.01.01
+```
+
+Be sure to use the calver version number for the tag name. Don't worry too much
+about the tag message, e.g. just write "2022.01.01".
+
+Versioneer will automatically set the version number of the `array_api_tests`
+package based on the git tag. Push the tag to GitHub:
+
+```
+git push --tags upstream 2022.01.01
+```
+
+Then go to the [tags page on
+GitHub](https://github.com/data-apis/array-api-tests/tags) and convert the tag
+into a release. If you want, you can add release notes, which GitHub can
+generate for you.
+
+
## Future plans
Keeping full coverage of the spec is an on-going priority as the Array API
@@ -250,3 +313,23 @@ come across.
for output values (particularly epsilons for floating-point outputs), so we
need to review these and either implement assertions or properly note the lack
thereof.
+
+---
+
+<sup>1</sup>The only exceptions to having just one primary test per function are:
+
+* [`asarray()`](https://data-apis.org/array-api/latest/API_specification/generated/signatures.creation_functions.asarray.html),
+ which is tested by `test_asarray_scalars` and `test_asarray_arrays` in
+ `test_creation_functions.py`. Testing `asarray()` works with scalars (and
+ nested sequences of scalars) is fundamental to testing that it works with
+ arrays, as said arrays can only be generated by passing scalar sequences to
+ `asarray()`.
+
+* Indexing methods
+ ([`__getitem__()`](https://data-apis.org/array-api/latest/API_specification/generated/signatures.array_object.array.__getitem__.html)
+ and
+ [`__setitem__()`](https://data-apis.org/array-api/latest/API_specification/generated/signatures.array_object.array.__setitem__.html)),
+  which each have both a test for non-array indices and a test for
+ boolean array indices. This is because [masking is
+ opt-in](https://data-apis.org/array-api/latest/API_specification/indexing.html#boolean-array-indexing)
+ (and boolean arrays need to be generated by indexing arrays anyway).
diff --git a/array_api_tests/hypothesis_helpers.py b/array_api_tests/hypothesis_helpers.py
index 38771225..7e436082 100644
--- a/array_api_tests/hypothesis_helpers.py
+++ b/array_api_tests/hypothesis_helpers.py
@@ -158,10 +158,21 @@ def matrix_shapes(draw, stack_shapes=shapes()):
square_matrix_shapes = matrix_shapes().filter(lambda shape: shape[-1] == shape[-2])
-finite_matrices = xps.arrays(dtype=xps.floating_dtypes(),
- shape=matrix_shapes(),
- elements=dict(allow_nan=False,
- allow_infinity=False))
+@composite
+def finite_matrices(draw, shape=matrix_shapes()):
+ return draw(xps.arrays(dtype=xps.floating_dtypes(),
+ shape=shape,
+ elements=dict(allow_nan=False,
+ allow_infinity=False)))
+
+rtol_shared_matrix_shapes = shared(matrix_shapes())
+# Should we set a max_value here?
+_rtol_float_kw = dict(allow_nan=False, allow_infinity=False, min_value=0)
+rtols = one_of(floats(**_rtol_float_kw),
+ xps.arrays(dtype=xps.floating_dtypes(),
+ shape=rtol_shared_matrix_shapes.map(lambda shape: shape[:-2]),
+ elements=_rtol_float_kw))
+
def mutually_broadcastable_shapes(
num_shapes: int,
diff --git a/array_api_tests/meta/test_pytest_helpers.py b/array_api_tests/meta/test_pytest_helpers.py
index 21da2264..117e2b11 100644
--- a/array_api_tests/meta/test_pytest_helpers.py
+++ b/array_api_tests/meta/test_pytest_helpers.py
@@ -1,7 +1,7 @@
from pytest import raises
-from .. import pytest_helpers as ph
from .. import _array_module as xp
+from .. import pytest_helpers as ph
def test_assert_dtype():
@@ -11,3 +11,12 @@ def test_assert_dtype():
ph.assert_dtype("bool_func", [xp.uint8, xp.int8], xp.bool, xp.bool)
ph.assert_dtype("single_promoted_func", [xp.uint8], xp.uint8)
ph.assert_dtype("single_bool_func", [xp.uint8], xp.bool, xp.bool)
+
+
+def test_assert_array():
+ ph.assert_array("int zeros", xp.asarray(0), xp.asarray(0))
+ ph.assert_array("pos zeros", xp.asarray(0.0), xp.asarray(0.0))
+ with raises(AssertionError):
+ ph.assert_array("mixed sign zeros", xp.asarray(0.0), xp.asarray(-0.0))
+ with raises(AssertionError):
+ ph.assert_array("mixed sign zeros", xp.asarray(-0.0), xp.asarray(0.0))
diff --git a/array_api_tests/pytest_helpers.py b/array_api_tests/pytest_helpers.py
index 9a5ffbb2..989b486f 100644
--- a/array_api_tests/pytest_helpers.py
+++ b/array_api_tests/pytest_helpers.py
@@ -14,6 +14,8 @@
"doesnt_raise",
"nargs",
"fmt_kw",
+ "is_pos_zero",
+ "is_neg_zero",
"assert_dtype",
"assert_kw_dtype",
"assert_default_float",
@@ -22,6 +24,7 @@
"assert_shape",
"assert_result_shape",
"assert_keepdimable_shape",
+ "assert_0d_equals",
"assert_fill",
"assert_array",
]
@@ -69,6 +72,14 @@ def fmt_kw(kw: Dict[str, Any]) -> str:
return ", ".join(f"{k}={v}" for k, v in kw.items())
+def is_pos_zero(n: float) -> bool:
+ return n == 0 and math.copysign(1, n) == 1
+
+
+def is_neg_zero(n: float) -> bool:
+ return n == 0 and math.copysign(1, n) == -1
+
+
def assert_dtype(
func_name: str,
in_dtype: Union[DataType, Sequence[DataType]],
@@ -232,15 +243,28 @@ def assert_fill(
def assert_array(func_name: str, out: Array, expected: Array, /, **kw):
assert_dtype(func_name, out.dtype, expected.dtype)
assert_shape(func_name, out.shape, expected.shape, **kw)
- msg = f"out not as expected [{func_name}({fmt_kw(kw)})]\n{out=}\n{expected=}"
+ f_func = f"[{func_name}({fmt_kw(kw)})]"
if dh.is_float_dtype(out.dtype):
- neg_zeros = expected == -0.0
- assert xp.all((out == -0.0) == neg_zeros), msg
- pos_zeros = expected == +0.0
- assert xp.all((out == +0.0) == pos_zeros), msg
- nans = xp.isnan(expected)
- assert xp.all(xp.isnan(out) == nans), msg
- mask = ~(neg_zeros | pos_zeros | nans)
- assert xp.all(out[mask] == expected[mask]), msg
+ for idx in sh.ndindex(out.shape):
+ at_out = out[idx]
+ at_expected = expected[idx]
+ msg = (
+ f"{sh.fmt_idx('out', idx)}={at_out}, should be {at_expected} "
+ f"{f_func}"
+ )
+ if xp.isnan(at_expected):
+ assert xp.isnan(at_out), msg
+ elif at_expected == 0.0 or at_expected == -0.0:
+ scalar_at_expected = float(at_expected)
+ scalar_at_out = float(at_out)
+ if is_pos_zero(scalar_at_expected):
+ assert is_pos_zero(scalar_at_out), msg
+ else:
+ assert is_neg_zero(scalar_at_expected) # sanity check
+ assert is_neg_zero(scalar_at_out), msg
+ else:
+ assert at_out == at_expected, msg
else:
- assert xp.all(out == expected), msg
+ assert xp.all(out == expected), (
+ f"out not as expected {f_func}\n" f"{out=}\n{expected=}"
+ )
diff --git a/array_api_tests/test_creation_functions.py b/array_api_tests/test_creation_functions.py
index a81339d0..583eda76 100644
--- a/array_api_tests/test_creation_functions.py
+++ b/array_api_tests/test_creation_functions.py
@@ -280,7 +280,7 @@ def test_asarray_arrays(x, data):
if copy:
assert not xp.all(
out == x
- ), "xp.all(out == x)=True, but should be False after x was mutated\n{out=}"
+ ), f"xp.all(out == x)=True, but should be False after x was mutated\n{out=}"
elif copy is False:
pass # TODO
diff --git a/array_api_tests/test_linalg.py b/array_api_tests/test_linalg.py
index 764d0df4..be4a22ca 100644
--- a/array_api_tests/test_linalg.py
+++ b/array_api_tests/test_linalg.py
@@ -16,7 +16,7 @@
import pytest
from hypothesis import assume, given
from hypothesis.strategies import (booleans, composite, none, tuples, integers,
- shared, sampled_from, data, just)
+ shared, sampled_from, one_of, data, just)
from ndindex import iter_indices
from .array_helpers import assert_exactly_equal, asarray
@@ -26,7 +26,8 @@
invertible_matrices, two_mutual_arrays,
mutually_promotable_dtypes, one_d_shapes,
two_mutually_broadcastable_shapes,
- SQRT_MAX_ARRAY_SIZE, finite_matrices)
+ SQRT_MAX_ARRAY_SIZE, finite_matrices,
+ rtol_shared_matrix_shapes, rtols)
from . import dtype_helpers as dh
from . import pytest_helpers as ph
from . import shape_helpers as sh
@@ -37,18 +38,17 @@
pytestmark = pytest.mark.ci
-
-
# Standin strategy for not yet implemented tests
todo = none()
-def _test_stacks(f, *args, res=None, dims=2, true_val=None, matrix_axes=(-2, -1),
+def _test_stacks(f, *args, res=None, dims=2, true_val=None,
+ matrix_axes=(-2, -1),
assert_equal=assert_exactly_equal, **kw):
"""
Test that f(*args, **kw) maps across stacks of matrices
- dims is the number of dimensions f(*args) should have for a single n x m
- matrix stack.
+    dims is the number of dimensions f(*args, **kw) should have for a single
+    n x m matrix stack.
matrix_axes are the axes along which matrices (or vectors) are stacked in
the input.
@@ -65,9 +65,13 @@ def _test_stacks(f, *args, res=None, dims=2, true_val=None, matrix_axes=(-2, -1)
shapes = [x.shape for x in args]
+ # Assume the result is stacked along the last 'dims' axes of matrix_axes.
+ # This holds for all the functions tested in this file
+ res_axes = matrix_axes[::-1][:dims]
+
for (x_idxes, (res_idx,)) in zip(
iter_indices(*shapes, skip_axes=matrix_axes),
- iter_indices(res.shape, skip_axes=tuple(range(-dims, 0)))):
+ iter_indices(res.shape, skip_axes=res_axes)):
x_idxes = [x_idx.raw for x_idx in x_idxes]
res_idx = res_idx.raw
@@ -159,26 +163,18 @@ def test_cross(x1_x2_kw):
assert res.dtype == dh.result_type(x1.dtype, x2.dtype), "cross() did not return the correct dtype"
assert res.shape == shape, "cross() did not return the correct shape"
- # cross is too different from other functions to use _test_stacks, and it
- # is the only function that works the way it does, so it's not really
- # worth generalizing _test_stacks to handle it.
- a = axis if axis >= 0 else axis + len(shape)
- for _idx in sh.ndindex(shape[:a] + shape[a+1:]):
- idx = _idx[:a] + (slice(None),) + _idx[a:]
- assert len(idx) == len(shape), "Invalid index. This indicates a bug in the test suite."
- res_stack = res[idx]
- x1_stack = x1[idx]
- x2_stack = x2[idx]
- assert x1_stack.shape == x2_stack.shape == (3,), "Invalid cross() stack shapes. This indicates a bug in the test suite."
- decomp_res_stack = linalg.cross(x1_stack, x2_stack)
- assert_exactly_equal(res_stack, decomp_res_stack)
-
- exact_cross = asarray([
- x1_stack[1]*x2_stack[2] - x1_stack[2]*x2_stack[1],
- x1_stack[2]*x2_stack[0] - x1_stack[0]*x2_stack[2],
- x1_stack[0]*x2_stack[1] - x1_stack[1]*x2_stack[0],
- ], dtype=res.dtype)
- assert_exactly_equal(res_stack, exact_cross)
+ def exact_cross(a, b):
+ assert a.shape == b.shape == (3,), "Invalid cross() stack shapes. This indicates a bug in the test suite."
+ return asarray([
+ a[1]*b[2] - a[2]*b[1],
+ a[2]*b[0] - a[0]*b[2],
+ a[0]*b[1] - a[1]*b[0],
+ ], dtype=res.dtype)
+
+ # We don't want to pass in **kw here because that would pass axis to
+ # cross() on a single stack, but the axis is not meaningful on unstacked
+ # vectors.
+ _test_stacks(linalg.cross, x1, x2, dims=1, matrix_axes=(axis,), res=res, true_val=exact_cross)
@pytest.mark.xp_extension('linalg')
@given(
@@ -313,14 +309,30 @@ def test_matmul(x1, x2):
assert res.shape == stack_shape + (x1.shape[-2], x2.shape[-1])
_test_stacks(_array_module.matmul, x1, x2, res=res)
+matrix_norm_shapes = shared(matrix_shapes())
+
@pytest.mark.xp_extension('linalg')
@given(
- x=xps.arrays(dtype=xps.floating_dtypes(), shape=shapes()),
- kw=kwargs(axis=todo, keepdims=todo, ord=todo)
+ x=finite_matrices(),
+ kw=kwargs(keepdims=booleans(),
+              ord=sampled_from([-float('inf'), -2, -1, 1, 2, float('inf'), 'fro', 'nuc']))
)
def test_matrix_norm(x, kw):
- # res = linalg.matrix_norm(x, **kw)
- pass
+ res = linalg.matrix_norm(x, **kw)
+
+ keepdims = kw.get('keepdims', False)
+ # TODO: Check that the ord values give the correct norms.
+ # ord = kw.get('ord', 'fro')
+
+ if keepdims:
+ expected_shape = x.shape[:-2] + (1, 1)
+ else:
+ expected_shape = x.shape[:-2]
+ assert res.shape == expected_shape, f"matrix_norm({keepdims=}) did not return the correct shape"
+ assert res.dtype == x.dtype, "matrix_norm() did not return the correct dtype"
+
+ _test_stacks(linalg.matrix_norm, x, **kw, dims=2 if keepdims else 0,
+ res=res)
matrix_power_n = shared(integers(-1000, 1000), key='matrix_power n')
@pytest.mark.xp_extension('linalg')
@@ -347,12 +359,11 @@ def test_matrix_power(x, n):
@pytest.mark.xp_extension('linalg')
@given(
- x=xps.arrays(dtype=xps.floating_dtypes(), shape=shapes()),
- kw=kwargs(rtol=todo)
+ x=finite_matrices(shape=rtol_shared_matrix_shapes),
+ kw=kwargs(rtol=rtols)
)
def test_matrix_rank(x, kw):
- # res = linalg.matrix_rank(x, **kw)
- pass
+ linalg.matrix_rank(x, **kw)
@given(
x=xps.arrays(dtype=dtypes, shape=matrix_shapes()),
@@ -397,12 +408,11 @@ def test_outer(x1, x2):
@pytest.mark.xp_extension('linalg')
@given(
- x=xps.arrays(dtype=xps.floating_dtypes(), shape=shapes()),
- kw=kwargs(rtol=todo)
+ x=finite_matrices(shape=rtol_shared_matrix_shapes),
+ kw=kwargs(rtol=rtols)
)
def test_pinv(x, kw):
- # res = linalg.pinv(x, **kw)
- pass
+ linalg.pinv(x, **kw)
@pytest.mark.xp_extension('linalg')
@given(
@@ -482,7 +492,7 @@ def solve_args():
Strategy for the x1 and x2 arguments to test_solve()
solve() takes x1, x2, where x1 is any stack of square invertible matrices
- of shape (..., M, M), and x2 is either shape (..., M) or (..., M, K),
+ of shape (..., M, M), and x2 is either shape (M,) or (..., M, K),
where the ... parts of x1 and x2 are broadcast compatible.
"""
stack_shapes = shared(two_mutually_broadcastable_shapes)
@@ -492,30 +502,22 @@ def solve_args():
pair[0])))
@composite
- def x2_shapes(draw):
- end = draw(xps.array_shapes(min_dims=0, max_dims=1, min_side=0,
- max_side=SQRT_MAX_ARRAY_SIZE))
- return draw(stack_shapes)[1] + draw(x1).shape[-1:] + end
+ def _x2_shapes(draw):
+ end = draw(integers(0, SQRT_MAX_ARRAY_SIZE))
+ return draw(stack_shapes)[1] + draw(x1).shape[-1:] + (end,)
- x2 = xps.arrays(dtype=xps.floating_dtypes(), shape=x2_shapes())
+ x2_shapes = one_of(x1.map(lambda x: (x.shape[-1],)), _x2_shapes())
+ x2 = xps.arrays(dtype=xps.floating_dtypes(), shape=x2_shapes)
return x1, x2
@pytest.mark.xp_extension('linalg')
@given(*solve_args())
def test_solve(x1, x2):
- # TODO: solve() is currently ambiguous, in that some inputs can be
- # interpreted in two different ways. For example, if x1 is shape (2, 2, 2)
- # and x2 is shape (2, 2), should this be interpreted as x2 is (2,) stack
- # of a (2,) vector, i.e., the result would be (2, 2, 2, 1) after
- # broadcasting, or as a single stack of a 2x2 matrix, i.e., resulting in
- # (2, 2, 2, 2).
-
- # res = linalg.solve(x1, x2)
- pass
+ linalg.solve(x1, x2)
@pytest.mark.xp_extension('linalg')
@given(
- x=finite_matrices,
+ x=finite_matrices(),
kw=kwargs(full_matrices=booleans())
)
def test_svd(x, kw):
@@ -551,7 +553,7 @@ def test_svd(x, kw):
@pytest.mark.xp_extension('linalg')
@given(
- x=finite_matrices,
+ x=finite_matrices(),
)
def test_svdvals(x):
res = linalg.svdvals(x)
diff --git a/array_api_tests/test_operators_and_elementwise_functions.py b/array_api_tests/test_operators_and_elementwise_functions.py
index 6947c061..2c9da2b9 100644
--- a/array_api_tests/test_operators_and_elementwise_functions.py
+++ b/array_api_tests/test_operators_and_elementwise_functions.py
@@ -1,7 +1,7 @@
import math
import operator
from enum import Enum, auto
-from typing import Callable, List, NamedTuple, Optional, TypeVar, Union
+from typing import Callable, List, NamedTuple, Optional, Sequence, TypeVar, Union
import pytest
from hypothesis import assume, given
@@ -37,7 +37,7 @@ class OnewayPromotableDtypes(NamedTuple):
@st.composite
def oneway_promotable_dtypes(
- draw, dtypes: List[DataType]
+ draw, dtypes: Sequence[DataType]
) -> st.SearchStrategy[OnewayPromotableDtypes]:
"""Return a strategy for input dtypes that promote to result dtypes."""
d1, d2 = draw(hh.mutually_promotable_dtypes(dtypes=dtypes))
@@ -123,7 +123,10 @@ def default_filter(s: Scalar) -> bool:
Used by default as these values are typically special-cased.
"""
- return math.isfinite(s) and s is not -0.0 and s is not +0.0
+ if isinstance(s, int): # note bools are ints
+ return True
+ else:
+ return math.isfinite(s) and s != 0
T = TypeVar("T")
@@ -346,7 +349,7 @@ def __repr__(self):
def make_binary_params(
- elwise_func_name: str, dtypes: List[DataType]
+ elwise_func_name: str, dtypes: Sequence[DataType]
) -> List[Param[BinaryParamContext]]:
if hh.FILTER_UNDEFINED_DTYPES:
dtypes = [d for d in dtypes if not isinstance(d, xp._UndefinedStub)]
@@ -538,7 +541,7 @@ def test_abs(ctx, data):
abs, # type: ignore
expr_template="abs({})={}",
filter_=lambda s: (
- s == float("infinity") or (math.isfinite(s) and s is not -0.0)
+ s == float("infinity") or (math.isfinite(s) and not ph.is_neg_zero(s))
),
)
@@ -1143,9 +1146,7 @@ def test_pow(ctx, data):
binary_param_assert_dtype(ctx, left, right, res)
binary_param_assert_shape(ctx, left, right, res)
- binary_param_assert_against_refimpl(
- ctx, left, right, res, "**", math.pow, strict_check=False
- )
+ # Values testing pow is too finicky
@pytest.mark.parametrize("ctx", make_binary_params("remainder", dh.numeric_dtypes))
diff --git a/array_api_tests/test_searching_functions.py b/array_api_tests/test_searching_functions.py
index 01c26d0c..e679db73 100644
--- a/array_api_tests/test_searching_functions.py
+++ b/array_api_tests/test_searching_functions.py
@@ -15,7 +15,7 @@
@given(
x=xps.arrays(
dtype=xps.numeric_dtypes(),
- shape=hh.shapes(min_side=1),
+ shape=hh.shapes(min_dims=1, min_side=1),
elements={"allow_nan": False},
),
data=st.data(),
@@ -50,7 +50,7 @@ def test_argmax(x, data):
@given(
x=xps.arrays(
dtype=xps.numeric_dtypes(),
- shape=hh.shapes(min_side=1),
+ shape=hh.shapes(min_dims=1, min_side=1),
elements={"allow_nan": False},
),
data=st.data(),
@@ -82,7 +82,7 @@ def test_argmin(x, data):
ph.assert_scalar_equals("argmin", int, out_idx, min_i, expected)
-# TODO: skip if opted out
+@pytest.mark.data_dependent_shapes
@given(xps.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes(min_side=1)))
def test_nonzero(x):
out = xp.nonzero(x)
diff --git a/array_api_tests/test_set_functions.py b/array_api_tests/test_set_functions.py
index 5ceceb54..5bae6147 100644
--- a/array_api_tests/test_set_functions.py
+++ b/array_api_tests/test_set_functions.py
@@ -12,7 +12,7 @@
from . import shape_helpers as sh
from . import xps
-pytestmark = pytest.mark.ci
+pytestmark = [pytest.mark.ci, pytest.mark.data_dependent_shapes]
@given(xps.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes(min_side=1)))
diff --git a/conftest.py b/conftest.py
index 2af3fef1..9fec536b 100644
--- a/conftest.py
+++ b/conftest.py
@@ -35,6 +35,13 @@ def pytest_addoption(parser):
default=[],
help="disable testing for Array API extension(s)",
)
+ # data-dependent shape
+ parser.addoption(
+ "--disable-data-dependent-shapes",
+ "--disable-dds",
+ action="store_true",
+ help="disable testing functions with output shapes dependent on input",
+ )
# CI
parser.addoption(
"--ci",
@@ -47,6 +54,9 @@ def pytest_configure(config):
config.addinivalue_line(
"markers", "xp_extension(ext): tests an Array API extension"
)
+ config.addinivalue_line(
+ "markers", "data_dependent_shapes: output shapes are dependent on inputs"
+ )
config.addinivalue_line("markers", "ci: primary test")
# Hypothesis
hypothesis_max_examples = config.getoption("--hypothesis-max-examples")
@@ -83,9 +93,15 @@ def xp_has_ext(ext: str) -> bool:
def pytest_collection_modifyitems(config, items):
disabled_exts = config.getoption("--disable-extension")
+ disabled_dds = config.getoption("--disable-data-dependent-shapes")
ci = config.getoption("--ci")
for item in items:
markers = list(item.iter_markers())
+ # skip if specified in skips.txt
+ for id_ in skip_ids:
+ if item.nodeid.startswith(id_):
+ item.add_marker(mark.skip(reason="skips.txt"))
+ break
# skip if disabled or non-existent extension
ext_mark = next((m for m in markers if m.name == "xp_extension"), None)
if ext_mark is not None:
@@ -96,11 +112,14 @@ def pytest_collection_modifyitems(config, items):
)
elif not xp_has_ext(ext):
item.add_marker(mark.skip(reason=f"{ext} not found in array module"))
- # skip if specified in skips.txt
- for id_ in skip_ids:
- if item.nodeid.startswith(id_):
- item.add_marker(mark.skip(reason="skips.txt"))
- break
+ # skip if disabled by dds flag
+ if disabled_dds:
+ for m in markers:
+ if m.name == "data_dependent_shapes":
+ item.add_marker(
+ mark.skip(reason="disabled via --disable-data-dependent-shapes")
+ )
+ break
# skip if test not appropiate for CI
if ci:
ci_mark = next((m for m in markers if m.name == "ci"), None)