Skip to content

Fixes asarray for the Array API #25

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
2 commits merged into the base branch on
Jun 30, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 32 additions & 6 deletions _unittests/ut_array_api/test_hypothesis_array_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import warnings
from os import getenv
from functools import reduce
import numpy as np
from operator import mul
from hypothesis import given
from onnx_array_api.ext_test_case import ExtTestCase
Expand Down Expand Up @@ -89,24 +90,49 @@ def test_scalar_strategies(self):

args_np = []

xx = self.xps.arrays(dtype=dtypes["integer_dtypes"], shape=shapes(self.xps))
kws = array_api_kwargs(dtype=strategies.none() | self.xps.scalar_dtypes())

@given(
x=self.xps.arrays(dtype=dtypes["integer_dtypes"], shape=shapes(self.xps)),
kw=array_api_kwargs(dtype=strategies.none() | self.xps.scalar_dtypes()),
x=xx,
kw=kws,
)
def fct(x, kw):
def fctnp(x, kw):
asa1 = np.asarray(x)
asa2 = np.asarray(x, **kw)
self.assertEqual(asa1.shape, asa2.shape)
args_np.append((x, kw))

fct()
fctnp()
self.assertEqual(len(args_np), 100)

args_onxp = []

xshape = shapes(self.onxps)
xx = self.onxps.arrays(dtype=dtypes_onnx["integer_dtypes"], shape=xshape)
kw = array_api_kwargs(dtype=strategies.none() | self.onxps.scalar_dtypes())
kws = array_api_kwargs(dtype=strategies.none() | self.onxps.scalar_dtypes())

@given(x=xx, kw=kw)
@given(x=xx, kw=kws)
def fctonx(x, kw):
asa = np.asarray(x.numpy())
try:
asp = onxp.asarray(x)
except Exception as e:
raise AssertionError(f"asarray fails with x={x!r}, asp={asa!r}.") from e
try:
self.assertEqualArray(asa, asp.numpy())
except AssertionError as e:
raise AssertionError(
f"x={x!r} kw={kw!r} asa={asa!r}, asp={asp!r}"
) from e
if kw:
try:
asp2 = onxp.asarray(x, **kw)
except Exception as e:
raise AssertionError(
f"asarray fails with x={x!r}, kw={kw!r}, asp={asa!r}."
) from e
self.assertEqual(asp.shape, asp2.shape)
args_onxp.append((x, kw))

fctonx()
Expand Down
9 changes: 9 additions & 0 deletions onnx_array_api/array_api/_onnx_common.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,10 @@
from typing import Any, Optional
import warnings
import numpy as np

with warnings.catch_warnings():
warnings.simplefilter("ignore")
from numpy.array_api._array_object import Array
from ..npx.npx_types import (
DType,
ElemType,
Expand Down Expand Up @@ -77,6 +82,10 @@ def asarray(
v = TEagerTensor(np.array(a, dtype=np.str_))
elif isinstance(a, list):
v = TEagerTensor(np.array(a))
elif isinstance(a, np.ndarray):
v = TEagerTensor(a)
elif isinstance(a, Array):
v = TEagerTensor(np.asarray(a))
else:
raise RuntimeError(f"Unexpected type {type(a)} for the first input.")
if dtype is not None:
Expand Down
29 changes: 24 additions & 5 deletions onnx_array_api/npx/npx_numpy_tensors.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import warnings
from typing import Any, Callable, List, Optional, Tuple
import numpy as np
from onnx import ModelProto, TensorProto
Expand Down Expand Up @@ -221,13 +222,18 @@ def __bool__(self):
if self.shape == (0,):
return False
if len(self.shape) != 0:
raise ValueError(
f"Conversion to bool only works for scalar, not for {self!r}."
warnings.warn(
f"Conversion to bool only works for scalar, not for {self!r}, "
f"bool(...)={bool(self._tensor)}."
)
try:
return bool(self._tensor)
except ValueError as e:
raise ValueError(f"Unable to convert {self} to bool.") from e
return bool(self._tensor)

def __int__(self):
"Implicit conversion to bool."
"Implicit conversion to int."
if len(self.shape) != 0:
raise ValueError(
f"Conversion to bool only works for scalar, not for {self!r}."
Expand All @@ -249,7 +255,7 @@ def __int__(self):
return int(self._tensor)

def __float__(self):
"Implicit conversion to bool."
"Implicit conversion to float."
if len(self.shape) != 0:
raise ValueError(
f"Conversion to bool only works for scalar, not for {self!r}."
Expand All @@ -261,11 +267,24 @@ def __float__(self):
DType(TensorProto.BFLOAT16),
}:
raise TypeError(
f"Conversion to int only works for float scalar, "
f"Conversion to float only works for float scalar, "
f"not for dtype={self.dtype}."
)
return float(self._tensor)

def __iter__(self):
"""
The :epkg:`Array API` does not define this function (2022/12).
This method raises an exception with a better error message.
"""
warnings.warn(
f"Iterators are not implemented in the generic case. "
f"Every function using them cannot be converted into ONNX "
f"(tensors - {type(self)})."
)
for row in self._tensor:
yield self.__class__(row)


class JitNumpyTensor(NumpyTensor, JitTensor):
"""
Expand Down
5 changes: 3 additions & 2 deletions onnx_array_api/npx/npx_tensors.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,9 @@ def __iter__(self):
This method raises an exception with a better error message.
"""
raise ArrayApiError(
"Iterators are not implemented in the generic case. "
"Every function using them cannot be converted into ONNX."
f"Iterators are not implemented in the generic case. "
f"Every function using them cannot be converted into ONNX "
f"(tensors - {type(self)})."
)

@staticmethod
Expand Down
6 changes: 5 additions & 1 deletion onnx_array_api/npx/npx_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,12 +59,16 @@ def __eq__(self, dt: "DType") -> bool:
return False
if dt.__class__ is DType:
return self.code_ == dt.code_
if isinstance(dt, (int, bool, str)):
if isinstance(dt, (int, bool, str, float)):
return False
if dt is int:
return self.code_ == TensorProto.INT64
if dt is str:
return self.code_ == TensorProto.STRING
if dt is bool:
return self.code_ == TensorProto.BOOL
if dt is float:
return self.code_ == TensorProto.FLOAT64
if isinstance(dt, list):
return False
if dt in ElemType.numpy_map:
Expand Down
5 changes: 3 additions & 2 deletions onnx_array_api/npx/npx_var.py
Original file line number Diff line number Diff line change
Expand Up @@ -607,8 +607,9 @@ def __iter__(self):
This method raises an exception with a better error message.
"""
raise ArrayApiError(
"Iterators are not implemented in the generic case. "
"Every function using them cannot be converted into ONNX."
f"Iterators are not implemented in the generic case. "
f"Every function using them cannot be converted into ONNX "
f"(Var - {type(self)})."
)

def _binary_op(self, ov: "Var", op_name: str, **kwargs) -> "Var":
Expand Down
2 changes: 1 addition & 1 deletion requirements-dev.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ black
coverage
flake8
furo
hypothesis<6.80.0
hypothesis
isort
joblib
lightgbm
Expand Down