diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..e354eaae --- /dev/null +++ b/.flake8 @@ -0,0 +1,2 @@ +[flake8] +select = F diff --git a/README.md b/README.md index acbbb291..928a6771 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ This is the test suite for array libraries adopting the [Python Array API standard](https://data-apis.org/array-api/latest). -Keeping full coverage of the spec is an on-going priority as the Array API evolves. +Keeping full coverage of the spec is an ongoing priority as the Array API evolves. Feedback and contributions are welcome! ## Quickstart @@ -285,6 +285,19 @@ values should result in more rigorous runs. For example, `--max-examples 10_000` may find bugs where default runs don't but will take much longer to run. +#### Skipping Dtypes + +The test suite will automatically skip testing of inessential dtypes if they +are not present on the array module namespace, but dtypes can also be skipped +manually by setting the environment variable `ARRAY_API_TESTS_SKIP_DTYPES` to +a comma-separated list of dtypes to skip. For example: + +``` +ARRAY_API_TESTS_SKIP_DTYPES=uint16,uint32,uint64 pytest array_api_tests/ +``` + +Note that skipping certain essential dtypes such as `bool` and the default +floating-point dtype is not supported. ## Contributing diff --git a/array_api_tests/dtype_helpers.py b/array_api_tests/dtype_helpers.py index 59edfe86..9ef50705 100644 --- a/array_api_tests/dtype_helpers.py +++ b/array_api_tests/dtype_helpers.py @@ -1,3 +1,4 @@ +import os import re from collections import defaultdict from collections.abc import Mapping @@ -104,9 +105,18 @@ def __repr__(self): numeric_names = real_names + complex_names dtype_names = ("bool",) + numeric_names +_skip_dtypes = os.getenv("ARRAY_API_TESTS_SKIP_DTYPES", '') +_skip_dtypes = _skip_dtypes.split(',') +skip_dtypes = [] +for dtype in _skip_dtypes: + if dtype and dtype not in dtype_names: + raise ValueError(f"Invalid dtype name in ARRAY_API_TESTS_SKIP_DTYPES: {dtype}") + skip_dtypes.append(dtype) _name_to_dtype = {} for name in dtype_names: + if name in skip_dtypes: + continue try: dtype = getattr(xp, name) except AttributeError: @@ -184,9 +194,9 @@ def _make_dtype_mapping_from_names(mapping: Dict[str, Any]) -> EqualityMapping: dtype_value_pairs = [] for name, value in mapping.items(): assert isinstance(name, str) and name in dtype_names # sanity check - try: - dtype = getattr(xp, name) - except AttributeError: + if name in _name_to_dtype: + dtype = _name_to_dtype[name] + else: continue dtype_value_pairs.append((dtype, value)) return EqualityMapping(dtype_value_pairs) @@ -313,9 +323,9 @@ def accumulation_result_dtype(x_dtype, dtype_kwarg): else: default_complex = None if dtype_nbits[default_int] == 32: - default_uint = getattr(xp, "uint32", None) + default_uint = _name_to_dtype.get("uint32") else: - default_uint = getattr(xp, "uint64", None) + default_uint = _name_to_dtype.get("uint64") _promotion_table: Dict[Tuple[str, str], str] = { ("bool", "bool"): "bool", @@ -366,18 +376,12 @@ def accumulation_result_dtype(x_dtype, dtype_kwarg): _promotion_table.update({(d2, d1): res for (d1, d2), res in _promotion_table.items()}) _promotion_table_pairs: List[Tuple[Tuple[DataType, DataType], DataType]] = [] for (in_name1, in_name2), res_name in _promotion_table.items(): - try: - in_dtype1 = getattr(xp, in_name1) - except AttributeError: - continue - try: - in_dtype2 = getattr(xp, in_name2) - except AttributeError: - continue - try: - res_dtype = getattr(xp, res_name) - except AttributeError: +
if in_name1 not in _name_to_dtype or in_name2 not in _name_to_dtype or res_name not in _name_to_dtype: continue + in_dtype1 = _name_to_dtype[in_name1] + in_dtype2 = _name_to_dtype[in_name2] + res_dtype = _name_to_dtype[res_name] + _promotion_table_pairs.append(((in_dtype1, in_dtype2), res_dtype)) promotion_table = EqualityMapping(_promotion_table_pairs) diff --git a/array_api_tests/hypothesis_helpers.py b/array_api_tests/hypothesis_helpers.py index 60c683c4..7377274b 100644 --- a/array_api_tests/hypothesis_helpers.py +++ b/array_api_tests/hypothesis_helpers.py @@ -174,10 +174,24 @@ def oneway_broadcastable_shapes(draw) -> OnewayBroadcastableShapes: return OnewayBroadcastableShapes(input_shape, result_shape) +# Use these instead of xps.scalar_dtypes, etc. because it skips dtypes from +# ARRAY_API_TESTS_SKIP_DTYPES +all_dtypes = sampled_from(_sorted_dtypes) +int_dtypes = sampled_from(dh.int_dtypes) +uint_dtypes = sampled_from(dh.uint_dtypes) +real_dtypes = sampled_from(dh.real_dtypes) +# Warning: The hypothesis "floating_dtypes" is what we call +# "real_floating_dtypes" +floating_dtypes = sampled_from(dh.all_float_dtypes) +real_floating_dtypes = sampled_from(dh.real_float_dtypes) +numeric_dtypes = sampled_from(dh.numeric_dtypes) +# Note: this always returns complex dtypes, even if api_version < 2022.12 +complex_dtypes = sampled_from(dh.complex_dtypes) + def all_floating_dtypes() -> SearchStrategy[DataType]: - strat = xps.floating_dtypes() + strat = floating_dtypes if api_version >= "2022.12": - strat |= xps.complex_dtypes() + strat |= complex_dtypes return strat @@ -236,7 +250,7 @@ def matrix_shapes(draw, stack_shapes=shapes()): @composite def finite_matrices(draw, shape=matrix_shapes()): - return draw(arrays(dtype=xps.floating_dtypes(), + return draw(arrays(dtype=floating_dtypes, shape=shape, elements=dict(allow_nan=False, allow_infinity=False))) @@ -245,7 +259,7 @@ def finite_matrices(draw, shape=matrix_shapes()): # Should we set a max_value here? _rtol_float_kw = dict(allow_nan=False, allow_infinity=False, min_value=0) rtols = one_of(floats(**_rtol_float_kw), - arrays(dtype=xps.floating_dtypes(), + arrays(dtype=real_floating_dtypes, shape=rtol_shared_matrix_shapes.map(lambda shape: shape[:-2]), elements=_rtol_float_kw)) @@ -280,9 +294,9 @@ def mutually_broadcastable_shapes( two_mutually_broadcastable_shapes = mutually_broadcastable_shapes(2) -# Note: This should become hermitian_matrices when complex dtypes are added +# TODO: Add support for complex Hermitian matrices @composite -def symmetric_matrices(draw, dtypes=xps.floating_dtypes(), finite=True, bound=10.): +def symmetric_matrices(draw, dtypes=real_floating_dtypes, finite=True, bound=10.): shape = draw(square_matrix_shapes) dtype = draw(dtypes) if not isinstance(finite, bool): @@ -297,7 +311,7 @@ def symmetric_matrices(draw, dtypes=xps.floating_dtypes(), finite=True, bound=10 return H @composite -def positive_definite_matrices(draw, dtypes=xps.floating_dtypes()): +def positive_definite_matrices(draw, dtypes=floating_dtypes): # For now just generate stacks of identity matrices # TODO: Generate arbitrary positive definite matrices, for instance, by # using something like @@ -310,7 +324,7 @@ def positive_definite_matrices(draw, dtypes=xps.floating_dtypes()): return broadcast_to(eye(n, dtype=dtype), shape) @composite -def invertible_matrices(draw, dtypes=xps.floating_dtypes(), stack_shapes=shapes()): +def invertible_matrices(draw, dtypes=floating_dtypes, stack_shapes=shapes()): # For now, just generate stacks of diagonal matrices. 
stack_shape = draw(stack_shapes) n = draw(integers(0, SQRT_MAX_ARRAY_SIZE // max(math.prod(stack_shape), 1)),) @@ -344,7 +358,7 @@ def two_broadcastable_shapes(draw): sqrt_sizes = integers(0, SQRT_MAX_ARRAY_SIZE) numeric_arrays = arrays( - dtype=shared(xps.floating_dtypes(), key='dtypes'), + dtype=shared(floating_dtypes, key='dtypes'), shape=shared(xps.array_shapes(), key='shapes'), ) @@ -388,7 +402,7 @@ def python_integer_indices(draw, sizes): def integer_indices(draw, sizes): # Return either a Python integer or a 0-D array with some integer dtype idx = draw(python_integer_indices(sizes)) - dtype = draw(xps.integer_dtypes() | xps.unsigned_integer_dtypes()) + dtype = draw(int_dtypes | uint_dtypes) m, M = dh.dtype_ranges[dtype] if m <= idx <= M: return draw(one_of(just(idx), diff --git a/array_api_tests/pytest_helpers.py b/array_api_tests/pytest_helpers.py index 9759822e..15cd8093 100644 --- a/array_api_tests/pytest_helpers.py +++ b/array_api_tests/pytest_helpers.py @@ -137,6 +137,34 @@ def assert_dtype( assert out_dtype == expected, msg +def assert_float_to_complex_dtype( + func_name: str, *, in_dtype: DataType, out_dtype: DataType +): + if in_dtype == xp.float32: + expected = xp.complex64 + else: + assert in_dtype == xp.float64 # sanity check + expected = xp.complex128 + assert_dtype( + func_name, in_dtype=in_dtype, out_dtype=out_dtype, expected=expected + ) + + +def assert_complex_to_float_dtype( + func_name: str, *, in_dtype: DataType, out_dtype: DataType, repr_name: str = "out.dtype" +): + if in_dtype == xp.complex64: + expected = xp.float32 + elif in_dtype == xp.complex128: + expected = xp.float64 + else: + assert in_dtype in (xp.float32, xp.float64) # sanity check + expected = in_dtype + assert_dtype( + func_name, in_dtype=in_dtype, out_dtype=out_dtype, expected=expected, repr_name=repr_name + ) + + def assert_kw_dtype( func_name: str, *, diff --git a/array_api_tests/test_array_object.py b/array_api_tests/test_array_object.py index bc3e7276..a0ee82f6 100644 --- a/array_api_tests/test_array_object.py +++ b/array_api_tests/test_array_object.py @@ -13,7 +13,6 @@ from . import pytest_helpers as ph from . import shape_helpers as sh from . import xps -from . 
import xp as _xp from .typing import DataType, Index, Param, Scalar, ScalarType, Shape @@ -75,7 +74,7 @@ def get_indexed_axes_and_out_shape( return tuple(axes_indices), tuple(out_shape) -@given(shape=hh.shapes(), dtype=xps.scalar_dtypes(), data=st.data()) +@given(shape=hh.shapes(), dtype=hh.all_dtypes, data=st.data()) def test_getitem(shape, dtype, data): zero_sided = any(side == 0 for side in shape) if zero_sided: @@ -157,7 +156,7 @@ def test_setitem(shape, dtypes, data): @pytest.mark.data_dependent_shapes @given(hh.shapes(), st.data()) def test_getitem_masking(shape, data): - x = data.draw(hh.arrays(xps.scalar_dtypes(), shape=shape), label="x") + x = data.draw(hh.arrays(hh.all_dtypes, shape=shape), label="x") mask_shapes = st.one_of( st.sampled_from([x.shape, ()]), st.lists(st.booleans(), min_size=x.ndim, max_size=x.ndim).map( @@ -202,7 +201,7 @@ def test_getitem_masking(shape, data): @pytest.mark.unvectorized @given(hh.shapes(), st.data()) def test_setitem_masking(shape, data): - x = data.draw(hh.arrays(xps.scalar_dtypes(), shape=shape), label="x") + x = data.draw(hh.arrays(hh.all_dtypes, shape=shape), label="x") key = data.draw(hh.arrays(dtype=xp.bool, shape=shape), label="key") value = data.draw( hh.from_dtype(x.dtype) | hh.arrays(dtype=x.dtype, shape=()), label="value" @@ -252,18 +251,14 @@ def make_scalar_casting_param( @pytest.mark.parametrize( - "method_name, dtype_name, stype", - [make_scalar_casting_param("__bool__", "bool", bool)] - + [make_scalar_casting_param("__int__", n, int) for n in dh.all_int_names] - + [make_scalar_casting_param("__index__", n, int) for n in dh.all_int_names] - + [make_scalar_casting_param("__float__", n, float) for n in dh.real_float_names], + "method_name, dtype, stype", + [make_scalar_casting_param("__bool__", xp.bool, bool)] + + [make_scalar_casting_param("__int__", n, int) for n in dh.all_int_dtypes] + + [make_scalar_casting_param("__index__", n, int) for n in dh.all_int_dtypes] + + [make_scalar_casting_param("__float__", n, float) for n in dh.real_float_dtypes], ) @given(data=st.data()) -def test_scalar_casting(method_name, dtype_name, stype, data): - try: - dtype = getattr(_xp, dtype_name) - except AttributeError as e: - pytest.skip(str(e)) +def test_scalar_casting(method_name, dtype, stype, data): x = data.draw(hh.arrays(dtype, shape=()), label="x") method = getattr(x, method_name) out = method() diff --git a/array_api_tests/test_creation_functions.py b/array_api_tests/test_creation_functions.py index 6bee0533..4975a5f9 100644 --- a/array_api_tests/test_creation_functions.py +++ b/array_api_tests/test_creation_functions.py @@ -77,7 +77,7 @@ def reals(min_value=None, max_value=None) -> st.SearchStrategy[Union[int, float] # TODO: support testing complex dtypes -@given(dtype=st.none() | xps.real_dtypes(), data=st.data()) +@given(dtype=st.none() | hh.real_dtypes, data=st.data()) def test_arange(dtype, data): if dtype is None or dh.is_float_dtype(dtype): start = data.draw(reals(), label="start") @@ -198,7 +198,7 @@ def test_arange(dtype, data): @given(shape=hh.shapes(min_side=1), data=st.data()) def test_asarray_scalars(shape, data): kw = data.draw( - hh.kwargs(dtype=st.none() | xps.scalar_dtypes(), copy=st.none()), label="kw" + hh.kwargs(dtype=st.none() | hh.all_dtypes, copy=st.none()), label="kw" ) dtype = kw.get("dtype", None) if dtype is None: @@ -312,7 +312,7 @@ def test_asarray_arrays(shape, dtypes, data): ), f"{f_out}, but should be {value} after x was mutated" -@given(hh.shapes(), hh.kwargs(dtype=st.none() | xps.scalar_dtypes())) 
+@given(hh.shapes(), hh.kwargs(dtype=st.none() | hh.all_dtypes)) def test_empty(shape, kw): out = xp.empty(shape, **kw) if kw.get("dtype", None) is None: @@ -323,8 +323,8 @@ def test_empty(shape, kw): @given( - x=hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes()), - kw=hh.kwargs(dtype=st.none() | xps.scalar_dtypes()), + x=hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes()), + kw=hh.kwargs(dtype=st.none() | hh.all_dtypes), ) def test_empty_like(x, kw): out = xp.empty_like(x, **kw) @@ -340,7 +340,7 @@ def test_empty_like(x, kw): n_cols=st.none() | hh.sqrt_sizes, kw=hh.kwargs( k=st.integers(), - dtype=xps.numeric_dtypes(), + dtype=hh.numeric_dtypes, ), ) def test_eye(n_rows, n_cols, kw): @@ -368,7 +368,7 @@ def test_eye(n_rows, n_cols, kw): default_unsafe_dtypes.append(xp.float64) if dh.default_complex == xp.complex64: default_unsafe_dtypes.append(xp.complex64) -default_safe_dtypes: st.SearchStrategy = xps.scalar_dtypes().filter( +default_safe_dtypes: st.SearchStrategy = hh.all_dtypes.filter( lambda d: d not in default_unsafe_dtypes ) @@ -376,7 +376,7 @@ def test_eye(n_rows, n_cols, kw): @st.composite def full_fill_values(draw) -> Union[bool, int, float, complex]: kw = draw( - st.shared(hh.kwargs(dtype=st.none() | xps.scalar_dtypes()), key="full_kw") + st.shared(hh.kwargs(dtype=st.none() | hh.all_dtypes), key="full_kw") ) dtype = kw.get("dtype", None) or draw(default_safe_dtypes) return draw(hh.from_dtype(dtype)) @@ -385,7 +385,7 @@ def full_fill_values(draw) -> Union[bool, int, float, complex]: @given( shape=hh.shapes(), fill_value=full_fill_values(), - kw=st.shared(hh.kwargs(dtype=st.none() | xps.scalar_dtypes()), key="full_kw"), + kw=st.shared(hh.kwargs(dtype=st.none() | hh.all_dtypes), key="full_kw"), ) def test_full(shape, fill_value, kw): with hh.reject_overflow(): @@ -424,9 +424,9 @@ def test_full(shape, fill_value, kw): ph.assert_fill("full", fill_value=fill_value, dtype=dtype, out=out, kw=dict(fill_value=fill_value)) -@given(kw=hh.kwargs(dtype=st.none() | xps.scalar_dtypes()), data=st.data()) +@given(kw=hh.kwargs(dtype=st.none() | hh.all_dtypes), data=st.data()) def test_full_like(kw, data): - dtype = kw.get("dtype", None) or data.draw(xps.scalar_dtypes(), label="dtype") + dtype = kw.get("dtype", None) or data.draw(hh.all_dtypes, label="dtype") x = data.draw(hh.arrays(dtype=dtype, shape=hh.shapes()), label="x") fill_value = data.draw(hh.from_dtype(dtype), label="fill_value") out = xp.full_like(x, fill_value, **kw) @@ -444,7 +444,7 @@ def test_full_like(kw, data): @given( num=hh.sizes, - dtype=st.none() | xps.floating_dtypes(), + dtype=st.none() | hh.real_floating_dtypes, endpoint=st.booleans(), data=st.data(), ) @@ -492,7 +492,7 @@ def test_linspace(num, dtype, endpoint, data): ph.assert_array_elements("linspace", out=out, expected=expected) -@given(dtype=xps.numeric_dtypes(), data=st.data()) +@given(dtype=hh.numeric_dtypes, data=st.data()) def test_meshgrid(dtype, data): # The number and size of generated arrays is arbitrarily limited to prevent # meshgrid() running out of memory. 
@@ -524,7 +524,7 @@ def make_one(dtype: DataType) -> Scalar: return True -@given(hh.shapes(), hh.kwargs(dtype=st.none() | xps.scalar_dtypes())) +@given(hh.shapes(), hh.kwargs(dtype=st.none() | hh.all_dtypes)) def test_ones(shape, kw): out = xp.ones(shape, **kw) if kw.get("dtype", None) is None: @@ -538,8 +538,8 @@ @given( - x=hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes()), - kw=hh.kwargs(dtype=st.none() | xps.scalar_dtypes()), + x=hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes()), + kw=hh.kwargs(dtype=st.none() | hh.all_dtypes), ) def test_ones_like(x, kw): out = xp.ones_like(x, **kw) @@ -562,7 +562,7 @@ def make_zero(dtype: DataType) -> Scalar: return False -@given(hh.shapes(), hh.kwargs(dtype=st.none() | xps.scalar_dtypes())) +@given(hh.shapes(), hh.kwargs(dtype=st.none() | hh.all_dtypes)) def test_zeros(shape, kw): out = xp.zeros(shape, **kw) if kw.get("dtype", None) is None: @@ -576,8 +576,8 @@ @given( - x=hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes()), - kw=hh.kwargs(dtype=st.none() | xps.scalar_dtypes()), + x=hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes()), + kw=hh.kwargs(dtype=st.none() | hh.all_dtypes), ) def test_zeros_like(x, kw): out = xp.zeros_like(x, **kw) diff --git a/array_api_tests/test_data_type_functions.py b/array_api_tests/test_data_type_functions.py index 1fa8c3b6..34c40024 100644 --- a/array_api_tests/test_data_type_functions.py +++ b/array_api_tests/test_data_type_functions.py @@ -11,13 +11,12 @@ from . import pytest_helpers as ph from . import shape_helpers as sh from . import xps -from . import xp as _xp from .typing import DataType # TODO: test with complex dtypes def non_complex_dtypes(): - return xps.boolean_dtypes() | xps.real_dtypes() + return xps.boolean_dtypes() | hh.real_dtypes def float32(n: Union[int, float]) -> float: @@ -69,7 +68,7 @@ def test_astype(x_dtype, dtype, kw, data): def test_broadcast_arrays(shapes, data): arrays = [] for c, shape in enumerate(shapes, 1): - x = data.draw(hh.arrays(dtype=xps.scalar_dtypes(), shape=shape), label=f"x{c}") + x = data.draw(hh.arrays(dtype=hh.all_dtypes, shape=shape), label=f"x{c}") arrays.append(x) out = xp.broadcast_arrays(*arrays) @@ -92,7 +91,7 @@ def test_broadcast_arrays(shapes, data): # TODO: test values -@given(x=hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes()), data=st.data()) +@given(x=hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes()), data=st.data()) def test_broadcast_to(x, data): shape = data.draw( hh.mutually_broadcastable_shapes(1, base_shape=x.shape) @@ -140,12 +139,8 @@ def test_can_cast(_from, to, data): assert out == expected, f"{out=}, but should be {expected} {f_func}" -@pytest.mark.parametrize("dtype_name", dh.real_float_names) -def test_finfo(dtype_name): - try: - dtype = getattr(_xp, dtype_name) - except AttributeError as e: - pytest.skip(str(e)) +@pytest.mark.parametrize("dtype", dh.real_float_dtypes) +def test_finfo(dtype): out = xp.finfo(dtype) f_func = f"[finfo({dh.dtype_to_name[dtype]})]" for attr, stype in [ @@ -164,12 +159,8 @@ # TODO: test values -@pytest.mark.parametrize("dtype_name", dh.all_int_names) -def test_iinfo(dtype_name): - try: - dtype = getattr(_xp, dtype_name) - except AttributeError as e: - pytest.skip(str(e)) +@pytest.mark.parametrize("dtype", dh.all_int_dtypes) +def test_iinfo(dtype): out = xp.iinfo(dtype) f_func = f"[iinfo({dh.dtype_to_name[dtype]})]" for attr in ["bits", "max", "min"]: @@ -183,12 +174,12 @@ def atomic_kinds() ->
st.SearchStrategy[Union[DataType, str]]: - return xps.scalar_dtypes() | st.sampled_from(list(dh.kind_to_dtypes.keys())) + return hh.all_dtypes | st.sampled_from(list(dh.kind_to_dtypes.keys())) @pytest.mark.min_version("2022.12") @given( - dtype=xps.scalar_dtypes(), + dtype=hh.all_dtypes, kind=atomic_kinds() | st.lists(atomic_kinds(), min_size=1).map(tuple), ) def test_isdtype(dtype, kind): diff --git a/array_api_tests/test_fft.py b/array_api_tests/test_fft.py index 62b94396..ff71433d 100644 --- a/array_api_tests/test_fft.py +++ b/array_api_tests/test_fft.py @@ -1,19 +1,16 @@ import math from typing import List, Optional -from unittest.mock import MagicMock import pytest from hypothesis import assume, given from hypothesis import strategies as st -from array_api_tests.typing import Array, DataType +from array_api_tests.typing import Array -from . import api_version from . import dtype_helpers as dh from . import hypothesis_helpers as hh from . import pytest_helpers as ph from . import shape_helpers as sh -from . import xps from . import xp pytestmark = [ @@ -21,12 +18,6 @@ pytest.mark.min_version("2022.12"), ] - -# Using xps.complex_dtypes() raises an AttributeError for 2021.12 instances of -# xps, hence this hack. TODO: figure out a better way to manage this! -if api_version < "2022.12": - xps = MagicMock(xps) - fft_shapes_strat = hh.shapes(min_dims=1).filter(lambda s: math.prod(s) > 1) @@ -78,19 +69,6 @@ def draw_s_axes_norm_kwargs(x: Array, data: st.DataObject, *, size_gt_1=False) - return s, axes, norm, kwargs -def assert_float_to_complex_dtype( - func_name: str, *, in_dtype: DataType, out_dtype: DataType -): - if in_dtype == xp.float32: - expected = xp.complex64 - else: - assert in_dtype == xp.float64 # sanity check - expected = xp.complex128 - ph.assert_dtype( - func_name, in_dtype=in_dtype, out_dtype=out_dtype, expected=expected - ) - - def assert_n_axis_shape( func_name: str, *, @@ -128,7 +106,7 @@ def assert_s_axes_shape( ph.assert_shape(func_name, out_shape=out.shape, expected=tuple(expected)) -@given(x=hh.arrays(dtype=xps.complex_dtypes(), shape=fft_shapes_strat), data=st.data()) +@given(x=hh.arrays(dtype=hh.complex_dtypes, shape=fft_shapes_strat), data=st.data()) def test_fft(x, data): n, axis, norm, kwargs = draw_n_axis_norm_kwargs(x, data) @@ -138,7 +116,7 @@ def test_fft(x, data): assert_n_axis_shape("fft", x=x, n=n, axis=axis, out=out) -@given(x=hh.arrays(dtype=xps.complex_dtypes(), shape=fft_shapes_strat), data=st.data()) +@given(x=hh.arrays(dtype=hh.complex_dtypes, shape=fft_shapes_strat), data=st.data()) def test_ifft(x, data): n, axis, norm, kwargs = draw_n_axis_norm_kwargs(x, data) @@ -148,7 +126,7 @@ def test_ifft(x, data): assert_n_axis_shape("ifft", x=x, n=n, axis=axis, out=out) -@given(x=hh.arrays(dtype=xps.complex_dtypes(), shape=fft_shapes_strat), data=st.data()) +@given(x=hh.arrays(dtype=hh.complex_dtypes, shape=fft_shapes_strat), data=st.data()) def test_fftn(x, data): s, axes, norm, kwargs = draw_s_axes_norm_kwargs(x, data) @@ -158,7 +136,7 @@ def test_fftn(x, data): assert_s_axes_shape("fftn", x=x, s=s, axes=axes, out=out) -@given(x=hh.arrays(dtype=xps.complex_dtypes(), shape=fft_shapes_strat), data=st.data()) +@given(x=hh.arrays(dtype=hh.complex_dtypes, shape=fft_shapes_strat), data=st.data()) def test_ifftn(x, data): s, axes, norm, kwargs = draw_s_axes_norm_kwargs(x, data) @@ -168,13 +146,13 @@ def test_ifftn(x, data): assert_s_axes_shape("ifftn", x=x, s=s, axes=axes, out=out) -@given(x=hh.arrays(dtype=xps.floating_dtypes(), shape=fft_shapes_strat), 
data=st.data()) +@given(x=hh.arrays(dtype=hh.real_floating_dtypes, shape=fft_shapes_strat), data=st.data()) def test_rfft(x, data): n, axis, norm, kwargs = draw_n_axis_norm_kwargs(x, data) out = xp.fft.rfft(x, **kwargs) - assert_float_to_complex_dtype("rfft", in_dtype=x.dtype, out_dtype=out.dtype) + ph.assert_float_to_complex_dtype("rfft", in_dtype=x.dtype, out_dtype=out.dtype) _axis = x.ndim - 1 if axis == -1 else axis if n is None: @@ -185,7 +163,7 @@ def test_rfft(x, data): ph.assert_shape("rfft", out_shape=out.shape, expected=expected_shape) -@given(x=hh.arrays(dtype=xps.complex_dtypes(), shape=fft_shapes_strat), data=st.data()) +@given(x=hh.arrays(dtype=hh.complex_dtypes, shape=fft_shapes_strat), data=st.data()) def test_irfft(x, data): n, axis, norm, kwargs = draw_n_axis_norm_kwargs(x, data, size_gt_1=True) @@ -207,13 +185,13 @@ def test_irfft(x, data): ph.assert_shape("irfft", out_shape=out.shape, expected=expected_shape) -@given(x=hh.arrays(dtype=xps.floating_dtypes(), shape=fft_shapes_strat), data=st.data()) +@given(x=hh.arrays(dtype=hh.real_floating_dtypes, shape=fft_shapes_strat), data=st.data()) def test_rfftn(x, data): s, axes, norm, kwargs = draw_s_axes_norm_kwargs(x, data) out = xp.fft.rfftn(x, **kwargs) - assert_float_to_complex_dtype("rfftn", in_dtype=x.dtype, out_dtype=out.dtype) + ph.assert_float_to_complex_dtype("rfftn", in_dtype=x.dtype, out_dtype=out.dtype) _axes = sh.normalise_axis(axes, x.ndim) _s = x.shape if s is None else s @@ -230,7 +208,7 @@ def test_rfftn(x, data): @given( x=hh.arrays( - dtype=xps.complex_dtypes(), shape=fft_shapes_strat.filter(lambda s: s[-1] > 1) + dtype=hh.complex_dtypes, shape=fft_shapes_strat.filter(lambda s: s[-1] > 1) ), data=st.data(), ) @@ -261,7 +239,7 @@ def test_irfftn(x, data): # ph.assert_shape("irfftn", out_shape=out.shape, expected=tuple(expected)) -@given(x=hh.arrays(dtype=xps.complex_dtypes(), shape=fft_shapes_strat), data=st.data()) +@given(x=hh.arrays(dtype=hh.complex_dtypes, shape=fft_shapes_strat), data=st.data()) def test_hfft(x, data): n, axis, norm, kwargs = draw_n_axis_norm_kwargs(x, data, size_gt_1=True) @@ -283,13 +261,13 @@ def test_hfft(x, data): ph.assert_shape("hfft", out_shape=out.shape, expected=expected_shape) -@given(x=hh.arrays(dtype=xps.floating_dtypes(), shape=fft_shapes_strat), data=st.data()) +@given(x=hh.arrays(dtype=hh.real_floating_dtypes, shape=fft_shapes_strat), data=st.data()) def test_ihfft(x, data): n, axis, norm, kwargs = draw_n_axis_norm_kwargs(x, data) out = xp.fft.ihfft(x, **kwargs) - assert_float_to_complex_dtype("ihfft", in_dtype=x.dtype, out_dtype=out.dtype) + ph.assert_float_to_complex_dtype("ihfft", in_dtype=x.dtype, out_dtype=out.dtype) _axis = x.ndim - 1 if axis == -1 else axis if n is None: @@ -315,7 +293,7 @@ def test_rfftfreq(n, kw): @pytest.mark.parametrize("func_name", ["fftshift", "ifftshift"]) -@given(x=hh.arrays(xps.floating_dtypes(), fft_shapes_strat), data=st.data()) +@given(x=hh.arrays(hh.floating_dtypes, fft_shapes_strat), data=st.data()) def test_shift_func(func_name, x, data): func = getattr(xp.fft, func_name) axes = data.draw( diff --git a/array_api_tests/test_indexing_functions.py b/array_api_tests/test_indexing_functions.py index 1fc9031e..3ef01cb7 100644 --- a/array_api_tests/test_indexing_functions.py +++ b/array_api_tests/test_indexing_functions.py @@ -7,13 +7,12 @@ from . import hypothesis_helpers as hh from . import pytest_helpers as ph from . import shape_helpers as sh -from . 
import xps @pytest.mark.unvectorized @pytest.mark.min_version("2022.12") @given( - x=hh.arrays(xps.scalar_dtypes(), hh.shapes(min_dims=1, min_side=1)), + x=hh.arrays(hh.all_dtypes, hh.shapes(min_dims=1, min_side=1)), data=st.data(), ) def test_take(x, data): diff --git a/array_api_tests/test_linalg.py b/array_api_tests/test_linalg.py index 807eaa87..4e394f0a 100644 --- a/array_api_tests/test_linalg.py +++ b/array_api_tests/test_linalg.py @@ -24,8 +24,9 @@ from typing import Tuple from .array_helpers import assert_exactly_equal, asarray -from .hypothesis_helpers import (arrays, all_floating_dtypes, xps, shapes, - kwargs, matrix_shapes, square_matrix_shapes, +from .hypothesis_helpers import (arrays, all_floating_dtypes, all_dtypes, + numeric_dtypes, xps, shapes, kwargs, + matrix_shapes, square_matrix_shapes, symmetric_matrices, SearchStrategy, positive_definite_matrices, MAX_ARRAY_SIZE, invertible_matrices, two_mutual_arrays, @@ -231,7 +232,7 @@ def test_det(x): @pytest.mark.unvectorized @pytest.mark.xp_extension('linalg') @given( - x=arrays(dtype=xps.scalar_dtypes(), shape=matrix_shapes()), + x=arrays(dtype=all_dtypes, shape=matrix_shapes()), # offset may produce an overflow if it is too large. Supporting offsets # that are way larger than the array shape isn't very important. kw=kwargs(offset=integers(-MAX_ARRAY_SIZE, MAX_ARRAY_SIZE)) @@ -413,7 +414,8 @@ def test_matrix_norm(x, kw): expected_shape = x.shape[:-2] + (1, 1) else: expected_shape = x.shape[:-2] - ph.assert_dtype("matrix_norm", in_dtype=x.dtype, out_dtype=res.dtype) + ph.assert_complex_to_float_dtype("matrix_norm", in_dtype=x.dtype, + out_dtype=res.dtype) ph.assert_result_shape("matrix_norm", in_shapes=[x.shape], out_shape=res.shape, expected=expected_shape) @@ -473,14 +475,14 @@ def _test_matrix_transpose(namespace, x): @pytest.mark.unvectorized @pytest.mark.xp_extension('linalg') @given( - x=arrays(dtype=xps.scalar_dtypes(), shape=matrix_shapes()), + x=arrays(dtype=all_dtypes, shape=matrix_shapes()), ) def test_linalg_matrix_transpose(x): return _test_matrix_transpose(linalg, x) @pytest.mark.unvectorized @given( - x=arrays(dtype=xps.scalar_dtypes(), shape=matrix_shapes()), + x=arrays(dtype=all_dtypes, shape=matrix_shapes()), ) def test_matrix_transpose(x): return _test_matrix_transpose(_array_module, x) @@ -672,8 +674,8 @@ def test_svd(x, kw): ph.assert_dtype("svd", in_dtype=x.dtype, out_dtype=U.dtype, expected=x.dtype, repr_name="U.dtype") - ph.assert_dtype("svd", in_dtype=x.dtype, out_dtype=S.dtype, - expected=x.dtype, repr_name="S.dtype") + ph.assert_complex_to_float_dtype("svd", in_dtype=x.dtype, + out_dtype=S.dtype, repr_name="S.dtype") ph.assert_dtype("svd", in_dtype=x.dtype, out_dtype=Vh.dtype, expected=x.dtype, repr_name="Vh.dtype") @@ -715,8 +717,8 @@ def test_svdvals(x): *stack, M, N = x.shape K = min(M, N) - ph.assert_dtype("svdvals", in_dtype=x.dtype, out_dtype=res.dtype, - expected=x.dtype) + ph.assert_complex_to_float_dtype("svdvals", in_dtype=x.dtype, + out_dtype=res.dtype) ph.assert_result_shape("svdvals", in_shapes=[x.shape], out_shape=res.shape, expected=(*stack, K)) @@ -858,7 +860,7 @@ def test_tensordot(x1, x2, kw): @pytest.mark.unvectorized @pytest.mark.xp_extension('linalg') @given( - x=arrays(dtype=xps.numeric_dtypes(), shape=matrix_shapes()), + x=arrays(dtype=numeric_dtypes, shape=matrix_shapes()), # offset may produce an overflow if it is too large. Supporting offsets # that are way larger than the array shape isn't very important. 
kw=kwargs(offset=integers(-MAX_ARRAY_SIZE, MAX_ARRAY_SIZE)) diff --git a/array_api_tests/test_manipulation_functions.py b/array_api_tests/test_manipulation_functions.py index 16e48632..55391e43 100644 --- a/array_api_tests/test_manipulation_functions.py +++ b/array_api_tests/test_manipulation_functions.py @@ -122,7 +122,7 @@ def test_concat(dtypes, base_shape, data): @pytest.mark.unvectorized @given( - x=hh.arrays(dtype=xps.scalar_dtypes(), shape=shared_shapes()), + x=hh.arrays(dtype=hh.all_dtypes, shape=shared_shapes()), axis=shared_shapes().flatmap( # Generate both valid and invalid axis lambda s: st.integers(2 * (-len(s) - 1), 2 * len(s)) @@ -150,7 +150,7 @@ def test_expand_dims(x, axis): @pytest.mark.min_version("2023.12") -@given(x=hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes(min_dims=1)), data=st.data()) +@given(x=hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes(min_dims=1)), data=st.data()) def test_moveaxis(x, data): source = data.draw( st.integers(-x.ndim, x.ndim - 1) | xps.valid_tuple_axes(x.ndim), label="source" @@ -177,7 +177,7 @@ def test_moveaxis(x, data): @pytest.mark.unvectorized @given( x=hh.arrays( - dtype=xps.scalar_dtypes(), shape=hh.shapes(min_side=1).filter(lambda s: 1 in s) + dtype=hh.all_dtypes, shape=hh.shapes(min_side=1).filter(lambda s: 1 in s) ), data=st.data(), ) @@ -214,7 +214,7 @@ def test_squeeze(x, data): @pytest.mark.unvectorized @given( - x=hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes()), + x=hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes()), data=st.data(), ) def test_flip(x, data): @@ -239,7 +239,7 @@ def test_flip(x, data): @pytest.mark.unvectorized @given( - x=hh.arrays(dtype=xps.scalar_dtypes(), shape=shared_shapes(min_dims=1)), + x=hh.arrays(dtype=hh.all_dtypes, shape=shared_shapes(min_dims=1)), axes=shared_shapes(min_dims=1).flatmap( lambda s: st.lists( st.integers(0, len(s) - 1), @@ -280,7 +280,7 @@ def reshape_shapes(draw, shape): @pytest.mark.min_version("2023.12") @given( - x=hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes(min_dims=1)), + x=hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes(min_dims=1)), repeats=st.integers(1, 4), ) def test_repeat(x, repeats): @@ -295,7 +295,7 @@ def test_repeat(x, repeats): @pytest.mark.unvectorized @pytest.mark.skip("flaky") # TODO: fix! 
@given( - x=hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes(max_side=MAX_SIDE)), + x=hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes(max_side=MAX_SIDE)), data=st.data(), ) def test_reshape(x, data): @@ -326,7 +326,7 @@ def roll_ndindex(shape: Shape, shifts: Tuple[int], axes: Tuple[int]) -> Iterator @pytest.mark.unvectorized -@given(hh.arrays(dtype=xps.scalar_dtypes(), shape=shared_shapes()), st.data()) +@given(hh.arrays(dtype=hh.all_dtypes, shape=shared_shapes()), st.data()) def test_roll(x, data): shift_strat = st.integers(-hh.MAX_ARRAY_SIZE, hh.MAX_ARRAY_SIZE) if x.ndim > 0: @@ -413,7 +413,7 @@ def test_stack(shape, dtypes, kw, data): @pytest.mark.min_version("2023.12") -@given(x=hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes()), data=st.data()) +@given(x=hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes()), data=st.data()) def test_tile(x, data): repetitions = data.draw(st.lists(st.integers(1, 4), min_size=1, max_size=x.ndim + 1).map(tuple), label="repetitions") out = xp.tile(x, repetitions) @@ -422,10 +422,10 @@ def test_tile(x, data): @pytest.mark.min_version("2023.12") -@given(x=hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes(min_dims=1)), data=st.data()) +@given(x=hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes(min_dims=1)), data=st.data()) def test_unstack(x, data): axis = data.draw(st.integers(min_value=-x.ndim, max_value=x.ndim - 1), label="axis") kw = data.draw(hh.specified_kwargs(("axis", axis, 0)), label="kw") out = xp.asarray(xp.unstack(x, **kw), dtype=x.dtype) ph.assert_dtype("unstack", in_dtype=x.dtype, out_dtype=out.dtype) - # TODO: shapes and values testing \ No newline at end of file + # TODO: shapes and values testing diff --git a/array_api_tests/test_operators_and_elementwise_functions.py b/array_api_tests/test_operators_and_elementwise_functions.py index fe0ffc5d..18ed9f55 100644 --- a/array_api_tests/test_operators_and_elementwise_functions.py +++ b/array_api_tests/test_operators_and_elementwise_functions.py @@ -25,16 +25,6 @@ pytestmark = pytest.mark.unvectorized -def all_integer_dtypes() -> st.SearchStrategy[DataType]: - """Returns a strategy for signed and unsigned integer dtype objects.""" - return xps.unsigned_integer_dtypes() | xps.integer_dtypes() - - -def boolean_and_all_integer_dtypes() -> st.SearchStrategy[DataType]: - """Returns a strategy for boolean and all integer dtype objects.""" - return xps.boolean_dtypes() | all_integer_dtypes() - - def mock_int_dtype(n: int, dtype: DataType) -> int: """Returns equivalent of `n` that mocks `dtype` behaviour.""" nbits = dh.dtype_nbits[dtype] @@ -925,7 +915,7 @@ def test_bitwise_xor(ctx, data): binary_param_assert_against_refimpl(ctx, left, right, res, "^", refimpl) -@given(hh.arrays(dtype=xps.real_dtypes(), shape=hh.shapes())) +@given(hh.arrays(dtype=hh.real_dtypes, shape=hh.shapes())) def test_ceil(x): out = xp.ceil(x) ph.assert_dtype("ceil", in_dtype=x.dtype, out_dtype=out.dtype) @@ -934,7 +924,7 @@ def test_ceil(x): @pytest.mark.min_version("2023.12") -@given(hh.arrays(dtype=xps.floating_dtypes(), shape=hh.shapes())) +@given(hh.arrays(dtype=hh.real_floating_dtypes, shape=hh.shapes())) def test_clip(x): # TODO: test min/max kwargs, adjust values testing accordingly out = xp.clip(x) @@ -945,7 +935,7 @@ def test_clip(x): if api_version >= "2022.12": - @given(hh.arrays(dtype=xps.complex_dtypes(), shape=hh.shapes())) + @given(hh.arrays(dtype=hh.complex_dtypes, shape=hh.shapes())) def test_conj(x): out = xp.conj(x) ph.assert_dtype("conj", in_dtype=x.dtype, out_dtype=out.dtype) @@ -1047,7 +1037,7 @@ def 
test_expm1(x): unary_assert_against_refimpl("expm1", x, out, math.expm1) -@given(hh.arrays(dtype=xps.real_dtypes(), shape=hh.shapes())) +@given(hh.arrays(dtype=hh.real_dtypes, shape=hh.shapes())) def test_floor(x): out = xp.floor(x) ph.assert_dtype("floor", in_dtype=x.dtype, out_dtype=out.dtype) @@ -1125,7 +1115,7 @@ def test_hypot(x1, x2): if api_version >= "2022.12": - @given(hh.arrays(dtype=xps.complex_dtypes(), shape=hh.shapes())) + @given(hh.arrays(dtype=hh.complex_dtypes, shape=hh.shapes())) def test_imag(x): out = xp.imag(x) ph.assert_dtype("imag", in_dtype=x.dtype, out_dtype=out.dtype, expected=dh.dtype_components[x.dtype]) @@ -1133,7 +1123,7 @@ def test_imag(x): unary_assert_against_refimpl("imag", x, out, operator.attrgetter("imag")) -@given(hh.arrays(dtype=xps.numeric_dtypes(), shape=hh.shapes())) +@given(hh.arrays(dtype=hh.numeric_dtypes, shape=hh.shapes())) def test_isfinite(x): out = xp.isfinite(x) ph.assert_dtype("isfinite", in_dtype=x.dtype, out_dtype=out.dtype, expected=xp.bool) @@ -1141,7 +1131,7 @@ def test_isfinite(x): unary_assert_against_refimpl("isfinite", x, out, math.isfinite, res_stype=bool) -@given(hh.arrays(dtype=xps.numeric_dtypes(), shape=hh.shapes())) +@given(hh.arrays(dtype=hh.numeric_dtypes, shape=hh.shapes())) def test_isinf(x): out = xp.isinf(x) ph.assert_dtype("isfinite", in_dtype=x.dtype, out_dtype=out.dtype, expected=xp.bool) @@ -1149,7 +1139,7 @@ def test_isinf(x): unary_assert_against_refimpl("isinf", x, out, math.isinf, res_stype=bool) -@given(hh.arrays(dtype=xps.numeric_dtypes(), shape=hh.shapes())) +@given(hh.arrays(dtype=hh.numeric_dtypes, shape=hh.shapes())) def test_isnan(x): out = xp.isnan(x) ph.assert_dtype("isnan", in_dtype=x.dtype, out_dtype=out.dtype, expected=xp.bool) @@ -1392,7 +1382,7 @@ def test_pow(ctx, data): if api_version >= "2022.12": - @given(hh.arrays(dtype=xps.complex_dtypes(), shape=hh.shapes())) + @given(hh.arrays(dtype=hh.complex_dtypes, shape=hh.shapes())) def test_real(x): out = xp.real(x) ph.assert_dtype("real", in_dtype=x.dtype, out_dtype=out.dtype, expected=dh.dtype_components[x.dtype]) @@ -1418,7 +1408,7 @@ def test_remainder(ctx, data): binary_param_assert_against_refimpl(ctx, left, right, res, "%", operator.mod) -@given(hh.arrays(dtype=xps.numeric_dtypes(), shape=hh.shapes())) +@given(hh.arrays(dtype=hh.numeric_dtypes, shape=hh.shapes())) def test_round(x): out = xp.round(x) ph.assert_dtype("round", in_dtype=x.dtype, out_dtype=out.dtype) @@ -1427,7 +1417,7 @@ def test_round(x): @pytest.mark.min_version("2023.12") -@given(hh.arrays(dtype=xps.floating_dtypes(), shape=hh.shapes())) +@given(hh.arrays(dtype=hh.real_floating_dtypes, shape=hh.shapes())) def test_signbit(x): out = xp.signbit(x) ph.assert_dtype("signbit", in_dtype=x.dtype, out_dtype=out.dtype, expected=xp.bool) @@ -1435,7 +1425,7 @@ def test_signbit(x): # TODO: values testing -@given(hh.arrays(dtype=xps.numeric_dtypes(), shape=hh.shapes(), elements=finite_kw)) +@given(hh.arrays(dtype=hh.numeric_dtypes, shape=hh.shapes(), elements=finite_kw)) def test_sign(x): out = xp.sign(x) ph.assert_dtype("sign", in_dtype=x.dtype, out_dtype=out.dtype) @@ -1467,7 +1457,7 @@ def test_sinh(x): unary_assert_against_refimpl("sinh", x, out, math.sinh) -@given(hh.arrays(dtype=xps.numeric_dtypes(), shape=hh.shapes())) +@given(hh.arrays(dtype=hh.numeric_dtypes, shape=hh.shapes())) def test_square(x): out = xp.square(x) ph.assert_dtype("square", in_dtype=x.dtype, out_dtype=out.dtype) @@ -1517,7 +1507,7 @@ def test_tanh(x): unary_assert_against_refimpl("tanh", x, out, math.tanh) 
-@given(hh.arrays(dtype=xps.real_dtypes(), shape=xps.array_shapes())) +@given(hh.arrays(dtype=hh.real_dtypes, shape=xps.array_shapes())) def test_trunc(x): out = xp.trunc(x) ph.assert_dtype("trunc", in_dtype=x.dtype, out_dtype=out.dtype) diff --git a/array_api_tests/test_searching_functions.py b/array_api_tests/test_searching_functions.py index 608547ec..a12e9d52 100644 --- a/array_api_tests/test_searching_functions.py +++ b/array_api_tests/test_searching_functions.py @@ -17,7 +17,7 @@ @given( x=hh.arrays( - dtype=xps.real_dtypes(), + dtype=hh.real_dtypes, shape=hh.shapes(min_dims=1, min_side=1), elements={"allow_nan": False}, ), @@ -54,7 +54,7 @@ def test_argmax(x, data): @given( x=hh.arrays( - dtype=xps.real_dtypes(), + dtype=hh.real_dtypes, shape=hh.shapes(min_dims=1, min_side=1), elements={"allow_nan": False}, ), @@ -88,14 +88,14 @@ def test_argmin(x, data): ph.assert_scalar_equals("argmin", type_=int, idx=out_idx, out=min_i, expected=expected) -@given(hh.arrays(dtype=xps.scalar_dtypes(), shape=())) +@given(hh.arrays(dtype=hh.all_dtypes, shape=())) def test_nonzero_zerodim_error(x): with pytest.raises(Exception): xp.nonzero(x) @pytest.mark.data_dependent_shapes -@given(hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes(min_dims=1, min_side=1))) +@given(hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes(min_dims=1, min_side=1))) def test_nonzero(x): out = xp.nonzero(x) assert len(out) == x.ndim, f"{len(out)=}, but should be {x.ndim=}" @@ -202,4 +202,4 @@ def test_searchsorted(data): out_dtype=out.dtype, expected=xp.__array_namespace_info__().default_dtypes()["indexing"], ) - # TODO: shapes and values testing \ No newline at end of file + # TODO: shapes and values testing diff --git a/array_api_tests/test_set_functions.py b/array_api_tests/test_set_functions.py index a94a9c2d..c9abaad1 100644 --- a/array_api_tests/test_set_functions.py +++ b/array_api_tests/test_set_functions.py @@ -11,12 +11,11 @@ from . import hypothesis_helpers as hh from . import pytest_helpers as ph from . import shape_helpers as sh -from . 
import xps pytestmark = [pytest.mark.data_dependent_shapes, pytest.mark.unvectorized] -@given(hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes(min_side=1))) +@given(hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes(min_side=1))) def test_unique_all(x): out = xp.unique_all(x) @@ -116,7 +115,7 @@ def test_unique_all(x): assert nans == expected, f"{nans} NaNs in out, but should be {expected}" -@given(hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes(min_side=1))) +@given(hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes(min_side=1))) def test_unique_counts(x): out = xp.unique_counts(x) assert hasattr(out, "values") @@ -163,7 +162,7 @@ def test_unique_counts(x): assert nans == expected, f"{nans} NaNs in out, but should be {expected}" -@given(hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes(min_side=1))) +@given(hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes(min_side=1))) def test_unique_inverse(x): out = xp.unique_inverse(x) assert hasattr(out, "values") @@ -216,7 +215,7 @@ def test_unique_inverse(x): assert nans == expected, f"{nans} NaNs in out.values, but should be {expected}" -@given(hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes(min_side=1))) +@given(hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes(min_side=1))) def test_unique_values(x): out = xp.unique_values(x) ph.assert_dtype("unique_values", in_dtype=x.dtype, out_dtype=out.dtype) diff --git a/array_api_tests/test_sorting_functions.py b/array_api_tests/test_sorting_functions.py index 4aecfbe7..8501045c 100644 --- a/array_api_tests/test_sorting_functions.py +++ b/array_api_tests/test_sorting_functions.py @@ -11,7 +11,6 @@ from . import hypothesis_helpers as hh from . import pytest_helpers as ph from . import shape_helpers as sh -from . import xps from .typing import Scalar, Shape @@ -33,7 +32,7 @@ def assert_scalar_in_set( @pytest.mark.unvectorized @given( x=hh.arrays( - dtype=xps.real_dtypes(), + dtype=hh.real_dtypes, shape=hh.shapes(min_dims=1, min_side=1), elements={"allow_nan": False}, ), @@ -94,7 +93,7 @@ def test_argsort(x, data): # TODO: Test with signed zeros and NaNs (and ignore them somehow) @given( x=hh.arrays( - dtype=xps.real_dtypes(), + dtype=hh.real_dtypes, shape=hh.shapes(min_dims=1, min_side=1), elements={"allow_nan": False}, ), diff --git a/array_api_tests/test_special_cases.py b/array_api_tests/test_special_cases.py index d7be6b47..07ab3616 100644 --- a/array_api_tests/test_special_cases.py +++ b/array_api_tests/test_special_cases.py @@ -1326,7 +1326,7 @@ def test_empty_arrays(func_name, expected): # TODO: parse docstrings to get exp "func_name", [f.__name__ for f in category_to_funcs["statistical"]] ) @given( - x=hh.arrays(dtype=xps.floating_dtypes(), shape=hh.shapes(min_side=1)), + x=hh.arrays(dtype=hh.real_floating_dtypes, shape=hh.shapes(min_side=1)), data=st.data(), ) def test_nan_propagation(func_name, x, data): diff --git a/array_api_tests/test_statistical_functions.py b/array_api_tests/test_statistical_functions.py index cb9843ed..bb8c0ef2 100644 --- a/array_api_tests/test_statistical_functions.py +++ b/array_api_tests/test_statistical_functions.py @@ -11,13 +11,13 @@ from . import hypothesis_helpers as hh from . import pytest_helpers as ph from . import shape_helpers as sh -from . import api_version, xps +from . 
import api_version from ._array_module import _UndefinedStub from .typing import DataType @pytest.mark.min_version("2023.12") -@given(hh.arrays(dtype=xps.numeric_dtypes(), shape=hh.shapes(min_dims=1, max_dims=1))) +@given(hh.arrays(dtype=hh.numeric_dtypes, shape=hh.shapes(min_dims=1, max_dims=1))) def test_cumulative_sum(x): # TODO: test kwargs + diff shapes, adjust shape and values testing accordingly out = xp.cumulative_sum(x) @@ -36,7 +36,7 @@ def kwarg_dtypes(dtype: DataType) -> st.SearchStrategy[Optional[DataType]]: @pytest.mark.unvectorized @given( x=hh.arrays( - dtype=xps.real_dtypes(), + dtype=hh.real_dtypes, shape=hh.shapes(min_side=1), elements={"allow_nan": False}, ), @@ -66,7 +66,7 @@ def test_max(x, data): @given( x=hh.arrays( - dtype=xps.floating_dtypes(), + dtype=hh.real_floating_dtypes, shape=hh.shapes(min_side=1), elements={"allow_nan": False}, ), @@ -89,7 +89,7 @@ def test_mean(x, data): @pytest.mark.unvectorized @given( x=hh.arrays( - dtype=xps.real_dtypes(), + dtype=hh.real_dtypes, shape=hh.shapes(min_side=1), elements={"allow_nan": False}, ), @@ -120,7 +120,7 @@ def test_min(x, data): @pytest.mark.unvectorized @given( x=hh.arrays( - dtype=xps.numeric_dtypes(), + dtype=hh.numeric_dtypes, shape=hh.shapes(min_side=1), elements={"allow_nan": False}, ), @@ -172,7 +172,7 @@ def test_prod(x, data): @pytest.mark.skip(reason="flaky") # TODO: fix! @given( x=hh.arrays( - dtype=xps.floating_dtypes(), + dtype=hh.real_floating_dtypes, shape=hh.shapes(min_side=1), elements={"allow_nan": False}, ).filter(lambda x: math.prod(x.shape) >= 2), @@ -209,7 +209,7 @@ def test_std(x, data): @pytest.mark.unvectorized @given( x=hh.arrays( - dtype=xps.numeric_dtypes(), + dtype=hh.numeric_dtypes, shape=hh.shapes(min_side=1), elements={"allow_nan": False}, ), @@ -262,7 +262,7 @@ def test_sum(x, data): @pytest.mark.skip(reason="flaky") # TODO: fix! @given( x=hh.arrays( - dtype=xps.floating_dtypes(), + dtype=hh.real_floating_dtypes, shape=hh.shapes(min_side=1), elements={"allow_nan": False}, ).filter(lambda x: math.prod(x.shape) >= 2), diff --git a/array_api_tests/test_utility_functions.py b/array_api_tests/test_utility_functions.py index e094cfb9..2338d1d2 100644 --- a/array_api_tests/test_utility_functions.py +++ b/array_api_tests/test_utility_functions.py @@ -7,12 +7,11 @@ from . import hypothesis_helpers as hh from . import pytest_helpers as ph from . import shape_helpers as sh -from . import xps @pytest.mark.unvectorized @given( - x=hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes(min_side=1)), + x=hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes(min_side=1)), data=st.data(), ) def test_all(x, data): @@ -40,7 +39,7 @@ def test_all(x, data): @pytest.mark.unvectorized @given( - x=hh.arrays(dtype=xps.scalar_dtypes(), shape=hh.shapes()), + x=hh.arrays(dtype=hh.all_dtypes, shape=hh.shapes()), data=st.data(), ) def test_any(x, data):