From 9647fb699c0b2a6ceb8363a7f7d7abd6472f63d4 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sun, 26 Feb 2023 23:04:28 +0300 Subject: [PATCH 01/33] MAINT: bare-bones normalizations via type hints --- torch_np/_funcs.py | 115 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 102 insertions(+), 13 deletions(-) diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index 9ebcd364..261a1766 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -1,13 +1,102 @@ +import typing + import torch from . import _decorators, _helpers from ._detail import _dtypes_impl, _flips, _util from ._detail import implementations as _impl +################################## normalizations -def nonzero(a): - (tensor,) = _helpers.to_tensors(a) - result = tensor.nonzero(as_tuple=True) +ArrayLike = typing.TypeVar("ArrayLike") +DTypeLike = typing.TypeVar("DTypeLike") +SubokLike = typing.TypeVar("SubokLike") + + +import inspect + +from . import _dtypes + + +def normalize_array_like(x, name=None): + (tensor,) = _helpers.to_tensors(x) + return tensor + + +def normalize_dtype(dtype, name=None): + # cf _decorators.dtype_to_torch + torch_dtype = None + if dtype is not None: + dtype = _dtypes.dtype(dtype) + torch_dtype = dtype.torch_dtype + return torch_dtype + + +def normalize_subok_like(arg, name): + if arg: + raise ValueError(f"'{name}' parameter is not supported.") + + +normalizers = { + ArrayLike: normalize_array_like, + DTypeLike: normalize_dtype, + SubokLike: normalize_subok_like, +} + +import functools + + +def normalizer(func): + @functools.wraps(func) + def wrapped(*args, **kwds): + sig = inspect.signature(func) + + dct = {} + # loop over positional parameters and actual arguments + for arg, (name, parm) in zip(args, sig.parameters.items()): + print(arg, name, parm.annotation) + normalizer = normalizers.get(parm.annotation, None) + if normalizer: + dct[name] = normalizer(arg, name) + else: + # untyped arguments pass through + dct[name] = arg + + # normalize keyword arguments + for name, arg in kwds.items(): + print("kw: ", name, sig.parameters[name].annotation) + parm = sig.parameters[name] + normalizer = normalizers.get(parm.annotation, None) + if normalizer: + dct[name] = normalizer(kwds[name], name) + else: + dct[name] = arg + + ba = sig.bind(**dct) + ba.apply_defaults() + + # TODO: + # 2. extra unknown args -- error out : nonzero([2, 0, 3], oops=42) + # 3. [LOOKS OK] optional (tensor_or_none) : untyped => pass through + # 4. [LOOKS OK] DTypeLike : positional or kw + # 5. axes : live in _impl or in types? several ways of handling them + # 6. keepdims : peel off, postprocess + # 7. 
OutLike : normal & keyword-only, peel off, postprocess

        # finally, pass normalized arguments through
        result = func(*ba.args)
        return result

    return wrapped


##################################


@normalizer
def nonzero(a: ArrayLike):
    # (tensor,) = _helpers.to_tensors(a)
    result = a.nonzero(as_tuple=True)
    return _helpers.tuple_arrays_from(result)


@@ -41,25 +130,25 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
    return _helpers.array_from(result)


-@_decorators.dtype_to_torch
-def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
-    (tensor,) = _helpers.to_tensors(a)
-    result = _impl.trace(tensor, offset, axis1, axis2, dtype)
+@normalizer
+def trace(a: ArrayLike, offset=0, axis1=0, axis2=1, dtype: DTypeLike = None, out=None):
+    # (tensor,) = _helpers.to_tensors(a)
+    result = _impl.trace(a, offset, axis1, axis2, dtype)
    return _helpers.result_or_out(result, out)


-@_decorators.dtype_to_torch
-def eye(N, M=None, k=0, dtype=float, order="C", *, like=None):
-    _util.subok_not_ok(like)
+@normalizer
+def eye(N, M=None, k=0, dtype: DTypeLike = float, order="C", *, like: SubokLike = None):
+    # _util.subok_not_ok(like)
    if order != "C":
        raise NotImplementedError
    result = _impl.eye(N, M, k, dtype)
    return _helpers.array_from(result)


-@_decorators.dtype_to_torch
-def identity(n, dtype=None, *, like=None):
-    _util.subok_not_ok(like)
+@normalizer
+def identity(n, dtype: DTypeLike = None, *, like: SubokLike = None):
+    ## _util.subok_not_ok(like)
    result = torch.eye(n, dtype=dtype)
    return _helpers.array_from(result)

From 0b8264f2f7448ac776143ec1e143fba297881a66 Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Tue, 28 Feb 2023 20:36:36 +0300
Subject: [PATCH 02/33] BUG: normalizations: raise on mismatch between
 parameters and actual arguments

---
 torch_np/_funcs.py           | 16 +++++++++++++++-
 torch_np/tests/test_basic.py | 31 ++++++++++++++++++++++++++++---
 2 files changed, 43 insertions(+), 4 deletions(-)

diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py
index 261a1766..2431617d 100644
--- a/torch_np/_funcs.py
+++ b/torch_np/_funcs.py
@@ -64,6 +64,12 @@ def wrapped(*args, **kwds):

        # normalize keyword arguments
        for name, arg in kwds.items():
+            if name not in sig.parameters:
+                # unknown kwarg, bail out
+                raise TypeError(
+                    f"{func.__name__}() got an unexpected keyword argument '{name}'."
+                )
+
            print("kw: ", name, sig.parameters[name].annotation)
            parm = sig.parameters[name]
            normalizer = normalizers.get(parm.annotation, None)
@@ -75,8 +81,16 @@ def wrapped(*args, **kwds):
        ba = sig.bind(**dct)
        ba.apply_defaults()

+        # Now that all parameters have been consumed, check:
+        # anything that has not been bound is an unexpected positional arg => raise.
+        # If there are too few actual arguments, this will fail in func(*ba.args) below
+        if len(args) > len(ba.args):
+            raise TypeError(
+                f"{func.__name__}() takes {len(ba.args)} positional arguments but {len(args)} were given."
+            )
+
        # TODO:
-        # 2. extra unknown args -- error out : nonzero([2, 0, 3], oops=42)
+        # 2. [LOOKS OK] extra unknown args -- error out : nonzero([2, 0, 3], oops=42)
        # 3. [LOOKS OK] optional (tensor_or_none) : untyped => pass through
        # 4. [LOOKS OK] DTypeLike : positional or kw
        # 5. axes : live in _impl or in types? several ways of handling them
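For readers following the patch, the two TypeError checks above lean entirely on
inspect.Signature. A minimal standalone sketch of the bind/apply_defaults mechanics
(illustrative only, not part of the patch):

    import inspect

    def f(a, b=1):
        pass

    sig = inspect.signature(f)
    ba = sig.bind(10)   # binds a=10; b is filled in by apply_defaults below
    ba.apply_defaults()
    print(ba.args)      # -> (10, 1)
    # sig.bind(10, 20, 30) raises TypeError: too many positional arguments

The subtlety the length check works around: the zip over sig.parameters silently
drops surplus caller arguments, so after apply_defaults len(ba.args) counts the
function's parameters while len(args) still counts what the caller actually passed;
comparing the two recovers the dropped surplus.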
diff --git a/torch_np/tests/test_basic.py b/torch_np/tests/test_basic.py
index f102793d..52189b55 100644
--- a/torch_np/tests/test_basic.py
+++ b/torch_np/tests/test_basic.py
@@ -1,8 +1,9 @@
 import functools

-import numpy as np
+import numpy as _np
 import pytest
 import torch
+from pytest import raises as assert_raises

 import torch_np as w
 import torch_np._unary_ufuncs as _unary_ufuncs
@@ -211,7 +212,7 @@ def test_array(self, func):
        assert ta.shape == self.shape


-one_arg_scalar_funcs = [(w.size, np.size), (w.shape, np.shape), (w.ndim, np.ndim)]
+one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]


@pytest.mark.parametrize("func, np_func", one_arg_scalar_funcs)
@@ -221,7 +222,7 @@ class TestOneArrToScalar:
    def test_tensor(self, func, np_func):
        t = torch.Tensor([[1, 2, 3], [4, 5, 6]])
        ta = func(t)
-        tn = np_func(np.asarray(t))
+        tn = np_func(_np.asarray(t))

        assert not isinstance(ta, w.ndarray)
        assert ta == tn
@@ -384,3 +385,27 @@ class TestPythonArgsToArray:
    def test_simple(self, func, args):
        a = func(*args)
        assert isinstance(a, w.ndarray)
+
+
+class TestNormalizations:
+    """Smoke test generic problems with normalizations."""
+
+    def test_unknown_args(self):
+        # Check that unknown args to decorated functions fail
+        a = w.arange(7) % 2 == 0
+
+        # unknown positional args
+        with assert_raises(TypeError):
+            w.nonzero(a, "kaboom")
+
+        # unknown kwarg
+        with assert_raises(TypeError):
+            w.nonzero(a, oops="ouch")
+
+    def test_unknown_args_with_defaults(self):
+        # check a function with 5 arguments and 4 defaults: this should work
+        w.eye(3)
+
+        # five arguments, four defaults: this should fail
+        with assert_raises(TypeError):
+            w.eye()

From d583c62a594a737e8955d10ed601b3783cbe02c5 Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Thu, 2 Mar 2023 10:50:43 +0300
Subject: [PATCH 03/33] MAINT: normalize dtype in concatenate and *stack family

Gradual (!) typing works: annotating only the dtype is enough to get rid of
the dtype_to_torch decorator. Annotating the SeqArrayLike typing is TBD.

---
 torch_np/_funcs.py   |  3 ++-
 torch_np/_wrapper.py | 27 +++++++++++++------------
 2 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py
index 2431617d..c26d8be7 100644
--- a/torch_np/_funcs.py
+++ b/torch_np/_funcs.py
@@ -90,6 +90,7 @@ def wrapped(*args, **kwds):
            )

        # TODO:
+        # 1. [LOOKS OK] kw-only parameters : see vstack
        # 2. [LOOKS OK] extra unknown args -- error out : nonzero([2, 0, 3], oops=42)
        # 3. [LOOKS OK] optional (tensor_or_none) : untyped => pass through
        # 4. [LOOKS OK] DTypeLike : positional or kw
@@ -98,7 +99,7 @@ def wrapped(*args, **kwds):
        # 7. OutLike : normal & keyword-only, peel off, postprocess

        # finally, pass normalized arguments through
-        result = func(*ba.args)
+        result = func(*ba.args, **ba.kwargs)
        return result

    return wrapped

diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py
index dbf0bf9a..e0860ab6 100644
--- a/torch_np/_wrapper.py
+++ b/torch_np/_wrapper.py
@@ -108,16 +108,19 @@ def _concat_check(tup, dtype, out):
    )


-@_decorators.dtype_to_torch
-def concatenate(ar_tuple, axis=0, out=None, dtype=None, casting="same_kind"):
+### XXX: order the imports DAG
+from . 
_funcs import normalizer, DTypeLike + +@normalizer +def concatenate(ar_tuple, axis=0, out=None, dtype: DTypeLike=None, casting="same_kind"): tensors = _helpers.to_tensors(*ar_tuple) _concat_check(tensors, dtype, out=out) result = _impl.concatenate(tensors, axis, out, dtype, casting) return _helpers.result_or_out(result, out) -@_decorators.dtype_to_torch -def vstack(tup, *, dtype=None, casting="same_kind"): +@normalizer +def vstack(tup, *, dtype : DTypeLike=None, casting="same_kind"): tensors = _helpers.to_tensors(*tup) _concat_check(tensors, dtype, out=None) result = _impl.vstack(tensors, dtype=dtype, casting=casting) @@ -127,16 +130,16 @@ def vstack(tup, *, dtype=None, casting="same_kind"): row_stack = vstack -@_decorators.dtype_to_torch -def hstack(tup, *, dtype=None, casting="same_kind"): +@normalizer +def hstack(tup, *, dtype : DTypeLike=None, casting="same_kind"): tensors = _helpers.to_tensors(*tup) _concat_check(tensors, dtype, out=None) result = _impl.hstack(tensors, dtype=dtype, casting=casting) return asarray(result) -@_decorators.dtype_to_torch -def dstack(tup, *, dtype=None, casting="same_kind"): +@normalizer +def dstack(tup, *, dtype : DTypeLike=None, casting="same_kind"): # XXX: in numpy 1.24 dstack does not have dtype and casting keywords # but {h,v}stack do. Hence add them here for consistency. tensors = _helpers.to_tensors(*tup) @@ -144,8 +147,8 @@ def dstack(tup, *, dtype=None, casting="same_kind"): return asarray(result) -@_decorators.dtype_to_torch -def column_stack(tup, *, dtype=None, casting="same_kind"): +@normalizer +def column_stack(tup, *, dtype : DTypeLike=None, casting="same_kind"): # XXX: in numpy 1.24 column_stack does not have dtype and casting keywords # but row_stack does. (because row_stack is an alias for vstack, really). # Hence add these keywords here for consistency. 
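To make the DTypeLike flow concrete, here is a rough standalone analogue of
normalize_dtype (the real one routes through _dtypes.dtype; the mapping below is
trimmed for illustration and the names are not part of the patch):

    import torch

    _SKETCH = {"float64": torch.float64, "float32": torch.float32, "int64": torch.int64}

    def normalize_dtype_sketch(dtype):
        # None passes through, so the callee keeps its own default-dtype logic
        return None if dtype is None else _SKETCH[dtype]

    assert normalize_dtype_sketch("float64") is torch.float64
    assert normalize_dtype_sketch(None) is None

With that in place, vstack(tup, dtype="float64") hands _impl.vstack a genuine
torch.float64 without any per-function decorator.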
@@ -155,8 +158,8 @@ def column_stack(tup, *, dtype=None, casting="same_kind"): return asarray(result) -@_decorators.dtype_to_torch -def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): +@normalizer +def stack(arrays, axis=0, out=None, *, dtype : DTypeLike=None, casting="same_kind"): tensors = _helpers.to_tensors(*arrays) _concat_check(tensors, dtype, out=out) result = _impl.stack(tensors, axis=axis, out=out, dtype=dtype, casting=casting) From ffe46fa196376800a9660b7015728907ae9874f0 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Fri, 3 Mar 2023 19:40:15 +0300 Subject: [PATCH 04/33] normalize Optional[ArrayLike] via annotations --- torch_np/_detail/implementations.py | 6 ------ torch_np/_funcs.py | 15 +++++++++++---- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/torch_np/_detail/implementations.py b/torch_np/_detail/implementations.py index 315b28b0..627e93dd 100644 --- a/torch_np/_detail/implementations.py +++ b/torch_np/_detail/implementations.py @@ -261,12 +261,6 @@ def dsplit(tensor, indices_or_sections): def clip(tensor, t_min, t_max): - if t_min is not None: - t_min = torch.broadcast_to(t_min, tensor.shape) - - if t_max is not None: - t_max = torch.broadcast_to(t_max, tensor.shape) - if t_min is None and t_max is None: raise ValueError("One of max or min must be given") diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index c26d8be7..a131c720 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -1,4 +1,5 @@ import typing +from typing import Optional import torch @@ -23,6 +24,12 @@ def normalize_array_like(x, name=None): return tensor +def normalize_optional_array_like(x, name=None): + # This explicit normalizer is needed because otherwise normalize_array_like + # does not run for a parameter annotated as Optional[ArrayLike] + return None if x is None else normalize_array_like(x, name) + + def normalize_dtype(dtype, name=None): # cf _decorators.dtype_to_torch torch_dtype = None @@ -39,6 +46,7 @@ def normalize_subok_like(arg, name): normalizers = { ArrayLike: normalize_array_like, + Optional[ArrayLike]: normalize_optional_array_like, DTypeLike: normalize_dtype, SubokLike: normalize_subok_like, } @@ -121,12 +129,11 @@ def argwhere(a): return _helpers.array_from(result) -def clip(a, min=None, max=None, out=None): +@normalizer +def clip(a : ArrayLike, min : Optional[ArrayLike]=None, max : Optional[ArrayLike]=None, out=None): # np.clip requires both a_min and a_max not None, while ndarray.clip allows # one of them to be None. Follow the more lax version. - # Also min/max as arg names: follow numpy naming. 
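    # (aside: Optional[ArrayLike] is typing.Union[ArrayLike, None] -- a different
    # object from bare ArrayLike, which is why the `normalizers` registry above
    # needs a separate entry for it.  A two-line check, illustrative only:
    #     assert typing.Optional[ArrayLike] == typing.Union[ArrayLike, None]
    #     assert typing.Optional[ArrayLike] != ArrayLike
    # without that entry, clip(a) with min=None would hand None straight to the
    # torch layer.)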
- tensor, t_min, t_max = _helpers.to_tensors_or_none(a, min, max) - result = _impl.clip(tensor, t_min, t_max) + result = _impl.clip(a, min, max) return _helpers.result_or_out(result, out) From 352f7154b6ae597513796ff21cf3c12647887b6c Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Fri, 3 Mar 2023 20:18:30 +0300 Subject: [PATCH 05/33] MAINT: use normalizer/ArrayLike in _funcs --- torch_np/_funcs.py | 137 ++++++++++++++++++++++----------------------- 1 file changed, 68 insertions(+), 69 deletions(-) diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index a131c720..0c3ed962 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -137,31 +137,29 @@ def clip(a : ArrayLike, min : Optional[ArrayLike]=None, max : Optional[ArrayLike return _helpers.result_or_out(result, out) -def repeat(a, repeats, axis=None): - tensor, t_repeats = _helpers.to_tensors(a, repeats) # XXX: scalar repeats - result = torch.repeat_interleave(tensor, t_repeats, axis) +@normalizer +def repeat(a : ArrayLike, repeats: ArrayLike, axis=None): + # XXX: scalar repeats; ArrayLikeOrScalar ? + result = torch.repeat_interleave(a, repeats, axis) return _helpers.array_from(result) # ### diag et al ### - -def diagonal(a, offset=0, axis1=0, axis2=1): - (tensor,) = _helpers.to_tensors(a) - result = _impl.diagonal(tensor, offset, axis1, axis2) +@normalizer +def diagonal(a : ArrayLike, offset=0, axis1=0, axis2=1): + result = _impl.diagonal(a, offset, axis1, axis2) return _helpers.array_from(result) @normalizer def trace(a: ArrayLike, offset=0, axis1=0, axis2=1, dtype: DTypeLike = None, out=None): - # (tensor,) = _helpers.to_tensors(a) result = _impl.trace(a, offset, axis1, axis2, dtype) return _helpers.result_or_out(result, out) @normalizer def eye(N, M=None, k=0, dtype: DTypeLike = float, order="C", *, like: SubokLike = None): - # _util.subok_not_ok(like) if order != "C": raise NotImplementedError result = _impl.eye(N, M, k, dtype) @@ -170,20 +168,19 @@ def eye(N, M=None, k=0, dtype: DTypeLike = float, order="C", *, like: SubokLike @normalizer def identity(n, dtype: DTypeLike = None, *, like: SubokLike = None): - ## _util.subok_not_ok(like) result = torch.eye(n, dtype=dtype) return _helpers.array_from(result) -def diag(v, k=0): - (tensor,) = _helpers.to_tensors(v) - result = torch.diag(tensor, k) +@normalizer +def diag(v : ArrayLike, k=0): + result = torch.diag(v, k) return _helpers.array_from(result) -def diagflat(v, k=0): - (tensor,) = _helpers.to_tensors(v) - result = torch.diagflat(tensor, k) +@normalizer +def diagflat(v : ArrayLike, k=0): + result = torch.diagflat(v, k) return _helpers.array_from(result) @@ -192,68 +189,70 @@ def diag_indices(n, ndim=2): return _helpers.tuple_arrays_from(result) -def diag_indices_from(arr): - (tensor,) = _helpers.to_tensors(arr) - result = _impl.diag_indices_from(tensor) +@normalizer +def diag_indices_from(arr : ArrayLike): + result = _impl.diag_indices_from(arr) return _helpers.tuple_arrays_from(result) -def fill_diagonal(a, val, wrap=False): - tensor, t_val = _helpers.to_tensors(a, val) - result = _impl.fill_diagonal(tensor, t_val, wrap) +@normalizer +def fill_diagonal(a : ArrayLike, val : ArrayLike, wrap=False): + result = _impl.fill_diagonal(a, val, wrap) return _helpers.array_from(result) -def vdot(a, b, /): - t_a, t_b = _helpers.to_tensors(a, b) - result = _impl.vdot(t_a, t_b) +@normalizer +def vdot(a : ArrayLike, b : ArrayLike, /): +# t_a, t_b = _helpers.to_tensors(a, b) + result = _impl.vdot(a, b) return result.item() -def dot(a, b, out=None): - t_a, t_b = _helpers.to_tensors(a, 
b) - result = _impl.dot(t_a, t_b) +@normalizer +def dot(a : ArrayLike, b : ArrayLike, out=None): +# t_a, t_b = _helpers.to_tensors(a, b) + result = _impl.dot(a, b) return _helpers.result_or_out(result, out) # ### sort and partition ### -def sort(a, axis=-1, kind=None, order=None): - (tensor,) = _helpers.to_tensors(a) - result = _impl.sort(tensor, axis, kind, order) +@normalizer +def sort(a : ArrayLike, axis=-1, kind=None, order=None): + result = _impl.sort(a, axis, kind, order) return _helpers.array_from(result) -def argsort(a, axis=-1, kind=None, order=None): - (tensor,) = _helpers.to_tensors(a) - result = _impl.argsort(tensor, axis, kind, order) +@normalizer +def argsort(a : ArrayLike, axis=-1, kind=None, order=None): + result = _impl.argsort(a, axis, kind, order) return _helpers.array_from(result) -def searchsorted(a, v, side="left", sorter=None): - a_t, v_t, sorter_t = _helpers.to_tensors_or_none(a, v, sorter) - result = torch.searchsorted(a_t, v_t, side=side, sorter=sorter_t) +@normalizer +def searchsorted(a : ArrayLike, v : ArrayLike, side="left", sorter : Optional[ArrayLike]=None): + result = torch.searchsorted(a, v, side=side, sorter=sorter) return _helpers.array_from(result) # ### swap/move/roll axis ### -def moveaxis(a, source, destination): - (tensor,) = _helpers.to_tensors(a) - result = _impl.moveaxis(tensor, source, destination) +@normalizer +def moveaxis(a : ArrayLike, source, destination): + result = _impl.moveaxis(a, source, destination) return _helpers.array_from(result) -def swapaxes(a, axis1, axis2): - (tensor,) = _helpers.to_tensors(a) - result = _flips.swapaxes(tensor, axis1, axis2) +@normalizer +def swapaxes(a : ArrayLike, axis1, axis2): + result = _flips.swapaxes(a, axis1, axis2) return _helpers.array_from(result) -def rollaxis(a, axis, start=0): - (tensor,) = _helpers.to_tensors(a) +@normalizer +def rollaxis(a : ArrayLike, axis, start=0): result = _flips.rollaxis(a, axis, start) return _helpers.array_from(result) @@ -261,55 +260,55 @@ def rollaxis(a, axis, start=0): # ### shape manipulations ### -def squeeze(a, axis=None): - (tensor,) = _helpers.to_tensors(a) - result = _impl.squeeze(tensor, axis) +@normalizer +def squeeze(a : ArrayLike, axis=None): + result = _impl.squeeze(a, axis) return _helpers.array_from(result, a) -def reshape(a, newshape, order="C"): - (tensor,) = _helpers.to_tensors(a) - result = _impl.reshape(tensor, newshape, order=order) +@normalizer +def reshape(a : ArrayLike, newshape, order="C"): + result = _impl.reshape(a, newshape, order=order) return _helpers.array_from(result, a) -def transpose(a, axes=None): - (tensor,) = _helpers.to_tensors(a) - result = _impl.transpose(tensor, axes) +@normalizer +def transpose(a : ArrayLike, axes=None): + result = _impl.transpose(a, axes) return _helpers.array_from(result, a) -def ravel(a, order="C"): - (tensor,) = _helpers.to_tensors(a) - result = _impl.ravel(tensor) +@normalizer +def ravel(a : ArrayLike, order="C"): + result = _impl.ravel(a) return _helpers.array_from(result, a) # leading underscore since arr.flatten exists but np.flatten does not -def _flatten(a, order="C"): - (tensor,) = _helpers.to_tensors(a) - result = _impl._flatten(tensor) +@normalizer +def _flatten(a : ArrayLike, order="C"): + result = _impl._flatten(a) return _helpers.array_from(result, a) # ### Type/shape etc queries ### -def real(a): - (tensor,) = _helpers.to_tensors(a) - result = torch.real(tensor) +@normalizer +def real(a : ArrayLike): + result = torch.real(a) return _helpers.array_from(result) -def imag(a): - (tensor,) = 
_helpers.to_tensors(a) - result = _impl.imag(tensor) +@normalizer +def imag(a: ArrayLike): + result = _impl.imag(a) return _helpers.array_from(result) -def round_(a, decimals=0, out=None): - (tensor,) = _helpers.to_tensors(a) - result = _impl.round(tensor, decimals) +@normalizer +def round_(a : ArrayLike, decimals=0, out=None): + result = _impl.round(a, decimals) return _helpers.result_or_out(result, out) From 7d268710cc8605d19349e997a70705597a9169d4 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Fri, 3 Mar 2023 20:56:42 +0300 Subject: [PATCH 06/33] MAINT: modify tests arr.base --> arr.get()._base --- torch_np/tests/test_ndarray_methods.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/torch_np/tests/test_ndarray_methods.py b/torch_np/tests/test_ndarray_methods.py index 51608dfd..66fa4e58 100644 --- a/torch_np/tests/test_ndarray_methods.py +++ b/torch_np/tests/test_ndarray_methods.py @@ -17,7 +17,7 @@ def test_indexing_simple(self): assert isinstance(a[0, 0], np.ndarray) assert isinstance(a[0, :], np.ndarray) - assert a[0, :].base is a + assert a[0, :].get()._base is a.get() def test_setitem(self): a = np.array([[1, 2, 3], [4, 5, 6]]) @@ -33,7 +33,7 @@ def test_reshape_function(self): assert np.all(np.reshape(arr, (2, 6)) == tgt) arr = np.asarray(arr) - assert np.transpose(arr, (1, 0)).base is arr + assert np.transpose(arr, (1, 0)).get()._base is arr.get() def test_reshape_method(self): arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) @@ -43,24 +43,24 @@ def test_reshape_method(self): # reshape(*shape_tuple) assert np.all(arr.reshape(2, 6) == tgt) - assert arr.reshape(2, 6).base is arr # reshape keeps the base + assert arr.reshape(2, 6).get()._base is arr.get() # reshape keeps the base assert arr.shape == arr_shape # arr is intact # XXX: move out to dedicated test(s) - assert arr.reshape(2, 6)._tensor._base is arr._tensor + assert arr.reshape(2, 6).get()._base is arr.get() # reshape(shape_tuple) assert np.all(arr.reshape((2, 6)) == tgt) - assert arr.reshape((2, 6)).base is arr + assert arr.reshape((2, 6)).get()._base is arr.get() assert arr.shape == arr_shape tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] assert np.all(arr.reshape(3, 4) == tgt) - assert arr.reshape(3, 4).base is arr + assert arr.reshape(3, 4).get()._base is arr.get() assert arr.shape == arr_shape assert np.all(arr.reshape((3, 4)) == tgt) - assert arr.reshape((3, 4)).base is arr + assert arr.reshape((3, 4)).get()._base is arr.get() assert arr.shape == arr_shape @@ -82,7 +82,7 @@ def test_transpose_function(self): assert_equal(np.transpose(arr, (1, 0)), tgt) arr = np.asarray(arr) - assert np.transpose(arr, (1, 0)).base is arr + assert np.transpose(arr, (1, 0)).get()._base is arr.get() def test_transpose_method(self): a = np.array([[1, 2], [3, 4]]) @@ -92,7 +92,7 @@ def test_transpose_method(self): assert_raises(ValueError, lambda: a.transpose(0, 0)) assert_raises(ValueError, lambda: a.transpose(0, 1, 2)) - assert a.transpose().base is a + assert a.transpose().get()._base is a.get() class TestRavel: @@ -102,13 +102,13 @@ def test_ravel_function(self): assert_equal(np.ravel(a), tgt) arr = np.asarray(a) - assert np.ravel(arr).base is arr + assert np.ravel(arr).get()._base is arr.get() def test_ravel_method(self): a = np.array([[0, 1], [2, 3]]) assert_equal(a.ravel(), [0, 1, 2, 3]) - assert a.ravel().base is a + assert a.ravel().get()._base is a.get() class TestNonzero: From ce9861a3c96119961d64ed55a289720bcace3aa9 Mon Sep 17 00:00:00 2001 From: Evgeni 
Burovski Date: Sat, 4 Mar 2023 00:04:34 +0300 Subject: [PATCH 07/33] BUG: handle positional-only parameters in @ normalize --- torch_np/_funcs.py | 10 ++++++---- torch_np/tests/numpy_tests/lib/test_shape_base_.py | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index 0c3ed962..ef56507a 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -59,16 +59,18 @@ def normalizer(func): def wrapped(*args, **kwds): sig = inspect.signature(func) - dct = {} + lst, dct = [], {} # loop over positional parameters and actual arguments for arg, (name, parm) in zip(args, sig.parameters.items()): print(arg, name, parm.annotation) normalizer = normalizers.get(parm.annotation, None) if normalizer: - dct[name] = normalizer(arg, name) + # dct[name] = normalizer(arg, name) + lst.append(normalizer(arg)) else: # untyped arguments pass through - dct[name] = arg + # dct[name] = arg + lst.append(arg) # normalize keyword arguments for name, arg in kwds.items(): @@ -86,7 +88,7 @@ def wrapped(*args, **kwds): else: dct[name] = arg - ba = sig.bind(**dct) + ba = sig.bind(*lst, **dct) ba.apply_defaults() # Now that all parameters have been consumed, check: diff --git a/torch_np/tests/numpy_tests/lib/test_shape_base_.py b/torch_np/tests/numpy_tests/lib/test_shape_base_.py index 63aa0b24..70d2b261 100644 --- a/torch_np/tests/numpy_tests/lib/test_shape_base_.py +++ b/torch_np/tests/numpy_tests/lib/test_shape_base_.py @@ -597,7 +597,7 @@ def test_basic(self): assert type(res) is np.ndarray aa = np.ones((3, 1, 4, 1, 1)) - assert aa.squeeze().base is aa + assert aa.squeeze().get()._base is aa.get() def test_squeeze_axis(self): A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] From eec7bc38193b3cea6fc25ea87675385268fad480 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 4 Mar 2023 00:47:05 +0300 Subject: [PATCH 08/33] MAINT: remove to_tensors_or_none, use Optional[ArrayLike] instead --- torch_np/_helpers.py | 6 -- torch_np/_wrapper.py | 67 ++++++++----------- .../numpy_tests/lib/test_function_base.py | 2 +- 3 files changed, 29 insertions(+), 46 deletions(-) diff --git a/torch_np/_helpers.py b/torch_np/_helpers.py index e40a6473..d7a13c80 100644 --- a/torch_np/_helpers.py +++ b/torch_np/_helpers.py @@ -118,9 +118,3 @@ def to_tensors(*inputs): return tuple(asarray(value).get() for value in inputs) - -def to_tensors_or_none(*inputs): - """Convert all array_likes from `inputs` to tensors. Nones pass through""" - from ._ndarray import asarray, ndarray - - return tuple(None if value is None else asarray(value).get() for value in inputs) diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py index e0860ab6..8247bd02 100644 --- a/torch_np/_wrapper.py +++ b/torch_np/_wrapper.py @@ -109,7 +109,8 @@ def _concat_check(tup, dtype, out): ### XXX: order the imports DAG -from . _funcs import normalizer, DTypeLike +from . 
_funcs import normalizer, DTypeLike, ArrayLike +from typing import Optional @normalizer def concatenate(ar_tuple, axis=0, out=None, dtype: DTypeLike=None, casting="same_kind"): @@ -368,55 +369,46 @@ def _xy_helper_corrcoef(x_tensor, y_tensor=None, rowvar=True): return x_tensor -@_decorators.dtype_to_torch -def corrcoef(x, y=None, rowvar=True, bias=NoValue, ddof=NoValue, *, dtype=None): +#@_decorators.dtype_to_torch +@normalizer +def corrcoef(x : ArrayLike, y : Optional[ArrayLike]=None, rowvar=True, bias=NoValue, ddof=NoValue, *, dtype : DTypeLike=None): if bias is not None or ddof is not None: # deprecated in NumPy raise NotImplementedError - - x_tensor, y_tensor = _helpers.to_tensors_or_none(x, y) - tensor = _xy_helper_corrcoef(x_tensor, y_tensor, rowvar) + tensor = _xy_helper_corrcoef(x, y, rowvar) result = _impl.corrcoef(tensor, dtype=dtype) return asarray(result) -@_decorators.dtype_to_torch +@normalizer def cov( - m, - y=None, + m : ArrayLike, + y : Optional[ArrayLike]=None, rowvar=True, bias=False, ddof=None, - fweights=None, - aweights=None, + fweights : Optional[ArrayLike]=None, + aweights : Optional[ArrayLike]=None, *, - dtype=None, + dtype : DTypeLike=None, ): - - m_tensor, y_tensor, fweights_tensor, aweights_tensor = _helpers.to_tensors_or_none( - m, y, fweights, aweights - ) - m_tensor = _xy_helper_corrcoef(m_tensor, y_tensor, rowvar) - - result = _impl.cov( - m_tensor, bias, ddof, fweights_tensor, aweights_tensor, dtype=dtype - ) + m = _xy_helper_corrcoef(m, y, rowvar) + result = _impl.cov(m, bias, ddof, fweights, aweights, dtype=dtype) return asarray(result) -def bincount(x, /, weights=None, minlength=0): - if not isinstance(x, ndarray) and x == []: +@normalizer +def bincount(x : ArrayLike, /, weights : Optional[ArrayLike]=None, minlength=0): + if x.numel() == 0: # edge case allowed by numpy - x = asarray([], dtype=int) - - x_tensor, weights_tensor = _helpers.to_tensors_or_none(x, weights) - result = _impl.bincount(x_tensor, weights_tensor, minlength) + x = torch.as_tensor([], dtype=int) + result = _impl.bincount(x, weights, minlength) return asarray(result) -def where(condition, x=None, y=None, /): - cond_t, x_t, y_t = _helpers.to_tensors_or_none(condition, x, y) - result = _impl.where(cond_t, x_t, y_t) +@normalizer +def where(condition : ArrayLike, x : Optional[ArrayLike]=None, y: Optional[ArrayLike]=None, /): + result = _impl.where(condition, x, y) if isinstance(result, tuple): # single-argument where(condition) return tuple(asarray(x) for x in result) @@ -840,22 +832,19 @@ def nanpercentile(): raise NotImplementedError -def diff(a, n=1, axis=-1, prepend=NoValue, append=NoValue): +@normalizer +def diff(a : ArrayLike, n=1, axis=-1, prepend : Optional[ArrayLike]=NoValue, append : Optional[ArrayLike]=NoValue): if n == 0: # match numpy and return the input immediately - return a - - a_tensor, prepend_tensor, append_tensor = _helpers.to_tensors_or_none( - a, prepend, append - ) + return asarray(a) result = _impl.diff( - a_tensor, + a, n=n, axis=axis, - prepend_tensor=prepend_tensor, - append_tensor=append_tensor, + prepend_tensor=prepend, + append_tensor=append, ) return asarray(result) diff --git a/torch_np/tests/numpy_tests/lib/test_function_base.py b/torch_np/tests/numpy_tests/lib/test_function_base.py index 4bb19689..c6ea3eaf 100644 --- a/torch_np/tests/numpy_tests/lib/test_function_base.py +++ b/torch_np/tests/numpy_tests/lib/test_function_base.py @@ -732,7 +732,7 @@ def test_n(self): assert_raises(ValueError, diff, x, n=-1) output = [diff(x, n=n) for n in range(1, 5)] 
expected = [[1, 1], [0], [], []] - assert_(diff(x, n=0) is x) + ## assert_(diff(x, n=0) is x) for n, (expected, out) in enumerate(zip(expected, output), start=1): assert_(type(out) is np.ndarray) assert_array_equal(out, expected) From 0c98dfb04267e535cc5c3449034665df8b7613db Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 4 Mar 2023 10:42:02 +0300 Subject: [PATCH 09/33] ENH: normalize tuples of array_likes --- torch_np/_funcs.py | 8 +++++++- torch_np/_wrapper.py | 42 ++++++++++++++++++------------------------ 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index ef56507a..de3e7a0d 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -1,5 +1,5 @@ import typing -from typing import Optional +from typing import Optional, Sequence import torch @@ -30,6 +30,11 @@ def normalize_optional_array_like(x, name=None): return None if x is None else normalize_array_like(x, name) +def normalize_seq_array_like(x, name=None): + tensors = _helpers.to_tensors(*x) + return tensors + + def normalize_dtype(dtype, name=None): # cf _decorators.dtype_to_torch torch_dtype = None @@ -47,6 +52,7 @@ def normalize_subok_like(arg, name): normalizers = { ArrayLike: normalize_array_like, Optional[ArrayLike]: normalize_optional_array_like, + Sequence[ArrayLike]: normalize_seq_array_like, DTypeLike: normalize_dtype, SubokLike: normalize_subok_like, } diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py index 8247bd02..7cf31414 100644 --- a/torch_np/_wrapper.py +++ b/torch_np/_wrapper.py @@ -110,21 +110,19 @@ def _concat_check(tup, dtype, out): ### XXX: order the imports DAG from . _funcs import normalizer, DTypeLike, ArrayLike -from typing import Optional +from typing import Optional, Sequence @normalizer -def concatenate(ar_tuple, axis=0, out=None, dtype: DTypeLike=None, casting="same_kind"): - tensors = _helpers.to_tensors(*ar_tuple) - _concat_check(tensors, dtype, out=out) - result = _impl.concatenate(tensors, axis, out, dtype, casting) +def concatenate(ar_tuple : Sequence[ArrayLike], axis=0, out=None, dtype: DTypeLike=None, casting="same_kind"): + _concat_check(ar_tuple, dtype, out=out) + result = _impl.concatenate(ar_tuple, axis, out, dtype, casting) return _helpers.result_or_out(result, out) @normalizer -def vstack(tup, *, dtype : DTypeLike=None, casting="same_kind"): - tensors = _helpers.to_tensors(*tup) - _concat_check(tensors, dtype, out=None) - result = _impl.vstack(tensors, dtype=dtype, casting=casting) +def vstack(tup : Sequence[ArrayLike], *, dtype : DTypeLike=None, casting="same_kind"): + _concat_check(tup, dtype, out=None) + result = _impl.vstack(tup, dtype=dtype, casting=casting) return asarray(result) @@ -132,38 +130,34 @@ def vstack(tup, *, dtype : DTypeLike=None, casting="same_kind"): @normalizer -def hstack(tup, *, dtype : DTypeLike=None, casting="same_kind"): - tensors = _helpers.to_tensors(*tup) - _concat_check(tensors, dtype, out=None) - result = _impl.hstack(tensors, dtype=dtype, casting=casting) +def hstack(tup: Sequence[ArrayLike], *, dtype : DTypeLike=None, casting="same_kind"): + _concat_check(tup, dtype, out=None) + result = _impl.hstack(tup, dtype=dtype, casting=casting) return asarray(result) @normalizer -def dstack(tup, *, dtype : DTypeLike=None, casting="same_kind"): +def dstack(tup: Sequence[ArrayLike], *, dtype : DTypeLike=None, casting="same_kind"): # XXX: in numpy 1.24 dstack does not have dtype and casting keywords # but {h,v}stack do. Hence add them here for consistency. 
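    # (a rough model of what the Sequence[ArrayLike] annotation now does to `tup`,
    # assuming torch-only element types for brevity -- illustrative, not patch code:
    #     def normalize_seq_sketch(x):
    #         return tuple(torch.as_tensor(item) for item in x)
    #     normalize_seq_sketch(([1, 2], (3.0, 4.0)))
    #     # -> (tensor([1, 2]), tensor([3., 4.]))
    # each element is tensor-ified before the body runs, so the explicit
    # to_tensors call below can be dropped.)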
-    tensors = _helpers.to_tensors(*tup)
-    result = _impl.dstack(tensors, dtype=dtype, casting=casting)
+    result = _impl.dstack(tup, dtype=dtype, casting=casting)
    return asarray(result)


 @normalizer
-def column_stack(tup, *, dtype : DTypeLike=None, casting="same_kind"):
+def column_stack(tup : Sequence[ArrayLike], *, dtype : DTypeLike=None, casting="same_kind"):
    # XXX: in numpy 1.24 column_stack does not have dtype and casting keywords
    # but row_stack does. (because row_stack is an alias for vstack, really).
    # Hence add these keywords here for consistency.
-    tensors = _helpers.to_tensors(*tup)
-    _concat_check(tensors, dtype, out=None)
-    result = _impl.column_stack(tensors, dtype=dtype, casting=casting)
+    _concat_check(tup, dtype, out=None)
+    result = _impl.column_stack(tup, dtype=dtype, casting=casting)
    return asarray(result)


 @normalizer
-def stack(arrays, axis=0, out=None, *, dtype : DTypeLike=None, casting="same_kind"):
+def stack(arrays : Sequence[ArrayLike], axis=0, out=None, *, dtype : DTypeLike=None, casting="same_kind"):
-    tensors = _helpers.to_tensors(*arrays)
-    _concat_check(tensors, dtype, out=out)
-    result = _impl.stack(tensors, axis=axis, out=out, dtype=dtype, casting=casting)
+    _concat_check(arrays, dtype, out=out)
+    result = _impl.stack(arrays, axis=axis, out=out, dtype=dtype, casting=casting)
    return _helpers.result_or_out(result, out)

From f5731e58a7132ac69bb48d2074658992e8186656 Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Sat, 4 Mar 2023 12:20:25 +0300
Subject: [PATCH 10/33] ENH: annotate *args

This is a bit clumsy: func(*args : Annotation) gives a single annotation
for a runtime-determined number of arguments. There is no way to annotate
individual elements of *args AFAICS.

Thus register a special annotation to repack args into a tuple, and a
normalizer to normalize this tuple.
---
 torch_np/_funcs.py   | 47 +++++++++++++++++++++++++++++++-------------
 torch_np/_wrapper.py | 27 +++++++++++++------------
 2 files changed, 47 insertions(+), 27 deletions(-)

diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py
index de3e7a0d..edf18087 100644
--- a/torch_np/_funcs.py
+++ b/torch_np/_funcs.py
@@ -13,6 +13,9 @@
 DTypeLike = typing.TypeVar("DTypeLike")
 SubokLike = typing.TypeVar("SubokLike")

+# annotate e.g. atleast_1d(*arys)
+UnpackedSeqArrayLike = typing.TypeVar("UnpackedSeqArrayLike")
+

 import inspect

@@ -53,6 +56,7 @@ def normalize_subok_like(arg, name):
     ArrayLike: normalize_array_like,
     Optional[ArrayLike]: normalize_optional_array_like,
     Sequence[ArrayLike]: normalize_seq_array_like,
+    UnpackedSeqArrayLike: normalize_seq_array_like,  # cf handling in normalize
     DTypeLike: normalize_dtype,
     SubokLike: normalize_subok_like,
 }
@@ -60,23 +64,41 @@
 import functools


+def normalize_this(arg, parm):
+    """Normalize arg if a normalizer is registered."""
+    normalizer = normalizers.get(parm.annotation, None)
+    if normalizer:
+        return normalizer(arg)
+    else:
+        # untyped arguments pass through
+        return arg
+
+
 def normalizer(func):
     @functools.wraps(func)
     def wrapped(*args, **kwds):
         sig = inspect.signature(func)

-        lst, dct = [], {}
+        # first, check for *args in positional parameters. Case in point:
+        # atleast_1d(*arys: UnpackedSeqArrayLike)
+        # if found, consume all args into a tuple to normalize as a whole
+        for j, param in enumerate(sig.parameters.values()):
+            if param.annotation == UnpackedSeqArrayLike:
+                if j == 0:
+                    args = (args,)
+                else:
+                    # args = args[:j] + (args[j:],) would likely work
+                    # not present in numpy codebase, so do not bother just yet.
+                    # NB: branching on j == 0 is to avoid the empty tuple, args[:j]
+                    raise NotImplementedError
+                break
+
        # loop over positional parameters and actual arguments
+        lst, dct = [], {}
        for arg, (name, parm) in zip(args, sig.parameters.items()):
            print(arg, name, parm.annotation)
            lst.append(normalize_this(arg, parm))

        # normalize keyword arguments
        for name, arg in kwds.items():
@@ -88,11 +110,7 @@ def wrapped(*args, **kwds):

            print("kw: ", name, sig.parameters[name].annotation)
            parm = sig.parameters[name]
-            normalizer = normalizers.get(parm.annotation, None)
-            if normalizer:
-                dct[name] = normalizer(kwds[name], name)
-            else:
-                dct[name] = arg
+            dct[name] = normalize_this(arg, parm)

        ba = sig.bind(*lst, **dct)
        ba.apply_defaults()
@@ -113,6 +131,7 @@ def wrapped(*args, **kwds):
        # 5. axes : live in _impl or in types? several ways of handling them
        # 6. keepdims : peel off, postprocess
        # 7. OutLike : normal & keyword-only, peel off, postprocess
+        # 8. *args

        # finally, pass normalized arguments through
        result = func(*ba.args, **ba.kwargs)

diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py
index 7cf31414..06a2fa79 100644
--- a/torch_np/_wrapper.py
+++ b/torch_np/_wrapper.py
@@ -13,6 +13,11 @@
 from . import _dtypes, _helpers, _decorators  # isort: skip  # XXX

+### XXX: order the imports DAG
+from . 
_funcs import normalizer, DTypeLike, ArrayLike -from typing import Optional, Sequence - @normalizer def concatenate(ar_tuple : Sequence[ArrayLike], axis=0, out=None, dtype: DTypeLike=None, casting="same_kind"): _concat_check(ar_tuple, dtype, out=out) From b7112e36b9ba6986ae73bf515621278cc88b32ca Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 4 Mar 2023 14:57:37 +0300 Subject: [PATCH 11/33] MAINT: use normalizations across namespace functions nuke asarray_replacer as no longer used. --- torch_np/_funcs.py | 3 +- torch_np/_ndarray.py | 19 --- torch_np/_wrapper.py | 331 +++++++++++++++++++++---------------------- 3 files changed, 164 insertions(+), 189 deletions(-) diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index edf18087..7b44a00e 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -131,7 +131,7 @@ def wrapped(*args, **kwds): # 5. axes : live in _impl or in types? several ways of handling them # 6. keepdims : peel off, postprocess # 7. OutLike : normal & keyword-only, peel off, postprocess - # 8. *args + # 8. [LOOKS OK] *args # finally, pass normalized arguments through result = func(*ba.args, **ba.kwargs) @@ -145,7 +145,6 @@ def wrapped(*args, **kwds): @normalizer def nonzero(a: ArrayLike): - # (tensor,) = _helpers.to_tensors(a) result = a.nonzero(as_tuple=True) return _helpers.tuple_arrays_from(result) diff --git a/torch_np/_ndarray.py b/torch_np/_ndarray.py index ea3e0739..a45b5b58 100644 --- a/torch_np/_ndarray.py +++ b/torch_np/_ndarray.py @@ -470,25 +470,6 @@ def maybe_set_base(tensor, base): return ndarray._from_tensor_and_base(tensor, base) -class asarray_replacer: - def __init__(self, dispatch="one"): - if dispatch not in ["one", "two"]: - raise ValueError("ararray_replacer: unknown dispatch %s" % dispatch) - self._dispatch = dispatch - - def __call__(self, func): - if self._dispatch == "one": - - @functools.wraps(func) - def wrapped(x, *args, **kwds): - x_tensor = asarray(x).get() - return asarray(func(x_tensor, *args, **kwds)) - - return wrapped - else: - raise ValueError - - ###### dtype routines diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py index 06a2fa79..9b87e574 100644 --- a/torch_np/_wrapper.py +++ b/torch_np/_wrapper.py @@ -9,12 +9,12 @@ from . import _funcs from ._detail import _dtypes_impl, _flips, _reductions, _util from ._detail import implementations as _impl -from ._ndarray import array, asarray, asarray_replacer, maybe_set_base, ndarray, newaxis +from ._ndarray import array, asarray, maybe_set_base, ndarray from . import _dtypes, _helpers, _decorators # isort: skip # XXX ### XXX: order the imports DAG -from . _funcs import normalizer, DTypeLike, ArrayLike, UnpackedSeqArrayLike +from . 
_funcs import normalizer, DTypeLike, ArrayLike, UnpackedSeqArrayLike, SubokLike from typing import Optional, Sequence @@ -59,13 +59,12 @@ ###### array creation routines -def copy(a, order="K", subok=False): - a = asarray(a) - _util.subok_not_ok(subok=subok) +@normalizer +def copy(a: ArrayLike, order="K", subok: SubokLike=False): if order != "K": raise NotImplementedError - # XXX: ndarray.copy only accepts order='C' - return a.copy(order="C") + tensor = a.clone() + return asarray(tensor) @normalizer @@ -197,24 +196,24 @@ def dsplit(ary, indices_or_sections): return tuple(maybe_set_base(x, base) for x in result) -def kron(a, b): - a_tensor, b_tensor = _helpers.to_tensors(a, b) - result = torch.kron(a_tensor, b_tensor) +@normalizer +def kron(a: ArrayLike, b: ArrayLike): + result = torch.kron(a, b) return asarray(result) -def tile(A, reps): - a_tensor = asarray(A).get() +@normalizer +def tile(A: ArrayLike, reps): if isinstance(reps, int): reps = (reps,) - result = torch.tile(a_tensor, reps) + result = torch.tile(A, reps) return asarray(result) -def vander(x, N=None, increasing=False): - x_tensor = asarray(x).get() - result = torch.vander(x_tensor, N, increasing) +@normalizer +def vander(x : ArrayLike, N=None, increasing=False): + result = torch.vander(x, N, increasing) return asarray(result) @@ -225,33 +224,29 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis return asarray(torch.linspace(start, stop, num, dtype=dtype)) -@_decorators.dtype_to_torch -def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): +@normalizer +def geomspace(start: ArrayLike, stop: ArrayLike, num=50, endpoint=True, dtype: DTypeLike=None, axis=0): if axis != 0 or not endpoint: raise NotImplementedError - start, stop = _helpers.to_tensors(start, stop) result = _impl.geomspace(start, stop, num, endpoint, dtype, axis) return asarray(result) -@_decorators.dtype_to_torch -def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0): +@normalizer +def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype: DTypeLike=None, axis=0): if axis != 0 or not endpoint: raise NotImplementedError return asarray(torch.logspace(start, stop, num, base=base, dtype=dtype)) -@_decorators.dtype_to_torch -def arange(start=None, stop=None, step=1, dtype=None, *, like=None): - _util.subok_not_ok(like) - start, stop, step = _helpers.ndarrays_to_tensors(start, stop, step) +@normalizer +def arange(start: Optional[ArrayLike]=None, stop: Optional[ArrayLike]=None, step: Optional[ArrayLike]=1, dtype: DTypeLike=None, *, like : SubokLike=None): result = _impl.arange(start, stop, step, dtype=dtype) return asarray(result) -@_decorators.dtype_to_torch -def empty(shape, dtype=float, order="C", *, like=None): - _util.subok_not_ok(like) +@normalizer +def empty(shape, dtype:DTypeLike=float, order="C", *, like : SubokLike=None): if order != "C": raise NotImplementedError if dtype is None: @@ -260,21 +255,18 @@ def empty(shape, dtype=float, order="C", *, like=None): return asarray(result) -# NB: *_like function deliberately deviate from numpy: it has subok=True +# NB: *_like functions deliberately deviate from numpy: it has subok=True # as the default; we set subok=False and raise on anything else. 
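In user-facing terms the SubokLike annotation makes that policy automatic; a usage
sketch, importing the package the way the test suite does:

    import torch_np as w

    x = w.ones(3)
    y = w.empty_like(x)           # fine: subok defaults to False
    w.empty_like(x, subok=True)   # ValueError: 'subok' parameter is not supported.

Every function annotated with subok: SubokLike inherits this check instead of
calling _util.subok_not_ok by hand.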
-@asarray_replacer() -@_decorators.dtype_to_torch -def empty_like(prototype, dtype=None, order="K", subok=False, shape=None): - _util.subok_not_ok(subok=subok) +@normalizer +def empty_like(prototype : ArrayLike, dtype : DTypeLike=None, order="K", subok : SubokLike=False, shape=None): if order != "K": raise NotImplementedError result = _impl.empty_like(prototype, dtype=dtype, shape=shape) - return result + return asarray(result) -@_decorators.dtype_to_torch -def full(shape, fill_value, dtype=None, order="C", *, like=None): - _util.subok_not_ok(like) +@normalizer +def full(shape, fill_value, dtype:DTypeLike=None, order="C", *, like : SubokLike=None): if isinstance(shape, int): shape = (shape,) if order != "C": @@ -284,19 +276,16 @@ def full(shape, fill_value, dtype=None, order="C", *, like=None): return asarray(result) -@asarray_replacer() -@_decorators.dtype_to_torch -def full_like(a, fill_value, dtype=None, order="K", subok=False, shape=None): - _util.subok_not_ok(subok=subok) +@normalizer +def full_like(a: ArrayLike, fill_value, dtype : DTypeLike=None, order="K", subok: SubokLike=False, shape=None): if order != "K": raise NotImplementedError result = _impl.full_like(a, fill_value, dtype=dtype, shape=shape) - return result + return asarray(result) -@_decorators.dtype_to_torch -def ones(shape, dtype=None, order="C", *, like=None): - _util.subok_not_ok(like) +@normalizer +def ones(shape, dtype: DTypeLike=None, order="C", *, like : SubokLike=None): if order != "C": raise NotImplementedError if dtype is None: @@ -305,19 +294,16 @@ def ones(shape, dtype=None, order="C", *, like=None): return asarray(result) -@asarray_replacer() -@_decorators.dtype_to_torch -def ones_like(a, dtype=None, order="K", subok=False, shape=None): - _util.subok_not_ok(subok=subok) +@normalizer +def ones_like(a: ArrayLike, dtype : DTypeLike=None, order="K", subok: SubokLike=False, shape=None): if order != "K": raise NotImplementedError result = _impl.ones_like(a, dtype=dtype, shape=shape) - return result + return asarray(result) -@_decorators.dtype_to_torch -def zeros(shape, dtype=None, order="C", *, like=None): - _util.subok_not_ok(like) +@normalizer +def zeros(shape, dtype: DTypeLike=None, order="C", *, like: SubokLike=None): if order != "C": raise NotImplementedError if dtype is None: @@ -326,14 +312,12 @@ def zeros(shape, dtype=None, order="C", *, like=None): return asarray(result) -@asarray_replacer() -@_decorators.dtype_to_torch -def zeros_like(a, dtype=None, order="K", subok=False, shape=None): - _util.subok_not_ok(subok=subok) +@normalizer +def zeros_like(a: ArrayLike, dtype : DTypeLike=None, order="K", subok: SubokLike=False, shape=None): if order != "K": raise NotImplementedError result = _impl.zeros_like(a, dtype=dtype, shape=shape) - return result + return asarray(result) ###### misc/unordered @@ -364,7 +348,6 @@ def _xy_helper_corrcoef(x_tensor, y_tensor=None, rowvar=True): return x_tensor -#@_decorators.dtype_to_torch @normalizer def corrcoef(x : ArrayLike, y : Optional[ArrayLike]=None, rowvar=True, bias=NoValue, ddof=NoValue, *, dtype : DTypeLike=None): if bias is not None or ddof is not None: @@ -434,48 +417,51 @@ def size(a, axis=None): ###### shape manipulations and indexing - -def expand_dims(a, axis): - a = asarray(a) +@normalizer +def expand_dims(a: ArrayLike, axis): shape = _util.expand_shape(a.shape, axis) - tensor = a.get().view(shape) # never copies + tensor = a.view(shape) # never copies return ndarray._from_tensor_and_base(tensor, a) -@asarray_replacer() -def flip(m, axis=None): - return 
_flips.flip(m, axis) +@normalizer +def flip(m: ArrayLike, axis=None): + result = _flips.flip(m, axis) + return asarray(result) -@asarray_replacer() -def flipud(m): - return _flips.flipud(m) +@normalizer +def flipud(m: ArrayLike): + result = _flips.flipud(m) + return asarray(result) -@asarray_replacer() -def fliplr(m): - return _flips.fliplr(m) +@normalizer +def fliplr(m: ArrayLike): + result = _flips.fliplr(m) + return asarray(result) -@asarray_replacer() -def rot90(m, k=1, axes=(0, 1)): - return _flips.rot90(m, k, axes) +@normalizer +def rot90(m: ArrayLike, k=1, axes=(0, 1)): + result = _flips.rot90(m, k, axes) + return asarray(result) -@asarray_replacer() -def broadcast_to(array, shape, subok=False): - _util.subok_not_ok(subok=subok) - return torch.broadcast_to(array, size=shape) +@normalizer +def broadcast_to(array: ArrayLike, shape, subok : SubokLike=False): + result = torch.broadcast_to(array, size=shape) + return asarray(result) from torch import broadcast_shapes # YYY: pattern: tuple of arrays as input, tuple of arrays as output; cf nonzero -def broadcast_arrays(*args, subok=False): - _util.subok_not_ok(subok=subok) - tensors = _helpers.to_tensors(*args) - res = torch.broadcast_tensors(*tensors) +@normalizer +def broadcast_arrays(*args : UnpackedSeqArrayLike, subok: SubokLike=False): + args = args[0] # undo the *args wrapping in normalizer + res = torch.broadcast_tensors(*args) return tuple(asarray(_) for _ in res) @@ -500,14 +486,15 @@ def ravel_multi_index(multi_index, dims, mode="raise", order="C"): return sum(idx * dim for idx, dim in zip(multi_index, dims)) -def meshgrid(*xi, copy=True, sparse=False, indexing="xy"): - xi_tensors = _helpers.to_tensors(*xi) - output = _impl.meshgrid(*xi_tensors, copy=copy, sparse=sparse, indexing=indexing) +@normalizer +def meshgrid(*xi : UnpackedSeqArrayLike, copy=True, sparse=False, indexing="xy"): + xi = xi[0] # undo the *xi wrapping in normalizer + output = _impl.meshgrid(*xi, copy=copy, sparse=sparse, indexing=indexing) return [asarray(t) for t in output] -@_decorators.dtype_to_torch -def indices(dimensions, dtype=int, sparse=False): +@normalizer +def indices(dimensions, dtype: DTypeLike=int, sparse=False): result = _impl.indices(dimensions, dtype=dtype, sparse=sparse) if sparse: return tuple(asarray(x) for x in result) @@ -526,21 +513,23 @@ def flatnonzero(a): count_nonzero = emulate_out_arg(axis_keepdims_wrapper(_reductions.count_nonzero)) -def roll(a, shift, axis=None): - tensor = asarray(a).get() - result = _impl.roll(tensor, shift, axis) +@normalizer +def roll(a: ArrayLike, shift, axis=None): + result = _impl.roll(a, shift, axis) return asarray(result) ###### tri{l, u} and related -@asarray_replacer() -def tril(m, k=0): - return m.tril(k) +@normalizer +def tril(m: ArrayLike, k=0): + result = m.tril(k) + return asarray(result) -@asarray_replacer() -def triu(m, k=0): - return m.triu(k) +@normalizer +def triu(m: ArrayLike, k=0): + result = m.triu(k) + return asarray(result) def tril_indices(n, k=0, m=None): @@ -553,21 +542,20 @@ def triu_indices(n, k=0, m=None): return tuple(asarray(t) for t in result) -def tril_indices_from(arr, k=0): - tensor = asarray(arr).get() - result = _impl.tril_indices_from(tensor, k) +@normalizer +def tril_indices_from(arr: ArrayLike, k=0): + result = _impl.tril_indices_from(arr, k) return tuple(asarray(t) for t in result) -def triu_indices_from(arr, k=0): - tensor = asarray(arr).get() - result = _impl.triu_indices_from(tensor, k) +@normalizer +def triu_indices_from(arr : ArrayLike, k=0): + result = 
_impl.triu_indices_from(arr, k) return tuple(asarray(t) for t in result) -@_decorators.dtype_to_torch -def tri(N, M=None, k=0, dtype=float, *, like=None): - _util.subok_not_ok(like) +@normalizer +def tri(N, M=None, k=0, dtype: DTypeLike=float, *, like : SubokLike=None): result = _impl.tri(N, M, k, dtype) return asarray(result) @@ -746,20 +734,21 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ) -def inner(a, b, /): - t_a, t_b = _helpers.to_tensors(a, b) - result = _impl.inner(t_a, t_b) +@normalizer +def inner(a: ArrayLike, b: ArrayLike, /): + result = _impl.inner(a, b) return asarray(result) -def outer(a, b, out=None): - a_t, b_t = _helpers.to_tensors(a, b) - result = torch.outer(a_t, b_t) +@normalizer +def outer(a: ArrayLike, b: ArrayLike, out=None): + result = torch.outer(a, b) return _helpers.result_or_out(result, out) -@asarray_replacer() -def nanmean(a, axis=None, dtype=None, out=None, keepdims=NoValue, *, where=NoValue): +@normalizer +def nanmean(a: ArrayLike, axis=None, dtype: DTypeLike=None, out=None, keepdims=NoValue, *, where=NoValue): + # XXX: this needs to be rewritten if where is not NoValue: raise NotImplementedError if dtype is None: @@ -772,7 +761,7 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=NoValue, *, where=NoVal result = a.nanmean(dtype=dtype, dim=axis, keepdim=bool(keepdims)) if out is not None: out.copy_(result) - return result + return asarray(result) def nanmin(): @@ -847,58 +836,63 @@ def diff(a : ArrayLike, n=1, axis=-1, prepend : Optional[ArrayLike]=NoValue, app ##### math functions -@asarray_replacer() -def angle(z, deg=False): +@normalizer +def angle(z: ArrayLike, deg=False): result = _impl.angle(z, deg) - return result + return asarray(result) -@asarray_replacer() -def sinc(x): - return torch.sinc(x) +@normalizer +def sinc(x: ArrayLike): + result = torch.sinc(x) + return asarray(result) -@asarray_replacer() -def real_if_close(a, tol=100): +@normalizer +def real_if_close(a: ArrayLike, tol=100): result = _impl.real_if_close(a, tol=tol) - return result + return asarray(result) -@asarray_replacer() -def iscomplex(x): +@normalizer +def iscomplex(x: ArrayLike): result = _impl.iscomplex(x) - return result # XXX: missing .item on a zero-dim value; a case for array_or_scalar(value) ? + return asarray(result) # XXX: missing .item on a zero-dim value; a case for array_or_scalar(value) ? 
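The XXX above concerns scalar inputs: NumPy returns a bool scalar from iscomplex(3),
while a 0-dim tensor wrapped in asarray stays a 0-dim array. One possible shape for
the array_or_scalar helper the comment floats (hypothetical, not in any of these
patches):

    def array_or_scalar(result):
        # unwrap 0-dim tensors into Python scalars, mimicking NumPy scalar returns
        if result.ndim == 0:
            return result.item()
        return asarray(result)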
-@asarray_replacer()
-def isreal(x):
+@normalizer
+def isreal(x: ArrayLike):
    result = _impl.isreal(x)
    return asarray(result)


-@asarray_replacer()
-def iscomplexobj(x):
-    return torch.is_complex(x)
+@normalizer
+def iscomplexobj(x: ArrayLike):
+    result = torch.is_complex(x)
+    return asarray(result)


-@asarray_replacer()
-def isrealobj(x):
-    return not torch.is_complex(x)
+@normalizer
+def isrealobj(x: ArrayLike):
+    result = not torch.is_complex(x)
+    return asarray(result)


-@asarray_replacer()
-def isneginf(x, out=None):
-    return torch.isneginf(x, out=out)
+@normalizer
+def isneginf(x: ArrayLike, out=None):
+    result = torch.isneginf(x, out=out)
+    return asarray(result)


-@asarray_replacer()
-def isposinf(x, out=None):
-    return torch.isposinf(x, out=out)
+@normalizer
+def isposinf(x: ArrayLike, out=None):
+    result = torch.isposinf(x, out=out)
+    return asarray(result)


-@asarray_replacer()
-def i0(x):
-    return torch.special.i0(x)
+@normalizer
+def i0(x: ArrayLike):
+    result = torch.special.i0(x)
+    return asarray(result)


 def isscalar(a):
@@ -910,27 +904,27 @@ def isscalar(a):
     return False


-def isclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):
-    a_t, b_t = _helpers.to_tensors(a, b)
-    result = _impl.isclose(a_t, b_t, rtol, atol, equal_nan=equal_nan)
+@normalizer
+def isclose(a: ArrayLike, b: ArrayLike, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):
+    result = _impl.isclose(a, b, rtol, atol, equal_nan=equal_nan)
     return asarray(result)


-def allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
-    a_t, b_t = _helpers.to_tensors(a, b)
-    result = _impl.isclose(a_t, b_t, rtol, atol, equal_nan=equal_nan)
+@normalizer
+def allclose(a: ArrayLike, b: ArrayLike, rtol=1e-05, atol=1e-08, equal_nan=False):
+    result = _impl.isclose(a, b, rtol, atol, equal_nan=equal_nan)
     return result.all()


-def array_equal(a1, a2, equal_nan=False):
-    a1_t, a2_t = _helpers.to_tensors(a1, a2)
-    result = _impl.tensor_equal(a1_t, a2_t, equal_nan)
+@normalizer
+def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan=False):
+    result = _impl.tensor_equal(a1, a2, equal_nan)
     return result


-def array_equiv(a1, a2):
-    a1_t, a2_t = _helpers.to_tensors(a1, a2)
-    result = _impl.tensor_equiv(a1_t, a2_t)
+@normalizer
+def array_equiv(a1: ArrayLike, a2: ArrayLike):
+    result = _impl.tensor_equiv(a1, a2)
     return result


@@ -953,24 +947,26 @@ def asfarray():

 # ### put/take_along_axis ###


-def take_along_axis(arr, indices, axis):
-    tensor, t_indices = _helpers.to_tensors(arr, indices)
-    result = _impl.take_along_dim(tensor, t_indices, axis)
+@normalizer
+def take_along_axis(arr: ArrayLike, indices: ArrayLike, axis):
+    result = _impl.take_along_dim(arr, indices, axis)
     return asarray(result)


-def put_along_axis(arr, indices, values, axis):
-    tensor, t_indices, t_values = _helpers.to_tensors(arr, indices, values)
-    # modify the argument in-place
-    arr._tensor = _impl.put_along_dim(tensor, t_indices, t_values, axis)
+@normalizer
+def put_along_axis(arr: ArrayLike, indices : ArrayLike, values: ArrayLike, axis):
+    # modify the argument in-place: here `arr` is the `._tensor` of the original `arr` argument
+    result = _impl.put_along_dim(arr, indices, values, axis)
+    arr.copy_(result.reshape(arr.shape))
     return None


 # ### unique et al ###


+@normalizer
 def unique(
-    ar,
+    ar : ArrayLike,
     return_index=False,
     return_inverse=False,
     return_counts=False,
     axis=None,
     *,
     equal_nan=True,
 ):
-    tensor = asarray(ar).get()
     result = _impl.unique(
-        tensor,
+        ar,
         return_index=return_index,
         return_inverse=return_inverse,
return_counts=return_counts, From 94e21dddc528a229d42137dc0b68f8519eb08cd2 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 4 Mar 2023 16:56:07 +0300 Subject: [PATCH 12/33] lint --- torch_np/_funcs.py | 57 +++--- torch_np/_helpers.py | 1 - torch_np/_ndarray.py | 1 - torch_np/_wrapper.py | 325 ++++++++++++++++++++++------------- torch_np/tests/test_basic.py | 2 - 5 files changed, 240 insertions(+), 146 deletions(-) diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index 7b44a00e..757951e9 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -56,7 +56,7 @@ def normalize_subok_like(arg, name): ArrayLike: normalize_array_like, Optional[ArrayLike]: normalize_optional_array_like, Sequence[ArrayLike]: normalize_seq_array_like, - UnpackedSeqArrayLike: normalize_seq_array_like, # cf handling in normalize + UnpackedSeqArrayLike: normalize_seq_array_like, # cf handling in normalize DTypeLike: normalize_dtype, SubokLike: normalize_subok_like, } @@ -99,7 +99,6 @@ def wrapped(*args, **kwds): print(arg, name, parm.annotation) lst.append(normalize_this(arg, parm)) - # normalize keyword arguments for name, arg in kwds.items(): if not name in sig.parameters: @@ -156,7 +155,12 @@ def argwhere(a): @normalizer -def clip(a : ArrayLike, min : Optional[ArrayLike]=None, max : Optional[ArrayLike]=None, out=None): +def clip( + a: ArrayLike, + min: Optional[ArrayLike] = None, + max: Optional[ArrayLike] = None, + out=None, +): # np.clip requires both a_min and a_max not None, while ndarray.clip allows # one of them to be None. Follow the more lax version. result = _impl.clip(a, min, max) @@ -164,7 +168,7 @@ def clip(a : ArrayLike, min : Optional[ArrayLike]=None, max : Optional[ArrayLike @normalizer -def repeat(a : ArrayLike, repeats: ArrayLike, axis=None): +def repeat(a: ArrayLike, repeats: ArrayLike, axis=None): # XXX: scalar repeats; ArrayLikeOrScalar ? 
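    # A sketch of the two cases behind that XXX, as comments (numpy semantics,
    # which torch.repeat_interleave happens to match for both):
    #   np.repeat([1, 2], 2)      -> [1, 1, 2, 2]   (scalar repeats)
    #   np.repeat([1, 2], [1, 3]) -> [1, 2, 2, 2]   (per-element repeats)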
result = torch.repeat_interleave(a, repeats, axis) return _helpers.array_from(result) @@ -172,8 +176,9 @@ def repeat(a : ArrayLike, repeats: ArrayLike, axis=None): # ### diag et al ### + @normalizer -def diagonal(a : ArrayLike, offset=0, axis1=0, axis2=1): +def diagonal(a: ArrayLike, offset=0, axis1=0, axis2=1): result = _impl.diagonal(a, offset, axis1, axis2) return _helpers.array_from(result) @@ -199,13 +204,13 @@ def identity(n, dtype: DTypeLike = None, *, like: SubokLike = None): @normalizer -def diag(v : ArrayLike, k=0): +def diag(v: ArrayLike, k=0): result = torch.diag(v, k) return _helpers.array_from(result) @normalizer -def diagflat(v : ArrayLike, k=0): +def diagflat(v: ArrayLike, k=0): result = torch.diagflat(v, k) return _helpers.array_from(result) @@ -216,27 +221,25 @@ def diag_indices(n, ndim=2): @normalizer -def diag_indices_from(arr : ArrayLike): +def diag_indices_from(arr: ArrayLike): result = _impl.diag_indices_from(arr) return _helpers.tuple_arrays_from(result) @normalizer -def fill_diagonal(a : ArrayLike, val : ArrayLike, wrap=False): +def fill_diagonal(a: ArrayLike, val: ArrayLike, wrap=False): result = _impl.fill_diagonal(a, val, wrap) return _helpers.array_from(result) @normalizer -def vdot(a : ArrayLike, b : ArrayLike, /): -# t_a, t_b = _helpers.to_tensors(a, b) +def vdot(a: ArrayLike, b: ArrayLike, /): result = _impl.vdot(a, b) return result.item() @normalizer -def dot(a : ArrayLike, b : ArrayLike, out=None): -# t_a, t_b = _helpers.to_tensors(a, b) +def dot(a: ArrayLike, b: ArrayLike, out=None): result = _impl.dot(a, b) return _helpers.result_or_out(result, out) @@ -245,19 +248,21 @@ def dot(a : ArrayLike, b : ArrayLike, out=None): @normalizer -def sort(a : ArrayLike, axis=-1, kind=None, order=None): +def sort(a: ArrayLike, axis=-1, kind=None, order=None): result = _impl.sort(a, axis, kind, order) return _helpers.array_from(result) @normalizer -def argsort(a : ArrayLike, axis=-1, kind=None, order=None): +def argsort(a: ArrayLike, axis=-1, kind=None, order=None): result = _impl.argsort(a, axis, kind, order) return _helpers.array_from(result) @normalizer -def searchsorted(a : ArrayLike, v : ArrayLike, side="left", sorter : Optional[ArrayLike]=None): +def searchsorted( + a: ArrayLike, v: ArrayLike, side="left", sorter: Optional[ArrayLike] = None +): result = torch.searchsorted(a, v, side=side, sorter=sorter) return _helpers.array_from(result) @@ -266,19 +271,19 @@ def searchsorted(a : ArrayLike, v : ArrayLike, side="left", sorter : Optional[Ar @normalizer -def moveaxis(a : ArrayLike, source, destination): +def moveaxis(a: ArrayLike, source, destination): result = _impl.moveaxis(a, source, destination) return _helpers.array_from(result) @normalizer -def swapaxes(a : ArrayLike, axis1, axis2): +def swapaxes(a: ArrayLike, axis1, axis2): result = _flips.swapaxes(a, axis1, axis2) return _helpers.array_from(result) @normalizer -def rollaxis(a : ArrayLike, axis, start=0): +def rollaxis(a: ArrayLike, axis, start=0): result = _flips.rollaxis(a, axis, start) return _helpers.array_from(result) @@ -287,32 +292,32 @@ def rollaxis(a : ArrayLike, axis, start=0): @normalizer -def squeeze(a : ArrayLike, axis=None): +def squeeze(a: ArrayLike, axis=None): result = _impl.squeeze(a, axis) return _helpers.array_from(result, a) @normalizer -def reshape(a : ArrayLike, newshape, order="C"): +def reshape(a: ArrayLike, newshape, order="C"): result = _impl.reshape(a, newshape, order=order) return _helpers.array_from(result, a) @normalizer -def transpose(a : ArrayLike, axes=None): +def transpose(a: 
ArrayLike, axes=None): result = _impl.transpose(a, axes) return _helpers.array_from(result, a) @normalizer -def ravel(a : ArrayLike, order="C"): +def ravel(a: ArrayLike, order="C"): result = _impl.ravel(a) return _helpers.array_from(result, a) # leading underscore since arr.flatten exists but np.flatten does not @normalizer -def _flatten(a : ArrayLike, order="C"): +def _flatten(a: ArrayLike, order="C"): result = _impl._flatten(a) return _helpers.array_from(result, a) @@ -321,7 +326,7 @@ def _flatten(a : ArrayLike, order="C"): @normalizer -def real(a : ArrayLike): +def real(a: ArrayLike): result = torch.real(a) return _helpers.array_from(result) @@ -333,7 +338,7 @@ def imag(a: ArrayLike): @normalizer -def round_(a : ArrayLike, decimals=0, out=None): +def round_(a: ArrayLike, decimals=0, out=None): result = _impl.round(a, decimals) return _helpers.result_or_out(result, out) diff --git a/torch_np/_helpers.py b/torch_np/_helpers.py index d7a13c80..f1589f28 100644 --- a/torch_np/_helpers.py +++ b/torch_np/_helpers.py @@ -117,4 +117,3 @@ def to_tensors(*inputs): from ._ndarray import asarray, ndarray return tuple(asarray(value).get() for value in inputs) - diff --git a/torch_np/_ndarray.py b/torch_np/_ndarray.py index a45b5b58..f93b7dbc 100644 --- a/torch_np/_ndarray.py +++ b/torch_np/_ndarray.py @@ -1,4 +1,3 @@ -import functools import operator import torch diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py index 9b87e574..5c2bffb1 100644 --- a/torch_np/_wrapper.py +++ b/torch_np/_wrapper.py @@ -4,19 +4,20 @@ pytorch tensors. """ +from typing import Optional, Sequence + import torch from . import _funcs from ._detail import _dtypes_impl, _flips, _reductions, _util from ._detail import implementations as _impl + +### XXX: order the imports DAG +from ._funcs import ArrayLike, DTypeLike, SubokLike, UnpackedSeqArrayLike, normalizer from ._ndarray import array, asarray, maybe_set_base, ndarray from . import _dtypes, _helpers, _decorators # isort: skip # XXX -### XXX: order the imports DAG -from . 
_funcs import normalizer, DTypeLike, ArrayLike, UnpackedSeqArrayLike, SubokLike -from typing import Optional, Sequence - # Things to decide on (punt for now) # @@ -60,38 +61,38 @@ @normalizer -def copy(a: ArrayLike, order="K", subok: SubokLike=False): +def copy(a: ArrayLike, order="K", subok: SubokLike = False): if order != "K": raise NotImplementedError tensor = a.clone() - return asarray(tensor) + return _helpers.array_from(tensor) @normalizer -def atleast_1d(*arys : UnpackedSeqArrayLike): +def atleast_1d(*arys: UnpackedSeqArrayLike): res = torch.atleast_1d(*arys) if len(res) == 1: - return asarray(res[0]) + return _helpers.array_from(res[0]) else: - return list(asarray(_) for _ in res) + return list(_helpers.tuple_arrays_from(res)) @normalizer -def atleast_2d(*arys : UnpackedSeqArrayLike): +def atleast_2d(*arys: UnpackedSeqArrayLike): res = torch.atleast_2d(*arys) if len(res) == 1: - return asarray(res[0]) + return _helpers.array_from(res[0]) else: - return list(asarray(_) for _ in res) + return list(_helpers.tuple_arrays_from(res)) @normalizer -def atleast_3d(*arys : UnpackedSeqArrayLike): +def atleast_3d(*arys: UnpackedSeqArrayLike): res = torch.atleast_3d(*arys) if len(res) == 1: - return asarray(res[0]) + return _helpers.array_from(res[0]) else: - return list(asarray(_) for _ in res) + return list(_helpers.tuple_arrays_from(res)) def _concat_check(tup, dtype, out): @@ -113,49 +114,65 @@ def _concat_check(tup, dtype, out): @normalizer -def concatenate(ar_tuple : Sequence[ArrayLike], axis=0, out=None, dtype: DTypeLike=None, casting="same_kind"): +def concatenate( + ar_tuple: Sequence[ArrayLike], + axis=0, + out=None, + dtype: DTypeLike = None, + casting="same_kind", +): _concat_check(ar_tuple, dtype, out=out) result = _impl.concatenate(ar_tuple, axis, out, dtype, casting) return _helpers.result_or_out(result, out) @normalizer -def vstack(tup : Sequence[ArrayLike], *, dtype : DTypeLike=None, casting="same_kind"): +def vstack(tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_kind"): _concat_check(tup, dtype, out=None) result = _impl.vstack(tup, dtype=dtype, casting=casting) - return asarray(result) + return _helpers.array_from(result) + row_stack = vstack @normalizer -def hstack(tup: Sequence[ArrayLike], *, dtype : DTypeLike=None, casting="same_kind"): +def hstack(tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_kind"): _concat_check(tup, dtype, out=None) result = _impl.hstack(tup, dtype=dtype, casting=casting) - return asarray(result) + return _helpers.array_from(result) @normalizer -def dstack(tup: Sequence[ArrayLike], *, dtype : DTypeLike=None, casting="same_kind"): +def dstack(tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_kind"): # XXX: in numpy 1.24 dstack does not have dtype and casting keywords # but {h,v}stack do. Hence add them here for consistency. result = _impl.dstack(tup, dtype=dtype, casting=casting) - return asarray(result) + return _helpers.array_from(result) @normalizer -def column_stack(tup : Sequence[ArrayLike], *, dtype : DTypeLike=None, casting="same_kind"): +def column_stack( + tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_kind" +): # XXX: in numpy 1.24 column_stack does not have dtype and casting keywords # but row_stack does. (because row_stack is an alias for vstack, really). # Hence add these keywords here for consistency. 
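    # For reference, the numpy semantics being matched here (a comment-level
    # sketch, not part of the patch):
    #   np.column_stack(([1, 2], [3, 4])) -> array([[1, 3],
    #                                               [2, 4]])
    # that is, 1-D inputs become columns of the result.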
_concat_check(tup, dtype, out=None) result = _impl.column_stack(tup, dtype=dtype, casting=casting) - return asarray(result) + return _helpers.array_from(result) @normalizer -def stack(arrays : Sequence[ArrayLike], axis=0, out=None, *, dtype : DTypeLike=None, casting="same_kind"): +def stack( + arrays: Sequence[ArrayLike], + axis=0, + out=None, + *, + dtype: DTypeLike = None, + casting="same_kind", +): _concat_check(arrays, dtype, out=out) result = _impl.stack(arrays, axis=axis, out=out, dtype=dtype, casting=casting) return _helpers.result_or_out(result, out) @@ -199,7 +216,7 @@ def dsplit(ary, indices_or_sections): @normalizer def kron(a: ArrayLike, b: ArrayLike): result = torch.kron(a, b) - return asarray(result) + return _helpers.array_from(result) @normalizer @@ -208,116 +225,160 @@ def tile(A: ArrayLike, reps): reps = (reps,) result = torch.tile(A, reps) - return asarray(result) - + return _helpers.array_from(result) @normalizer -def vander(x : ArrayLike, N=None, increasing=False): +def vander(x: ArrayLike, N=None, increasing=False): result = torch.vander(x, N, increasing) - return asarray(result) + return _helpers.array_from(result) def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0): if axis != 0 or retstep or not endpoint: raise NotImplementedError # XXX: raises TypeError if start or stop are not scalars - return asarray(torch.linspace(start, stop, num, dtype=dtype)) + result = torch.linspace(start, stop, num, dtype=dtype) + return _helpers.array_from(result) @normalizer -def geomspace(start: ArrayLike, stop: ArrayLike, num=50, endpoint=True, dtype: DTypeLike=None, axis=0): +def geomspace( + start: ArrayLike, + stop: ArrayLike, + num=50, + endpoint=True, + dtype: DTypeLike = None, + axis=0, +): if axis != 0 or not endpoint: raise NotImplementedError result = _impl.geomspace(start, stop, num, endpoint, dtype, axis) - return asarray(result) + return _helpers.array_from(result) @normalizer -def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype: DTypeLike=None, axis=0): +def logspace( + start, stop, num=50, endpoint=True, base=10.0, dtype: DTypeLike = None, axis=0 +): if axis != 0 or not endpoint: raise NotImplementedError - return asarray(torch.logspace(start, stop, num, base=base, dtype=dtype)) + result = torch.logspace(start, stop, num, base=base, dtype=dtype) + return _helpers.array_from(result) @normalizer -def arange(start: Optional[ArrayLike]=None, stop: Optional[ArrayLike]=None, step: Optional[ArrayLike]=1, dtype: DTypeLike=None, *, like : SubokLike=None): +def arange( + start: Optional[ArrayLike] = None, + stop: Optional[ArrayLike] = None, + step: Optional[ArrayLike] = 1, + dtype: DTypeLike = None, + *, + like: SubokLike = None, +): result = _impl.arange(start, stop, step, dtype=dtype) - return asarray(result) + return _helpers.array_from(result) @normalizer -def empty(shape, dtype:DTypeLike=float, order="C", *, like : SubokLike=None): +def empty(shape, dtype: DTypeLike = float, order="C", *, like: SubokLike = None): if order != "C": raise NotImplementedError if dtype is None: dtype = _dtypes_impl.default_float_dtype result = torch.empty(shape, dtype=dtype) - return asarray(result) + return _helpers.array_from(result) # NB: *_like functions deliberately deviate from numpy: it has subok=True # as the default; we set subok=False and raise on anything else. 
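# A short sketch of the intended failure mode (assuming the SubokLike
# normalizer shown earlier, which raises on any truthy value):
#
#     import torch_np as np
#
#     np.ones_like(np.ones(3), subok=True)
#     # -> ValueError: 'subok' parameter is not supported.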
@normalizer -def empty_like(prototype : ArrayLike, dtype : DTypeLike=None, order="K", subok : SubokLike=False, shape=None): +def empty_like( + prototype: ArrayLike, + dtype: DTypeLike = None, + order="K", + subok: SubokLike = False, + shape=None, +): if order != "K": raise NotImplementedError result = _impl.empty_like(prototype, dtype=dtype, shape=shape) - return asarray(result) + return _helpers.array_from(result) @normalizer -def full(shape, fill_value, dtype:DTypeLike=None, order="C", *, like : SubokLike=None): +def full( + shape, fill_value, dtype: DTypeLike = None, order="C", *, like: SubokLike = None +): if isinstance(shape, int): shape = (shape,) if order != "C": raise NotImplementedError fill_value = asarray(fill_value).get() result = _impl.full(shape, fill_value, dtype=dtype) - return asarray(result) + return _helpers.array_from(result) @normalizer -def full_like(a: ArrayLike, fill_value, dtype : DTypeLike=None, order="K", subok: SubokLike=False, shape=None): +def full_like( + a: ArrayLike, + fill_value, + dtype: DTypeLike = None, + order="K", + subok: SubokLike = False, + shape=None, +): if order != "K": raise NotImplementedError result = _impl.full_like(a, fill_value, dtype=dtype, shape=shape) - return asarray(result) + return _helpers.array_from(result) @normalizer -def ones(shape, dtype: DTypeLike=None, order="C", *, like : SubokLike=None): +def ones(shape, dtype: DTypeLike = None, order="C", *, like: SubokLike = None): if order != "C": raise NotImplementedError if dtype is None: dtype = _dtypes_impl.default_float_dtype result = torch.ones(shape, dtype=dtype) - return asarray(result) + return _helpers.array_from(result) @normalizer -def ones_like(a: ArrayLike, dtype : DTypeLike=None, order="K", subok: SubokLike=False, shape=None): +def ones_like( + a: ArrayLike, + dtype: DTypeLike = None, + order="K", + subok: SubokLike = False, + shape=None, +): if order != "K": raise NotImplementedError result = _impl.ones_like(a, dtype=dtype, shape=shape) - return asarray(result) + return _helpers.array_from(result) @normalizer -def zeros(shape, dtype: DTypeLike=None, order="C", *, like: SubokLike=None): +def zeros(shape, dtype: DTypeLike = None, order="C", *, like: SubokLike = None): if order != "C": raise NotImplementedError if dtype is None: dtype = _dtypes_impl.default_float_dtype result = torch.zeros(shape, dtype=dtype) - return asarray(result) + return _helpers.array_from(result) @normalizer -def zeros_like(a: ArrayLike, dtype : DTypeLike=None, order="K", subok: SubokLike=False, shape=None): +def zeros_like( + a: ArrayLike, + dtype: DTypeLike = None, + order="K", + subok: SubokLike = False, + shape=None, +): if order != "K": raise NotImplementedError result = _impl.zeros_like(a, dtype=dtype, shape=shape) - return asarray(result) + return _helpers.array_from(result) ###### misc/unordered @@ -349,49 +410,62 @@ def _xy_helper_corrcoef(x_tensor, y_tensor=None, rowvar=True): @normalizer -def corrcoef(x : ArrayLike, y : Optional[ArrayLike]=None, rowvar=True, bias=NoValue, ddof=NoValue, *, dtype : DTypeLike=None): +def corrcoef( + x: ArrayLike, + y: Optional[ArrayLike] = None, + rowvar=True, + bias=NoValue, + ddof=NoValue, + *, + dtype: DTypeLike = None, +): if bias is not None or ddof is not None: # deprecated in NumPy raise NotImplementedError tensor = _xy_helper_corrcoef(x, y, rowvar) result = _impl.corrcoef(tensor, dtype=dtype) - return asarray(result) + return _helpers.array_from(result) @normalizer def cov( - m : ArrayLike, - y : Optional[ArrayLike]=None, + m: ArrayLike, + y: 
Optional[ArrayLike] = None, rowvar=True, bias=False, ddof=None, - fweights : Optional[ArrayLike]=None, - aweights : Optional[ArrayLike]=None, + fweights: Optional[ArrayLike] = None, + aweights: Optional[ArrayLike] = None, *, - dtype : DTypeLike=None, + dtype: DTypeLike = None, ): m = _xy_helper_corrcoef(m, y, rowvar) result = _impl.cov(m, bias, ddof, fweights, aweights, dtype=dtype) - return asarray(result) + return _helpers.array_from(result) @normalizer -def bincount(x : ArrayLike, /, weights : Optional[ArrayLike]=None, minlength=0): +def bincount(x: ArrayLike, /, weights: Optional[ArrayLike] = None, minlength=0): if x.numel() == 0: # edge case allowed by numpy x = torch.as_tensor([], dtype=int) result = _impl.bincount(x, weights, minlength) - return asarray(result) + return _helpers.array_from(result) @normalizer -def where(condition : ArrayLike, x : Optional[ArrayLike]=None, y: Optional[ArrayLike]=None, /): +def where( + condition: ArrayLike, + x: Optional[ArrayLike] = None, + y: Optional[ArrayLike] = None, + /, +): result = _impl.where(condition, x, y) if isinstance(result, tuple): # single-argument where(condition) - return tuple(asarray(x) for x in result) + return _helpers.tuple_arrays_from(result) else: - return asarray(result) + return _helpers.array_from(result) ###### module-level queries of object properties @@ -417,41 +491,41 @@ def size(a, axis=None): ###### shape manipulations and indexing + @normalizer def expand_dims(a: ArrayLike, axis): shape = _util.expand_shape(a.shape, axis) tensor = a.view(shape) # never copies - return ndarray._from_tensor_and_base(tensor, a) + return _helpers.array_from(tensor, a) @normalizer def flip(m: ArrayLike, axis=None): result = _flips.flip(m, axis) - return asarray(result) + return _helpers.array_from(result) @normalizer def flipud(m: ArrayLike): result = _flips.flipud(m) - return asarray(result) + return _helpers.array_from(result) @normalizer def fliplr(m: ArrayLike): result = _flips.fliplr(m) - return asarray(result) + return _helpers.array_from(result) @normalizer def rot90(m: ArrayLike, k=1, axes=(0, 1)): result = _flips.rot90(m, k, axes) - return asarray(result) - + return _helpers.array_from(result) @normalizer -def broadcast_to(array: ArrayLike, shape, subok : SubokLike=False): +def broadcast_to(array: ArrayLike, shape, subok: SubokLike = False): result = torch.broadcast_to(array, size=shape) - return asarray(result) + return _helpers.array_from(result) from torch import broadcast_shapes @@ -459,10 +533,10 @@ def broadcast_to(array: ArrayLike, shape, subok : SubokLike=False): # YYY: pattern: tuple of arrays as input, tuple of arrays as output; cf nonzero @normalizer -def broadcast_arrays(*args : UnpackedSeqArrayLike, subok: SubokLike=False): - args = args[0] # undo the *args wrapping in normalizer +def broadcast_arrays(*args: UnpackedSeqArrayLike, subok: SubokLike = False): + args = args[0] # undo the *args wrapping in normalizer res = torch.broadcast_tensors(*args) - return tuple(asarray(_) for _ in res) + return _helpers.tuple_arrays_from(res) def unravel_index(indices, shape, order="C"): @@ -487,19 +561,20 @@ def ravel_multi_index(multi_index, dims, mode="raise", order="C"): @normalizer -def meshgrid(*xi : UnpackedSeqArrayLike, copy=True, sparse=False, indexing="xy"): +def meshgrid(*xi: UnpackedSeqArrayLike, copy=True, sparse=False, indexing="xy"): xi = xi[0] # undo the *xi wrapping in normalizer output = _impl.meshgrid(*xi, copy=copy, sparse=sparse, indexing=indexing) - return [asarray(t) for t in output] + outp = 
_helpers.tuple_arrays_from(output) + return list(outp) # match numpy, return a list @normalizer -def indices(dimensions, dtype: DTypeLike=int, sparse=False): +def indices(dimensions, dtype: DTypeLike = int, sparse=False): result = _impl.indices(dimensions, dtype=dtype, sparse=sparse) if sparse: - return tuple(asarray(x) for x in result) + return _helpers.tuple_arrays_from(result) else: - return asarray(result) + return _helpers.array_from(result) def flatnonzero(a): @@ -516,48 +591,48 @@ def flatnonzero(a): @normalizer def roll(a: ArrayLike, shift, axis=None): result = _impl.roll(a, shift, axis) - return asarray(result) + return _helpers.array_from(result) ###### tri{l, u} and related @normalizer def tril(m: ArrayLike, k=0): result = m.tril(k) - return asarray(result) + return _helpers.array_from(result) @normalizer def triu(m: ArrayLike, k=0): result = m.triu(k) - return asarray(result) + return _helpers.array_from(result) def tril_indices(n, k=0, m=None): result = _impl.tril_indices(n, k, m) - return tuple(asarray(t) for t in result) + return _helpers.tuple_arrays_from(result) def triu_indices(n, k=0, m=None): result = _impl.triu_indices(n, k, m) - return tuple(asarray(t) for t in result) + return _helpers.tuple_arrays_from(result) @normalizer def tril_indices_from(arr: ArrayLike, k=0): result = _impl.tril_indices_from(arr, k) - return tuple(asarray(t) for t in result) + return _helpers.tuple_arrays_from(result) @normalizer -def triu_indices_from(arr : ArrayLike, k=0): +def triu_indices_from(arr: ArrayLike, k=0): result = _impl.triu_indices_from(arr, k) - return tuple(asarray(t) for t in result) + return _helpers.tuple_arrays_from(result) @normalizer -def tri(N, M=None, k=0, dtype: DTypeLike=float, *, like : SubokLike=None): +def tri(N, M=None, k=0, dtype: DTypeLike = float, *, like: SubokLike = None): result = _impl.tri(N, M, k, dtype) - return asarray(result) + return _helpers.array_from(result) ###### reductions @@ -684,9 +759,10 @@ def average(a, axis=None, weights=None, returned=False, *, keepdims=NoValue): if scl.shape != result.shape: scl = torch.broadcast_to(scl, result.shape).clone() - return asarray(result), asarray(scl) + return _helpers.array_from(result), _helpers.array_from(scl) + else: - return asarray(result) + return _helpers.array_from(result) def percentile( @@ -737,7 +813,7 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): @normalizer def inner(a: ArrayLike, b: ArrayLike, /): result = _impl.inner(a, b) - return asarray(result) + return _helpers.array_from(result) @normalizer @@ -747,7 +823,15 @@ def outer(a: ArrayLike, b: ArrayLike, out=None): @normalizer -def nanmean(a: ArrayLike, axis=None, dtype: DTypeLike=None, out=None, keepdims=NoValue, *, where=NoValue): +def nanmean( + a: ArrayLike, + axis=None, + dtype: DTypeLike = None, + out=None, + keepdims=NoValue, + *, + where=NoValue, +): # XXX: this needs to be rewritten if where is not NoValue: raise NotImplementedError @@ -761,7 +845,7 @@ def nanmean(a: ArrayLike, axis=None, dtype: DTypeLike=None, out=None, keepdims=N result = a.nanmean(dtype=dtype, dim=axis, keepdim=bool(keepdims)) if out is not None: out.copy_(result) - return asarray(result) + return _helpers.array_from(result) def nanmin(): @@ -817,11 +901,17 @@ def nanpercentile(): @normalizer -def diff(a : ArrayLike, n=1, axis=-1, prepend : Optional[ArrayLike]=NoValue, append : Optional[ArrayLike]=NoValue): +def diff( + a: ArrayLike, + n=1, + axis=-1, + prepend: Optional[ArrayLike] = NoValue, + append: Optional[ArrayLike] = NoValue, 
+):
 
     if n == 0:
         # match numpy and return the input immediately
-        return asarray(a)
+        return _helpers.array_from(a)
 
     result = _impl.diff(
         a,
@@ -830,7 +920,7 @@ def diff(a : ArrayLike, n=1, axis=-1, prepend : Optional[ArrayLike]=NoValue, app
         prepend_tensor=prepend,
         append_tensor=append,
     )
-    return asarray(result)
+    return _helpers.array_from(result)
 
 
 ##### math functions
 
 
 @normalizer
 def angle(z: ArrayLike, deg=False):
     result = _impl.angle(z, deg)
-    return asarray(result)
+    return _helpers.array_from(result)
 
 
 @normalizer
 def sinc(x: ArrayLike):
     result = torch.sinc(x)
-    return asarray(result)
+    return _helpers.array_from(result)
 
 
 @normalizer
 def real_if_close(a: ArrayLike, tol=100):
     result = _impl.real_if_close(a, tol=tol)
-    return asarray(result)
+    return _helpers.array_from(result)
 
 
 @normalizer
 def iscomplex(x: ArrayLike):
     result = _impl.iscomplex(x)
-    return asarray(result)  # XXX: missing .item on a zero-dim value; a case for array_or_scalar(value) ?
+    # XXX: missing .item on a zero-dim value; a case for array_or_scalar(value) ?
+    return _helpers.array_from(result)
 
 
 @normalizer
 def isreal(x: ArrayLike):
     result = _impl.isreal(x)
-    return asarray(result)
+    return _helpers.array_from(result)
 
 
 @normalizer
 def iscomplexobj(x: ArrayLike):
     result = torch.is_complex(x)
-    return asarray(result)
+    return result
 
 
 @normalizer
 def isrealobj(x: ArrayLike):
     result = not torch.is_complex(x)
-    return asarray(result)
+    return result
+
 
 @normalizer
 def isneginf(x: ArrayLike, out=None):
     result = torch.isneginf(x, out=out)
-    return asarray(result)
+    return _helpers.array_from(result)
 
 
 @normalizer
 def isposinf(x: ArrayLike, out=None):
     result = torch.isposinf(x, out=out)
-    return asarray(result)
+    return _helpers.array_from(result)
 
 
 @normalizer
 def i0(x: ArrayLike):
     result = torch.special.i0(x)
-    return asarray(result)
+    return _helpers.array_from(result)
 
 
 def isscalar(a):
@@ -907,7 +999,7 @@ def isscalar(a):
 @normalizer
 def isclose(a: ArrayLike, b: ArrayLike, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):
     result = _impl.isclose(a, b, rtol, atol, equal_nan=equal_nan)
-    return asarray(result)
+    return _helpers.array_from(result)
 
 
 @normalizer
@@ -950,11 +1042,11 @@ def asfarray():
 @normalizer
 def take_along_axis(arr: ArrayLike, indices: ArrayLike, axis):
     result = _impl.take_along_dim(arr, indices, axis)
-    return asarray(result)
+    return _helpers.array_from(result)
 
 
 @normalizer
-def put_along_axis(arr: ArrayLike, indices : ArrayLike, values: ArrayLike, axis):
+def put_along_axis(arr: ArrayLike, indices: ArrayLike, values: ArrayLike, axis):
     # modify the argument in-place: here `arr` is the `._tensor` of the original `arr` argument
     result = _impl.put_along_dim(arr, indices, values, axis)
     arr.copy_(result.reshape(arr.shape))
@@ -966,7 +1058,7 @@ def put_along_axis(arr: ArrayLike, indices : ArrayLike, values: ArrayLike, axis)
 
 @normalizer
 def unique(
-    ar : ArrayLike,
+    ar: ArrayLike,
     return_index=False,
     return_inverse=False,
     return_counts=False,
@@ -984,9 +1076,10 @@ def unique(
 
     if isinstance(result, tuple):
-        return tuple(asarray(x) for x in result)
+        return _helpers.tuple_arrays_from(result)
     else:
-        return asarray(result)
+        return _helpers.array_from(result)
+
 
 ###### mapping from numpy API objects to wrappers from this module ######
 
diff --git a/torch_np/tests/test_basic.py b/torch_np/tests/test_basic.py
index 52189b55..0f43be84 100644
--- a/torch_np/tests/test_basic.py
+++ b/torch_np/tests/test_basic.py
@@ -26,9 +26,7 @@
w.angle, w.real_if_close, w.isreal, - w.isrealobj, w.iscomplex, - w.iscomplexobj, w.isneginf, w.isposinf, w.i0, From 2fdd3c64c85353dacd5ed65500fccd95c868aaf8 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Mon, 6 Mar 2023 19:49:46 +0300 Subject: [PATCH 13/33] MAINT: simplify sum, prod, mean, var, std, argmin, argmax --- torch_np/_detail/_reductions.py | 39 ++++++++++++++ torch_np/_funcs.py | 93 ++++++++++++++++++++++++++++++++- torch_np/_ndarray.py | 17 +++--- torch_np/_wrapper.py | 53 +------------------ 4 files changed, 142 insertions(+), 60 deletions(-) diff --git a/torch_np/_detail/_reductions.py b/torch_np/_detail/_reductions.py index a3d0a0b7..824f0c76 100644 --- a/torch_np/_detail/_reductions.py +++ b/torch_np/_detail/_reductions.py @@ -11,6 +11,38 @@ NoValue = None +import functools + + +############# XXX +### From _util.axis_expand_func + + +def deco_axis_expand(func): + """Generically handle axis arguments in reductions.""" + @functools.wraps(func) + def wrapped(tensor, axis, *args, **kwds): + + if axis is not None: + if not isinstance(axis, (list, tuple)): + axis = (axis,) + axis = _util.normalize_axis_tuple(axis, tensor.ndim) + + if axis == (): + newshape = _util.expand_shape(tensor.shape, axis=0) + tensor = tensor.reshape(newshape) + axis = (0,) + + result = func(tensor, axis=axis, *args, **kwds) + return result + + return wrapped + + + +##################################3 + + def _atleast_float(dtype, other_dtype): """Return a dtype that is real or complex floating-point. @@ -34,6 +66,7 @@ def count_nonzero(a, axis=None): return tensor +@deco_axis_expand def argmax(tensor, axis=None): axis = _util.allow_only_single_axis(axis) @@ -45,6 +78,7 @@ def argmax(tensor, axis=None): return tensor +@deco_axis_expand def argmin(tensor, axis=None): axis = _util.allow_only_single_axis(axis) @@ -103,6 +137,7 @@ def ptp(tensor, axis=None): return result +@deco_axis_expand def sum(tensor, axis=None, dtype=None, initial=NoValue, where=NoValue): if initial is not NoValue or where is not NoValue: raise NotImplementedError @@ -120,6 +155,7 @@ def sum(tensor, axis=None, dtype=None, initial=NoValue, where=NoValue): return result +@deco_axis_expand def prod(tensor, axis=None, dtype=None, initial=NoValue, where=NoValue): if initial is not NoValue or where is not NoValue: raise NotImplementedError @@ -137,6 +173,7 @@ def prod(tensor, axis=None, dtype=None, initial=NoValue, where=NoValue): return result +@deco_axis_expand def mean(tensor, axis=None, dtype=None, *, where=NoValue): if where is not NoValue: raise NotImplementedError @@ -159,6 +196,7 @@ def mean(tensor, axis=None, dtype=None, *, where=NoValue): return result +@deco_axis_expand def std(tensor, axis=None, dtype=None, ddof=0, *, where=NoValue): if where is not NoValue: raise NotImplementedError @@ -170,6 +208,7 @@ def std(tensor, axis=None, dtype=None, ddof=0, *, where=NoValue): return result +@deco_axis_expand def var(tensor, axis=None, dtype=None, ddof=0, *, where=NoValue): if where is not NoValue: raise NotImplementedError diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index 757951e9..67144da1 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -1,10 +1,11 @@ +import operator import typing from typing import Optional, Sequence import torch from . 
import _decorators, _helpers -from ._detail import _dtypes_impl, _flips, _util +from ._detail import _dtypes_impl, _flips, _util, _reductions from ._detail import implementations as _impl ################################## normalizations @@ -12,6 +13,7 @@ ArrayLike = typing.TypeVar("ArrayLike") DTypeLike = typing.TypeVar("DTypeLike") SubokLike = typing.TypeVar("SubokLike") +AxisLike = typing.TypeVar("AxisLike") # annotate e.g. atleast_1d(*arys) UnpackedSeqArrayLike = typing.TypeVar("UnpackedSeqArrayLike") @@ -52,6 +54,14 @@ def normalize_subok_like(arg, name): raise ValueError(f"'{name}' parameter is not supported.") +def normalize_axis_like(arg, name=None): + from ._ndarray import ndarray + + if isinstance(arg, ndarray): + arg = operator.index(arg) + return arg + + normalizers = { ArrayLike: normalize_array_like, Optional[ArrayLike]: normalize_optional_array_like, @@ -59,6 +69,7 @@ def normalize_subok_like(arg, name): UnpackedSeqArrayLike: normalize_seq_array_like, # cf handling in normalize DTypeLike: normalize_dtype, SubokLike: normalize_subok_like, + AxisLike: normalize_axis_like, } import functools @@ -345,3 +356,83 @@ def round_(a: ArrayLike, decimals=0, out=None): around = round_ round = round_ + + +# ### reductions ### + + +NoValue = None # FIXME + +@normalizer +def sum(a : ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): + result = _reductions.sum(a, axis=axis, dtype=dtype, initial=NoValue, where=NoValue) + + if keepdims: + result = _util.apply_keepdims(result, axis, a.ndim) + + return _helpers.result_or_out(result, out) + + +@normalizer +def prod(a : ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): + result = _reductions.prod(a, axis=axis, dtype=dtype, initial=NoValue, where=NoValue) + + if keepdims: + result = _util.apply_keepdims(result, axis, a.ndim) + + return _helpers.result_or_out(result, out) + + +product = prod + + +@normalizer +def mean(a : ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, keepdims=NoValue, *, where=NoValue): + result = _reductions.mean(a, axis=axis, dtype=dtype, where=NoValue) + + if keepdims: + result = _util.apply_keepdims(result, axis, a.ndim) + + return _helpers.result_or_out(result, out) + + +@normalizer +def var(a: ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, ddof=0, keepdims=NoValue, *, where=NoValue): + result = _reductions.var(a, axis=axis, dtype=dtype, ddof=ddof, where=NoValue) + + if keepdims: + result = _util.apply_keepdims(result, axis, a.ndim) + + return _helpers.result_or_out(result, out) + + +@normalizer +def std(a: ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, ddof=0, keepdims=NoValue, *, where=NoValue): + result = _reductions.std(a, axis=axis, dtype=dtype, ddof=ddof, where=NoValue) + + if keepdims: + result = _util.apply_keepdims(result, axis, a.ndim) + + return _helpers.result_or_out(result, out) + + + +@normalizer +def argmin(a: ArrayLike, axis: AxisLike=None, out=None, *, keepdims=NoValue): + result = _reductions.argmin(a, axis=axis) + + if keepdims: + result = _util.apply_keepdims(result, axis, a.ndim) + + return _helpers.result_or_out(result, out) + + +@normalizer +def argmax(a: ArrayLike, axis: AxisLike=None, out=None, *, keepdims=NoValue): + result = _reductions.argmax(a, axis=axis) + + if keepdims: + result = _util.apply_keepdims(result, axis, a.ndim) + + return _helpers.result_or_out(result, out) + diff --git a/torch_np/_ndarray.py 
b/torch_np/_ndarray.py index f93b7dbc..4d9604cb 100644 --- a/torch_np/_ndarray.py +++ b/torch_np/_ndarray.py @@ -375,8 +375,11 @@ def sort(self, axis=-1, kind=None, order=None): ### reductions ### - argmin = emulate_out_arg(axis_keepdims_wrapper(_reductions.argmin)) - argmax = emulate_out_arg(axis_keepdims_wrapper(_reductions.argmax)) + ##argmin = emulate_out_arg(axis_keepdims_wrapper(_reductions.argmin)) + ##argmax = emulate_out_arg(axis_keepdims_wrapper(_reductions.argmax)) + + argmax = _funcs.argmax + argmin = _funcs.argmin any = emulate_out_arg(axis_keepdims_wrapper(_reductions.any)) all = emulate_out_arg(axis_keepdims_wrapper(_reductions.all)) @@ -384,11 +387,11 @@ def sort(self, axis=-1, kind=None, order=None): min = emulate_out_arg(axis_keepdims_wrapper(_reductions.min)) ptp = emulate_out_arg(axis_keepdims_wrapper(_reductions.ptp)) - sum = emulate_out_arg(axis_keepdims_wrapper(dtype_to_torch(_reductions.sum))) - prod = emulate_out_arg(axis_keepdims_wrapper(dtype_to_torch(_reductions.prod))) - mean = emulate_out_arg(axis_keepdims_wrapper(dtype_to_torch(_reductions.mean))) - var = emulate_out_arg(axis_keepdims_wrapper(dtype_to_torch(_reductions.var))) - std = emulate_out_arg(axis_keepdims_wrapper(dtype_to_torch(_reductions.std))) + sum = _funcs.sum + prod = _funcs.prod + mean = _funcs.mean + var = _funcs.var + std = _funcs.std cumprod = emulate_out_arg( axis_none_ravel_wrapper(dtype_to_torch(_reductions.cumprod)) diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py index 5c2bffb1..1ef16036 100644 --- a/torch_np/_wrapper.py +++ b/torch_np/_wrapper.py @@ -636,14 +636,6 @@ def tri(N, M=None, k=0, dtype: DTypeLike = float, *, like: SubokLike = None): ###### reductions -def argmax(a, axis=None, out=None, *, keepdims=NoValue): - arr = asarray(a) - return arr.argmax(axis=axis, out=out, keepdims=keepdims) - - -def argmin(a, axis=None, out=None, *, keepdims=NoValue): - arr = asarray(a) - return arr.argmin(axis=axis, out=out, keepdims=keepdims) def amax(a, axis=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): @@ -677,34 +669,6 @@ def any(a, axis=None, out=None, keepdims=NoValue, *, where=NoValue): return arr.any(axis=axis, out=out, keepdims=keepdims, where=where) -def mean(a, axis=None, dtype=None, out=None, keepdims=NoValue, *, where=NoValue): - arr = asarray(a) - return arr.mean(axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where) - - -# YYY: pattern: initial=... 
- - -def sum( - a, axis=None, dtype=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue -): - arr = asarray(a) - return arr.sum( - axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where=where - ) - - -def prod( - a, axis=None, dtype=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue -): - arr = asarray(a) - return arr.prod( - axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where=where - ) - - -product = prod - def cumprod(a, axis=None, dtype=None, out=None): arr = asarray(a) @@ -719,27 +683,12 @@ def cumsum(a, axis=None, dtype=None, out=None): return arr.cumsum(axis=axis, dtype=dtype, out=out) -# YYY: pattern : ddof - - -def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=NoValue, *, where=NoValue): - arr = asarray(a) - return arr.std( - axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims, where=where - ) - - -def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=NoValue, *, where=NoValue): - arr = asarray(a) - return arr.var( - axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims, where=where - ) def average(a, axis=None, weights=None, returned=False, *, keepdims=NoValue): if weights is None: - result = mean(a, axis=axis, keepdims=keepdims) + result = _funcs.mean(a, axis=axis, keepdims=keepdims) if returned: scl = result.dtype.type(a.size / result.size) return result, scl From ab85d7291ac85b53f966c23230384bdbb1150d20 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Tue, 7 Mar 2023 22:46:51 +0300 Subject: [PATCH 14/33] MAINT: emulate_keepdims via a decorator in _detail/reductions.py --- torch_np/_detail/_reductions.py | 25 +++++++++++++ torch_np/_funcs.py | 64 ++++++++++++++++++--------------- torch_np/_ndarray.py | 14 +++----- torch_np/_wrapper.py | 34 ------------------ 4 files changed, 65 insertions(+), 72 deletions(-) diff --git a/torch_np/_detail/_reductions.py b/torch_np/_detail/_reductions.py index 824f0c76..f5d604dc 100644 --- a/torch_np/_detail/_reductions.py +++ b/torch_np/_detail/_reductions.py @@ -39,6 +39,14 @@ def wrapped(tensor, axis, *args, **kwds): return wrapped +def emulate_keepdims(func): + @functools.wraps(func) + def wrapped(tensor, axis=None, keepdims=NoValue, *args, **kwds): + result = func(tensor, axis=axis, *args, **kwds) + if keepdims: + result = _util.apply_keepdims(result, axis, tensor.ndim) + return result + return wrapped ##################################3 @@ -66,6 +74,7 @@ def count_nonzero(a, axis=None): return tensor +@emulate_keepdims @deco_axis_expand def argmax(tensor, axis=None): axis = _util.allow_only_single_axis(axis) @@ -78,6 +87,7 @@ def argmax(tensor, axis=None): return tensor +@emulate_keepdims @deco_axis_expand def argmin(tensor, axis=None): axis = _util.allow_only_single_axis(axis) @@ -90,6 +100,8 @@ def argmin(tensor, axis=None): return tensor +@emulate_keepdims +@deco_axis_expand def any(tensor, axis=None, *, where=NoValue): if where is not NoValue: raise NotImplementedError @@ -103,6 +115,8 @@ def any(tensor, axis=None, *, where=NoValue): return result +@emulate_keepdims +@deco_axis_expand def all(tensor, axis=None, *, where=NoValue): if where is not NoValue: raise NotImplementedError @@ -116,6 +130,8 @@ def all(tensor, axis=None, *, where=NoValue): return result +@emulate_keepdims +@deco_axis_expand def max(tensor, axis=None, initial=NoValue, where=NoValue): if initial is not NoValue or where is not NoValue: raise NotImplementedError @@ -124,6 +140,8 @@ def max(tensor, axis=None, initial=NoValue, where=NoValue): return result 
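# A minimal, runnable illustration of what the emulate_keepdims/apply_keepdims
# pair restores, in plain torch terms (a sketch, not part of the patch):
#
#     import torch
#
#     t = torch.arange(6.0).reshape(2, 3)
#     reduced = t.sum(dim=1)             # shape (2,): the reduced axis is gone
#     kept = t.sum(dim=1, keepdim=True)  # shape (2, 1): the axis survives as size 1
#     assert torch.equal(kept, reduced.unsqueeze(1))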
+@emulate_keepdims +@deco_axis_expand def min(tensor, axis=None, initial=NoValue, where=NoValue): if initial is not NoValue or where is not NoValue: raise NotImplementedError @@ -132,11 +150,14 @@ def min(tensor, axis=None, initial=NoValue, where=NoValue): return result +@emulate_keepdims +@deco_axis_expand def ptp(tensor, axis=None): result = tensor.amax(axis) - tensor.amin(axis) return result +@emulate_keepdims @deco_axis_expand def sum(tensor, axis=None, dtype=None, initial=NoValue, where=NoValue): if initial is not NoValue or where is not NoValue: @@ -155,6 +176,7 @@ def sum(tensor, axis=None, dtype=None, initial=NoValue, where=NoValue): return result +@emulate_keepdims @deco_axis_expand def prod(tensor, axis=None, dtype=None, initial=NoValue, where=NoValue): if initial is not NoValue or where is not NoValue: @@ -173,6 +195,7 @@ def prod(tensor, axis=None, dtype=None, initial=NoValue, where=NoValue): return result +@emulate_keepdims @deco_axis_expand def mean(tensor, axis=None, dtype=None, *, where=NoValue): if where is not NoValue: @@ -196,6 +219,7 @@ def mean(tensor, axis=None, dtype=None, *, where=NoValue): return result +@emulate_keepdims @deco_axis_expand def std(tensor, axis=None, dtype=None, ddof=0, *, where=NoValue): if where is not NoValue: @@ -208,6 +232,7 @@ def std(tensor, axis=None, dtype=None, ddof=0, *, where=NoValue): return result +@emulate_keepdims @deco_axis_expand def var(tensor, axis=None, dtype=None, ddof=0, *, where=NoValue): if where is not NoValue: diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index 67144da1..de9d949c 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -365,21 +365,13 @@ def round_(a: ArrayLike, decimals=0, out=None): @normalizer def sum(a : ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): - result = _reductions.sum(a, axis=axis, dtype=dtype, initial=NoValue, where=NoValue) - - if keepdims: - result = _util.apply_keepdims(result, axis, a.ndim) - + result = _reductions.sum(a, axis=axis, dtype=dtype, initial=initial, where=where, keepdims=keepdims) return _helpers.result_or_out(result, out) @normalizer def prod(a : ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): - result = _reductions.prod(a, axis=axis, dtype=dtype, initial=NoValue, where=NoValue) - - if keepdims: - result = _util.apply_keepdims(result, axis, a.ndim) - + result = _reductions.prod(a, axis=axis, dtype=dtype, initial=initial, where=where, keepdims=keepdims) return _helpers.result_or_out(result, out) @@ -388,51 +380,65 @@ def prod(a : ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, k @normalizer def mean(a : ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, keepdims=NoValue, *, where=NoValue): - result = _reductions.mean(a, axis=axis, dtype=dtype, where=NoValue) + result = _reductions.mean(a, axis=axis, dtype=dtype, where=NoValue, keepdims=keepdims) + return _helpers.result_or_out(result, out) - if keepdims: - result = _util.apply_keepdims(result, axis, a.ndim) +@normalizer +def var(a: ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, ddof=0, keepdims=NoValue, *, where=NoValue): + result = _reductions.var(a, axis=axis, dtype=dtype, ddof=ddof, where=where, keepdims=keepdims) return _helpers.result_or_out(result, out) @normalizer -def var(a: ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, ddof=0, keepdims=NoValue, *, where=NoValue): - result = _reductions.var(a, 
axis=axis, dtype=dtype, ddof=ddof, where=NoValue) +def std(a: ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, ddof=0, keepdims=NoValue, *, where=NoValue): + result = _reductions.std(a, axis=axis, dtype=dtype, ddof=ddof, where=where, keepdims=keepdims) + return _helpers.result_or_out(result, out) - if keepdims: - result = _util.apply_keepdims(result, axis, a.ndim) +@normalizer +def argmin(a: ArrayLike, axis: AxisLike=None, out=None, *, keepdims=NoValue): + result = _reductions.argmin(a, axis=axis, keepdims=keepdims) return _helpers.result_or_out(result, out) @normalizer -def std(a: ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, ddof=0, keepdims=NoValue, *, where=NoValue): - result = _reductions.std(a, axis=axis, dtype=dtype, ddof=ddof, where=NoValue) +def argmax(a: ArrayLike, axis: AxisLike=None, out=None, *, keepdims=NoValue): + result = _reductions.argmax(a, axis=axis, keepdims=keepdims) + return _helpers.result_or_out(result, out) - if keepdims: - result = _util.apply_keepdims(result, axis, a.ndim) +@normalizer +def amax(a : ArrayLike, axis : AxisLike=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): + result = _reductions.max(a, axis=axis, initial=initial, where=where, keepdims=keepdims) return _helpers.result_or_out(result, out) +max = amax + @normalizer -def argmin(a: ArrayLike, axis: AxisLike=None, out=None, *, keepdims=NoValue): - result = _reductions.argmin(a, axis=axis) +def amin(a: ArrayLike, axis: AxisLike=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): + result = _reductions.min(a, axis=axis, initial=initial, where=where, keepdims=keepdims) + return _helpers.result_or_out(result, out) + +min = amin - if keepdims: - result = _util.apply_keepdims(result, axis, a.ndim) +@normalizer +def ptp(a: ArrayLike, axis: AxisLike=None, out=None, keepdims=NoValue): + result = _reductions.ptp(a, axis=axis, keepdims=keepdims) return _helpers.result_or_out(result, out) @normalizer -def argmax(a: ArrayLike, axis: AxisLike=None, out=None, *, keepdims=NoValue): - result = _reductions.argmax(a, axis=axis) +def all(a: ArrayLike, axis: AxisLike=None, out=None, keepdims=NoValue, *, where=NoValue): + result = _reductions.all(a, axis=axis, where=where, keepdims=keepdims) + return _helpers.result_or_out(result, out) - if keepdims: - result = _util.apply_keepdims(result, axis, a.ndim) +@normalizer +def any(a: ArrayLike, axis: AxisLike=None, out=None, keepdims=NoValue, *, where=NoValue): + result = _reductions.any(a, axis=axis, where=where, keepdims=keepdims) return _helpers.result_or_out(result, out) diff --git a/torch_np/_ndarray.py b/torch_np/_ndarray.py index 4d9604cb..628febaa 100644 --- a/torch_np/_ndarray.py +++ b/torch_np/_ndarray.py @@ -374,18 +374,14 @@ def sort(self, axis=-1, kind=None, order=None): searchsorted = _funcs.searchsorted ### reductions ### - - ##argmin = emulate_out_arg(axis_keepdims_wrapper(_reductions.argmin)) - ##argmax = emulate_out_arg(axis_keepdims_wrapper(_reductions.argmax)) - argmax = _funcs.argmax argmin = _funcs.argmin - any = emulate_out_arg(axis_keepdims_wrapper(_reductions.any)) - all = emulate_out_arg(axis_keepdims_wrapper(_reductions.all)) - max = emulate_out_arg(axis_keepdims_wrapper(_reductions.max)) - min = emulate_out_arg(axis_keepdims_wrapper(_reductions.min)) - ptp = emulate_out_arg(axis_keepdims_wrapper(_reductions.ptp)) + any = _funcs.any + all = _funcs.all + max = _funcs.max + min = _funcs.min + ptp = _funcs.ptp sum = _funcs.sum prod = _funcs.prod diff --git a/torch_np/_wrapper.py 
b/torch_np/_wrapper.py index 1ef16036..9b6c1535 100644 --- a/torch_np/_wrapper.py +++ b/torch_np/_wrapper.py @@ -638,38 +638,6 @@ def tri(N, M=None, k=0, dtype: DTypeLike = float, *, like: SubokLike = None): ###### reductions -def amax(a, axis=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): - arr = asarray(a) - return arr.max(axis=axis, out=out, keepdims=keepdims, initial=initial, where=where) - - -max = amax - - -def amin(a, axis=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): - arr = asarray(a) - return arr.min(axis=axis, out=out, keepdims=keepdims, initial=initial, where=where) - - -min = amin - - -def ptp(a, axis=None, out=None, keepdims=NoValue): - arr = asarray(a) - return arr.ptp(axis=axis, out=out, keepdims=keepdims) - - -def all(a, axis=None, out=None, keepdims=NoValue, *, where=NoValue): - arr = asarray(a) - return arr.all(axis=axis, out=out, keepdims=keepdims, where=where) - - -def any(a, axis=None, out=None, keepdims=NoValue, *, where=NoValue): - arr = asarray(a) - return arr.any(axis=axis, out=out, keepdims=keepdims, where=where) - - - def cumprod(a, axis=None, dtype=None, out=None): arr = asarray(a) return arr.cumprod(axis=axis, dtype=dtype, out=out) @@ -683,8 +651,6 @@ def cumsum(a, axis=None, dtype=None, out=None): return arr.cumsum(axis=axis, dtype=dtype, out=out) - - def average(a, axis=None, weights=None, returned=False, *, keepdims=NoValue): if weights is None: From 93acb7abf98b26e04b40b5f0322c704adfe0e4f7 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Wed, 8 Mar 2023 12:26:52 +0300 Subject: [PATCH 15/33] MAINT: count_nonzero --- torch_np/_detail/_reductions.py | 2 ++ torch_np/_funcs.py | 6 ++++++ torch_np/_wrapper.py | 6 ------ 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/torch_np/_detail/_reductions.py b/torch_np/_detail/_reductions.py index f5d604dc..b90c4bef 100644 --- a/torch_np/_detail/_reductions.py +++ b/torch_np/_detail/_reductions.py @@ -65,6 +65,8 @@ def _atleast_float(dtype, other_dtype): return dtype +@emulate_keepdims +@deco_axis_expand def count_nonzero(a, axis=None): # XXX: this all should probably be generalized to a sum(a != 0, dtype=bool) try: diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index de9d949c..639b0d7f 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -442,3 +442,9 @@ def any(a: ArrayLike, axis: AxisLike=None, out=None, keepdims=NoValue, *, where= result = _reductions.any(a, axis=axis, where=where, keepdims=keepdims) return _helpers.result_or_out(result, out) + +@normalizer +def count_nonzero(a: ArrayLike, axis: AxisLike=None, *, keepdims=False): + result = _reductions.count_nonzero(a, axis=axis, keepdims=keepdims) + return _helpers.array_from(result) + diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py index 9b6c1535..ec528576 100644 --- a/torch_np/_wrapper.py +++ b/torch_np/_wrapper.py @@ -582,12 +582,6 @@ def flatnonzero(a): return _funcs.nonzero(arr.ravel())[0] -from ._decorators import emulate_out_arg -from ._ndarray import axis_keepdims_wrapper - -count_nonzero = emulate_out_arg(axis_keepdims_wrapper(_reductions.count_nonzero)) - - @normalizer def roll(a: ArrayLike, shift, axis=None): result = _impl.roll(a, shift, axis) From 47d8a1e592f1c0c898ba98d95f21637168cbd462 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Wed, 8 Mar 2023 18:46:22 +0300 Subject: [PATCH 16/33] MAINT: cumsum/cumprod --- torch_np/_detail/_reductions.py | 18 ++++++++++++++++++ torch_np/_detail/_util.py | 2 +- torch_np/_funcs.py | 11 +++++++++++ torch_np/_ndarray.py | 15 
++------------- 4 files changed, 32 insertions(+), 14 deletions(-) diff --git a/torch_np/_detail/_reductions.py b/torch_np/_detail/_reductions.py index b90c4bef..8ec7a592 100644 --- a/torch_np/_detail/_reductions.py +++ b/torch_np/_detail/_reductions.py @@ -48,6 +48,22 @@ def wrapped(tensor, axis=None, keepdims=NoValue, *args, **kwds): return result return wrapped + +def deco_axis_ravel(func): + """Generically handle 'axis=None ravels' behavior.""" + @functools.wraps(func) + def wrapped(tensor, axis, *args, **kwds): + if axis is not None: + axis = _util.normalize_axis_index(axis, tensor.ndim) + + tensors, axis = _util.axis_none_ravel(tensor, axis=axis) # XXX: inline + tensor = tensors[0] + + result = func(tensor, axis=axis, *args, **kwds) + return result + return wrapped + + ##################################3 @@ -252,6 +268,7 @@ def var(tensor, axis=None, dtype=None, ddof=0, *, where=NoValue): # 2. axis=None ravels (cf concatenate) +@deco_axis_ravel def cumprod(tensor, axis, dtype=None): if dtype == torch.bool: dtype = _dtypes_impl.default_int_dtype @@ -263,6 +280,7 @@ def cumprod(tensor, axis, dtype=None): return result +@deco_axis_ravel def cumsum(tensor, axis, dtype=None): if dtype == torch.bool: dtype = _dtypes_impl.default_int_dtype diff --git a/torch_np/_detail/_util.py b/torch_np/_detail/_util.py index 550f5492..47d4ae40 100644 --- a/torch_np/_detail/_util.py +++ b/torch_np/_detail/_util.py @@ -128,7 +128,7 @@ def apply_keepdims(tensor, axis, ndim): def axis_none_ravel(*tensors, axis=None): """Ravel the arrays if axis is none.""" - # XXX: is only used at `concatenate`. Inline unless reused more widely + # XXX: is only used at `concatenate` and cumsum/cumprod. Inline unless reused more widely if axis is None: tensors = tuple(ar.ravel() for ar in tensors) return tensors, 0 diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index 639b0d7f..f59de100 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -448,3 +448,14 @@ def count_nonzero(a: ArrayLike, axis: AxisLike=None, *, keepdims=False): result = _reductions.count_nonzero(a, axis=axis, keepdims=keepdims) return _helpers.array_from(result) + +@normalizer +def cumsum(a: ArrayLike, axis: AxisLike = None, dtype: DTypeLike = None, out=None): + result = _reductions.cumsum(a, axis=axis, dtype=dtype) + return _helpers.result_or_out(result, out) + + +@normalizer +def cumprod(a: ArrayLike, axis: AxisLike = None, dtype: DTypeLike = None, out=None): + result = _reductions.cumprod(a, axis=axis, dtype=dtype) + return _helpers.result_or_out(result, out) diff --git a/torch_np/_ndarray.py b/torch_np/_ndarray.py index 628febaa..3c6f8000 100644 --- a/torch_np/_ndarray.py +++ b/torch_np/_ndarray.py @@ -3,13 +3,6 @@ import torch from . 
import _binary_ufuncs, _dtypes, _funcs, _helpers, _unary_ufuncs -from ._decorators import ( - NoValue, - axis_keepdims_wrapper, - axis_none_ravel_wrapper, - dtype_to_torch, - emulate_out_arg, -) from ._detail import _dtypes_impl, _flips, _reductions, _util from ._detail import implementations as _impl @@ -389,12 +382,8 @@ def sort(self, axis=-1, kind=None, order=None): var = _funcs.var std = _funcs.std - cumprod = emulate_out_arg( - axis_none_ravel_wrapper(dtype_to_torch(_reductions.cumprod)) - ) - cumsum = emulate_out_arg( - axis_none_ravel_wrapper(dtype_to_torch(_reductions.cumsum)) - ) + cumsum = _funcs.cumsum + cumprod = _funcs.cumprod ### indexing ### @staticmethod From eb7c4c63fd3c8bdcd6f3cb54a51e909e28590642 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 9 Mar 2023 00:13:32 +0300 Subject: [PATCH 17/33] MAINT: quantile/percentile/median Deviate from the standard argument handling: - percentile and median delegate to quantile at the wrapper level; this is just less code - keepdims=True handling is inline in _reductions.py::quantile, not in a decorator. The standard decorator expects the axis as the second argument, while here it is the third one. Can be fixed, but seems to be more hassle then worth TBH. --- torch_np/_detail/_reductions.py | 11 ++++++++++- torch_np/_funcs.py | 19 +++++++++++++++++++ torch_np/_wrapper.py | 27 +++------------------------ 3 files changed, 32 insertions(+), 25 deletions(-) diff --git a/torch_np/_detail/_reductions.py b/torch_np/_detail/_reductions.py index 8ec7a592..be69d2d1 100644 --- a/torch_np/_detail/_reductions.py +++ b/torch_np/_detail/_reductions.py @@ -337,7 +337,7 @@ def average(a_tensor, axis, w_tensor): return result, denominator -def quantile(a_tensor, q_tensor, axis, method): +def quantile(a_tensor, q_tensor, axis, method, keepdims=False): if (0 > q_tensor).any() or (q_tensor > 1).any(): raise ValueError("Quantiles must be in range [0, 1], got %s" % q_tensor) @@ -350,6 +350,7 @@ def quantile(a_tensor, q_tensor, axis, method): if a_tensor.dtype == torch.float16: a_tensor = a_tensor.to(torch.float32) + # TODO: consider moving this normalize_axis_tuple dance to normalize axis? Across the board if at all. # axis if axis is not None: axis = _util.normalize_axis_tuple(axis, a_tensor.ndim) @@ -357,8 +358,16 @@ def quantile(a_tensor, q_tensor, axis, method): q_tensor = _util.cast_if_needed(q_tensor, a_tensor.dtype) + + # axis=None ravels, so store the originals to reuse with keepdims=True below + ax, ndim = axis, a_tensor.ndim (a_tensor, q_tensor), axis = _util.axis_none_ravel(a_tensor, q_tensor, axis=axis) result = torch.quantile(a_tensor, q_tensor, axis=axis, interpolation=method) + # NB: not using @emulate_keepdims here because the signature is (a, q, axis, ...) + # while the decorator expects (a, axis, ...) 
diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index f59de100..05fc9ca3 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -459,3 +459,22 @@ def cumsum(a: ArrayLike, axis: AxisLike = None, dtype: DTypeLike = None, out=Non def cumprod(a: ArrayLike, axis: AxisLike = None, dtype: DTypeLike = None, out=None): result = _reductions.cumprod(a, axis=axis, dtype=dtype) return _helpers.result_or_out(result, out) + + +@normalizer +def quantile( + a : ArrayLike, + q : ArrayLike, + axis: AxisLike=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + *, + interpolation=None, +): + if interpolation is not None: + raise ValueError("'interpolation' argument is deprecated; use 'method' instead") + + result = _reductions.quantile(a, q, axis, method=method, keepdims=keepdims) + return _helpers.result_or_out(result, out, promote_scalar=True) diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py index ec528576..0e635234 100644 --- a/torch_np/_wrapper.py +++ b/torch_np/_wrapper.py @@ -674,6 +674,7 @@ def average(a, axis=None, weights=None, returned=False, *, keepdims=NoValue): return _helpers.array_from(result) +# Normalizations (ArrayLike et al) are done in quantile. def percentile( a, q, @@ -685,36 +686,14 @@ def percentile( *, interpolation=None, ): - return quantile( + return _funcs.quantile( a, asarray(q) / 100.0, axis, out, overwrite_input, method, keepdims=keepdims ) -def quantile( - a, - q, - axis=None, - out=None, - overwrite_input=False, - method="linear", - keepdims=False, - *, - interpolation=None, -): - if interpolation is not None: - raise ValueError("'interpolation' argument is deprecated; use 'method' instead") - - a_tensor, q_tensor = _helpers.to_tensors(a, q) - result = _reductions.quantile(a_tensor, q_tensor, axis, method) - - # keepdims - if keepdims: - result = _util.apply_keepdims(result, axis, a_tensor.ndim) - return _helpers.result_or_out(result, out, promote_scalar=True) - def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): - return quantile( + return _funcs.quantile( a, 0.5, axis=axis, overwrite_input=overwrite_input, out=out, keepdims=keepdims ) From a20320b6282f185fba2e794b7d41dbf06f2f81dd Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 9 Mar 2023 02:08:19 +0300 Subject: [PATCH 18/33] MAINT: simplify/normalize average --- torch_np/_detail/_reductions.py | 28 +++++++++++++++++++- torch_np/_funcs.py | 3 +++ torch_np/_wrapper.py | 46 +++++---------------------------- 3 files changed, 37 insertions(+), 40 deletions(-) diff --git a/torch_np/_detail/_reductions.py b/torch_np/_detail/_reductions.py index be69d2d1..a005fe74 100644 --- a/torch_np/_detail/_reductions.py +++ b/torch_np/_detail/_reductions.py @@ -292,7 +292,26 @@ def cumsum(tensor, axis, dtype=None): return result -def average(a_tensor, axis, w_tensor): + +def average(a, axis, weights, returned=False, keepdims=False): + if weights is None: + result, wsum = average_noweights(a, axis, keepdims=keepdims) + else: + result, wsum = average_weights(a, axis, weights, keepdims=keepdims) + + if returned: + if wsum.shape != result.shape: + wsum = torch.broadcast_to(wsum, result.shape).clone() + return result, wsum + + +def average_noweights(a_tensor, axis, keepdims=False): + result = mean(a_tensor, axis=axis, keepdims=keepdims) + scl = torch.as_tensor(a_tensor.numel() / result.numel(), dtype=result.dtype) + 
return result, scl + + +def average_weights(a_tensor, axis, w_tensor, keepdims=False): # dtype # FIXME: 1. use result_type @@ -306,6 +325,9 @@ def average(a_tensor, axis, w_tensor): a_tensor = _util.cast_if_needed(a_tensor, result_dtype) w_tensor = _util.cast_if_needed(w_tensor, result_dtype) + # axis=None ravels, so store the originals to reuse with keepdims=True below + ax, ndim = axis, a_tensor.ndim + # axis if axis is None: (a_tensor, w_tensor), axis = _util.axis_none_ravel( @@ -334,6 +356,10 @@ def average(a_tensor, axis, w_tensor): denominator = w_tensor.sum(axis) result = numerator / denominator + # keepdims + if keepdims: + result = _util.apply_keepdims(result, ax, ndim) + return result, denominator diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index 05fc9ca3..b8a98d53 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -461,6 +461,9 @@ def cumprod(a: ArrayLike, axis: AxisLike = None, dtype: DTypeLike = None, out=No return _helpers.result_or_out(result, out) +cumproduct = cumprod + + @normalizer def quantile( a : ArrayLike, diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py index 0e635234..15fcccc0 100644 --- a/torch_np/_wrapper.py +++ b/torch_np/_wrapper.py @@ -631,50 +631,16 @@ def tri(N, M=None, k=0, dtype: DTypeLike = float, *, like: SubokLike = None): ###### reductions - -def cumprod(a, axis=None, dtype=None, out=None): - arr = asarray(a) - return arr.cumprod(axis=axis, dtype=dtype, out=out) - - -cumproduct = cumprod - - -def cumsum(a, axis=None, dtype=None, out=None): - arr = asarray(a) - return arr.cumsum(axis=axis, dtype=dtype, out=out) - - -def average(a, axis=None, weights=None, returned=False, *, keepdims=NoValue): - - if weights is None: - result = _funcs.mean(a, axis=axis, keepdims=keepdims) - if returned: - scl = result.dtype.type(a.size / result.size) - return result, scl - return result - - a_tensor, w_tensor = _helpers.to_tensors(a, weights) - - result, wsum = _reductions.average(a_tensor, axis, w_tensor) - - # keepdims - if keepdims: - result = _util.apply_keepdims(result, axis, a_tensor.ndim) - - # returned +@normalizer +def average(a: ArrayLike, axis=None, weights: ArrayLike=None, returned=False, *, keepdims=NoValue): + result, wsum = _reductions.average(a, axis, weights, returned=returned, keepdims=keepdims) if returned: - scl = wsum - if scl.shape != result.shape: - scl = torch.broadcast_to(scl, result.shape).clone() - - return _helpers.array_from(result), _helpers.array_from(scl) - + return _helpers.tuple_arrays_from((result, wsum)) else: return _helpers.array_from(result) -# Normalizations (ArrayLike et al) are done in quantile. +# Normalizations (ArrayLike et al) in percentile and median are done in `_funcs.py/quantile`. 
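For orientation, the wrapper-level delegation below rests on the identity percentile(a, q) == quantile(a, q / 100), with median as the fixed q = 0.5 special case. A quick numpy sanity check, with made-up values:

import numpy as np

a = np.arange(12.0).reshape(3, 4)
assert np.percentile(a, 75.0) == np.quantile(a, 0.75)  # same number, different scale
assert np.median(a) == np.quantile(a, 0.5)             # median is a fixed quantile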
def percentile( a, q, @@ -710,6 +676,8 @@ def outer(a: ArrayLike, b: ArrayLike, out=None): return _helpers.result_or_out(result, out) +# ### FIXME: this is a stub + @normalizer def nanmean( a: ArrayLike, From 7b447af6313ad38f978d4572dda26e76b2fea097 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 9 Mar 2023 02:21:55 +0300 Subject: [PATCH 19/33] MAINT: normalize array-like arg of full(), a few others --- torch_np/_wrapper.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py index 15fcccc0..409b5193 100644 --- a/torch_np/_wrapper.py +++ b/torch_np/_wrapper.py @@ -307,13 +307,12 @@ def empty_like( @normalizer def full( - shape, fill_value, dtype: DTypeLike = None, order="C", *, like: SubokLike = None + shape, fill_value: ArrayLike, dtype: DTypeLike = None, order="C", *, like: SubokLike = None ): if isinstance(shape, int): shape = (shape,) if order != "C": raise NotImplementedError - fill_value = asarray(fill_value).get() result = _impl.full(shape, fill_value, dtype=dtype) return _helpers.array_from(result) @@ -471,18 +470,18 @@ def where( ###### module-level queries of object properties -def ndim(a): - a = asarray(a).get() +@normalizer +def ndim(a : ArrayLike): return a.ndim -def shape(a): - a = asarray(a).get() +@normalizer +def shape(a: ArrayLike): return tuple(a.shape) -def size(a, axis=None): - a = asarray(a).get() +@normalizer +def size(a: ArrayLike, axis=None): if axis is None: return a.numel() else: From b1ca69a13039bac5b29bfd86dc0e23b7b8f1c4b9 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 9 Mar 2023 02:29:04 +0300 Subject: [PATCH 20/33] MAINT: rm dead code from decorators.py --- torch_np/_decorators.py | 55 ----------------------------------------- 1 file changed, 55 deletions(-) diff --git a/torch_np/_decorators.py b/torch_np/_decorators.py index 37c98bf6..2611b43f 100644 --- a/torch_np/_decorators.py +++ b/torch_np/_decorators.py @@ -82,58 +82,3 @@ def wrapped(x1, x2, *args, **kwds): return wrapped - -def axis_keepdims_wrapper(func): - """`func` accepts an array-like as a 1st arg, returns a tensor. - - This decorator implements the generic handling of axis, out and keepdims - arguments for reduction functions. - - Note that we peel off `out=...` and `keepdims=...` args (torch functions never - see them). The `axis` argument we normalize and pass through to pytorch functions. - - """ - # TODO: sort out function signatures: how they flow through all decorators etc - @functools.wraps(func) - def wrapped(a, axis=None, keepdims=NoValue, *args, **kwds): - from ._ndarray import asarray, ndarray - - tensor = asarray(a).get() - - # standardize the axis argument - if isinstance(axis, ndarray): - axis = operator.index(axis) - - result = _util.axis_expand_func(func, tensor, axis, *args, **kwds) - - if keepdims: - result = _util.apply_keepdims(result, axis, tensor.ndim) - - return result - - return wrapped - - -def axis_none_ravel_wrapper(func): - """`func` accepts an array-like as a 1st arg, returns a tensor. - - This decorator implements the generic handling of axis=None acting on a - raveled array. One use is cumprod / cumsum. concatenate also uses a - similar logic. 
- - """ - - @functools.wraps(func) - def wrapped(a, axis=None, *args, **kwds): - from ._ndarray import asarray, ndarray - - tensor = asarray(a).get() - - # standardize the axis argument - if isinstance(axis, ndarray): - axis = operator.index(axis) - - result = _util.axis_ravel_func(func, tensor, axis, *args, **kwds) - return result - - return wrapped From 0bfddd0a203e4802f761a4fa27cc8210a6c716cc Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 9 Mar 2023 23:31:31 +0300 Subject: [PATCH 21/33] MAINT: rework unary and binary ufuncs w/ normalizations --- torch_np/_binary_ufuncs.py | 104 ++++++++++--------- torch_np/_decorators.py | 54 ---------- torch_np/_detail/_binary_ufuncs.py | 30 ++++++ torch_np/_detail/_ufunc_impl.py | 158 ----------------------------- torch_np/_detail/_unary_ufuncs.py | 35 +++++++ torch_np/_funcs.py | 5 +- torch_np/_unary_ufuncs.py | 153 +++++++++++----------------- torch_np/tests/test_basic.py | 5 +- 8 files changed, 185 insertions(+), 359 deletions(-) create mode 100644 torch_np/_detail/_binary_ufuncs.py delete mode 100644 torch_np/_detail/_ufunc_impl.py create mode 100644 torch_np/_detail/_unary_ufuncs.py diff --git a/torch_np/_binary_ufuncs.py b/torch_np/_binary_ufuncs.py index 719771c5..4a93a95f 100644 --- a/torch_np/_binary_ufuncs.py +++ b/torch_np/_binary_ufuncs.py @@ -1,52 +1,60 @@ -from ._decorators import deco_binary_ufunc_from_impl -from ._detail import _ufunc_impl +from ._detail import _binary_ufuncs + +__all__ = [name for name in dir(_binary_ufuncs) if not name.startswith("_") and name != "torch"] + + +# TODO: consolidate normalizations +from ._funcs import normalizer, ArrayLike, SubokLike, DTypeLike +from ._detail import _util +from . import _helpers + + +def deco_binary_ufunc(torch_func): + """Common infra for unary ufuncs. + + Normalize arguments, sort out type casting, broadcasting and delegate to + the pytorch functions for the actual work. + """ + def wrapped( + x1 : ArrayLike, + x2 : ArrayLike, + /, + out=None, + *, + where=True, + casting="same_kind", + order="K", + dtype: DTypeLike=None, + subok: SubokLike=False, + signature=None, + extobj=None + ): + if order != "K" or not where or signature or extobj: + raise NotImplementedError + + # XXX: dtype=... parameter + if dtype is not None: + raise NotImplementedError + + out_shape_dtype = None + if out is not None: + out_shape_dtype = (out.get().dtype, out.get().shape) + + tensors = _util.cast_and_broadcast((x1, x2), out_shape_dtype, casting) + + result = torch_func(*tensors) + return _helpers.result_or_out(result, out) + + return wrapped # -# Functions in this file implement binary ufuncs: wrap two first arguments in -# asarray and delegate to functions from _ufunc_impl. -# -# Functions in _detail/_ufunc_impl.py receive tensors, implement common tasks -# with ufunc args, and delegate heavy lifting to pytorch equivalents. +# For each torch ufunc implementation, decorate and attach the decorated name +# to this module. 
Its contents is then exported to the public namespace in __init__.py # +for name in __all__: + ufunc = getattr(_binary_ufuncs, name) + decorated = normalizer(deco_binary_ufunc(ufunc)) -# the list is autogenerated, cf autogen/gen_ufunc_2.py -add = deco_binary_ufunc_from_impl(_ufunc_impl.add) -arctan2 = deco_binary_ufunc_from_impl(_ufunc_impl.arctan2) -bitwise_and = deco_binary_ufunc_from_impl(_ufunc_impl.bitwise_and) -bitwise_or = deco_binary_ufunc_from_impl(_ufunc_impl.bitwise_or) -bitwise_xor = deco_binary_ufunc_from_impl(_ufunc_impl.bitwise_xor) -copysign = deco_binary_ufunc_from_impl(_ufunc_impl.copysign) -divide = deco_binary_ufunc_from_impl(_ufunc_impl.divide) -equal = deco_binary_ufunc_from_impl(_ufunc_impl.equal) -float_power = deco_binary_ufunc_from_impl(_ufunc_impl.float_power) -floor_divide = deco_binary_ufunc_from_impl(_ufunc_impl.floor_divide) -fmax = deco_binary_ufunc_from_impl(_ufunc_impl.fmax) -fmin = deco_binary_ufunc_from_impl(_ufunc_impl.fmin) -fmod = deco_binary_ufunc_from_impl(_ufunc_impl.fmod) -gcd = deco_binary_ufunc_from_impl(_ufunc_impl.gcd) -greater = deco_binary_ufunc_from_impl(_ufunc_impl.greater) -greater_equal = deco_binary_ufunc_from_impl(_ufunc_impl.greater_equal) -heaviside = deco_binary_ufunc_from_impl(_ufunc_impl.heaviside) -hypot = deco_binary_ufunc_from_impl(_ufunc_impl.hypot) -lcm = deco_binary_ufunc_from_impl(_ufunc_impl.lcm) -ldexp = deco_binary_ufunc_from_impl(_ufunc_impl.ldexp) -left_shift = deco_binary_ufunc_from_impl(_ufunc_impl.left_shift) -less = deco_binary_ufunc_from_impl(_ufunc_impl.less) -less_equal = deco_binary_ufunc_from_impl(_ufunc_impl.less_equal) -logaddexp = deco_binary_ufunc_from_impl(_ufunc_impl.logaddexp) -logaddexp2 = deco_binary_ufunc_from_impl(_ufunc_impl.logaddexp2) -logical_and = deco_binary_ufunc_from_impl(_ufunc_impl.logical_and) -logical_or = deco_binary_ufunc_from_impl(_ufunc_impl.logical_or) -logical_xor = deco_binary_ufunc_from_impl(_ufunc_impl.logical_xor) -matmul = deco_binary_ufunc_from_impl(_ufunc_impl.matmul) -maximum = deco_binary_ufunc_from_impl(_ufunc_impl.maximum) -minimum = deco_binary_ufunc_from_impl(_ufunc_impl.minimum) -remainder = deco_binary_ufunc_from_impl(_ufunc_impl.remainder) -multiply = deco_binary_ufunc_from_impl(_ufunc_impl.multiply) -nextafter = deco_binary_ufunc_from_impl(_ufunc_impl.nextafter) -not_equal = deco_binary_ufunc_from_impl(_ufunc_impl.not_equal) -power = deco_binary_ufunc_from_impl(_ufunc_impl.power) -remainder = deco_binary_ufunc_from_impl(_ufunc_impl.remainder) -right_shift = deco_binary_ufunc_from_impl(_ufunc_impl.right_shift) -subtract = deco_binary_ufunc_from_impl(_ufunc_impl.subtract) -divide = deco_binary_ufunc_from_impl(_ufunc_impl.divide) + decorated.__qualname__ = name # XXX: is this really correct? + decorated.__name__ = name + vars()[name] = decorated diff --git a/torch_np/_decorators.py b/torch_np/_decorators.py index 2611b43f..9065ff3c 100644 --- a/torch_np/_decorators.py +++ b/torch_np/_decorators.py @@ -9,31 +9,6 @@ NoValue = None -def dtype_to_torch(func): - @functools.wraps(func) - def wrapped(*args, dtype=None, **kwds): - torch_dtype = None - if dtype is not None: - dtype = _dtypes.dtype(dtype) - torch_dtype = dtype.torch_dtype - return func(*args, dtype=torch_dtype, **kwds) - - return wrapped - - -def emulate_out_arg(func): - """Simulate the out=... handling: move the result tensor to the out array. - - With this decorator, the inner function just does not see the out array. 
- """ - - @functools.wraps(func) - def wrapped(*args, out=None, **kwds): - result_tensor = func(*args, **kwds) - return _helpers.result_or_out(result_tensor, out) - - return wrapped - def out_shape_dtype(func): """Handle out=... kwarg for ufuncs. @@ -53,32 +28,3 @@ def wrapped(*args, out=None, **kwds): return wrapped -def deco_unary_ufunc_from_impl(impl_func): - @functools.wraps(impl_func) - @dtype_to_torch - @out_shape_dtype - def wrapped(x1, *args, **kwds): - from ._ndarray import asarray - - x1_tensor = asarray(x1).get() - result = impl_func((x1_tensor,), *args, **kwds) - return result - - return wrapped - - -# TODO: deduplicate with _ndarray/asarray_replacer, -# and _wrapper/concatenate et al -def deco_binary_ufunc_from_impl(impl_func): - @functools.wraps(impl_func) - @dtype_to_torch - @out_shape_dtype - def wrapped(x1, x2, *args, **kwds): - from ._ndarray import asarray - - x1_tensor = asarray(x1).get() - x2_tensor = asarray(x2).get() - return impl_func((x1_tensor, x2_tensor), *args, **kwds) - - return wrapped - diff --git a/torch_np/_detail/_binary_ufuncs.py b/torch_np/_detail/_binary_ufuncs.py new file mode 100644 index 00000000..d531bc02 --- /dev/null +++ b/torch_np/_detail/_binary_ufuncs.py @@ -0,0 +1,30 @@ +"""Export torch work functions for binary ufuncs, rename/tweak to match numpy. +This listing is further exported to public symbols in the `torch_np/_binary_ufuncs.py` module. +""" + +import torch + +from . import _dtypes_impl, _util + + +from torch import (add, arctan2, bitwise_and, bitwise_or, bitwise_xor, copysign, divide, + float_power, floor_divide, fmax, fmin, fmod, gcd, greater, greater_equal, heaviside, + hypot, lcm, ldexp, less, less_equal, logaddexp, logaddexp2, logical_and, + logical_or, logical_xor, maximum, minimum, remainder, multiply, nextafter, not_equal, + remainder, subtract, divide) + + +# renames +from torch import (eq as equal, pow as power, bitwise_right_shift as right_shift, + bitwise_left_shift as left_shift,) + + +# work around torch limitations w.r.t. numpy +def matmul(x, y): + # work around RuntimeError: expected scalar type Int but found Double + dtype = _dtypes_impl.result_type_impl((x.dtype, y.dtype)) + x = _util.cast_if_needed(x, dtype) + y = _util.cast_if_needed(y, dtype) + result = torch.matmul(x, y) + return result + diff --git a/torch_np/_detail/_ufunc_impl.py b/torch_np/_detail/_ufunc_impl.py deleted file mode 100644 index 2cd1ecee..00000000 --- a/torch_np/_detail/_ufunc_impl.py +++ /dev/null @@ -1,158 +0,0 @@ -import torch - -from . import _dtypes_impl, _util - - -def deco_ufunc(torch_func): - """Common infra for binary ufuncs: receive tensors, sort out type casting, - broadcasting, and delegate to the pytorch function for actual work. - - - Converting array-likes into arrays, unwrapping them into tensors etc - is the caller responsibility. - """ - - def wrapped( - tensors, - /, - out_shape_dtype=None, - *, - where=True, - casting="same_kind", - order="K", - dtype=None, - subok=False, - **kwds, - ): - _util.subok_not_ok(subok=subok) - if order != "K" or not where: - raise NotImplementedError - - # XXX: dtype=... parameter - if dtype is not None: - raise NotImplementedError - - tensors = _util.cast_and_broadcast(tensors, out_shape_dtype, casting) - - result = torch_func(*tensors) - return result - - return wrapped - - -# binary ufuncs: the list is autogenerated, cf autogen/gen_ufunc_2.py -# And edited manually! 
np.equal <--> torch.eq, not torch.equal -add = deco_ufunc(torch.add) -arctan2 = deco_ufunc(torch.arctan2) -bitwise_and = deco_ufunc(torch.bitwise_and) -bitwise_or = deco_ufunc(torch.bitwise_or) -bitwise_xor = deco_ufunc(torch.bitwise_xor) -copysign = deco_ufunc(torch.copysign) -divide = deco_ufunc(torch.divide) -equal = deco_ufunc(torch.eq) -float_power = deco_ufunc(torch.float_power) -floor_divide = deco_ufunc(torch.floor_divide) -fmax = deco_ufunc(torch.fmax) -fmin = deco_ufunc(torch.fmin) -fmod = deco_ufunc(torch.fmod) -gcd = deco_ufunc(torch.gcd) -greater = deco_ufunc(torch.greater) -greater_equal = deco_ufunc(torch.greater_equal) -heaviside = deco_ufunc(torch.heaviside) -hypot = deco_ufunc(torch.hypot) -lcm = deco_ufunc(torch.lcm) -ldexp = deco_ufunc(torch.ldexp) -left_shift = deco_ufunc(torch.bitwise_left_shift) -less = deco_ufunc(torch.less) -less_equal = deco_ufunc(torch.less_equal) -logaddexp = deco_ufunc(torch.logaddexp) -logaddexp2 = deco_ufunc(torch.logaddexp2) -logical_and = deco_ufunc(torch.logical_and) -logical_or = deco_ufunc(torch.logical_or) -logical_xor = deco_ufunc(torch.logical_xor) -maximum = deco_ufunc(torch.maximum) -minimum = deco_ufunc(torch.minimum) -remainder = deco_ufunc(torch.remainder) -multiply = deco_ufunc(torch.multiply) -nextafter = deco_ufunc(torch.nextafter) -not_equal = deco_ufunc(torch.not_equal) -power = deco_ufunc(torch.pow) -remainder = deco_ufunc(torch.remainder) -right_shift = deco_ufunc(torch.bitwise_right_shift) -subtract = deco_ufunc(torch.subtract) -divide = deco_ufunc(torch.divide) - - -# unary ufuncs: the list is autogenerated, cf autogen/gen_ufunc_2.py -arccos = deco_ufunc(torch.arccos) -arccosh = deco_ufunc(torch.arccosh) -arcsin = deco_ufunc(torch.arcsin) -arcsinh = deco_ufunc(torch.arcsinh) -arctan = deco_ufunc(torch.arctan) -arctanh = deco_ufunc(torch.arctanh) -ceil = deco_ufunc(torch.ceil) -conjugate = deco_ufunc(torch.conj_physical) -# conjugate = deco_ufunc(torch.conj_physical) -cos = deco_ufunc(torch.cos) -cosh = deco_ufunc(torch.cosh) -deg2rad = deco_ufunc(torch.deg2rad) -degrees = deco_ufunc(torch.rad2deg) -exp = deco_ufunc(torch.exp) -exp2 = deco_ufunc(torch.exp2) -expm1 = deco_ufunc(torch.expm1) -fabs = deco_ufunc(torch.absolute) -floor = deco_ufunc(torch.floor) -isfinite = deco_ufunc(torch.isfinite) -isinf = deco_ufunc(torch.isinf) -isnan = deco_ufunc(torch.isnan) -log = deco_ufunc(torch.log) -log10 = deco_ufunc(torch.log10) -log1p = deco_ufunc(torch.log1p) -log2 = deco_ufunc(torch.log2) -logical_not = deco_ufunc(torch.logical_not) -negative = deco_ufunc(torch.negative) -rad2deg = deco_ufunc(torch.rad2deg) -radians = deco_ufunc(torch.deg2rad) -reciprocal = deco_ufunc(torch.reciprocal) -rint = deco_ufunc(torch.round) -sign = deco_ufunc(torch.sign) -signbit = deco_ufunc(torch.signbit) -sin = deco_ufunc(torch.sin) -sinh = deco_ufunc(torch.sinh) -sqrt = deco_ufunc(torch.sqrt) -square = deco_ufunc(torch.square) -tan = deco_ufunc(torch.tan) -tanh = deco_ufunc(torch.tanh) -trunc = deco_ufunc(torch.trunc) - -invert = deco_ufunc(torch.bitwise_not) - -# special cases: torch does not export these names -def _cbrt(x): - return torch.pow(x, 1 / 3) - - -def _positive(x): - return +x - - -def _absolute(x): - # work around torch.absolute not impl for bools - if x.dtype == torch.bool: - return x - return torch.absolute(x) - - -def _matmul(x, y): - # work around RuntimeError: expected scalar type Int but found Double - dtype = _dtypes_impl.result_type_impl((x.dtype, y.dtype)) - x = _util.cast_if_needed(x, dtype) - y = _util.cast_if_needed(y, 
dtype) - result = torch.matmul(x, y) - return result - - -cbrt = deco_ufunc(_cbrt) -positive = deco_ufunc(_positive) -absolute = deco_ufunc(_absolute) -matmul = deco_ufunc(_matmul) diff --git a/torch_np/_detail/_unary_ufuncs.py b/torch_np/_detail/_unary_ufuncs.py new file mode 100644 index 00000000..979b6d09 --- /dev/null +++ b/torch_np/_detail/_unary_ufuncs.py @@ -0,0 +1,35 @@ +"""Export torch work functions for unary ufuncs, rename/tweak to match numpy. +This listing is further exported to public symbols in the `torch_np/_unary_ufuncs.py` module. +""" + +import torch + +from torch import (arccos, arccosh, arcsin, arcsinh, arctan, arctanh, ceil, + cos, cosh, deg2rad, exp, exp2, expm1, + floor, isfinite, isinf, isnan, log, log10, log1p, log2, logical_not, + negative, rad2deg, reciprocal, sign, signbit, + sin, sinh, sqrt, square, tan, tanh, trunc) + +# renames +from torch import (conj_physical as conjugate, round as rint, bitwise_not as invert, rad2deg as degrees, + deg2rad as radians, absolute as fabs, ) + +# special cases: torch does not export these names +def cbrt(x): + return torch.pow(x, 1 / 3) + + +def positive(x): + return +x + + +def absolute(x): + # work around torch.absolute not impl for bools + if x.dtype == torch.bool: + return x + return torch.absolute(x) + + +abs = absolute +conj = conjugate + diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index b8a98d53..9101fbf9 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -139,9 +139,12 @@ def wrapped(*args, **kwds): # 3. [LOOKS OK] optional (tensor_or_none) : untyped => pass through # 4. [LOOKS OK] DTypeLike : positional or kw # 5. axes : live in _impl or in types? several ways of handling them - # 6. keepdims : peel off, postprocess + # 6. [OK, NOT HERE] keepdims : peel off, postprocess # 7. OutLike : normal & keyword-only, peel off, postprocess # 8. [LOOKS OK] *args + # 9. consolidate normalizations (_funcs, _wrapper) + # 10. consolidate decorators (_{unary,binary}_ufuncs) + # 11. 
out= arg : validate it's an ndarray # finally, pass normalized arguments through result = func(*ba.args, **ba.kwargs) diff --git a/torch_np/_unary_ufuncs.py b/torch_np/_unary_ufuncs.py index e50e96fb..db7fe178 100644 --- a/torch_np/_unary_ufuncs.py +++ b/torch_np/_unary_ufuncs.py @@ -1,103 +1,64 @@ -from ._decorators import deco_unary_ufunc_from_impl -from ._detail import _ufunc_impl +#from ._decorators import deco_unary_ufunc_from_impl +#from ._detail import _ufunc_impl -__all__ = [ - "abs", - "absolute", - "arccos", - "arccosh", - "arcsin", - "arcsinh", - "arctan", - "arctanh", - "cbrt", - "ceil", - "conj", - "conjugate", - "cos", - "cosh", - "deg2rad", - "degrees", - "exp", - "exp2", - "expm1", - "fabs", - "floor", - "isfinite", - "isinf", - "isnan", - "log", - "log10", - "log1p", - "log2", - "logical_not", - "negative", - "positive", - "rad2deg", - "radians", - "reciprocal", - "rint", - "sign", - "signbit", - "sin", - "sinh", - "sqrt", - "square", - "tan", - "tanh", - "trunc", - "invert", -] +from ._detail import _unary_ufuncs -absolute = deco_unary_ufunc_from_impl(_ufunc_impl.absolute) -arccos = deco_unary_ufunc_from_impl(_ufunc_impl.arccos) -arccosh = deco_unary_ufunc_from_impl(_ufunc_impl.arccosh) -arcsin = deco_unary_ufunc_from_impl(_ufunc_impl.arcsin) -arcsinh = deco_unary_ufunc_from_impl(_ufunc_impl.arcsinh) -arctan = deco_unary_ufunc_from_impl(_ufunc_impl.arctan) -arctanh = deco_unary_ufunc_from_impl(_ufunc_impl.arctanh) -ceil = deco_unary_ufunc_from_impl(_ufunc_impl.ceil) -conjugate = deco_unary_ufunc_from_impl(_ufunc_impl.conjugate) -cos = deco_unary_ufunc_from_impl(_ufunc_impl.cos) -cosh = deco_unary_ufunc_from_impl(_ufunc_impl.cosh) -deg2rad = deco_unary_ufunc_from_impl(_ufunc_impl.deg2rad) -degrees = deco_unary_ufunc_from_impl(_ufunc_impl.rad2deg) -exp = deco_unary_ufunc_from_impl(_ufunc_impl.exp) -exp2 = deco_unary_ufunc_from_impl(_ufunc_impl.exp2) -expm1 = deco_unary_ufunc_from_impl(_ufunc_impl.expm1) -fabs = deco_unary_ufunc_from_impl(_ufunc_impl.absolute) -floor = deco_unary_ufunc_from_impl(_ufunc_impl.floor) -isfinite = deco_unary_ufunc_from_impl(_ufunc_impl.isfinite) -isinf = deco_unary_ufunc_from_impl(_ufunc_impl.isinf) -isnan = deco_unary_ufunc_from_impl(_ufunc_impl.isnan) -log = deco_unary_ufunc_from_impl(_ufunc_impl.log) -log10 = deco_unary_ufunc_from_impl(_ufunc_impl.log10) -log1p = deco_unary_ufunc_from_impl(_ufunc_impl.log1p) -log2 = deco_unary_ufunc_from_impl(_ufunc_impl.log2) -logical_not = deco_unary_ufunc_from_impl(_ufunc_impl.logical_not) -negative = deco_unary_ufunc_from_impl(_ufunc_impl.negative) -rad2deg = deco_unary_ufunc_from_impl(_ufunc_impl.rad2deg) -radians = deco_unary_ufunc_from_impl(_ufunc_impl.deg2rad) -reciprocal = deco_unary_ufunc_from_impl(_ufunc_impl.reciprocal) -rint = deco_unary_ufunc_from_impl(_ufunc_impl.rint) -sign = deco_unary_ufunc_from_impl(_ufunc_impl.sign) -signbit = deco_unary_ufunc_from_impl(_ufunc_impl.signbit) -sin = deco_unary_ufunc_from_impl(_ufunc_impl.sin) -sinh = deco_unary_ufunc_from_impl(_ufunc_impl.sinh) -sqrt = deco_unary_ufunc_from_impl(_ufunc_impl.sqrt) -square = deco_unary_ufunc_from_impl(_ufunc_impl.square) -tan = deco_unary_ufunc_from_impl(_ufunc_impl.tan) -tanh = deco_unary_ufunc_from_impl(_ufunc_impl.tanh) -trunc = deco_unary_ufunc_from_impl(_ufunc_impl.trunc) +__all__ = [name for name in dir(_unary_ufuncs) if not name.startswith("_") and name != "torch"] -invert = deco_unary_ufunc_from_impl(_ufunc_impl.invert) +# TODO: consolidate normalizations +from ._funcs import normalizer, ArrayLike, SubokLike, DTypeLike 
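For orientation, the generate-and-attach loop at the bottom of this file boils down to the following standalone toy; math and deco_twice here are illustrative stand-ins, not torch_np code:

import math

def deco_twice(fn):
    # decorate: apply fn twice; mimics how torch_np wraps each torch ufunc
    def wrapped(x):
        return fn(fn(x))
    wrapped.__name__ = fn.__name__      # keep the numpy-facing name
    wrapped.__qualname__ = fn.__name__
    return wrapped

# at module level, vars() is the module dict, so this attaches public names
for _name in ["sqrt", "exp"]:
    vars()[_name] = deco_twice(getattr(math, _name))

# after the loop, sqrt(16.0) == math.sqrt(math.sqrt(16.0)) == 2.0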
+from ._detail import _util +from . import _helpers +#import torch -cbrt = deco_unary_ufunc_from_impl(_ufunc_impl.cbrt) -positive = deco_unary_ufunc_from_impl(_ufunc_impl.positive) -# numpy has these aliases while torch does not -abs = absolute -conj = conjugate -bitwise_not = invert +def deco_unary_ufunc(torch_func): + """Common infra for unary ufuncs. + + Normalize arguments, sort out type casting, broadcasting and delegate to + the pytorch functions for the actual work. + """ + def wrapped( + x : ArrayLike, + /, + out=None, + *, + where=True, + casting="same_kind", + order="K", + dtype: DTypeLike=None, + subok: SubokLike=False, + signature=None, + extobj=None + ): + if order != "K" or not where or signature or extobj: + raise NotImplementedError + + # XXX: dtype=... parameter + if dtype is not None: + raise NotImplementedError + + out_shape_dtype = None + if out is not None: + out_shape_dtype = (out.get().dtype, out.get().shape) + + tensors = _util.cast_and_broadcast((x,), out_shape_dtype, casting) + + result = torch_func(*tensors) + return _helpers.result_or_out(result, out) + + return wrapped + +# +# For each torch ufunc implementation, decorate and attach the decorated name +# to this module. Its contents is then exported to the public namespace in __init__.py +# +for name in __all__: + ufunc = getattr(_unary_ufuncs, name) + decorated = normalizer(deco_unary_ufunc(ufunc)) + + decorated.__qualname__ = name # XXX: is this really correct? + decorated.__name__ = name + vars()[name] = decorated diff --git a/torch_np/tests/test_basic.py b/torch_np/tests/test_basic.py index 0f43be84..934aab78 100644 --- a/torch_np/tests/test_basic.py +++ b/torch_np/tests/test_basic.py @@ -43,9 +43,10 @@ w.flatnonzero, ] +ufunc_names = _unary_ufuncs.__all__ +ufunc_names.remove('invert') # torch: bitwise_not_cpu not implemented for 'Float' -one_arg_funcs += [getattr(_unary_ufuncs, name) for name in _unary_ufuncs.__all__] -one_arg_funcs = one_arg_funcs[:-1] # FIXME: remove np.invert +one_arg_funcs += [getattr(_unary_ufuncs, name) for name in ufunc_names] @pytest.mark.parametrize("func", one_arg_funcs) From 649431cc84119407b4cfc2aaac93984837f23ced Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Fri, 10 Mar 2023 11:34:55 +0300 Subject: [PATCH 22/33] lint --- torch_np/_binary_ufuncs.py | 25 +++--- torch_np/_decorators.py | 4 - torch_np/_detail/_binary_ufuncs.py | 49 ++++++++--- torch_np/_detail/_reductions.py | 7 +- torch_np/_detail/_unary_ufuncs.py | 38 ++++++--- torch_np/_funcs.py | 126 +++++++++++++++++++++++------ torch_np/_unary_ufuncs.py | 30 ++++--- torch_np/_wrapper.py | 31 +++++-- torch_np/tests/test_basic.py | 2 +- 9 files changed, 227 insertions(+), 85 deletions(-) diff --git a/torch_np/_binary_ufuncs.py b/torch_np/_binary_ufuncs.py index 4a93a95f..7e904943 100644 --- a/torch_np/_binary_ufuncs.py +++ b/torch_np/_binary_ufuncs.py @@ -1,12 +1,15 @@ from ._detail import _binary_ufuncs -__all__ = [name for name in dir(_binary_ufuncs) if not name.startswith("_") and name != "torch"] +__all__ = [ + name for name in dir(_binary_ufuncs) if not name.startswith("_") and name != "torch" +] -# TODO: consolidate normalizations -from ._funcs import normalizer, ArrayLike, SubokLike, DTypeLike -from ._detail import _util from . 
import _helpers +from ._detail import _util + +# TODO: consolidate normalizations +from ._funcs import ArrayLike, DTypeLike, SubokLike, normalizer def deco_binary_ufunc(torch_func): @@ -15,19 +18,20 @@ def deco_binary_ufunc(torch_func): Normalize arguments, sort out type casting, broadcasting and delegate to the pytorch functions for the actual work. """ + def wrapped( - x1 : ArrayLike, - x2 : ArrayLike, + x1: ArrayLike, + x2: ArrayLike, /, out=None, *, where=True, casting="same_kind", order="K", - dtype: DTypeLike=None, - subok: SubokLike=False, + dtype: DTypeLike = None, + subok: SubokLike = False, signature=None, - extobj=None + extobj=None, ): if order != "K" or not where or signature or extobj: raise NotImplementedError @@ -47,6 +51,7 @@ def wrapped( return wrapped + # # For each torch ufunc implementation, decorate and attach the decorated name # to this module. Its contents is then exported to the public namespace in __init__.py @@ -55,6 +60,6 @@ def wrapped( ufunc = getattr(_binary_ufuncs, name) decorated = normalizer(deco_binary_ufunc(ufunc)) - decorated.__qualname__ = name # XXX: is this really correct? + decorated.__qualname__ = name # XXX: is this really correct? decorated.__name__ = name vars()[name] = decorated diff --git a/torch_np/_decorators.py b/torch_np/_decorators.py index 9065ff3c..2d82fa45 100644 --- a/torch_np/_decorators.py +++ b/torch_np/_decorators.py @@ -1,5 +1,4 @@ import functools -import operator import torch @@ -9,7 +8,6 @@ NoValue = None - def out_shape_dtype(func): """Handle out=... kwarg for ufuncs. @@ -26,5 +24,3 @@ def wrapped(*args, out=None, **kwds): return _helpers.result_or_out(result_tensor, out) return wrapped - - diff --git a/torch_np/_detail/_binary_ufuncs.py b/torch_np/_detail/_binary_ufuncs.py index d531bc02..fbdff059 100644 --- a/torch_np/_detail/_binary_ufuncs.py +++ b/torch_np/_detail/_binary_ufuncs.py @@ -4,19 +4,43 @@ import torch -from . import _dtypes_impl, _util - - -from torch import (add, arctan2, bitwise_and, bitwise_or, bitwise_xor, copysign, divide, - float_power, floor_divide, fmax, fmin, fmod, gcd, greater, greater_equal, heaviside, - hypot, lcm, ldexp, less, less_equal, logaddexp, logaddexp2, logical_and, - logical_or, logical_xor, maximum, minimum, remainder, multiply, nextafter, not_equal, - remainder, subtract, divide) - - # renames -from torch import (eq as equal, pow as power, bitwise_right_shift as right_shift, - bitwise_left_shift as left_shift,) +from torch import add, arctan2, bitwise_and +from torch import bitwise_left_shift as left_shift +from torch import bitwise_or +from torch import bitwise_right_shift as right_shift +from torch import bitwise_xor, copysign, divide +from torch import eq as equal +from torch import ( + float_power, + floor_divide, + fmax, + fmin, + fmod, + gcd, + greater, + greater_equal, + heaviside, + hypot, + lcm, + ldexp, + less, + less_equal, + logaddexp, + logaddexp2, + logical_and, + logical_or, + logical_xor, + maximum, + minimum, + multiply, + nextafter, + not_equal, +) +from torch import pow as power +from torch import remainder, subtract + +from . import _dtypes_impl, _util # work around torch limitations w.r.t. 
numpy @@ -27,4 +51,3 @@ def matmul(x, y): y = _util.cast_if_needed(y, dtype) result = torch.matmul(x, y) return result - diff --git a/torch_np/_detail/_reductions.py b/torch_np/_detail/_reductions.py index a005fe74..4db55f40 100644 --- a/torch_np/_detail/_reductions.py +++ b/torch_np/_detail/_reductions.py @@ -13,13 +13,13 @@ import functools - ############# XXX ### From _util.axis_expand_func def deco_axis_expand(func): """Generically handle axis arguments in reductions.""" + @functools.wraps(func) def wrapped(tensor, axis, *args, **kwds): @@ -46,11 +46,13 @@ def wrapped(tensor, axis=None, keepdims=NoValue, *args, **kwds): if keepdims: result = _util.apply_keepdims(result, axis, tensor.ndim) return result + return wrapped def deco_axis_ravel(func): """Generically handle 'axis=None ravels' behavior.""" + @functools.wraps(func) def wrapped(tensor, axis, *args, **kwds): if axis is not None: @@ -61,6 +63,7 @@ def wrapped(tensor, axis, *args, **kwds): result = func(tensor, axis=axis, *args, **kwds) return result + return wrapped @@ -292,7 +295,6 @@ def cumsum(tensor, axis, dtype=None): return result - def average(a, axis, weights, returned=False, keepdims=False): if weights is None: result, wsum = average_noweights(a, axis, keepdims=keepdims) @@ -384,7 +386,6 @@ def quantile(a_tensor, q_tensor, axis, method, keepdims=False): q_tensor = _util.cast_if_needed(q_tensor, a_tensor.dtype) - # axis=None ravels, so store the originals to reuse with keepdims=True below ax, ndim = axis, a_tensor.ndim (a_tensor, q_tensor), axis = _util.axis_none_ravel(a_tensor, q_tensor, axis=axis) diff --git a/torch_np/_detail/_unary_ufuncs.py b/torch_np/_detail/_unary_ufuncs.py index 979b6d09..e482e85f 100644 --- a/torch_np/_detail/_unary_ufuncs.py +++ b/torch_np/_detail/_unary_ufuncs.py @@ -4,15 +4,36 @@ import torch -from torch import (arccos, arccosh, arcsin, arcsinh, arctan, arctanh, ceil, - cos, cosh, deg2rad, exp, exp2, expm1, - floor, isfinite, isinf, isnan, log, log10, log1p, log2, logical_not, - negative, rad2deg, reciprocal, sign, signbit, - sin, sinh, sqrt, square, tan, tanh, trunc) - # renames -from torch import (conj_physical as conjugate, round as rint, bitwise_not as invert, rad2deg as degrees, - deg2rad as radians, absolute as fabs, ) +from torch import absolute as fabs +from torch import arccos, arccosh, arcsin, arcsinh, arctan, arctanh +from torch import bitwise_not as invert +from torch import ceil +from torch import conj_physical as conjugate +from torch import cos, cosh +from torch import deg2rad +from torch import deg2rad as radians +from torch import ( + exp, + exp2, + expm1, + floor, + isfinite, + isinf, + isnan, + log, + log1p, + log2, + log10, + logical_not, + negative, +) +from torch import rad2deg +from torch import rad2deg as degrees +from torch import reciprocal +from torch import round as rint +from torch import sign, signbit, sin, sinh, sqrt, square, tan, tanh, trunc + # special cases: torch does not export these names def cbrt(x): @@ -32,4 +53,3 @@ def absolute(x): abs = absolute conj = conjugate - diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index 9101fbf9..5093b9ad 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -5,7 +5,7 @@ import torch from . 
import _decorators, _helpers -from ._detail import _dtypes_impl, _flips, _util, _reductions +from ._detail import _dtypes_impl, _flips, _reductions, _util from ._detail import implementations as _impl ################################## normalizations @@ -364,17 +364,38 @@ def round_(a: ArrayLike, decimals=0, out=None): # ### reductions ### -NoValue = None # FIXME +NoValue = None # FIXME + @normalizer -def sum(a : ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): - result = _reductions.sum(a, axis=axis, dtype=dtype, initial=initial, where=where, keepdims=keepdims) +def sum( + a: ArrayLike, + axis: AxisLike = None, + dtype: DTypeLike = None, + out=None, + keepdims=NoValue, + initial=NoValue, + where=NoValue, +): + result = _reductions.sum( + a, axis=axis, dtype=dtype, initial=initial, where=where, keepdims=keepdims + ) return _helpers.result_or_out(result, out) @normalizer -def prod(a : ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): - result = _reductions.prod(a, axis=axis, dtype=dtype, initial=initial, where=where, keepdims=keepdims) +def prod( + a: ArrayLike, + axis: AxisLike = None, + dtype: DTypeLike = None, + out=None, + keepdims=NoValue, + initial=NoValue, + where=NoValue, +): + result = _reductions.prod( + a, axis=axis, dtype=dtype, initial=initial, where=where, keepdims=keepdims + ) return _helpers.result_or_out(result, out) @@ -382,38 +403,79 @@ def prod(a : ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, k @normalizer -def mean(a : ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, keepdims=NoValue, *, where=NoValue): - result = _reductions.mean(a, axis=axis, dtype=dtype, where=NoValue, keepdims=keepdims) +def mean( + a: ArrayLike, + axis: AxisLike = None, + dtype: DTypeLike = None, + out=None, + keepdims=NoValue, + *, + where=NoValue, +): + result = _reductions.mean( + a, axis=axis, dtype=dtype, where=NoValue, keepdims=keepdims + ) return _helpers.result_or_out(result, out) @normalizer -def var(a: ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, ddof=0, keepdims=NoValue, *, where=NoValue): - result = _reductions.var(a, axis=axis, dtype=dtype, ddof=ddof, where=where, keepdims=keepdims) +def var( + a: ArrayLike, + axis: AxisLike = None, + dtype: DTypeLike = None, + out=None, + ddof=0, + keepdims=NoValue, + *, + where=NoValue, +): + result = _reductions.var( + a, axis=axis, dtype=dtype, ddof=ddof, where=where, keepdims=keepdims + ) return _helpers.result_or_out(result, out) @normalizer -def std(a: ArrayLike, axis: AxisLike=None, dtype : DTypeLike=None, out=None, ddof=0, keepdims=NoValue, *, where=NoValue): - result = _reductions.std(a, axis=axis, dtype=dtype, ddof=ddof, where=where, keepdims=keepdims) +def std( + a: ArrayLike, + axis: AxisLike = None, + dtype: DTypeLike = None, + out=None, + ddof=0, + keepdims=NoValue, + *, + where=NoValue, +): + result = _reductions.std( + a, axis=axis, dtype=dtype, ddof=ddof, where=where, keepdims=keepdims + ) return _helpers.result_or_out(result, out) @normalizer -def argmin(a: ArrayLike, axis: AxisLike=None, out=None, *, keepdims=NoValue): +def argmin(a: ArrayLike, axis: AxisLike = None, out=None, *, keepdims=NoValue): result = _reductions.argmin(a, axis=axis, keepdims=keepdims) return _helpers.result_or_out(result, out) @normalizer -def argmax(a: ArrayLike, axis: AxisLike=None, out=None, *, keepdims=NoValue): +def argmax(a: ArrayLike, axis: AxisLike = None, 
out=None, *, keepdims=NoValue): result = _reductions.argmax(a, axis=axis, keepdims=keepdims) return _helpers.result_or_out(result, out) @normalizer -def amax(a : ArrayLike, axis : AxisLike=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): - result = _reductions.max(a, axis=axis, initial=initial, where=where, keepdims=keepdims) +def amax( + a: ArrayLike, + axis: AxisLike = None, + out=None, + keepdims=NoValue, + initial=NoValue, + where=NoValue, +): + result = _reductions.max( + a, axis=axis, initial=initial, where=where, keepdims=keepdims + ) return _helpers.result_or_out(result, out) @@ -421,33 +483,47 @@ def amax(a : ArrayLike, axis : AxisLike=None, out=None, keepdims=NoValue, initia @normalizer -def amin(a: ArrayLike, axis: AxisLike=None, out=None, keepdims=NoValue, initial=NoValue, where=NoValue): - result = _reductions.min(a, axis=axis, initial=initial, where=where, keepdims=keepdims) +def amin( + a: ArrayLike, + axis: AxisLike = None, + out=None, + keepdims=NoValue, + initial=NoValue, + where=NoValue, +): + result = _reductions.min( + a, axis=axis, initial=initial, where=where, keepdims=keepdims + ) return _helpers.result_or_out(result, out) + min = amin @normalizer -def ptp(a: ArrayLike, axis: AxisLike=None, out=None, keepdims=NoValue): +def ptp(a: ArrayLike, axis: AxisLike = None, out=None, keepdims=NoValue): result = _reductions.ptp(a, axis=axis, keepdims=keepdims) return _helpers.result_or_out(result, out) @normalizer -def all(a: ArrayLike, axis: AxisLike=None, out=None, keepdims=NoValue, *, where=NoValue): +def all( + a: ArrayLike, axis: AxisLike = None, out=None, keepdims=NoValue, *, where=NoValue +): result = _reductions.all(a, axis=axis, where=where, keepdims=keepdims) return _helpers.result_or_out(result, out) @normalizer -def any(a: ArrayLike, axis: AxisLike=None, out=None, keepdims=NoValue, *, where=NoValue): +def any( + a: ArrayLike, axis: AxisLike = None, out=None, keepdims=NoValue, *, where=NoValue +): result = _reductions.any(a, axis=axis, where=where, keepdims=keepdims) return _helpers.result_or_out(result, out) @normalizer -def count_nonzero(a: ArrayLike, axis: AxisLike=None, *, keepdims=False): +def count_nonzero(a: ArrayLike, axis: AxisLike = None, *, keepdims=False): result = _reductions.count_nonzero(a, axis=axis, keepdims=keepdims) return _helpers.array_from(result) @@ -469,9 +545,9 @@ def cumprod(a: ArrayLike, axis: AxisLike = None, dtype: DTypeLike = None, out=No @normalizer def quantile( - a : ArrayLike, - q : ArrayLike, - axis: AxisLike=None, + a: ArrayLike, + q: ArrayLike, + axis: AxisLike = None, out=None, overwrite_input=False, method="linear", diff --git a/torch_np/_unary_ufuncs.py b/torch_np/_unary_ufuncs.py index db7fe178..12d89d2c 100644 --- a/torch_np/_unary_ufuncs.py +++ b/torch_np/_unary_ufuncs.py @@ -1,17 +1,21 @@ -#from ._decorators import deco_unary_ufunc_from_impl -#from ._detail import _ufunc_impl +# from ._decorators import deco_unary_ufunc_from_impl +# from ._detail import _ufunc_impl from ._detail import _unary_ufuncs -__all__ = [name for name in dir(_unary_ufuncs) if not name.startswith("_") and name != "torch"] +__all__ = [ + name for name in dir(_unary_ufuncs) if not name.startswith("_") and name != "torch" +] -# TODO: consolidate normalizations -from ._funcs import normalizer, ArrayLike, SubokLike, DTypeLike -from ._detail import _util from . 
import _helpers -#import torch +from ._detail import _util + +# TODO: consolidate normalizations +from ._funcs import ArrayLike, DTypeLike, SubokLike, normalizer + +# import torch def deco_unary_ufunc(torch_func): @@ -20,18 +24,19 @@ def deco_unary_ufunc(torch_func): Normalize arguments, sort out type casting, broadcasting and delegate to the pytorch functions for the actual work. """ + def wrapped( - x : ArrayLike, + x: ArrayLike, /, out=None, *, where=True, casting="same_kind", order="K", - dtype: DTypeLike=None, - subok: SubokLike=False, + dtype: DTypeLike = None, + subok: SubokLike = False, signature=None, - extobj=None + extobj=None, ): if order != "K" or not where or signature or extobj: raise NotImplementedError @@ -51,6 +56,7 @@ def wrapped( return wrapped + # # For each torch ufunc implementation, decorate and attach the decorated name # to this module. Its contents is then exported to the public namespace in __init__.py @@ -59,6 +65,6 @@ def wrapped( ufunc = getattr(_unary_ufuncs, name) decorated = normalizer(deco_unary_ufunc(ufunc)) - decorated.__qualname__ = name # XXX: is this really correct? + decorated.__qualname__ = name # XXX: is this really correct? decorated.__name__ = name vars()[name] = decorated diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py index 409b5193..884e0c77 100644 --- a/torch_np/_wrapper.py +++ b/torch_np/_wrapper.py @@ -133,7 +133,6 @@ def vstack(tup: Sequence[ArrayLike], *, dtype: DTypeLike = None, casting="same_k return _helpers.array_from(result) - row_stack = vstack @@ -227,6 +226,7 @@ def tile(A: ArrayLike, reps): result = torch.tile(A, reps) return _helpers.array_from(result) + @normalizer def vander(x: ArrayLike, N=None, increasing=False): result = torch.vander(x, N, increasing) @@ -307,7 +307,12 @@ def empty_like( @normalizer def full( - shape, fill_value: ArrayLike, dtype: DTypeLike = None, order="C", *, like: SubokLike = None + shape, + fill_value: ArrayLike, + dtype: DTypeLike = None, + order="C", + *, + like: SubokLike = None, ): if isinstance(shape, int): shape = (shape,) @@ -471,7 +476,7 @@ def where( @normalizer -def ndim(a : ArrayLike): +def ndim(a: ArrayLike): return a.ndim @@ -521,6 +526,7 @@ def rot90(m: ArrayLike, k=1, axes=(0, 1)): result = _flips.rot90(m, k, axes) return _helpers.array_from(result) + @normalizer def broadcast_to(array: ArrayLike, shape, subok: SubokLike = False): result = torch.broadcast_to(array, size=shape) @@ -564,7 +570,7 @@ def meshgrid(*xi: UnpackedSeqArrayLike, copy=True, sparse=False, indexing="xy"): xi = xi[0] # undo the *xi wrapping in normalizer output = _impl.meshgrid(*xi, copy=copy, sparse=sparse, indexing=indexing) outp = _helpers.tuple_arrays_from(output) - return list(outp) # match numpy, return a list + return list(outp) # match numpy, return a list @normalizer @@ -630,9 +636,19 @@ def tri(N, M=None, k=0, dtype: DTypeLike = float, *, like: SubokLike = None): ###### reductions + @normalizer -def average(a: ArrayLike, axis=None, weights: ArrayLike=None, returned=False, *, keepdims=NoValue): - result, wsum = _reductions.average(a, axis, weights, returned=returned, keepdims=keepdims) +def average( + a: ArrayLike, + axis=None, + weights: ArrayLike = None, + returned=False, + *, + keepdims=NoValue, +): + result, wsum = _reductions.average( + a, axis, weights, returned=returned, keepdims=keepdims + ) if returned: return _helpers.tuple_arrays_from((result, wsum)) else: @@ -656,7 +672,6 @@ def percentile( ) - def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): return 
_funcs.quantile( a, 0.5, axis=axis, overwrite_input=overwrite_input, out=out, keepdims=keepdims @@ -677,6 +692,7 @@ def outer(a: ArrayLike, b: ArrayLike, out=None): # ### FIXME: this is a stub + @normalizer def nanmean( a: ArrayLike, @@ -936,7 +952,6 @@ def unique( return _helpers.array_from(result) - ###### mapping from numpy API objects to wrappers from this module ###### # All is in the mapping dict in _mapping.py diff --git a/torch_np/tests/test_basic.py b/torch_np/tests/test_basic.py index 934aab78..f6fab76d 100644 --- a/torch_np/tests/test_basic.py +++ b/torch_np/tests/test_basic.py @@ -44,7 +44,7 @@ ] ufunc_names = _unary_ufuncs.__all__ -ufunc_names.remove('invert') # torch: bitwise_not_cpu not implemented for 'Float' +ufunc_names.remove("invert") # torch: bitwise_not_cpu not implemented for 'Float' one_arg_funcs += [getattr(_unary_ufuncs, name) for name in ufunc_names] From a7ac2801fea2fbf31aa114c9eec7e568d7ee5fd4 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Fri, 10 Mar 2023 16:33:43 +0300 Subject: [PATCH 23/33] MAINT: move normalization logic to _normalizations --- torch_np/_binary_ufuncs.py | 27 ++---- torch_np/_decorators.py | 2 - torch_np/_funcs.py | 163 +++--------------------------------- torch_np/_helpers.py | 20 +++++ torch_np/_normalizations.py | 151 +++++++++++++++++++++++++++++++++ torch_np/_unary_ufuncs.py | 27 ++---- torch_np/_wrapper.py | 10 ++- 7 files changed, 201 insertions(+), 199 deletions(-) create mode 100644 torch_np/_normalizations.py diff --git a/torch_np/_binary_ufuncs.py b/torch_np/_binary_ufuncs.py index 7e904943..6807094f 100644 --- a/torch_np/_binary_ufuncs.py +++ b/torch_np/_binary_ufuncs.py @@ -1,19 +1,14 @@ +from . import _helpers from ._detail import _binary_ufuncs +from ._normalizations import ArrayLike, DTypeLike, SubokLike, normalizer __all__ = [ name for name in dir(_binary_ufuncs) if not name.startswith("_") and name != "torch" ] -from . import _helpers -from ._detail import _util - -# TODO: consolidate normalizations -from ._funcs import ArrayLike, DTypeLike, SubokLike, normalizer - - def deco_binary_ufunc(torch_func): - """Common infra for unary ufuncs. + """Common infra for binary ufuncs. Normalize arguments, sort out type casting, broadcasting and delegate to the pytorch functions for the actual work. @@ -33,19 +28,9 @@ def wrapped( signature=None, extobj=None, ): - if order != "K" or not where or signature or extobj: - raise NotImplementedError - - # XXX: dtype=... parameter - if dtype is not None: - raise NotImplementedError - - out_shape_dtype = None - if out is not None: - out_shape_dtype = (out.get().dtype, out.get().shape) - - tensors = _util.cast_and_broadcast((x1, x2), out_shape_dtype, casting) - + tensors = _helpers.ufunc_preprocess( + (x1, x2), out, where, casting, order, dtype, subok, signature, extobj + ) result = torch_func(*tensors) return _helpers.result_or_out(result, out) diff --git a/torch_np/_decorators.py b/torch_np/_decorators.py index 2d82fa45..c8542e1b 100644 --- a/torch_np/_decorators.py +++ b/torch_np/_decorators.py @@ -5,8 +5,6 @@ from . import _dtypes, _helpers from ._detail import _util -NoValue = None - def out_shape_dtype(func): """Handle out=... kwarg for ufuncs. diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py index 5093b9ad..4e68b8b9 100644 --- a/torch_np/_funcs.py +++ b/torch_np/_funcs.py @@ -1,159 +1,18 @@ -import operator -import typing -from typing import Optional, Sequence +from typing import Optional import torch -from . 
import _decorators, _helpers
-from ._detail import _dtypes_impl, _flips, _reductions, _util
+from . import _helpers
+from ._detail import _flips, _reductions, _util
 from ._detail import implementations as _impl
-
-################################## normalizations
-
-ArrayLike = typing.TypeVar("ArrayLike")
-DTypeLike = typing.TypeVar("DTypeLike")
-SubokLike = typing.TypeVar("SubokLike")
-AxisLike = typing.TypeVar("AxisLike")
-
-# annotate e.g. atleast_1d(*arys)
-UnpackedSeqArrayLike = typing.TypeVar("UnpackedSeqArrayLike")
-
-
-import inspect
-
-from . import _dtypes
-
-
-def normalize_array_like(x, name=None):
-    (tensor,) = _helpers.to_tensors(x)
-    return tensor
-
-
-def normalize_optional_array_like(x, name=None):
-    # This explicit normalizer is needed because otherwise normalize_array_like
-    # does not run for a parameter annotated as Optional[ArrayLike]
-    return None if x is None else normalize_array_like(x, name)
-
-
-def normalize_seq_array_like(x, name=None):
-    tensors = _helpers.to_tensors(*x)
-    return tensors
-
-
-def normalize_dtype(dtype, name=None):
-    # cf _decorators.dtype_to_torch
-    torch_dtype = None
-    if dtype is not None:
-        dtype = _dtypes.dtype(dtype)
-        torch_dtype = dtype.torch_dtype
-    return torch_dtype
-
-
-def normalize_subok_like(arg, name):
-    if arg:
-        raise ValueError(f"'{name}' parameter is not supported.")
-
-
-def normalize_axis_like(arg, name=None):
-    from ._ndarray import ndarray
-
-    if isinstance(arg, ndarray):
-        arg = operator.index(arg)
-    return arg
-
-
-normalizers = {
-    ArrayLike: normalize_array_like,
-    Optional[ArrayLike]: normalize_optional_array_like,
-    Sequence[ArrayLike]: normalize_seq_array_like,
-    UnpackedSeqArrayLike: normalize_seq_array_like,  # cf handling in normalize
-    DTypeLike: normalize_dtype,
-    SubokLike: normalize_subok_like,
-    AxisLike: normalize_axis_like,
-}
-
-import functools
-
-
-def normalize_this(arg, parm):
-    """Normalize arg if a normalizer is registred."""
-    normalizer = normalizers.get(parm.annotation, None)
-    if normalizer:
-        return normalizer(arg)
-    else:
-        # untyped arguments pass through
-        return arg
-
-
-def normalizer(func):
-    @functools.wraps(func)
-    def wrapped(*args, **kwds):
-        sig = inspect.signature(func)
-
-        # first, check for *args in positional parameters. Case in point:
-        # atleast_1d(*arys: UnpackedSequenceArrayLike)
-        # if found, consume all args into a tuple to normalize as a whole
-        for j, param in enumerate(sig.parameters.values()):
-            if param.annotation == UnpackedSeqArrayLike:
-                if j == 0:
-                    args = (args,)
-                else:
-                    # args = args[:j] + (args[j:],) would likely work
-                    # not present in numpy codebase, so do not bother just yet.
-                    # NB: branching on j ==0 is to avoid the empty tuple, args[:j]
-                    raise NotImplementedError
-                break
-
-        # loop over positional parameters and actual arguments
-        lst, dct = [], {}
-        for arg, (name, parm) in zip(args, sig.parameters.items()):
-            print(arg, name, parm.annotation)
-            lst.append(normalize_this(arg, parm))
-
-        # normalize keyword arguments
-        for name, arg in kwds.items():
-            if not name in sig.parameters:
-                # unknown kwarg, bail out
-                raise TypeError(
-                    f"{func.__name__}() got an unexpected keyword argument '{name}'."
-                )
-
-            print("kw: ", name, sig.parameters[name].annotation)
-            parm = sig.parameters[name]
-            dct[name] = normalize_this(arg, parm)
-
-        ba = sig.bind(*lst, **dct)
-        ba.apply_defaults()
-
-        # Now that all parameters have been consumed, check:
-        # Anything that has not been bound is unexpected positional arg => raise.
-        # If there are too few actual arguments, this fill fail in func(*ba.args) below
-        if len(args) > len(ba.args):
-            raise TypeError(
-                f"{func.__name__}() takes {len(ba.args)} positional argument but {len(args)} were given."
-            )
-
-        # TODO:
-        # 1. [LOOKS OK] kw-only parameters : see vstack
-        # 2. [LOOKS OK] extra unknown args -- error out : nonzero([2, 0, 3], oops=42)
-        # 3. [LOOKS OK] optional (tensor_or_none) : untyped => pass through
-        # 4. [LOOKS OK] DTypeLike : positional or kw
-        # 5. axes : live in _impl or in types? several ways of handling them
-        # 6. [OK, NOT HERE] keepdims : peel off, postprocess
-        # 7. OutLike : normal & keyword-only, peel off, postprocess
-        # 8. [LOOKS OK] *args
-        # 9. consolidate normalizations (_funcs, _wrapper)
-        # 10. consolidate decorators (_{unary,binary}_ufuncs)
-        # 11. out= arg : validate it's an ndarray
-
-        # finally, pass normalized arguments through
-        result = func(*ba.args, **ba.kwargs)
-        return result
-
-    return wrapped
-
-
-##################################
+from ._normalizations import (
+    ArrayLike,
+    AxisLike,
+    DTypeLike,
+    SubokLike,
+    UnpackedSeqArrayLike,
+    normalizer,
+)
 
 
 @normalizer
diff --git a/torch_np/_helpers.py b/torch_np/_helpers.py
index f1589f28..49f781b9 100644
--- a/torch_np/_helpers.py
+++ b/torch_np/_helpers.py
@@ -41,6 +41,26 @@ def cast_and_broadcast(tensors, out, casting):
     return tuple(tensors)
 
 
+def ufunc_preprocess(
+    tensors, out, where, casting, order, dtype, subok, signature, extobj
+):
+    # internal preprocessing of args in ufuncs (cf _unary_ufuncs, _binary_ufuncs)
+    if order != "K" or not where or signature or extobj:
+        raise NotImplementedError
+
+    # XXX: dtype=... parameter
+    if dtype is not None:
+        raise NotImplementedError
+
+    out_shape_dtype = None
+    if out is not None:
+        out_shape_dtype = (out.get().dtype, out.get().shape)
+
+    tensors = _util.cast_and_broadcast(tensors, out_shape_dtype, casting)
+
+    return tensors
+
+
 # ### Return helpers: wrap a single tensor, a tuple of tensors, out= etc ###
 
 
diff --git a/torch_np/_normalizations.py b/torch_np/_normalizations.py
new file mode 100644
index 00000000..4a42f6de
--- /dev/null
+++ b/torch_np/_normalizations.py
@@ -0,0 +1,151 @@
+""" "Normalize" arguments: convert array_likes to tensors, dtypes to torch dtypes and so on.
+"""
+import operator
+import typing
+from typing import Optional, Sequence
+
+import torch
+
+from . import _helpers
+
+ArrayLike = typing.TypeVar("ArrayLike")
+DTypeLike = typing.TypeVar("DTypeLike")
+SubokLike = typing.TypeVar("SubokLike")
+AxisLike = typing.TypeVar("AxisLike")
+
+# annotate e.g. atleast_1d(*arys)
+UnpackedSeqArrayLike = typing.TypeVar("UnpackedSeqArrayLike")
+
+
+import inspect
+
+from . import _dtypes
+
+
+def normalize_array_like(x, name=None):
+    (tensor,) = _helpers.to_tensors(x)
+    return tensor
+
+
+def normalize_optional_array_like(x, name=None):
+    # This explicit normalizer is needed because otherwise normalize_array_like
+    # does not run for a parameter annotated as Optional[ArrayLike]
+    return None if x is None else normalize_array_like(x, name)
+
+
+def normalize_seq_array_like(x, name=None):
+    tensors = _helpers.to_tensors(*x)
+    return tensors
+
+
+def normalize_dtype(dtype, name=None):
+    # cf _decorators.dtype_to_torch
+    torch_dtype = None
+    if dtype is not None:
+        dtype = _dtypes.dtype(dtype)
+        torch_dtype = dtype.torch_dtype
+    return torch_dtype
+
+
+def normalize_subok_like(arg, name):
+    if arg:
+        raise ValueError(f"'{name}' parameter is not supported.")
+
+
+def normalize_axis_like(arg, name=None):
+    from ._ndarray import ndarray
+
+    if isinstance(arg, ndarray):
+        arg = operator.index(arg)
+    return arg
+
+
+normalizers = {
+    ArrayLike: normalize_array_like,
+    Optional[ArrayLike]: normalize_optional_array_like,
+    Sequence[ArrayLike]: normalize_seq_array_like,
+    UnpackedSeqArrayLike: normalize_seq_array_like,  # cf handling in normalize
+    DTypeLike: normalize_dtype,
+    SubokLike: normalize_subok_like,
+    AxisLike: normalize_axis_like,
+}
+
+import functools
+
+
+def normalize_this(arg, parm):
+    """Normalize arg if a normalizer is registered."""
+    normalizer = normalizers.get(parm.annotation, None)
+    if normalizer:
+        return normalizer(arg)
+    else:
+        # untyped arguments pass through
+        return arg
+
+
+def normalizer(func):
+    @functools.wraps(func)
+    def wrapped(*args, **kwds):
+        sig = inspect.signature(func)
+
+        # first, check for *args in positional parameters. Case in point:
+        # atleast_1d(*arys: UnpackedSeqArrayLike)
+        # if found, consume all args into a tuple to normalize as a whole
+        for j, param in enumerate(sig.parameters.values()):
+            if param.annotation == UnpackedSeqArrayLike:
+                if j == 0:
+                    args = (args,)
+                else:
+                    # args = args[:j] + (args[j:],) would likely work
+                    # not present in numpy codebase, so do not bother just yet.
+                    # NB: branching on j == 0 is to avoid the empty tuple, args[:j]
+                    raise NotImplementedError
+                break
+
+        # loop over positional parameters and actual arguments
+        lst, dct = [], {}
+        for arg, (name, parm) in zip(args, sig.parameters.items()):
+            print(arg, name, parm.annotation)
+            lst.append(normalize_this(arg, parm))
+
+        # normalize keyword arguments
+        for name, arg in kwds.items():
+            if not name in sig.parameters:
+                # unknown kwarg, bail out
+                raise TypeError(
+                    f"{func.__name__}() got an unexpected keyword argument '{name}'."
+                )
+
+            print("kw: ", name, sig.parameters[name].annotation)
+            parm = sig.parameters[name]
+            dct[name] = normalize_this(arg, parm)
+
+        ba = sig.bind(*lst, **dct)
+        ba.apply_defaults()
+
+        # Now that all parameters have been consumed, check:
+        # Anything that has not been bound is unexpected positional arg => raise.
+        # If there are too few actual arguments, this will fail in func(*ba.args) below
+        if len(args) > len(ba.args):
+            raise TypeError(
+                f"{func.__name__}() takes {len(ba.args)} positional arguments but {len(args)} were given."
+            )
+
+        # TODO:
+        # 1. [LOOKS OK] kw-only parameters : see vstack
+        # 2. [LOOKS OK] extra unknown args -- error out : nonzero([2, 0, 3], oops=42)
+        # 3. [LOOKS OK] optional (tensor_or_none) : untyped => pass through
+        # 4. [LOOKS OK] DTypeLike : positional or kw
+        # 5. axes : live in _impl or in types? several ways of handling them
+        # 6. [OK, NOT HERE] keepdims : peel off, postprocess
+        # 7. OutLike : normal & keyword-only, peel off, postprocess
+        # 8. [LOOKS OK] *args
+        # 9. [LOOKS OK] consolidate normalizations (_funcs, _wrapper)
+        # 10. [LOOKS OK] consolidate decorators (_{unary,binary}_ufuncs)
+        # 11. out= arg : validate it's an ndarray
+
+        # finally, pass normalized arguments through
+        result = func(*ba.args, **ba.kwargs)
+        return result
+
+    return wrapped
diff --git a/torch_np/_unary_ufuncs.py b/torch_np/_unary_ufuncs.py
index 12d89d2c..79bae89d 100644
--- a/torch_np/_unary_ufuncs.py
+++ b/torch_np/_unary_ufuncs.py
@@ -2,22 +2,15 @@
 
 # from ._detail import _ufunc_impl
 
+from . import _helpers
 from ._detail import _unary_ufuncs
+from ._normalizations import ArrayLike, DTypeLike, SubokLike, normalizer
 
 __all__ = [
     name for name in dir(_unary_ufuncs) if not name.startswith("_") and name != "torch"
 ]
 
-from . import _helpers
-from ._detail import _util
-
-# TODO: consolidate normalizations
-from ._funcs import ArrayLike, DTypeLike, SubokLike, normalizer
-
-# import torch
-
-
 def deco_unary_ufunc(torch_func):
     """Common infra for unary ufuncs.
 
@@ -38,19 +31,9 @@ def wrapped(
         signature=None,
         extobj=None,
     ):
-        if order != "K" or not where or signature or extobj:
-            raise NotImplementedError
-
-        # XXX: dtype=... parameter
-        if dtype is not None:
-            raise NotImplementedError
-
-        out_shape_dtype = None
-        if out is not None:
-            out_shape_dtype = (out.get().dtype, out.get().shape)
-
-        tensors = _util.cast_and_broadcast((x,), out_shape_dtype, casting)
-
+        tensors = _helpers.ufunc_preprocess(
+            (x,), out, where, casting, order, dtype, subok, signature, extobj
+        )
         result = torch_func(*tensors)
         return _helpers.result_or_out(result, out)
 
diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py
index 884e0c77..782d7f23 100644
--- a/torch_np/_wrapper.py
+++ b/torch_np/_wrapper.py
@@ -11,10 +11,16 @@
 from . import _funcs
 from ._detail import _dtypes_impl, _flips, _reductions, _util
 from ._detail import implementations as _impl
+from ._ndarray import array, asarray, maybe_set_base, ndarray
 
 ### XXX: order the imports DAG
-from ._funcs import ArrayLike, DTypeLike, SubokLike, UnpackedSeqArrayLike, normalizer
-from ._ndarray import array, asarray, maybe_set_base, ndarray
+from ._normalizations import (
+    ArrayLike,
+    DTypeLike,
+    SubokLike,
+    UnpackedSeqArrayLike,
+    normalizer,
+)
 
 from . import _dtypes, _helpers, _decorators  # isort: skip  # XXX
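To illustrate the machinery consolidated above: a wrapper function declares what each argument should become via annotations, and `normalizer` converts the actual arguments before the body runs. A minimal sketch, assuming the `_normalizations` module introduced in this patch (`scaled_sum` is a made-up example, not part of the series):

    import torch_np._normalizations as nml

    @nml.normalizer
    def scaled_sum(a: nml.ArrayLike, dtype: nml.DTypeLike = None):
        # by the time the body runs, `a` is a torch.Tensor
        # and `dtype` is a torch.dtype (or None)
        return a.sum(dtype=dtype)

    scaled_sum([1, 2, 3])                    # a list arrives as a tensor
    scaled_sum([1, 2, 3], dtype="float64")   # a dtype name arrives as torch.float64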
From 10672bb95076681886feba04b75f102001f3619f Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Fri, 10 Mar 2023 17:09:37 +0300
Subject: [PATCH 24/33] MAINT: use normalizations in tnp.random

---
 torch_np/random.py | 40 +++++++++++++++++++++-------------------
 1 file changed, 21 insertions(+), 19 deletions(-)

diff --git a/torch_np/random.py b/torch_np/random.py
index 4bc3426c..e72b629c 100644
--- a/torch_np/random.py
+++ b/torch_np/random.py
@@ -9,8 +9,10 @@
 
 import torch
 
-from . import asarray
 from ._detail import _dtypes_impl, _util
+from . import _helpers
+from ._normalizations import normalizer, ArrayLike
+from typing import Optional
 
 _default_dtype = _dtypes_impl.default_float_dtype
 
@@ -33,7 +35,7 @@ def array_or_scalar(values, py_type=float, return_scalar=False):
     if return_scalar:
         return py_type(values.item())
     else:
-        return asarray(values)
+        return _helpers.array_from(values)
 
 
 def seed(seed=None):
@@ -75,11 +77,11 @@ def normal(loc=0.0, scale=1.0, size=None):
     return array_or_scalar(values, return_scalar=size is None)
 
 
-def shuffle(x):
-    x_tensor = asarray(x).get()
-    perm = torch.randperm(x_tensor.shape[0])
-    xp = x_tensor[perm]
-    x_tensor.copy_(xp)
+@normalizer
+def shuffle(x: ArrayLike):
+    perm = torch.randperm(x.shape[0])
+    xp = x[perm]
+    x.copy_(xp)
 
 
 def randint(low, high=None, size=None):
@@ -93,12 +95,14 @@ def randint(low, high=None, size=None):
     return array_or_scalar(values, int, return_scalar=size is None)
 
 
-def choice(a, size=None, replace=True, p=None):
+@normalizer
+def choice(a: ArrayLike, size=None, replace=True, p: Optional[ArrayLike]=None):
+
     # https://stackoverflow.com/questions/59461811/random-choice-with-pytorch
-    if isinstance(a, int):
-        a_tensor = torch.arange(a)
-    else:
-        a_tensor = asarray(a).get()
+    if a.numel() == 1:
+        a = torch.arange(a)
+
+    # TODO: check a.dtype is integer -- cf np.random.choice(3.4) which raises
 
     # number of draws
     if size is None:
@@ -112,21 +116,19 @@ def choice(a, size=None, replace=True, p=None):
 
     # prepare the probabilities
     if p is None:
-        p_tensor = torch.ones_like(a_tensor) / a_tensor.shape[0]
-    else:
-        p_tensor = asarray(p, dtype=float).get()
+        p = torch.ones_like(a) / a.shape[0]
 
     # cf https://github.com/numpy/numpy/blob/main/numpy/random/mtrand.pyx#L973
     atol = sqrt(torch.finfo(torch.float64).eps)
-    if abs(p_tensor.sum() - 1.0) > atol:
+    if abs(p.sum() - 1.0) > atol:
         raise ValueError("probabilities do not sum to 1.")
 
     # actually sample
-    indices = torch.multinomial(p_tensor, num_el, replacement=replace)
+    indices = torch.multinomial(p, num_el, replacement=replace)
 
     if _util.is_sequence(size):
         indices = indices.reshape(size)
 
-    samples = a_tensor[indices]
+    samples = a[indices]
 
-    return asarray(samples)
+    return _helpers.array_from(samples)
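With this patch `choice` operates on tensors end to end: `a` and `p` are normalized up front, and sampling goes through `torch.multinomial`. A usage sketch (illustrative only, not part of the diff):

    import torch_np.random as random

    random.seed(1234)
    # both `a` and `p` may be plain Python sequences
    random.choice([10, 20, 30], size=2, replace=False, p=[0.2, 0.3, 0.5])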
From 16c5aedb66c3482bf8444a1551547e11512ce817 Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Fri, 10 Mar 2023 20:14:07 +0300
Subject: [PATCH 25/33] MAINT: annotate out as NDArray, remove scattered isinstance checks

---
 torch_np/_binary_ufuncs.py  |  6 ++++--
 torch_np/_funcs.py          | 40 +++++++++++++++++++------------------
 torch_np/_helpers.py        | 11 +---------
 torch_np/_normalizations.py | 14 +++++++++++++
 torch_np/_unary_ufuncs.py   |  5 +++--
 torch_np/_wrapper.py        | 22 ++++++++++----------
 6 files changed, 53 insertions(+), 45 deletions(-)

diff --git a/torch_np/_binary_ufuncs.py b/torch_np/_binary_ufuncs.py
index 6807094f..e38292c1 100644
--- a/torch_np/_binary_ufuncs.py
+++ b/torch_np/_binary_ufuncs.py
@@ -1,6 +1,8 @@
 from . import _helpers
 from ._detail import _binary_ufuncs
-from ._normalizations import ArrayLike, DTypeLike, SubokLike, normalizer
+from ._normalizations import ArrayLike, DTypeLike, SubokLike, NDArray, normalizer
+from typing import Optional
+
 
 __all__ = [
     name for name in dir(_binary_ufuncs) if not name.startswith("_") and name != "torch"
@@ -18,7 +20,7 @@ def wrapped(
         x1: ArrayLike,
         x2: ArrayLike,
         /,
-        out=None,
+        out: Optional[NDArray] = None,
         *,
         where=True,
         casting="same_kind",
diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py
index 4e68b8b9..e809af90 100644
--- a/torch_np/_funcs.py
+++ b/torch_np/_funcs.py
@@ -9,10 +9,12 @@
     ArrayLike,
     AxisLike,
     DTypeLike,
+    NDArray,
     SubokLike,
     UnpackedSeqArrayLike,
     normalizer,
 )
+from typing import Optional
 
 
 @normalizer
@@ -32,7 +34,7 @@ def clip(
     a: ArrayLike,
     min: Optional[ArrayLike] = None,
     max: Optional[ArrayLike] = None,
-    out=None,
+    out: Optional[NDArray] = None,
 ):
     # np.clip requires both a_min and a_max not None, while ndarray.clip allows
     # one of them to be None. Follow the more lax version.
@@ -57,7 +59,7 @@ def diagonal(a: ArrayLike, offset=0, axis1=0, axis2=1):
 
 
 @normalizer
-def trace(a: ArrayLike, offset=0, axis1=0, axis2=1, dtype: DTypeLike = None, out=None):
+def trace(a: ArrayLike, offset=0, axis1=0, axis2=1, dtype: DTypeLike = None, out: Optional[NDArray] = None):
     result = _impl.trace(a, offset, axis1, axis2, dtype)
     return _helpers.result_or_out(result, out)
 
@@ -112,7 +114,7 @@ def vdot(a: ArrayLike, b: ArrayLike, /):
 
 
 @normalizer
-def dot(a: ArrayLike, b: ArrayLike, out=None):
+def dot(a: ArrayLike, b: ArrayLike, out: Optional[NDArray] = None):
     result = _impl.dot(a, b)
     return _helpers.result_or_out(result, out)
 
@@ -211,7 +213,7 @@ def imag(a: ArrayLike):
 
 
 @normalizer
-def round_(a: ArrayLike, decimals=0, out=None):
+def round_(a: ArrayLike, decimals=0, out: Optional[NDArray]=None):
     result = _impl.round(a, decimals)
     return _helpers.result_or_out(result, out)
 
@@ -231,7 +233,7 @@ def sum(
     a: ArrayLike,
     axis: AxisLike = None,
     dtype: DTypeLike = None,
-    out=None,
+    out: Optional[NDArray]=None,
     keepdims=NoValue,
     initial=NoValue,
     where=NoValue,
@@ -247,7 +249,7 @@ def prod(
     a: ArrayLike,
     axis: AxisLike = None,
     dtype: DTypeLike = None,
-    out=None,
+    out: Optional[NDArray]=None,
     keepdims=NoValue,
     initial=NoValue,
     where=NoValue,
@@ -266,7 +268,7 @@ def mean(
     a: ArrayLike,
     axis: AxisLike = None,
     dtype: DTypeLike = None,
-    out=None,
+    out: Optional[NDArray]=None,
     keepdims=NoValue,
     *,
     where=NoValue,
@@ -282,7 +284,7 @@ def var(
     a: ArrayLike,
     axis: AxisLike = None,
     dtype: DTypeLike = None,
-    out=None,
+    out: Optional[NDArray]=None,
     ddof=0,
     keepdims=NoValue,
     *,
@@ -299,7 +301,7 @@ def std(
     a: ArrayLike,
     axis: AxisLike = None,
     dtype: DTypeLike = None,
-    out=None,
+    out: Optional[NDArray]=None,
     ddof=0,
     keepdims=NoValue,
     *,
@@ -312,13 +314,13 @@ def std(
 
 
 @normalizer
-def argmin(a: ArrayLike, axis: AxisLike = None, out=None, *, keepdims=NoValue):
+def argmin(a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, *, keepdims=NoValue):
     result = _reductions.argmin(a, axis=axis, keepdims=keepdims)
     return _helpers.result_or_out(result, out)
 
 
 @normalizer
-def argmax(a: ArrayLike, axis: AxisLike = None, out=None, *, keepdims=NoValue):
+def argmax(a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, *, keepdims=NoValue):
     result = _reductions.argmax(a, axis=axis, keepdims=keepdims)
     return _helpers.result_or_out(result, out)
 
@@ -327,7 +329,7 @@ def argmax(a: ArrayLike, axis: AxisLike = None, out=None, *, keepdims=NoValue):
 def amax(
     a: ArrayLike,
     axis: AxisLike = None,
-    out=None,
+    out: Optional[NDArray] = None,
     keepdims=NoValue,
     initial=NoValue,
     where=NoValue,
@@ -345,7 +347,7 @@ def amax(
 def amin(
     a: ArrayLike,
     axis: AxisLike = None,
-    out=None,
+    out: Optional[NDArray] = None,
     keepdims=NoValue,
     initial=NoValue,
     where=NoValue,
@@ -360,14 +362,14 @@ def amin(
 
 
 @normalizer
-def ptp(a: ArrayLike, axis: AxisLike = None, out=None, keepdims=NoValue):
+def ptp(a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, keepdims=NoValue):
     result = _reductions.ptp(a, axis=axis, keepdims=keepdims)
     return _helpers.result_or_out(result, out)
 
 
 @normalizer
 def all(
-    a: ArrayLike, axis: AxisLike = None, out=None, keepdims=NoValue, *, where=NoValue
+    a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, keepdims=NoValue, *, where=NoValue
 ):
     result = _reductions.all(a, axis=axis, where=where, keepdims=keepdims)
     return _helpers.result_or_out(result, out)
@@ -375,7 +377,7 @@ def all(
 
 @normalizer
 def any(
-    a: ArrayLike, axis: AxisLike = None, out=None, keepdims=NoValue, *, where=NoValue
+    a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, keepdims=NoValue, *, where=NoValue
 ):
     result = _reductions.any(a, axis=axis, where=where, keepdims=keepdims)
     return _helpers.result_or_out(result, out)
@@ -388,13 +390,13 @@ def count_nonzero(a: ArrayLike, axis: AxisLike = None, *, keepdims=False):
 
 
 @normalizer
-def cumsum(a: ArrayLike, axis: AxisLike = None, dtype: DTypeLike = None, out=None):
+def cumsum(a: ArrayLike, axis: AxisLike = None, dtype: DTypeLike = None, out: Optional[NDArray] = None):
     result = _reductions.cumsum(a, axis=axis, dtype=dtype)
     return _helpers.result_or_out(result, out)
 
 
 @normalizer
-def cumprod(a: ArrayLike, axis: AxisLike = None, dtype: DTypeLike = None, out=None):
+def cumprod(a: ArrayLike, axis: AxisLike = None, dtype: DTypeLike = None, out: Optional[NDArray] = None):
     result = _reductions.cumprod(a, axis=axis, dtype=dtype)
     return _helpers.result_or_out(result, out)
 
@@ -407,7 +409,7 @@ def quantile(
     a: ArrayLike,
     q: ArrayLike,
     axis: AxisLike = None,
-    out=None,
+    out: Optional[NDArray] = None,
     overwrite_input=False,
     method="linear",
     keepdims=False,
diff --git a/torch_np/_helpers.py b/torch_np/_helpers.py
index 49f781b9..a894b3da 100644
--- a/torch_np/_helpers.py
+++ b/torch_np/_helpers.py
@@ -29,11 +29,6 @@ def cast_and_broadcast(tensors, out, casting):
     if out is None:
         return tensors
     else:
-        from ._ndarray import asarray, ndarray
-
-        if not isinstance(out, ndarray):
-            raise TypeError("Return arrays must be of ArrayType")
-
         tensors = _util.cast_and_broadcast(
             tensors, out.dtype.type.torch_dtype, out.shape, casting
         )
@@ -72,11 +67,7 @@ def result_or_out(result_tensor, out_array=None, promote_scalar=False):
     result_tensor is placed into the out array.
     This weirdness is used e.g. in `np.percentile`
     """
-    from ._ndarray import asarray, ndarray
-
     if out_array is not None:
-        if not isinstance(out_array, ndarray):
-            raise TypeError("Return arrays must be of ArrayType")
         if result_tensor.shape != out_array.shape:
             can_fit = result_tensor.numel() == 1 and out_array.ndim == 0
             if promote_scalar and can_fit:
@@ -90,7 +81,7 @@ def result_or_out(result_tensor, out_array=None, promote_scalar=False):
         out_tensor.copy_(result_tensor)
         return out_array
     else:
-        return asarray(result_tensor)
+        return array_from(result_tensor)
diff --git a/torch_np/_normalizations.py b/torch_np/_normalizations.py
index 4a42f6de..90db0e94 100644
--- a/torch_np/_normalizations.py
+++ b/torch_np/_normalizations.py
@@ -12,6 +12,7 @@
 DTypeLike = typing.TypeVar("DTypeLike")
 SubokLike = typing.TypeVar("SubokLike")
 AxisLike = typing.TypeVar("AxisLike")
+NDArray = typing.TypeVar("NDArray")
 
 # annotate e.g. atleast_1d(*arys)
 UnpackedSeqArrayLike = typing.TypeVar("UnpackedSeqArrayLike")
@@ -60,11 +61,24 @@ def normalize_axis_like(arg, name=None):
     return arg
 
 
+def normalize_ndarray(arg, name=None):
+    if arg is None:
+        return arg
+
+    from ._ndarray import ndarray
+
+    if not isinstance(arg, ndarray):
+        raise TypeError("'out' must be an array")
+    return arg
+
+
+
 normalizers = {
     ArrayLike: normalize_array_like,
     Optional[ArrayLike]: normalize_optional_array_like,
     Sequence[ArrayLike]: normalize_seq_array_like,
     UnpackedSeqArrayLike: normalize_seq_array_like,  # cf handling in normalize
+    Optional[NDArray]: normalize_ndarray,
     DTypeLike: normalize_dtype,
     SubokLike: normalize_subok_like,
     AxisLike: normalize_axis_like,
diff --git a/torch_np/_unary_ufuncs.py b/torch_np/_unary_ufuncs.py
index 79bae89d..366b397b 100644
--- a/torch_np/_unary_ufuncs.py
+++ b/torch_np/_unary_ufuncs.py
@@ -4,7 +4,8 @@
 from . import _helpers
 from ._detail import _unary_ufuncs
-from ._normalizations import ArrayLike, DTypeLike, SubokLike, normalizer
+from ._normalizations import ArrayLike, DTypeLike, SubokLike, NDArray, normalizer
+from typing import Optional
 
 __all__ = [
     name for name in dir(_unary_ufuncs) if not name.startswith("_") and name != "torch"
@@ -21,7 +22,7 @@ def deco_unary_ufunc(torch_func):
     def wrapped(
         x: ArrayLike,
         /,
-        out=None,
+        out: Optional[NDArray] = None,
         *,
         where=True,
         casting="same_kind",
diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py
index 782d7f23..a853f1dd 100644
--- a/torch_np/_wrapper.py
+++ b/torch_np/_wrapper.py
@@ -13,14 +13,15 @@
 from ._detail import implementations as _impl
 from ._ndarray import array, asarray, maybe_set_base, ndarray
 
-### XXX: order the imports DAG
 from ._normalizations import (
     ArrayLike,
     DTypeLike,
     SubokLike,
     UnpackedSeqArrayLike,
+    NDArray,
     normalizer,
 )
+from typing import Optional
 
 from . import _dtypes, _helpers, _decorators  # isort: skip  # XXX
 
@@ -108,9 +109,6 @@ def _concat_check(tup, dtype, out):
         raise ValueError("need at least one array to concatenate")
 
     if out is not None:
-        if not isinstance(out, ndarray):
-            raise ValueError("'out' must be an array")
-
         if dtype is not None:
             # mimic numpy
             raise TypeError(
@@ -123,7 +121,7 @@ def _concat_check(tup, dtype, out):
 def concatenate(
     ar_tuple: Sequence[ArrayLike],
     axis=0,
-    out=None,
+    out: Optional[NDArray]=None,
     dtype: DTypeLike = None,
     casting="same_kind",
 ):
@@ -173,7 +171,7 @@ def column_stack(
 def stack(
     arrays: Sequence[ArrayLike],
     axis=0,
-    out=None,
+    out: Optional[NDArray] = None,
     *,
     dtype: DTypeLike = None,
     casting="same_kind",
@@ -666,7 +664,7 @@ def percentile(
     a,
     q,
     axis=None,
-    out=None,
+    out: Optional[NDArray] = None,
     overwrite_input=False,
     method="linear",
     keepdims=False,
@@ -678,7 +676,7 @@ def percentile(
     )
 
 
-def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
+def median(a, axis=None, out: Optional[NDArray] = None, overwrite_input=False, keepdims=False):
     return _funcs.quantile(
         a, 0.5, axis=axis, overwrite_input=overwrite_input, out=out, keepdims=keepdims
     )
@@ -691,7 +689,7 @@ def inner(a: ArrayLike, b: ArrayLike, /):
 
 
 @normalizer
-def outer(a: ArrayLike, b: ArrayLike, out=None):
+def outer(a: ArrayLike, b: ArrayLike, out: Optional[NDArray] = None):
     result = torch.outer(a, b)
     return _helpers.result_or_out(result, out)
 
@@ -704,7 +702,7 @@ def nanmean(
     a: ArrayLike,
     axis=None,
     dtype: DTypeLike = None,
-    out=None,
+    out: Optional[NDArray] = None,
     keepdims=NoValue,
     *,
     where=NoValue,
@@ -847,13 +845,13 @@ def isrealobj(x: ArrayLike):
 
 
 @normalizer
-def isneginf(x: ArrayLike, out=None):
+def isneginf(x: ArrayLike, out: Optional[NDArray] = None):
     result = torch.isneginf(x, out=out)
     return _helpers.array_from(result)
 
 
 @normalizer
-def isposinf(x: ArrayLike, out=None):
+def isposinf(x: ArrayLike, out: Optional[NDArray] = None):
     result = torch.isposinf(x, out=out)
     return _helpers.array_from(result)
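The upshot of annotating `out` as `Optional[NDArray]`: the isinstance checks scattered through `_helpers` and `_wrapper` collapse into the single `normalize_ndarray` hook above. A sketch of the intended behavior (illustrative, assuming the package imports as below):

    import torch_np as tnp

    a = tnp.asarray([1.5, 2.5])
    out = tnp.empty(2)
    tnp.round_(a, out=out)       # fine: out is an ndarray
    tnp.round_(a, out=[0., 0.])  # TypeError: 'out' must be an array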
From fe9011df4770508dd6e5a41cb06e62723cf08eee Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Fri, 10 Mar 2023 20:17:41 +0300
Subject: [PATCH 26/33] MAINT: better error message for wrong axis arguments

Co-authored-by: Matthew Barber
---
 torch_np/_detail/_reductions.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/torch_np/_detail/_reductions.py b/torch_np/_detail/_reductions.py
index 4db55f40..550f3580 100644
--- a/torch_np/_detail/_reductions.py
+++ b/torch_np/_detail/_reductions.py
@@ -25,6 +25,8 @@ def wrapped(tensor, axis, *args, **kwds):
 
         if axis is not None:
             if not isinstance(axis, (list, tuple)):
+                if not isinstance(axis, typing.SupportsIndex):
+                    raise TypeError(f"{type(axis)=}, but should be a list/tuple or support operator.index()")
                 axis = (axis,)
 
         axis = _util.normalize_axis_tuple(axis, tensor.ndim)
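A sketch of the error this check is meant to produce (the message text comes from the hunk above; `float` has no `__index__`, so it fails the `typing.SupportsIndex` test). Note that the matching `import typing` only lands with the import cleanup in the next patch:

    import torch_np as tnp

    a = tnp.ones((2, 3))
    tnp.sum(a, axis=1.5)
    # TypeError: type(axis)=<class 'float'>, but should be a list/tuple
    # or support operator.index()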
From b3d5f0a8afe81c9c87a3a3fed89e082abfec130f Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Fri, 10 Mar 2023 20:35:13 +0300
Subject: [PATCH 27/33] MAINT: remove isort:skip directives (circ imports are well hidden now)

---
 torch_np/__init__.py            |  2 +-
 torch_np/_binary_ufuncs.py      |  6 +--
 torch_np/_detail/_reductions.py |  6 ++-
 torch_np/_funcs.py              | 70 ++++++++++++++++++++++++++-------
 torch_np/_normalizations.py     |  1 -
 torch_np/_unary_ufuncs.py       |  5 ++-
 torch_np/_wrapper.py            | 27 ++++++-------
 torch_np/random.py              |  8 ++--
 8 files changed, 83 insertions(+), 42 deletions(-)

diff --git a/torch_np/__init__.py b/torch_np/__init__.py
index f48fcdfa..57cb6cd9 100644
--- a/torch_np/__init__.py
+++ b/torch_np/__init__.py
@@ -1,4 +1,3 @@
-from ._wrapper import *  # isort: skip  # XXX: currently this prevents circular imports
 from . import random
 from ._binary_ufuncs import *
 from ._detail._index_tricks import *
@@ -8,6 +7,7 @@
 from ._getlimits import errstate, finfo, iinfo
 from ._ndarray import array, asarray, can_cast, ndarray, newaxis, result_type
 from ._unary_ufuncs import *
+from ._wrapper import *
 
 # from . import testing
diff --git a/torch_np/_binary_ufuncs.py b/torch_np/_binary_ufuncs.py
index e38292c1..9f2ca4a0 100644
--- a/torch_np/_binary_ufuncs.py
+++ b/torch_np/_binary_ufuncs.py
@@ -1,8 +1,8 @@
-from . import _helpers
-from ._detail import _binary_ufuncs
-from ._normalizations import ArrayLike, DTypeLike, SubokLike, NDArray, normalizer
 from typing import Optional
 
+from . import _helpers
+from ._detail import _binary_ufuncs
+from ._normalizations import ArrayLike, DTypeLike, NDArray, SubokLike, normalizer
 
 __all__ = [
     name for name in dir(_binary_ufuncs) if not name.startswith("_") and name != "torch"
diff --git a/torch_np/_detail/_reductions.py b/torch_np/_detail/_reductions.py
index 550f3580..8aba8eaf 100644
--- a/torch_np/_detail/_reductions.py
+++ b/torch_np/_detail/_reductions.py
@@ -4,6 +4,8 @@
 Anything here only deals with torch objects, e.g. "dtype" is a torch.dtype instance etc
 """
+import typing
+
 import torch
 
 from . import _dtypes_impl, _util
@@ -26,7 +28,9 @@ def wrapped(tensor, axis, *args, **kwds):
         if axis is not None:
             if not isinstance(axis, (list, tuple)):
                 if not isinstance(axis, typing.SupportsIndex):
-                    raise TypeError(f"{type(axis)=}, but should be a list/tuple or support operator.index()")
+                    raise TypeError(
+                        f"{type(axis)=}, but should be a list/tuple or support operator.index()"
+                    )
                 axis = (axis,)
 
         axis = _util.normalize_axis_tuple(axis, tensor.ndim)
diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py
index e809af90..cab55a08 100644
--- a/torch_np/_funcs.py
+++ b/torch_np/_funcs.py
@@ -14,7 +14,6 @@
     UnpackedSeqArrayLike,
     normalizer,
 )
-from typing import Optional
 
 
 @normalizer
@@ -59,7 +58,14 @@ def diagonal(a: ArrayLike, offset=0, axis1=0, axis2=1):
 
 
 @normalizer
-def trace(a: ArrayLike, offset=0, axis1=0, axis2=1, dtype: DTypeLike = None, out: Optional[NDArray] = None):
+def trace(
+    a: ArrayLike,
+    offset=0,
+    axis1=0,
+    axis2=1,
+    dtype: DTypeLike = None,
+    out: Optional[NDArray] = None,
+):
     result = _impl.trace(a, offset, axis1, axis2, dtype)
     return _helpers.result_or_out(result, out)
 
@@ -213,7 +219,7 @@ def imag(a: ArrayLike):
 
 
 @normalizer
-def round_(a: ArrayLike, decimals=0, out: Optional[NDArray]=None):
+def round_(a: ArrayLike, decimals=0, out: Optional[NDArray] = None):
     result = _impl.round(a, decimals)
     return _helpers.result_or_out(result, out)
 
@@ -233,7 +239,7 @@ def sum(
     a: ArrayLike,
     axis: AxisLike = None,
     dtype: DTypeLike = None,
-    out: Optional[NDArray]=None,
+    out: Optional[NDArray] = None,
     keepdims=NoValue,
     initial=NoValue,
     where=NoValue,
@@ -249,7 +255,7 @@ def prod(
     a: ArrayLike,
     axis: AxisLike = None,
     dtype: DTypeLike = None,
-    out: Optional[NDArray]=None,
+    out: Optional[NDArray] = None,
     keepdims=NoValue,
     initial=NoValue,
     where=NoValue,
@@ -268,7 +274,7 @@ def mean(
     a: ArrayLike,
     axis: AxisLike = None,
     dtype: DTypeLike = None,
-    out: Optional[NDArray]=None,
+    out: Optional[NDArray] = None,
     keepdims=NoValue,
     *,
     where=NoValue,
@@ -284,7 +290,7 @@ def var(
     a: ArrayLike,
     axis: AxisLike = None,
     dtype: DTypeLike = None,
-    out: Optional[NDArray]=None,
+    out: Optional[NDArray] = None,
     ddof=0,
     keepdims=NoValue,
     *,
@@ -301,7 +307,7 @@ def std(
     a: ArrayLike,
     axis: AxisLike = None,
     dtype: DTypeLike = None,
-    out: Optional[NDArray]=None,
+    out: Optional[NDArray] = None,
     ddof=0,
     keepdims=NoValue,
     *,
@@ -314,13 +320,25 @@ def std(
 
 
 @normalizer
-def argmin(a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, *, keepdims=NoValue):
+def argmin(
+    a: ArrayLike,
+    axis: AxisLike = None,
+    out: Optional[NDArray] = None,
+    *,
+    keepdims=NoValue,
+):
     result = _reductions.argmin(a, axis=axis, keepdims=keepdims)
     return _helpers.result_or_out(result, out)
 
 
 @normalizer
-def argmax(a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, *, keepdims=NoValue):
+def argmax(
+    a: ArrayLike,
+    axis: AxisLike = None,
+    out: Optional[NDArray] = None,
+    *,
+    keepdims=NoValue,
+):
     result = _reductions.argmax(a, axis=axis, keepdims=keepdims)
     return _helpers.result_or_out(result, out)
 
@@ -362,14 +380,21 @@ def amin(
 
 
 @normalizer
-def ptp(a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, keepdims=NoValue):
+def ptp(
+    a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, keepdims=NoValue
+):
     result = _reductions.ptp(a, axis=axis, keepdims=keepdims)
     return _helpers.result_or_out(result, out)
 
 
 @normalizer
 def all(
-    a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, keepdims=NoValue, *, where=NoValue
+    a: ArrayLike,
+    axis: AxisLike = None,
+    out: Optional[NDArray] = None,
+    keepdims=NoValue,
+    *,
+    where=NoValue,
 ):
     result = _reductions.all(a, axis=axis, where=where, keepdims=keepdims)
     return _helpers.result_or_out(result, out)
@@ -377,7 +402,12 @@ def all(
 
 @normalizer
 def any(
-    a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, keepdims=NoValue, *, where=NoValue
+    a: ArrayLike,
+    axis: AxisLike = None,
+    out: Optional[NDArray] = None,
+    keepdims=NoValue,
+    *,
+    where=NoValue,
 ):
     result = _reductions.any(a, axis=axis, where=where, keepdims=keepdims)
     return _helpers.result_or_out(result, out)
@@ -390,13 +420,23 @@ def count_nonzero(a: ArrayLike, axis: AxisLike = None, *, keepdims=False):
 
 
 @normalizer
-def cumsum(a: ArrayLike, axis: AxisLike = None, dtype: DTypeLike = None, out: Optional[NDArray] = None):
+def cumsum(
+    a: ArrayLike,
+    axis: AxisLike = None,
+    dtype: DTypeLike = None,
+    out: Optional[NDArray] = None,
+):
     result = _reductions.cumsum(a, axis=axis, dtype=dtype)
     return _helpers.result_or_out(result, out)
 
 
 @normalizer
-def cumprod(a: ArrayLike, axis: AxisLike = None, dtype: DTypeLike = None, out: Optional[NDArray] = None):
+def cumprod(
+    a: ArrayLike,
+    axis: AxisLike = None,
+    dtype: DTypeLike = None,
+    out: Optional[NDArray] = None,
+):
     result = _reductions.cumprod(a, axis=axis, dtype=dtype)
     return _helpers.result_or_out(result, out)
diff --git a/torch_np/_normalizations.py b/torch_np/_normalizations.py
index 90db0e94..c3b5ff01 100644
--- a/torch_np/_normalizations.py
+++ b/torch_np/_normalizations.py
@@ -72,7 +72,6 @@ def normalize_ndarray(arg, name=None):
     return arg
 
 
-
 normalizers = {
     ArrayLike: normalize_array_like,
     Optional[ArrayLike]: normalize_optional_array_like,
diff --git a/torch_np/_unary_ufuncs.py b/torch_np/_unary_ufuncs.py
index 366b397b..8990743b 100644
--- a/torch_np/_unary_ufuncs.py
+++ b/torch_np/_unary_ufuncs.py
@@ -2,10 +2,11 @@
 
 # from ._detail import _ufunc_impl
 
+from typing import Optional
+
 from . import _helpers
 from ._detail import _unary_ufuncs
-from ._normalizations import ArrayLike, DTypeLike, SubokLike, NDArray, normalizer
-from typing import Optional
+from ._normalizations import ArrayLike, DTypeLike, NDArray, SubokLike, normalizer
 
 __all__ = [
     name for name in dir(_unary_ufuncs) if not name.startswith("_") and name != "torch"
diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py
index a853f1dd..f85b5b80 100644
--- a/torch_np/_wrapper.py
+++ b/torch_np/_wrapper.py
@@ -8,23 +8,18 @@
 
 import torch
 
-from . import _funcs
+from . import _decorators, _dtypes, _funcs, _helpers
 from ._detail import _dtypes_impl, _flips, _reductions, _util
 from ._detail import implementations as _impl
 from ._ndarray import array, asarray, maybe_set_base, ndarray
-
 from ._normalizations import (
     ArrayLike,
     DTypeLike,
+    NDArray,
     SubokLike,
     UnpackedSeqArrayLike,
-    NDArray,
     normalizer,
 )
-from typing import Optional
-
-from . import _dtypes, _helpers, _decorators  # isort: skip  # XXX
-
 
 # Things to decide on (punt for now)
 #
diff --git a/torch_np/random.py b/torch_np/random.py
index e72b629c..21d5faa2 100644
--- a/torch_np/random.py
+++ b/torch_np/random.py
@@ -6,13 +6,13 @@
 """
 from math import sqrt
+from typing import Optional
 
 import torch
 
-from ._detail import _dtypes_impl, _util
 from . import _helpers
-from ._normalizations import normalizer, ArrayLike
-from typing import Optional
+from ._detail import _dtypes_impl, _util
+from ._normalizations import ArrayLike, normalizer
 
 _default_dtype = _dtypes_impl.default_float_dtype
 
@@ -96,7 +96,7 @@ def randint(low, high=None, size=None):
 
 
 @normalizer
-def choice(a: ArrayLike, size=None, replace=True, p: Optional[ArrayLike]=None):
+def choice(a: ArrayLike, size=None, replace=True, p: Optional[ArrayLike] = None):
 
     # https://stackoverflow.com/questions/59461811/random-choice-with-pytorch
     if a.numel() == 1:
From 69e657a0b10509b1ef3028851dac004306dda7ba Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Wed, 15 Mar 2023 22:35:57 +0300
Subject: [PATCH 28/33] TST: unxfail tests of out and dtype as positional args

---
 torch_np/tests/numpy_tests/core/test_multiarray.py | 1 -
 torch_np/tests/numpy_tests/core/test_numeric.py    | 1 -
 torch_np/tests/numpy_tests/lib/test_shape_base_.py | 2 +-
 torch_np/tests/test_ndarray_methods.py             | 1 -
 4 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/torch_np/tests/numpy_tests/core/test_multiarray.py b/torch_np/tests/numpy_tests/core/test_multiarray.py
index 49e8f01d..8ec56146 100644
--- a/torch_np/tests/numpy_tests/core/test_multiarray.py
+++ b/torch_np/tests/numpy_tests/core/test_multiarray.py
@@ -3743,7 +3743,6 @@ def test_ret_is_out(self, ndim, method):
         ret = arg_method(axis=0, out=out)
         assert ret is out
 
-    @pytest.mark.xfail(reason='FIXME: out w/ positional args?')
     @pytest.mark.parametrize('arr_method, np_method',
             [('argmax', np.argmax),
              ('argmin', np.argmin)])
diff --git a/torch_np/tests/numpy_tests/core/test_numeric.py b/torch_np/tests/numpy_tests/core/test_numeric.py
index 9b0e7df4..0d1b01ab 100644
--- a/torch_np/tests/numpy_tests/core/test_numeric.py
+++ b/torch_np/tests/numpy_tests/core/test_numeric.py
@@ -2486,7 +2486,6 @@ def test_mode(self):
 
 
 class TestDtypePositional:
-    @pytest.mark.xfail(reason='TODO: restore dtypes as positional args')
     def test_dtype_positional(self):
         np.empty((2,), bool)
 
diff --git a/torch_np/tests/numpy_tests/lib/test_shape_base_.py b/torch_np/tests/numpy_tests/lib/test_shape_base_.py
index 70d2b261..cd43c8fb 100644
--- a/torch_np/tests/numpy_tests/lib/test_shape_base_.py
+++ b/torch_np/tests/numpy_tests/lib/test_shape_base_.py
@@ -719,7 +719,7 @@ def test_kroncompare(self):
         for s in shape:
             b = randint(0, 10, size=s)
             for r in reps:
-                a = np.ones(r, dtype=b.dtype)  # TODO: restore dtype positional arg
+                a = np.ones(r, b.dtype)
                 large = tile(b, r)
                 klarge = kron(a, b)
                 assert_equal(large, klarge)
diff --git a/torch_np/tests/test_ndarray_methods.py b/torch_np/tests/test_ndarray_methods.py
index 66fa4e58..c31aaf9a 100644
--- a/torch_np/tests/test_ndarray_methods.py
+++ b/torch_np/tests/test_ndarray_methods.py
@@ -323,7 +323,6 @@ def test_np_vs_ndarray(self, arr_method, np_method):
         assert_equal(arg_method(out=out1, axis=0), np_method(a, out=out2, axis=0))
         assert_equal(out1, out2)
 
-    @pytest.mark.xfail(reason="out=... as a positional arg")
     @pytest.mark.parametrize(
         "arr_method, np_method", [("argmax", np.argmax), ("argmin", np.argmin)]
     )
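These tests exercise `out` and `dtype` passed positionally, which the normalizer now binds like any other parameter. A sketch of the behavior being unxfailed (hypothetical values, assuming the setup in the tests above):

    import torch_np as np

    a = np.ones((2, 2))
    out = np.empty(2, dtype=int)
    np.argmax(a, 0, out)   # out as the third positional argument
    np.empty((2,), bool)   # dtype as the second positional argument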
From 27cb10f679f1a74aeb316415035c9fbc594cbb02 Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Tue, 21 Mar 2023 00:00:16 +0300
Subject: [PATCH 29/33] MAINT: remove debug leftovers

---
 torch_np/_normalizations.py | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/torch_np/_normalizations.py b/torch_np/_normalizations.py
index c3b5ff01..8d3309eb 100644
--- a/torch_np/_normalizations.py
+++ b/torch_np/_normalizations.py
@@ -118,7 +118,6 @@ def wrapped(*args, **kwds):
         # loop over positional parameters and actual arguments
         lst, dct = [], {}
         for arg, (name, parm) in zip(args, sig.parameters.items()):
-            print(arg, name, parm.annotation)
            lst.append(normalize_this(arg, parm))
 
         # normalize keyword arguments
@@ -129,7 +128,6 @@ def wrapped(*args, **kwds):
                     f"{func.__name__}() got an unexpected keyword argument '{name}'."
                 )
 
-            print("kw: ", name, sig.parameters[name].annotation)
             parm = sig.parameters[name]
             dct[name] = normalize_this(arg, parm)
 
@@ -144,19 +142,6 @@ def wrapped(*args, **kwds):
                 f"{func.__name__}() takes {len(ba.args)} positional arguments but {len(args)} were given."
             )
 
-        # TODO:
-        # 1. [LOOKS OK] kw-only parameters : see vstack
-        # 2. [LOOKS OK] extra unknown args -- error out : nonzero([2, 0, 3], oops=42)
-        # 3. [LOOKS OK] optional (tensor_or_none) : untyped => pass through
-        # 4. [LOOKS OK] DTypeLike : positional or kw
-        # 5. axes : live in _impl or in types? several ways of handling them
-        # 6. [OK, NOT HERE] keepdims : peel off, postprocess
-        # 7. OutLike : normal & keyword-only, peel off, postprocess
-        # 8. [LOOKS OK] *args
-        # 9. [LOOKS OK] consolidate normalizations (_funcs, _wrapper)
-        # 10. [LOOKS OK] consolidate decorators (_{unary,binary}_ufuncs)
-        # 11. out= arg : validate it's an ndarray
-
         # finally, pass normalized arguments through
         result = func(*ba.args, **ba.kwargs)
         return result

From a6eb5815174580eaff5a09e3745551b8c3186004 Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Tue, 21 Mar 2023 00:26:53 +0300
Subject: [PATCH 30/33] MAINT: add a comment on axis=() in reductions

---
 torch_np/_detail/_reductions.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/torch_np/_detail/_reductions.py b/torch_np/_detail/_reductions.py
index 8aba8eaf..549f20e0 100644
--- a/torch_np/_detail/_reductions.py
+++ b/torch_np/_detail/_reductions.py
@@ -35,6 +35,10 @@ def wrapped(tensor, axis, *args, **kwds):
         axis = _util.normalize_axis_tuple(axis, tensor.ndim)
 
         if axis == ():
+            # NumPy does essentially an identity operation:
+            # >>> np.sum(np.ones(2), axis=())
+            # array([1., 1.])
+            # So we insert a length-one axis and run the reduction along it.
             newshape = _util.expand_shape(tensor.shape, axis=0)
             tensor = tensor.reshape(newshape)
             axis = (0,)
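The comment documents behavior that is easy to miss: an empty axis tuple is not "reduce to a scalar" but an elementwise identity. A quick demonstration of the NumPy semantics being matched (illustrative):

    import torch_np as np

    x = np.ones(2)
    np.sum(x, axis=())    # array([1., 1.]) -- identity, nothing is reduced
    np.sum(x, axis=None)  # the full reduction, by contrast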
From 8c78725def3682bfca3036a23c955241cfac7f67 Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Tue, 21 Mar 2023 13:10:18 +0300
Subject: [PATCH 31/33] MAINT: simplify arg/param handling in normalize

---
 torch_np/_normalizations.py  | 43 ++++++++++++------------------------
 torch_np/tests/test_basic.py |  4 ++++
 2 files changed, 18 insertions(+), 29 deletions(-)

diff --git a/torch_np/_normalizations.py b/torch_np/_normalizations.py
index 8d3309eb..908b3229 100644
--- a/torch_np/_normalizations.py
+++ b/torch_np/_normalizations.py
@@ -115,35 +115,20 @@ def wrapped(*args, **kwds):
                     raise NotImplementedError
                 break
 
-        # loop over positional parameters and actual arguments
-        lst, dct = [], {}
-        for arg, (name, parm) in zip(args, sig.parameters.items()):
-            lst.append(normalize_this(arg, parm))
-
-        # normalize keyword arguments
-        for name, arg in kwds.items():
-            if not name in sig.parameters:
-                # unknown kwarg, bail out
-                raise TypeError(
-                    f"{func.__name__}() got an unexpected keyword argument '{name}'."
-                )
-
-            parm = sig.parameters[name]
-            dct[name] = normalize_this(arg, parm)
-
-        ba = sig.bind(*lst, **dct)
-        ba.apply_defaults()
-
-        # Now that all parameters have been consumed, check:
-        # Anything that has not been bound is unexpected positional arg => raise.
-        # If there are too few actual arguments, this will fail in func(*ba.args) below
-        if len(args) > len(ba.args):
-            raise TypeError(
-                f"{func.__name__}() takes {len(ba.args)} positional arguments but {len(args)} were given."
-            )
-
-        # finally, pass normalized arguments through
-        result = func(*ba.args, **ba.kwargs)
+        # normalize positional and keyword arguments
+        # NB: extra unknown arguments: pass through, will raise in func(*lst) below
+        sp = sig.parameters
+
+        lst = [normalize_this(arg, parm) for arg, parm in zip(args, sp.values())]
+        lst += args[len(lst) :]
+
+        dct = {
+            name: normalize_this(arg, sp[name]) if name in sp else arg
+            for name, arg in kwds.items()
+        }
+
+        result = func(*lst, **dct)
+
         return result
 
     return wrapped
diff --git a/torch_np/tests/test_basic.py b/torch_np/tests/test_basic.py
index f6fab76d..23ea784a 100644
--- a/torch_np/tests/test_basic.py
+++ b/torch_np/tests/test_basic.py
@@ -401,6 +401,10 @@ def test_unknown_args(self):
         with assert_raises(TypeError):
             w.nonzero(a, oops="ouch")
 
+    def test_too_few_args_positional(self):
+        with assert_raises(TypeError):
+            w.nonzero()
+
     def test_unknown_args_with_defaults(self):
         # check a function 5 arguments and 4 defaults: this should work
         w.eye(3)
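After this simplification the decorator no longer re-implements Python's binding rules: surplus positional arguments and unknown keywords are passed through, and `func` itself raises the usual `TypeError`. A sketch of the resulting behavior, matching the tests above (illustrative):

    import torch_np as np

    np.nonzero()                    # TypeError, raised by nonzero itself
    np.nonzero([2, 0, 3], oops=42)  # TypeError: unexpected keyword argument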
From 9d75cab63adaad254996e27f4a927ee4591d0ff0 Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Wed, 22 Mar 2023 01:45:09 +0300
Subject: [PATCH 32/33] MAINT: simplify handling of variadic *args in normalize

---
 torch_np/_funcs.py          |  1 -
 torch_np/_normalizations.py | 29 ++++++++++-----------------
 torch_np/_wrapper.py        | 39 ++++++++++++++-----------------------
 3 files changed, 25 insertions(+), 44 deletions(-)

diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py
index cab55a08..59dd594f 100644
--- a/torch_np/_funcs.py
+++ b/torch_np/_funcs.py
@@ -11,7 +11,6 @@
     DTypeLike,
     NDArray,
     SubokLike,
-    UnpackedSeqArrayLike,
     normalizer,
 )
 
diff --git a/torch_np/_normalizations.py b/torch_np/_normalizations.py
index 908b3229..04a2d785 100644
--- a/torch_np/_normalizations.py
+++ b/torch_np/_normalizations.py
@@ -14,9 +14,6 @@
 AxisLike = typing.TypeVar("AxisLike")
 NDArray = typing.TypeVar("NDArray")
 
-# annotate e.g. atleast_1d(*arys)
-UnpackedSeqArrayLike = typing.TypeVar("UnpackedSeqArrayLike")
-
 
 import inspect
 
@@ -76,7 +73,6 @@ def normalize_ndarray(arg, name=None):
 normalizers = {
     ArrayLike: normalize_array_like,
     Optional[ArrayLike]: normalize_optional_array_like,
     Sequence[ArrayLike]: normalize_seq_array_like,
-    UnpackedSeqArrayLike: normalize_seq_array_like,  # cf handling in normalize
     Optional[NDArray]: normalize_ndarray,
     DTypeLike: normalize_dtype,
     SubokLike: normalize_subok_like,
@@ -97,24 +93,19 @@ def normalizer(func):
     @functools.wraps(func)
     def wrapped(*args, **kwds):
         sig = inspect.signature(func)
-
-        # first, check for *args in positional parameters. Case in point:
-        # atleast_1d(*arys: UnpackedSeqArrayLike)
-        # if found, consume all args into a tuple to normalize as a whole
-        for j, param in enumerate(sig.parameters.values()):
-            if param.annotation == UnpackedSeqArrayLike:
-                if j == 0:
-                    args = (args,)
-                else:
-                    # args = args[:j] + (args[j:],) would likely work
-                    # not present in numpy codebase, so do not bother just yet.
-                    # NB: branching on j == 0 is to avoid the empty tuple, args[:j]
-                    raise NotImplementedError
+        sp = dict(sig.parameters)
+
+        # check for *args. If detected, duplicate the corresponding parameter
+        # to have len(args) annotations for each element of *args.
+        for j, param in enumerate(sp.values()):
+            if param.kind == inspect.Parameter.VAR_POSITIONAL:
+                sp.pop(param.name)
+                variadic = {param.name + str(i): param for i in range(len(args))}
+                variadic.update(sp)
+                sp = variadic
                 break
 
         # normalize positional and keyword arguments
         # NB: extra unknown arguments: pass through, will raise in func(*lst) below
-        sp = sig.parameters
-
         lst = [normalize_this(arg, parm) for arg, parm in zip(args, sp.values())]
         lst += args[len(lst) :]
diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py
index f85b5b80..39bff119 100644
--- a/torch_np/_wrapper.py
+++ b/torch_np/_wrapper.py
@@ -12,14 +12,7 @@
 from ._detail import _dtypes_impl, _flips, _reductions, _util
 from ._detail import implementations as _impl
 from ._ndarray import array, asarray, maybe_set_base, ndarray
-from ._normalizations import (
-    ArrayLike,
-    DTypeLike,
-    NDArray,
-    SubokLike,
-    UnpackedSeqArrayLike,
-    normalizer,
-)
+from ._normalizations import ArrayLike, DTypeLike, NDArray, SubokLike, normalizer
 
 # Things to decide on (punt for now)
 #
@@ -71,30 +64,30 @@ def copy(a: ArrayLike, order="K", subok: SubokLike = False):
 
 
 @normalizer
-def atleast_1d(*arys: UnpackedSeqArrayLike):
+def atleast_1d(*arys: ArrayLike):
     res = torch.atleast_1d(*arys)
-    if len(res) == 1:
-        return _helpers.array_from(res[0])
-    else:
+    if isinstance(res, tuple):
         return list(_helpers.tuple_arrays_from(res))
+    else:
+        return _helpers.array_from(res)
 
 
 @normalizer
-def atleast_2d(*arys: UnpackedSeqArrayLike):
+def atleast_2d(*arys: ArrayLike):
     res = torch.atleast_2d(*arys)
-    if len(res) == 1:
-        return _helpers.array_from(res[0])
-    else:
+    if isinstance(res, tuple):
         return list(_helpers.tuple_arrays_from(res))
+    else:
+        return _helpers.array_from(res)
 
 
 @normalizer
-def atleast_3d(*arys: UnpackedSeqArrayLike):
+def atleast_3d(*arys: ArrayLike):
     res = torch.atleast_3d(*arys)
-    if len(res) == 1:
-        return _helpers.array_from(res[0])
-    else:
+    if isinstance(res, tuple):
         return list(_helpers.tuple_arrays_from(res))
+    else:
+        return _helpers.array_from(res)
 
 
 def _concat_check(tup, dtype, out):
@@ -537,8 +530,7 @@ def broadcast_to(array: ArrayLike, shape, subok: SubokLike = False):
 
 # YYY: pattern: tuple of arrays as input, tuple of arrays as output; cf nonzero
 @normalizer
-def broadcast_arrays(*args: UnpackedSeqArrayLike, subok: SubokLike = False):
-    args = args[0]  # undo the *args wrapping in normalizer
+def broadcast_arrays(*args: ArrayLike, subok: SubokLike = False):
     res = torch.broadcast_tensors(*args)
     return _helpers.tuple_arrays_from(res)
 
@@ -565,8 +557,7 @@ def ravel_multi_index(multi_index, dims, mode="raise", order="C"):
 
 
 @normalizer
-def meshgrid(*xi: UnpackedSeqArrayLike, copy=True, sparse=False, indexing="xy"):
-    xi = xi[0]  # undo the *xi wrapping in normalizer
+def meshgrid(*xi: ArrayLike, copy=True, sparse=False, indexing="xy"):
     output = _impl.meshgrid(*xi, copy=copy, sparse=sparse, indexing=indexing)
     outp = _helpers.tuple_arrays_from(output)
     return list(outp)  # match numpy, return a list

From 7dced32f66bbce9ebcbaaf794caca9ecbd5e01a9 Mon Sep 17 00:00:00 2001
From: Mario Lezcano Casado <3291265+lezcano@users.noreply.github.com>
Date: Wed, 22 Mar 2023 22:15:53 +0300
Subject: [PATCH 33/33] MAINT: simplify normalizer

---
 torch_np/_normalizations.py | 50 +++++++++++++++----------------------
 1 file changed, 20 insertions(+), 30 deletions(-)

diff --git a/torch_np/_normalizations.py b/torch_np/_normalizations.py
index 04a2d785..7f2ace98 100644
--- a/torch_np/_normalizations.py
+++ b/torch_np/_normalizations.py
@@ -82,44 +82,34 @@
 import functools
 
 
-def normalize_this(arg, parm):
+def maybe_normalize(arg, parm):
     """Normalize arg if a normalizer is registered."""
     normalizer = normalizers.get(parm.annotation, None)
-    if normalizer:
-        return normalizer(arg)
-    else:
-        # untyped arguments pass through
-        return arg
+    return normalizer(arg) if normalizer else arg
 
 
 def normalizer(func):
     @functools.wraps(func)
     def wrapped(*args, **kwds):
-        sig = inspect.signature(func)
-        sp = dict(sig.parameters)
-
-        # check for *args. If detected, duplicate the corresponding parameter
-        # to have len(args) annotations for each element of *args.
-        for j, param in enumerate(sp.values()):
-            if param.kind == inspect.Parameter.VAR_POSITIONAL:
-                sp.pop(param.name)
-                variadic = {param.name + str(i): param for i in range(len(args))}
-                variadic.update(sp)
-                sp = variadic
-                break
-
-        # normalize positional and keyword arguments
-        # NB: extra unknown arguments: pass through, will raise in func(*lst) below
-        lst = [normalize_this(arg, parm) for arg, parm in zip(args, sp.values())]
-        lst += args[len(lst) :]
-
-        dct = {
-            name: normalize_this(arg, sp[name]) if name in sp else arg
-            for name, arg in kwds.items()
-        }
-
-        result = func(*lst, **dct)
-
-        return result
+        params = inspect.signature(func).parameters
+        first_param = next(iter(params.values()))
+        # NumPy's API does not have positional args before variadic positional args
+        if first_param.kind == inspect.Parameter.VAR_POSITIONAL:
+            args = [maybe_normalize(arg, first_param) for arg in args]
+        else:
+            # NB: extra unknown arguments: pass through, will raise in func(*args) below
+            args = (
+                tuple(
+                    maybe_normalize(arg, parm)
+                    for arg, parm in zip(args, params.values())
+                )
+                + args[len(params.values()) :]
+            )
+
+        kwds = {
+            name: maybe_normalize(arg, params[name]) if name in params else arg
+            for name, arg in kwds.items()
+        }
+        return func(*args, **kwds)
 
     return wrapped
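The final form of the decorator, then: one pass of `maybe_normalize` over positional arguments (treating a leading `*args` uniformly with its single annotation) and one over keywords. A usage sketch of the variadic path (illustrative):

    import torch_np as np

    # each element of *arys is normalized with the same ArrayLike annotation
    np.atleast_1d(5, [1, 2], np.asarray(3.0))   # -> a list of three ndarrays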