diff --git a/torch_np/_binary_ufuncs.py b/torch_np/_binary_ufuncs.py
index 70e6101e..64327488 100644
--- a/torch_np/_binary_ufuncs.py
+++ b/torch_np/_binary_ufuncs.py
@@ -44,7 +44,7 @@ def wrapped(
             tensors = tuple(torch.broadcast_to(t, shape) for t in tensors)
 
         result = torch_func(*tensors)
-        return _helpers.result_or_out(result, out)
+        return result
 
     return wrapped
@@ -77,69 +77,57 @@ def matmul(
     # NB: do not broadcast input tensors against the out=... array
     result = _binary_ufuncs.matmul(*tensors)
-    return _helpers.result_or_out(result, out)
-
-
-#
-# For each torch ufunc implementation, decorate and attach the decorated name
-# to this module. Its contents is then exported to the public namespace in __init__.py
-#
-for name in __all__:
-    ufunc = getattr(_binary_ufuncs, name)
-    decorated = normalizer(deco_binary_ufunc(ufunc))
-
-    decorated.__qualname__ = name  # XXX: is this really correct?
-    decorated.__name__ = name
-    vars()[name] = decorated
-
-
-# a stub implementation of divmod, should be improved after
-# https://github.com/pytorch/pytorch/issues/90820 is fixed in pytorch
-#
-# Implementation details: we just call two ufuncs which have been created
-# just above, for x1 // x2 and x1 % x2.
-# This means we are normalizing x1, x2 in each of the ufuncs --- note that there
-# is no @normalizer on divmod.
+    return result
 
 
+@normalizer
 def divmod(
-    x1,
-    x2,
+    x1: ArrayLike,
+    x2: ArrayLike,
+    out1: Optional[NDArray] = None,
+    out2: Optional[NDArray] = None,
     /,
-    out=None,
+    out: Optional[tuple[NDArray]] = (None, None),
     *,
     where=True,
     casting="same_kind",
     order="K",
-    dtype=None,
+    dtype: DTypeLike = None,
     subok: SubokLike = False,
     signature=None,
     extobj=None,
 ):
-    out1, out2 = None, None
+    # Either out1 and out2 are provided positionally, or out=(out1, out2)
+    # is provided as a keyword -- but not both.
+    num_outs = sum(x is not None for x in [out1, out2])
+    if num_outs == 1:
+        raise ValueError("both out1 and out2 need to be provided")
+    if num_outs == 2 and out != (None, None):
+        raise ValueError("Either provide out1 and out2, or out.")
     if out is not None:
         out1, out2 = out
+        if out1 is not None and (out1.shape != out2.shape or out1.dtype != out2.dtype):
+            raise ValueError("out1, out2 must be compatible")
 
-    kwds = dict(
-        where=where,
-        casting=casting,
-        order=order,
-        dtype=dtype,
-        subok=subok,
-        signature=signature,
-        extobj=extobj,
+    tensors = _helpers.ufunc_preprocess(
+        (x1, x2), out, True, casting, order, dtype, subok, signature, extobj
     )
-    # NB: use local names for
-    quot = floor_divide(x1, x2, out=out1, **kwds)
-    rem = remainder(x1, x2, out=out2, **kwds)
 
-    quot = _helpers.result_or_out(quot.tensor, out1)
-    rem = _helpers.result_or_out(rem.tensor, out2)
+    quot, rem = _binary_ufuncs.divmod(*tensors)
 
     return quot, rem
 
 
+#
+# For each torch ufunc implementation, decorate and attach the decorated name
+# to this module. Its contents are then exported to the public namespace in __init__.py
+#
+for name in __all__:
+    ufunc = getattr(_binary_ufuncs, name)
+    decorated = normalizer(deco_binary_ufunc(ufunc))
+
+    decorated.__qualname__ = name  # XXX: is this really correct?
+    decorated.__name__ = name
+    vars()[name] = decorated
+
+
 def modf(x, /, *args, **kwds):
     quot, rem = divmod(x, 1, *args, **kwds)
     return rem, quot
diff --git a/torch_np/_decorators.py b/torch_np/_decorators.py
deleted file mode 100644
index e33cb53f..00000000
--- a/torch_np/_decorators.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import functools
-
-import torch
-
-from . import _dtypes, _helpers
-from ._detail import _util
-
-
-def out_shape_dtype(func):
-    """Handle out=... kwarg for ufuncs.
-
-    With ufuncs, `out` array can typcast and broadcast ufunc arguments, hence
-    extract the shape and dtype of the tensor which backs the `out` array
-    and pass these through.
-    """
-
-    @functools.wraps(func)
-    def wrapped(*args, out=None, **kwds):
-        if out is not None:
-            kwds.update({"out_shape_dtype": (out.tensor.dtype, out.tensor.shape)})
-        result_tensor = func(*args, **kwds)
-        return _helpers.result_or_out(result_tensor, out)
-
-    return wrapped
diff --git a/torch_np/_detail/_binary_ufuncs.py b/torch_np/_detail/_binary_ufuncs.py
index 9ff634e8..dfd7dce3 100644
--- a/torch_np/_detail/_binary_ufuncs.py
+++ b/torch_np/_detail/_binary_ufuncs.py
@@ -70,3 +70,9 @@ def matmul(x, y):
         result = result.to(dtype)
 
     return result
+
+
+# a stub implementation of divmod, should be improved after
+# https://github.com/pytorch/pytorch/issues/90820 is fixed in pytorch
+def divmod(x, y):
+    return x // y, x % y
diff --git a/torch_np/_funcs.py b/torch_np/_funcs.py
index 4e349007..eec7360c 100644
--- a/torch_np/_funcs.py
+++ b/torch_np/_funcs.py
@@ -45,7 +45,7 @@ def clip(
     # np.clip requires both a_min and a_max not None, while ndarray.clip allows
     # one of them to be None. Follow the more lax version.
     result = _impl.clip(a, min, max)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -80,7 +80,7 @@ def trace(
     out: Optional[NDArray] = None,
 ):
     result = _impl.trace(a, offset, axis1, axis2, dtype)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -135,7 +135,7 @@ def vdot(a: ArrayLike, b: ArrayLike, /):
 
 @normalizer
 def dot(a: ArrayLike, b: ArrayLike, out: Optional[NDArray] = None):
     result = _impl.dot(a, b)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 # ### sort and partition ###
@@ -234,7 +234,7 @@ def imag(a: ArrayLike):
 @normalizer
 def round_(a: ArrayLike, decimals=0, out: Optional[NDArray] = None):
     result = _impl.round(a, decimals)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 around = round_
@@ -257,7 +257,7 @@ def sum(
     result = _impl.sum(
         a, axis=axis, dtype=dtype, initial=initial, where=where, keepdims=keepdims
     )
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -273,7 +273,7 @@ def prod(
     result = _impl.prod(
         a, axis=axis, dtype=dtype, initial=initial, where=where, keepdims=keepdims
     )
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 product = prod
@@ -290,7 +290,7 @@ def mean(
     where=NoValue,
 ):
     result = _impl.mean(a, axis=axis, dtype=dtype, where=NoValue, keepdims=keepdims)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -307,7 +307,7 @@ def var(
     result = _impl.var(
         a, axis=axis, dtype=dtype, ddof=ddof, where=where, keepdims=keepdims
     )
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -324,7 +324,7 @@ def std(
     result = _impl.std(
         a, axis=axis, dtype=dtype, ddof=ddof, where=where, keepdims=keepdims
     )
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -336,7 +336,7 @@ def argmin(
     keepdims=NoValue,
 ):
     result = _impl.argmin(a, axis=axis, keepdims=keepdims)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -348,7 +348,7 @@ def argmax(
     keepdims=NoValue,
 ):
     result = _impl.argmax(a, axis=axis, keepdims=keepdims)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -361,7 +361,7 @@ def amax(
     where=NoValue,
 ):
     result = _impl.max(a, axis=axis, initial=initial, where=where, keepdims=keepdims)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 max = amax
@@ -377,7 +377,7 @@ def amin(
     where=NoValue,
 ):
     result = _impl.min(a, axis=axis, initial=initial, where=where, keepdims=keepdims)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 min = amin
@@ -388,7 +388,7 @@ def ptp(
     a: ArrayLike, axis: AxisLike = None, out: Optional[NDArray] = None, keepdims=NoValue
 ):
     result = _impl.ptp(a, axis=axis, keepdims=keepdims)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -401,7 +401,7 @@ def all(
     where=NoValue,
 ):
     result = _impl.all(a, axis=axis, where=where, keepdims=keepdims)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -414,7 +414,7 @@ def any(
     where=NoValue,
 ):
     result = _impl.any(a, axis=axis, where=where, keepdims=keepdims)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -431,7 +431,7 @@ def cumsum(
     out: Optional[NDArray] = None,
 ):
     result = _impl.cumsum(a, axis=axis, dtype=dtype)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -442,13 +442,13 @@ def cumprod(
     out: Optional[NDArray] = None,
 ):
     result = _impl.cumprod(a, axis=axis, dtype=dtype)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 cumproduct = cumprod
 
 
-@normalizer
+@normalizer(promote_scalar_result=True)
 def quantile(
     a: ArrayLike,
     q: ArrayLike,
@@ -469,10 +469,10 @@
         keepdims=keepdims,
         interpolation=interpolation,
     )
-    return _helpers.result_or_out(result, out, promote_scalar=True)
+    return result
 
 
-@normalizer
+@normalizer(promote_scalar_result=True)
 def percentile(
     a: ArrayLike,
     q: ArrayLike,
@@ -493,7 +493,7 @@ def percentile(
         keepdims=keepdims,
         interpolation=interpolation,
     )
-    return _helpers.result_or_out(result, out, promote_scalar=True)
+    return result
 
 
 def median(
diff --git a/torch_np/_helpers.py b/torch_np/_helpers.py
index 20dc4cfd..dab3d5f8 100644
--- a/torch_np/_helpers.py
+++ b/torch_np/_helpers.py
@@ -30,39 +30,6 @@ def ufunc_preprocess(
     return tensors
 
 
-# ### Return helpers: wrap a single tensor, a tuple of tensors, out= etc ###
-
-
-def result_or_out(result_tensor, out_array=None, promote_scalar=False):
-    """A helper for returns with out= argument.
-
-    If `promote_scalar is True`, then:
-        if result_tensor.numel() == 1 and out is zero-dimensional,
-        result_tensor is placed into the out array.
-    This weirdness is used e.g. in `np.percentile`
-    """
-    if out_array is not None:
-        if result_tensor.shape != out_array.shape:
-            can_fit = result_tensor.numel() == 1 and out_array.ndim == 0
-            if promote_scalar and can_fit:
-                result_tensor = result_tensor.squeeze()
-            else:
-                raise ValueError(
-                    f"Bad size of the out array: out.shape = {out_array.shape}"
-                    f" while result.shape = {result_tensor.shape}."
-                )
-        out_tensor = out_array.tensor
-        out_tensor.copy_(result_tensor)
-        return out_array
-    else:
-        from ._ndarray import ndarray
-
-        return ndarray(result_tensor)
-
-
-# ### Various ways of converting array-likes to tensors ###
-
-
 def ndarrays_to_tensors(*inputs):
     """Convert all ndarrays from `inputs` to tensors. (other things are intact)"""
     from ._ndarray import asarray, ndarray
diff --git a/torch_np/_normalizations.py b/torch_np/_normalizations.py
index b5e10e93..da006c11 100644
--- a/torch_np/_normalizations.py
+++ b/torch_np/_normalizations.py
@@ -96,16 +96,40 @@ def maybe_normalize(arg, parm, return_on_failure=_sentinel):
         raise exc from None
 
 
+# ### Return value helpers ###
+
+
+def maybe_copy_to(out, result, promote_scalar_result=False):
+    # NB: here out is either an ndarray, a tuple of ndarrays, or None
+    if out is None:
+        return result
+    elif isinstance(result, torch.Tensor):
+        if result.shape != out.shape:
+            can_fit = result.numel() == 1 and out.ndim == 0
+            if promote_scalar_result and can_fit:
+                result = result.squeeze()
+            else:
+                raise ValueError(
+                    f"Bad size of the out array: out.shape = {out.shape}"
+                    f" while result.shape = {result.shape}."
+                )
+        out.tensor.copy_(result)
+        return out
+    elif isinstance(result, (tuple, list)):
+        return type(result)(
+            maybe_copy_to(o, r, promote_scalar_result) for r, o in zip(result, out)
+        )
+    else:
+        assert False  # We should never hit this path
+
+
 def wrap_tensors(result):
     from ._ndarray import ndarray
 
     if isinstance(result, torch.Tensor):
-        result = ndarray(result)
+        return ndarray(result)
     elif isinstance(result, (tuple, list)):
-        result = type(result)(
+        return type(result)(
             ndarray(x) if isinstance(x, torch.Tensor) else x for x in result
         )
     return result
@@ -118,11 +142,15 @@ def array_or_scalar(values, py_type=float, return_scalar=False):
         return ndarray(values)
 
 
-def normalizer(_func=None, *, return_on_failure=_sentinel):
+# ### The main decorator to normalize arguments / postprocess the output ###
+
+
+def normalizer(_func=None, *, return_on_failure=_sentinel, promote_scalar_result=False):
     def normalizer_inner(func):
         @functools.wraps(func)
         def wrapped(*args, **kwds):
-            params = inspect.signature(func).parameters
+            sig = inspect.signature(func)
+            params = sig.parameters
             first_param = next(iter(params.values()))
             # NumPy's API does not have positional args before variadic positional args
             if first_param.kind == inspect.Parameter.VAR_POSITIONAL:
@@ -144,7 +172,12 @@ def wrapped(*args, **kwds):
                 for name, arg in kwds.items()
             }
             result = func(*args, **kwds)
+
+            if "out" in params:
+                out = sig.bind(*args, **kwds).arguments.get("out")
+                result = maybe_copy_to(out, result, promote_scalar_result)
             result = wrap_tensors(result)
+
             return result
 
         return wrapped
diff --git a/torch_np/_unary_ufuncs.py b/torch_np/_unary_ufuncs.py
index 168d48f2..c6679bee 100644
--- a/torch_np/_unary_ufuncs.py
+++ b/torch_np/_unary_ufuncs.py
@@ -1,7 +1,3 @@
-# from ._decorators import deco_unary_ufunc_from_impl
-# from ._detail import _ufunc_impl
-
-
 from typing import Optional
 
 import torch
@@ -44,7 +40,7 @@ def wrapped(
             shape = out.shape
             tensors = tuple(torch.broadcast_to(t, shape) for t in tensors)
 
         result = torch_func(*tensors)
-        return _helpers.result_or_out(result, out)
+        return result
 
     return wrapped
diff --git a/torch_np/_wrapper.py b/torch_np/_wrapper.py
index 1a9ed8d6..2b9246eb 100644
--- a/torch_np/_wrapper.py
+++ b/torch_np/_wrapper.py
@@ -8,7 +8,6 @@
 
 import torch
 
-from . import _decorators
 from . import _detail as _impl
 from . import _dtypes, _funcs, _helpers
 from ._detail import _dtypes_impl, _util
@@ -116,7 +115,7 @@ def concatenate(
 ):
     _concat_check(ar_tuple, dtype, out=out)
     result = _impl.concatenate(ar_tuple, axis, out, dtype, casting)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -167,7 +166,7 @@ def stack(
 ):
     _concat_check(arrays, dtype, out=out)
     result = _impl.stack(arrays, axis=axis, out=out, dtype=dtype, casting=casting)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 @normalizer
@@ -601,7 +600,7 @@ def inner(a: ArrayLike, b: ArrayLike, /):
 
 @normalizer
 def outer(a: ArrayLike, b: ArrayLike, out: Optional[NDArray] = None):
     result = torch.outer(a, b)
-    return _helpers.result_or_out(result, out)
+    return result
 
 
 # ### FIXME: this is a stub
diff --git a/torch_np/testing/utils.py b/torch_np/testing/utils.py
index 9dd321dc..292134fc 100644
--- a/torch_np/testing/utils.py
+++ b/torch_np/testing/utils.py
@@ -580,7 +580,6 @@ def func_assert_same_pos(x, y, func=isnan, hasval="nan"):
     """
     __tracebackhide__ = True  # Hide traceback for py.test
-
     x_id = func(x)
     y_id = func(y)
     # We include work-arounds here to handle three types of slightly
diff --git a/torch_np/tests/numpy_tests/core/test_multiarray.py b/torch_np/tests/numpy_tests/core/test_multiarray.py
index 4e513a4a..b44e89d2 100644
--- a/torch_np/tests/numpy_tests/core/test_multiarray.py
+++ b/torch_np/tests/numpy_tests/core/test_multiarray.py
@@ -2878,7 +2878,7 @@ def test_array_contains(self):
         assert_(4.0 in np.arange(16.).reshape(4,4))
         assert_(20.0 not in np.arange(16.).reshape(4,4))
 
-@pytest.mark.xfail(reason='TODO: true_divide')
+
 class TestBinop:
     def test_inplace(self):
         # test refcount 1 inplace conversion
@@ -2911,369 +2911,7 @@ def test_inplace(self):
         assert_equal(a, 5)
         assert_equal(b, 3)
 
-    # ndarray.__rop__ always calls ufunc
-    # ndarray.__iop__ always calls ufunc
-    # ndarray.__op__, __rop__:
-    #   - defer if other has __array_ufunc__ and it is None
-    #     or other is not a subclass and has higher array priority
-    #   - else, call ufunc
-    def test_ufunc_binop_interaction(self):
-        # Python method name (without underscores)
-        #   -> (numpy ufunc, has_in_place_version, preferred_dtype)
-        ops = {
-            'add':      (np.add, True, float),
-            'sub':      (np.subtract, True, float),
-            'mul':      (np.multiply, True, float),
-            'truediv':  (np.true_divide, True, float),
-            'floordiv': (np.floor_divide, True, float),
-            'mod':      (np.remainder, True, float),
-            'divmod':   (np.divmod, False, float),
-            'pow':      (np.power, True, int),
-            'lshift':   (np.left_shift, True, int),
-            'rshift':   (np.right_shift, True, int),
-            'and':      (np.bitwise_and, True, int),
-            'xor':      (np.bitwise_xor, True, int),
-            'or':       (np.bitwise_or, True, int),
-            'matmul':   (np.matmul, False, float),
-            # 'ge': (np.less_equal, False),
-            # 'gt': (np.less, False),
-            # 'le': (np.greater_equal, False),
-            # 'lt': (np.greater, False),
-            # 'eq': (np.equal, False),
-            # 'ne': (np.not_equal, False),
-        }
-
-        class Coerced(Exception):
-            pass
-
-        def array_impl(self):
-            raise Coerced
-
-        def op_impl(self, other):
-            return "forward"
-
-        def rop_impl(self, other):
-            return "reverse"
-
-        def iop_impl(self, other):
-            return "in-place"
-
-        def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
-            return ("__array_ufunc__", ufunc, method, args, kwargs)
-
-        # Create an object with the given base, in the given module, with a
-        # bunch of placeholder __op__ methods, and optionally a
-        # __array_ufunc__ and __array_priority__.
-        def make_obj(base, array_priority=False, array_ufunc=False,
-                     alleged_module="__main__"):
-            class_namespace = {"__array__": array_impl}
-            if array_priority is not False:
-                class_namespace["__array_priority__"] = array_priority
-            for op in ops:
-                class_namespace["__{0}__".format(op)] = op_impl
-                class_namespace["__r{0}__".format(op)] = rop_impl
-                class_namespace["__i{0}__".format(op)] = iop_impl
-            if array_ufunc is not False:
-                class_namespace["__array_ufunc__"] = array_ufunc
-            eval_namespace = {"base": base,
-                              "class_namespace": class_namespace,
-                              "__name__": alleged_module,
-                              }
-            MyType = eval("type('MyType', (base,), class_namespace)",
-                          eval_namespace)
-            if issubclass(MyType, np.ndarray):
-                # Use this range to avoid special case weirdnesses around
-                # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
-                return np.arange(3, 7).reshape(2, 2).view(MyType)
-            else:
-                return MyType()
-
-        def check(obj, binop_override_expected, ufunc_override_expected,
-                  inplace_override_expected, check_scalar=True):
-            for op, (ufunc, has_inplace, dtype) in ops.items():
-                err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
-                           % (op, ufunc, has_inplace, dtype))
-                check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
-                if check_scalar:
-                    check_objs.append(check_objs[0][0])
-                for arr in check_objs:
-                    arr_method = getattr(arr, "__{0}__".format(op))
-
-                    def first_out_arg(result):
-                        if op == "divmod":
-                            assert_(isinstance(result, tuple))
-                            return result[0]
-                        else:
-                            return result
-
-                    # arr __op__ obj
-                    if binop_override_expected:
-                        assert_equal(arr_method(obj), NotImplemented, err_msg)
-                    elif ufunc_override_expected:
-                        assert_equal(arr_method(obj)[0], "__array_ufunc__",
-                                     err_msg)
-                    else:
-                        if (isinstance(obj, np.ndarray) and
-                                (type(obj).__array_ufunc__ is
-                                 np.ndarray.__array_ufunc__)):
-                            # __array__ gets ignored
-                            res = first_out_arg(arr_method(obj))
-                            assert_(res.__class__ is obj.__class__, err_msg)
-                        else:
-                            assert_raises((TypeError, Coerced),
-                                          arr_method, obj, err_msg=err_msg)
-                    # obj __op__ arr
-                    arr_rmethod = getattr(arr, "__r{0}__".format(op))
-                    if ufunc_override_expected:
-                        res = arr_rmethod(obj)
-                        assert_equal(res[0], "__array_ufunc__",
-                                     err_msg=err_msg)
-                        assert_equal(res[1], ufunc, err_msg=err_msg)
-                    else:
-                        if (isinstance(obj, np.ndarray) and
-                                (type(obj).__array_ufunc__ is
-                                 np.ndarray.__array_ufunc__)):
-                            # __array__ gets ignored
-                            res = first_out_arg(arr_rmethod(obj))
-                            assert_(res.__class__ is obj.__class__, err_msg)
-                        else:
-                            # __array_ufunc__ = "asdf" creates a TypeError
-                            assert_raises((TypeError, Coerced),
-                                          arr_rmethod, obj, err_msg=err_msg)
-
-                    # arr __iop__ obj
-                    # array scalars don't have in-place operators
-                    if has_inplace and isinstance(arr, np.ndarray):
-                        arr_imethod = getattr(arr, "__i{0}__".format(op))
-                        if inplace_override_expected:
-                            assert_equal(arr_method(obj), NotImplemented,
-                                         err_msg=err_msg)
-                        elif ufunc_override_expected:
-                            res = arr_imethod(obj)
-                            assert_equal(res[0], "__array_ufunc__", err_msg)
-                            assert_equal(res[1], ufunc, err_msg)
-                            assert_(type(res[-1]["out"]) is tuple, err_msg)
-                            assert_(res[-1]["out"][0] is arr, err_msg)
-                        else:
-                            if (isinstance(obj, np.ndarray) and
-                                    (type(obj).__array_ufunc__ is
-                                     np.ndarray.__array_ufunc__)):
-                                # __array__ gets ignored
-                                assert_(arr_imethod(obj) is arr, err_msg)
-                            else:
-                                assert_raises((TypeError, Coerced),
-                                              arr_imethod, obj,
-                                              err_msg=err_msg)
-
-                    op_fn = getattr(operator, op, None)
-                    if op_fn is None:
-                        op_fn = getattr(operator, op + "_", None)
-                    if op_fn is None:
-                        op_fn = getattr(builtins, op)
-                    assert_equal(op_fn(obj, arr), "forward", err_msg)
-                    if not isinstance(obj, np.ndarray):
-                        if binop_override_expected:
-                            assert_equal(op_fn(arr, obj), "reverse", err_msg)
-                        elif ufunc_override_expected:
-                            assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
-                                         err_msg)
-                    if ufunc_override_expected:
-                        assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
-                                     err_msg)
-
-        # No array priority, no array_ufunc -> nothing called
-        check(make_obj(object), False, False, False)
-        # Negative array priority, no array_ufunc -> nothing called
-        # (has to be very negative, because scalar priority is -1000000.0)
-        check(make_obj(object, array_priority=-2**30), False, False, False)
-        # Positive array priority, no array_ufunc -> binops and iops only
-        check(make_obj(object, array_priority=1), True, False, True)
-        # ndarray ignores array_priority for ndarray subclasses
-        check(make_obj(np.ndarray, array_priority=1), False, False, False,
-              check_scalar=False)
-        # Positive array_priority and array_ufunc -> array_ufunc only
-        check(make_obj(object, array_priority=1,
-                       array_ufunc=array_ufunc_impl), False, True, False)
-        check(make_obj(np.ndarray, array_priority=1,
-                       array_ufunc=array_ufunc_impl), False, True, False)
-        # array_ufunc set to None -> defer binops only
-        check(make_obj(object, array_ufunc=None), True, False, False)
-        check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
-              check_scalar=False)
-
-    @pytest.mark.parametrize("priority", [None, "runtime error"])
-    def test_ufunc_binop_bad_array_priority(self, priority):
-        # Mainly checks that this does not crash. The second array has a lower
-        # priority than -1 ("error value"). If the __radd__ actually exists,
-        # bad things can happen (I think via the scalar paths).
-        # In principle both of these can probably just be errors in the future.
-        class BadPriority:
-            @property
-            def __array_priority__(self):
-                if priority == "runtime error":
-                    raise RuntimeError("RuntimeError in __array_priority__!")
-                return priority
-
-            def __radd__(self, other):
-                return "result"
-
-        class LowPriority(np.ndarray):
-            __array_priority__ = -1000
-
-        # Priority failure uses the same as scalars (smaller -1000). So the
-        # LowPriority wins with 'result' for each element (inner operation).
-        res = np.arange(3).view(LowPriority) + BadPriority()
-        assert res.shape == (3,)
-        assert res[0] == 'result'
-
-    def test_ufunc_override_normalize_signature(self):
-        # gh-5674
-        class SomeClass:
-            def __array_ufunc__(self, ufunc, method, *inputs, **kw):
-                return kw
-
-        a = SomeClass()
-        kw = np.add(a, [1])
-        assert_('sig' not in kw and 'signature' not in kw)
-        kw = np.add(a, [1], sig='ii->i')
-        assert_('sig' not in kw and 'signature' in kw)
-        assert_equal(kw['signature'], 'ii->i')
-        kw = np.add(a, [1], signature='ii->i')
-        assert_('sig' not in kw and 'signature' in kw)
-        assert_equal(kw['signature'], 'ii->i')
-
-    def test_array_ufunc_index(self):
-        # Check that index is set appropriately, also if only an output
-        # is passed on (latter is another regression tests for github bug 4753)
-        # This also checks implicitly that 'out' is always a tuple.
-        class CheckIndex:
-            def __array_ufunc__(self, ufunc, method, *inputs, **kw):
-                for i, a in enumerate(inputs):
-                    if a is self:
-                        return i
-                # calls below mean we must be in an output.
-                for j, a in enumerate(kw['out']):
-                    if a is self:
-                        return (j,)
-
-        a = CheckIndex()
-        dummy = np.arange(2.)
-
-        # 1 input, 1 output
-        assert_equal(np.sin(a), 0)
-        assert_equal(np.sin(dummy, a), (0,))
-        assert_equal(np.sin(dummy, out=a), (0,))
-        assert_equal(np.sin(dummy, out=(a,)), (0,))
-        assert_equal(np.sin(a, a), 0)
-        assert_equal(np.sin(a, out=a), 0)
-        assert_equal(np.sin(a, out=(a,)), 0)
-        # 1 input, 2 outputs
-        assert_equal(np.modf(dummy, a), (0,))
-        assert_equal(np.modf(dummy, None, a), (1,))
-        assert_equal(np.modf(dummy, dummy, a), (1,))
-        assert_equal(np.modf(dummy, out=(a, None)), (0,))
-        assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
-        assert_equal(np.modf(dummy, out=(None, a)), (1,))
-        assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
-        assert_equal(np.modf(a, out=(dummy, a)), 0)
-        with assert_raises(TypeError):
-            # Out argument must be tuple, since there are multiple outputs
-            np.modf(dummy, out=a)
-
-        assert_raises(ValueError, np.modf, dummy, out=(a,))
-
-        # 2 inputs, 1 output
-        assert_equal(np.add(a, dummy), 0)
-        assert_equal(np.add(dummy, a), 1)
-        assert_equal(np.add(dummy, dummy, a), (0,))
-        assert_equal(np.add(dummy, a, a), 1)
-        assert_equal(np.add(dummy, dummy, out=a), (0,))
-        assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
-        assert_equal(np.add(a, dummy, out=a), 0)
-
-    def test_out_override(self):
-        # regression test for github bug 4753
-        class OutClass(np.ndarray):
-            def __array_ufunc__(self, ufunc, method, *inputs, **kw):
-                if 'out' in kw:
-                    tmp_kw = kw.copy()
-                    tmp_kw.pop('out')
-                    func = getattr(ufunc, method)
-                    kw['out'][0][...] = func(*inputs, **tmp_kw)
-
-        A = np.array([0]).view(OutClass)
-        B = np.array([5])
-        C = np.array([6])
-        np.multiply(C, B, A)
-        assert_equal(A[0], 30)
-        assert_(isinstance(A, OutClass))
-        A[0] = 0
-        np.multiply(C, B, out=A)
-        assert_equal(A[0], 30)
-        assert_(isinstance(A, OutClass))
-
-    def test_pow_override_with_errors(self):
-        # regression test for gh-9112
-        class PowerOnly(np.ndarray):
-            def __array_ufunc__(self, ufunc, method, *inputs, **kw):
-                if ufunc is not np.power:
-                    raise NotImplementedError
-                return "POWER!"
-
-        # explicit cast to float, to ensure the fast power path is taken.
-        a = np.array(5., dtype=np.float64).view(PowerOnly)
-        assert_equal(a ** 2.5, "POWER!")
-        with assert_raises(NotImplementedError):
-            a ** 0.5
-        with assert_raises(NotImplementedError):
-            a ** 0
-        with assert_raises(NotImplementedError):
-            a ** 1
-        with assert_raises(NotImplementedError):
-            a ** -1
-        with assert_raises(NotImplementedError):
-            a ** 2
-
-    def test_pow_array_object_dtype(self):
-        # test pow on arrays of object dtype
-        class SomeClass:
-            def __init__(self, num=None):
-                self.num = num
-
-            # want to ensure a fast pow path is not taken
-            def __mul__(self, other):
-                raise AssertionError('__mul__ should not be called')
-
-            def __div__(self, other):
-                raise AssertionError('__div__ should not be called')
-
-            def __pow__(self, exp):
-                return SomeClass(num=self.num ** exp)
-
-            def __eq__(self, other):
-                if isinstance(other, SomeClass):
-                    return self.num == other.num
-
-            __rpow__ = __pow__
-
-        def pow_for(exp, arr):
-            return np.array([x ** exp for x in arr])
-
-        obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
-
-        assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
-        assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
-        assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
-        assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
-        assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
-
-    def test_pos_array_ufunc_override(self):
-        class A(np.ndarray):
-            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-                return getattr(ufunc, method)(*[i.view(np.ndarray) for
-                                                i in inputs], **kwargs)
-        tst = np.array('foo').view(A)
-        with assert_raises(TypeError):
-            +tst
+
 
 @pytest.mark.skip(reason='segfaults')
 class TestTemporaryElide:
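With this patch, the individual wrappers no longer copy into `out=` themselves: each returns the raw tensor, and `normalizer` binds `out` from the function signature and routes the result through `maybe_copy_to` (recursing over tuple results). A minimal usage sketch of the intended behavior -- illustrative only, not part of the patch, and assuming the package is importable as `torch_np`:

```python
import torch_np as tnp

a = tnp.arange(6).reshape(2, 3)
out = tnp.empty(3)

# sum() just returns a tensor; normalizer binds `out` from the signature,
# maybe_copy_to validates the shape, copies into out.tensor, and hands back
# the out array itself.
res = tnp.sum(a, axis=0, out=out)
assert res is out

# For two-output functions such as divmod, out is a tuple, and
# maybe_copy_to recurses over the paired (result, out) elements.
q, r = tnp.empty(2), tnp.empty(2)
q2, r2 = tnp.divmod(tnp.asarray([7.0, 8.0]), 3.0, out=(q, r))
assert q2 is q and r2 is r
```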