Commit 6bc212f

Skip rather than xfail cases which clearly won't be implemented
1 parent 3c7db71 commit 6bc212f
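
For context on the change, a minimal pytest sketch of the distinction the commit message relies on. The test names below are made up for illustration and are not part of this repository: an xfail-marked test is still executed and reported as XFAIL (or XPASS if the behaviour ever changes), whereas a skip-marked test is never run, which is the appropriate choice when a divergence from NumPy clearly won't be implemented.

import pytest

@pytest.mark.xfail(reason="behaviour might change; pytest runs the test and reports XFAIL/XPASS")
def test_divergence_that_may_be_fixed():
    # Runs on every session; if it starts passing it shows up as XPASS.
    assert [1, 2][0:1] == [2]  # deliberately wrong, so this is reported as XFAIL

@pytest.mark.skip(reason="clearly won't be implemented; pytest never runs the test")
def test_divergence_that_wont_be_fixed():
    # Never executed, so it cannot silently flip to XPASS later.
    assert False

def test_imperative_forms():
    # The diff also converts the imperative calls: pytest.xfail("...") aborts the
    # test and records an expected failure, while pytest.skip("...") aborts it
    # and records a plain skip with no expectation attached.
    pytest.skip("illustrative skip, mirroring the calls changed in this commit")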

File tree

1 file changed: +18 -18 lines

torch_np/tests/numpy_tests/core/test_indexing.py

Lines changed: 18 additions & 18 deletions
@@ -92,7 +92,7 @@ def test_slicing_no_floats(self):
         # should still get the DeprecationWarning if step = 0.
         assert_raises(TypeError, lambda: a[::0.0])
 
-    @pytest.mark.xfail(reason="torch allows slicing with non-0d array components")
+    @pytest.mark.skip(reason="torch allows slicing with non-0d array components")
     def test_index_no_array_to_index(self):
         # No non-scalar arrays.
         a = np.array([[[1]]])
@@ -117,12 +117,12 @@ def test_empty_tuple_index(self):
         assert_equal(a[()], a)
         assert_(a[()].base is a)
         a = np.array(0)
-        pytest.xfail(
+        pytest.skip(
             "torch doesn't have scalar types with distinct instancing behaviours"
         )
         assert_(isinstance(a[()], np.int_))
 
-    @pytest.mark.xfail(reason="torch does not have an equivalent to np.void")
+    @pytest.mark.skip(reason="torch does not have an equivalent to np.void")
     def test_void_scalar_empty_tuple(self):
         s = np.zeros((), dtype='V4')
         assert_equal(s[()].dtype, s.dtype)
@@ -253,7 +253,7 @@ def f(a, v):
         assert_raises((ValueError, RuntimeError), f, a, [1, 2, 3])
         assert_raises((ValueError, RuntimeError), f, a[:1], [1, 2, 3])
 
-    @pytest.mark.xfail(reason="torch does not support object dtype")
+    @pytest.mark.skip(reason="torch does not support object dtype")
     def test_boolean_assignment_needs_api(self):
         # See also gh-7666
         # This caused a segfault on Python 2 due to the GIL not being
@@ -335,7 +335,7 @@ def test_uncontiguous_subspace_assignment(self):
 
         assert_equal(a, b)
 
-    @pytest.mark.xfail(reason="torch does not limit dims to 32")
+    @pytest.mark.skip(reason="torch does not limit dims to 32")
     def test_too_many_fancy_indices_special_case(self):
         # Just documents behaviour, this is a small limitation.
         a = np.ones((1,) * 32)  # 32 is NPY_MAXDIMS
@@ -453,7 +453,7 @@ def test_memory_order(self):
         a = a.reshape(-1, 1)
         assert_(a[b, 0].flags.f_contiguous)
 
-    @pytest.mark.xfail(reason="torch has no type distinct from a 0-d array")
+    @pytest.mark.skip(reason="torch has no type distinct from a 0-d array")
     def test_scalar_return_type(self):
         # Full scalar indices should return scalars and object
         # arrays should not call PyArray_Return on their items
@@ -596,7 +596,7 @@ def test_indexing_array_negative_strides(self):
         arr[slices] = 10
         assert_array_equal(arr, 10.)
 
-    @pytest.mark.xfail(reason="torch does not support character/string dtypes")
+    @pytest.mark.skip(reason="torch does not support character/string dtypes")
     def test_character_assignment(self):
         # This is an example a function going through CopyObject which
         # used to have an untested special path for scalars
@@ -615,8 +615,8 @@ def test_too_many_advanced_indices(self, index, num, original_ndim):
         # For `num=32` (and all boolean cases), the result is actually define;
         # but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons.
         if not (isinstance(index, np.ndarray) and original_ndim < num):
-            # non-xfail cases fail because of assigning too many indices
-            pytest.xfail("torch does not limit dims to 32")
+            # unskipped cases fail because of assigning too many indices
+            pytest.skip("torch does not limit dims to 32")
         arr = np.ones((1,) * original_ndim)
         with pytest.raises(IndexError):
             arr[(index,) * num]
@@ -654,7 +654,7 @@ def test_nontuple_ndindex(self):
         a = np.arange(25).reshape((5, 5))
         assert_equal(a[[0, 1]], np.array([a[0], a[1]]))
         assert_equal(a[[0, 1], [0, 1]], np.array([0, 6]))
-        pytest.xfail(
+        pytest.skip(
             "torch happily consumes non-tuple sequences with multi-axis "
             "indices (i.e. slices) as an index, whereas NumPy invalidates "
             "them, assumedly to keep things simple. This invalidation "
@@ -664,7 +664,7 @@ def test_nontuple_ndindex(self):
 
 
 class TestFieldIndexing:
-    @pytest.mark.xfail(reason="torch has no type distinct from a 0-d array")
+    @pytest.mark.skip(reason="torch has no type distinct from a 0-d array")
     def test_scalar_return_type(self):
         # Field access on an array should return an array, even if it
         # is 0-d.
@@ -834,7 +834,7 @@ def test_boolean_index_cast_assign(self):
         assert_equal(zero_array[0, 1], 0)
 
 class TestFancyIndexingEquivalence:
-    @pytest.mark.xfail(reason="torch does not support object dtype")
+    @pytest.mark.skip(reason="torch does not support object dtype")
     def test_object_assign(self):
         # Check that the field and object special case using copyto is active.
         # The right hand side cannot be converted to an array here.
@@ -867,7 +867,7 @@ def test_object_assign(self):
         arr[[0], ...] = [[[1], [2], [3], [4]]]
         assert_array_equal(arr, cmp_arr)
 
-    @pytest.mark.xfail(reason="torch does not support character/string dtypes")
+    @pytest.mark.skip(reason="torch does not support character/string dtypes")
     def test_cast_equivalence(self):
         # Yes, normal slicing uses unsafe casting.
         a = np.arange(5)
@@ -1346,7 +1346,7 @@ def test_non_integer_argument_errors(self):
         assert_raises(TypeError, np.take, a, [0], 1.)
         assert_raises(TypeError, np.take, a, [0], np.float64(1.))
 
-    @pytest.mark.xfail(
+    @pytest.mark.skip(
         reason=(
             "torch doesn't have scalar types with distinct element-wise behaviours"
         )
@@ -1381,7 +1381,7 @@ def test_bool_as_int_argument_errors(self):
         pytest.xfail("XXX: take not implemented")
         assert_raises(TypeError, np.take, args=(a, [0], False))
 
-        pytest.xfail("torch consumes boolean tensors as ints, no bother raising here")
+        pytest.skip("torch consumes boolean tensors as ints, no bother raising here")
         assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
         assert_raises(TypeError, operator.index, np.array(True))
 
@@ -1430,13 +1430,13 @@ def test_array_to_index_error(self):
         pytest.xfail("XXX: take not implemented")
         assert_raises(TypeError, np.take, a, [0], a)
 
-        pytest.xfail(
+        pytest.skip(
             "Multi-dimensional tensors are indexable just as long as they only "
             "contain a single element, no bother raising here"
         )
         assert_raises(TypeError, operator.index, np.array([1]))
 
-        pytest.xfail("torch consumes tensors as ints, no bother raising here")
+        pytest.skip("torch consumes tensors as ints, no bother raising here")
         assert_raises(TypeError, np.reshape, a, (a, -1))
 
 
@@ -1447,7 +1447,7 @@ class TestNonIntegerArrayLike:
     an integer.
 
     """
-    @pytest.mark.xfail(
+    @pytest.mark.skip(
         reason=(
             "torch consumes floats by way of falling back on its deprecated "
            "__index__ behaviour, no bother raising here"
