@@ -92,7 +92,7 @@ def test_slicing_no_floats(self):
        # should still get the DeprecationWarning if step = 0.
        assert_raises(TypeError, lambda: a[::0.0])

-    @pytest.mark.xfail(reason="torch allows slicing with non-0d array components")
+    @pytest.mark.skip(reason="torch allows slicing with non-0d array components")
    def test_index_no_array_to_index(self):
        # No non-scalar arrays.
        a = np.array([[[1]]])
@@ -117,12 +117,12 @@ def test_empty_tuple_index(self):
        assert_equal(a[()], a)
        assert_(a[()].base is a)
        a = np.array(0)
-        pytest.xfail(
+        pytest.skip(
            "torch doesn't have scalar types with distinct instancing behaviours"
        )
        assert_(isinstance(a[()], np.int_))

-    @pytest.mark.xfail(reason="torch does not have an equivalent to np.void")
+    @pytest.mark.skip(reason="torch does not have an equivalent to np.void")
    def test_void_scalar_empty_tuple(self):
        s = np.zeros((), dtype='V4')
        assert_equal(s[()].dtype, s.dtype)
@@ -253,7 +253,7 @@ def f(a, v):
        assert_raises((ValueError, RuntimeError), f, a, [1, 2, 3])
        assert_raises((ValueError, RuntimeError), f, a[:1], [1, 2, 3])

-    @pytest.mark.xfail(reason="torch does not support object dtype")
+    @pytest.mark.skip(reason="torch does not support object dtype")
    def test_boolean_assignment_needs_api(self):
        # See also gh-7666
        # This caused a segfault on Python 2 due to the GIL not being
@@ -335,7 +335,7 @@ def test_uncontiguous_subspace_assignment(self):

        assert_equal(a, b)

-    @pytest.mark.xfail(reason="torch does not limit dims to 32")
+    @pytest.mark.skip(reason="torch does not limit dims to 32")
    def test_too_many_fancy_indices_special_case(self):
        # Just documents behaviour, this is a small limitation.
        a = np.ones((1,) * 32)  # 32 is NPY_MAXDIMS
@@ -453,7 +453,7 @@ def test_memory_order(self):
        a = a.reshape(-1, 1)
        assert_(a[b, 0].flags.f_contiguous)

-    @pytest.mark.xfail(reason="torch has no type distinct from a 0-d array")
+    @pytest.mark.skip(reason="torch has no type distinct from a 0-d array")
    def test_scalar_return_type(self):
        # Full scalar indices should return scalars and object
        # arrays should not call PyArray_Return on their items
@@ -596,7 +596,7 @@ def test_indexing_array_negative_strides(self):
        arr[slices] = 10
        assert_array_equal(arr, 10.)

-    @pytest.mark.xfail(reason="torch does not support character/string dtypes")
+    @pytest.mark.skip(reason="torch does not support character/string dtypes")
    def test_character_assignment(self):
        # This is an example a function going through CopyObject which
        # used to have an untested special path for scalars
@@ -615,8 +615,8 @@ def test_too_many_advanced_indices(self, index, num, original_ndim):
        # For `num=32` (and all boolean cases), the result is actually define;
        # but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons.
        if not (isinstance(index, np.ndarray) and original_ndim < num):
-            # non-xfail cases fail because of assigning too many indices
-            pytest.xfail("torch does not limit dims to 32")
+            # unskipped cases fail because of assigning too many indices
+            pytest.skip("torch does not limit dims to 32")
        arr = np.ones((1,) * original_ndim)
        with pytest.raises(IndexError):
            arr[(index,) * num]
@@ -654,7 +654,7 @@ def test_nontuple_ndindex(self):
        a = np.arange(25).reshape((5, 5))
        assert_equal(a[[0, 1]], np.array([a[0], a[1]]))
        assert_equal(a[[0, 1], [0, 1]], np.array([0, 6]))
-        pytest.xfail(
+        pytest.skip(
            "torch happily consumes non-tuple sequences with multi-axis "
            "indices (i.e. slices) as an index, whereas NumPy invalidates "
            "them, assumedly to keep things simple. This invalidation "
@@ -664,7 +664,7 @@ def test_nontuple_ndindex(self):


class TestFieldIndexing:
-    @pytest.mark.xfail(reason="torch has no type distinct from a 0-d array")
+    @pytest.mark.skip(reason="torch has no type distinct from a 0-d array")
    def test_scalar_return_type(self):
        # Field access on an array should return an array, even if it
        # is 0-d.
@@ -834,7 +834,7 @@ def test_boolean_index_cast_assign(self):
        assert_equal(zero_array[0, 1], 0)

class TestFancyIndexingEquivalence:
-    @pytest.mark.xfail(reason="torch does not support object dtype")
+    @pytest.mark.skip(reason="torch does not support object dtype")
    def test_object_assign(self):
        # Check that the field and object special case using copyto is active.
        # The right hand side cannot be converted to an array here.
@@ -867,7 +867,7 @@ def test_object_assign(self):
        arr[[0], ...] = [[[1], [2], [3], [4]]]
        assert_array_equal(arr, cmp_arr)

-    @pytest.mark.xfail(reason="torch does not support character/string dtypes")
+    @pytest.mark.skip(reason="torch does not support character/string dtypes")
    def test_cast_equivalence(self):
        # Yes, normal slicing uses unsafe casting.
        a = np.arange(5)
@@ -1346,7 +1346,7 @@ def test_non_integer_argument_errors(self):
        assert_raises(TypeError, np.take, a, [0], 1.)
        assert_raises(TypeError, np.take, a, [0], np.float64(1.))

-    @pytest.mark.xfail(
+    @pytest.mark.skip(
        reason=(
            "torch doesn't have scalar types with distinct element-wise behaviours"
        )
@@ -1381,7 +1381,7 @@ def test_bool_as_int_argument_errors(self):
        pytest.xfail("XXX: take not implemented")
        assert_raises(TypeError, np.take, args=(a, [0], False))

-        pytest.xfail("torch consumes boolean tensors as ints, no bother raising here")
+        pytest.skip("torch consumes boolean tensors as ints, no bother raising here")
        assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
        assert_raises(TypeError, operator.index, np.array(True))

@@ -1430,13 +1430,13 @@ def test_array_to_index_error(self):
        pytest.xfail("XXX: take not implemented")
        assert_raises(TypeError, np.take, a, [0], a)

-        pytest.xfail(
+        pytest.skip(
            "Multi-dimensional tensors are indexable just as long as they only "
            "contain a single element, no bother raising here"
        )
        assert_raises(TypeError, operator.index, np.array([1]))

-        pytest.xfail("torch consumes tensors as ints, no bother raising here")
+        pytest.skip("torch consumes tensors as ints, no bother raising here")
        assert_raises(TypeError, np.reshape, a, (a, -1))


@@ -1447,7 +1447,7 @@ class TestNonIntegerArrayLike:
    an integer.

    """
-    @pytest.mark.xfail(
+    @pytest.mark.skip(
        reason=(
            "torch consumes floats by way of falling back on its deprecated "
            "__index__ behaviour, no bother raising here"
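Note on the change (not part of the patch above): every hunk swaps pytest.mark.xfail/pytest.xfail for pytest.mark.skip/pytest.skip. The practical difference is that an xfail-marked test still executes and is reported as XPASS if it unexpectedly passes, while a skipped test never runs at all; presumably that is why behaviours torch intentionally does not reproduce are marked as plain skips here. The minimal sketch below uses made-up test names and plain pytest to illustrate the distinction; it assumes nothing about the torch-specific tests themselves.

import pytest

@pytest.mark.xfail(reason="body still runs; an unexpected pass is reported as XPASS")
def test_marked_xfail():
    assert 1 + 1 == 3  # executed; the failure is recorded as XFAIL

@pytest.mark.skip(reason="body never runs; reported as SKIPPED")
def test_marked_skip():
    assert 1 + 1 == 3  # not executed at all

def test_imperative_skip():
    # calling pytest.skip() mid-test aborts it immediately,
    # just as the imperative pytest.xfail() calls above did
    pytest.skip("nothing below this line is reached")
    assert False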