
Commit 9492b80

Authored and committed by mroeschke and yehoshuadimarsky
TST: skip -> xfail (pandas-dev#46387)
1 parent 6a206a8 · commit 9492b80

18 files changed: +98, -85 lines
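The recurring change in this commit replaces in-test pytest.skip(...) calls with an xfail marker applied at runtime via request.node.add_marker(pytest.mark.xfail(...)), so the affected parametrizations still execute and are reported as XFAIL (or XPASS) instead of being silently skipped. A minimal, self-contained sketch of that pattern (the test name, parameters, and condition are illustrative, not taken from the diff):

import pytest


@pytest.mark.parametrize("value", [1, 2])
def test_value_is_one(request, value):
    if value == 2:
        # Previously this branch would have been: pytest.skip("value 2 unsupported").
        # Attaching an xfail marker lets the case run and report XFAIL/XPASS.
        request.node.add_marker(
            pytest.mark.xfail(reason="value 2 is not supported yet")
        )
    assert value == 1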

pandas/tests/frame/methods/test_between_time.py (+1, -1)

@@ -18,7 +18,7 @@


 class TestBetweenTime:
-    @td.skip_if_has_locale
+    @td.skip_if_not_us_locale
     def test_between_time_formats(self, frame_or_series):
         # GH#11818
         rng = date_range("1/1/2000", "1/5/2000", freq="5min")

pandas/tests/groupby/test_quantile.py (+6, -3)

@@ -34,10 +34,13 @@
     ],
 )
 @pytest.mark.parametrize("q", [0, 0.25, 0.5, 0.75, 1])
-def test_quantile(interpolation, a_vals, b_vals, q):
+def test_quantile(interpolation, a_vals, b_vals, q, request):
     if interpolation == "nearest" and q == 0.5 and b_vals == [4, 3, 2, 1]:
-        pytest.skip(
-            "Unclear numpy expectation for nearest result with equidistant data"
+        request.node.add_marker(
+            pytest.mark.xfail(
+                reason="Unclear numpy expectation for nearest "
+                "result with equidistant data"
+            )
         )

     a_expected = pd.Series(a_vals).quantile(q, interpolation=interpolation)

pandas/tests/io/formats/test_series_info.py (+2, -2)

@@ -116,7 +116,7 @@ def test_info_shows_dtypes():
         assert name in res


-@pytest.mark.skipif(PYPY, reason="on PyPy deep=True doesn't change result")
+@pytest.mark.xfail(PYPY, reason="on PyPy deep=True doesn't change result")
 def test_info_memory_usage_deep_not_pypy():
     s_with_object_index = Series({"a": [1]}, index=["foo"])
     assert s_with_object_index.memory_usage(
@@ -127,7 +127,7 @@ def test_info_memory_usage_deep_not_pypy():
     assert s_object.memory_usage(deep=True) > s_object.memory_usage()


-@pytest.mark.skipif(not PYPY, reason="on PyPy deep=True does not change result")
+@pytest.mark.xfail(not PYPY, reason="on PyPy deep=True does not change result")
 def test_info_memory_usage_deep_pypy():
     s_with_object_index = Series({"a": [1]}, index=["foo"])
     assert s_with_object_index.memory_usage(
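Where the condition is known at import time, the same switch happens at the decorator level: pytest.mark.skipif(condition, reason=...) becomes pytest.mark.xfail(condition, reason=...), as in the two PYPY tests above. A hedged, self-contained illustration of that decorator form (the 32-bit condition is invented for the example, not the PYPY check):

import sys

import pytest


# With xfail the test still runs when the condition is true; a pass shows up
# as XPASS instead of the case being skipped outright.
@pytest.mark.xfail(sys.maxsize < 2**32, reason="expected to fail on 32-bit builds")
def test_maxsize_is_64_bit():
    assert sys.maxsize >= 2**32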

pandas/tests/io/json/test_ujson.py (+4, -2)

@@ -945,9 +945,11 @@ def test_array_numpy_labelled(self):


 class TestPandasJSONTests:
-    def test_dataframe(self, orient, numpy):
+    def test_dataframe(self, request, orient, numpy):
         if orient == "records" and numpy:
-            pytest.skip("Not idiomatic pandas")
+            request.node.add_marker(
+                pytest.mark.xfail(reason=f"Not idiomatic pandas if orient={orient}")
+            )

         dtype = get_int32_compat_dtype(numpy, orient)


pandas/tests/io/parser/common/test_read_errors.py (+7, -2)

@@ -241,11 +241,16 @@ def test_null_byte_char(all_parsers):


 @td.check_file_leaks
-def test_open_file(all_parsers):
+def test_open_file(request, all_parsers):
     # GH 39024
     parser = all_parsers
     if parser.engine == "c":
-        pytest.skip("'c' engine does not support sep=None with delim_whitespace=False")
+        request.node.add_marker(
+            pytest.mark.xfail(
+                reason=f"{parser.engine} engine does not support sep=None "
+                f"with delim_whitespace=False"
+            )
+        )

     with tm.ensure_clean() as path:
         file = Path(path)

pandas/tests/io/parser/dtypes/test_dtypes_basic.py (+16, -8)

@@ -191,31 +191,39 @@ def test_delimiter_with_usecols_and_parse_dates(all_parsers):


 @pytest.mark.parametrize("thousands", ["_", None])
-def test_decimal_and_exponential(python_parser_only, numeric_decimal, thousands):
+def test_decimal_and_exponential(
+    request, python_parser_only, numeric_decimal, thousands
+):
     # GH#31920
-    decimal_number_check(python_parser_only, numeric_decimal, thousands, None)
+    decimal_number_check(request, python_parser_only, numeric_decimal, thousands, None)


 @pytest.mark.parametrize("thousands", ["_", None])
 @pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
 def test_1000_sep_decimal_float_precision(
-    c_parser_only, numeric_decimal, float_precision, thousands
+    request, c_parser_only, numeric_decimal, float_precision, thousands
 ):
     # test decimal and thousand sep handling in across 'float_precision'
     # parsers
-    decimal_number_check(c_parser_only, numeric_decimal, thousands, float_precision)
+    decimal_number_check(
+        request, c_parser_only, numeric_decimal, thousands, float_precision
+    )
     text, value = numeric_decimal
     text = " " + text + " "
     if isinstance(value, str):  # the negative cases (parse as text)
         value = " " + value + " "
-    decimal_number_check(c_parser_only, (text, value), thousands, float_precision)
+    decimal_number_check(
+        request, c_parser_only, (text, value), thousands, float_precision
+    )


-def decimal_number_check(parser, numeric_decimal, thousands, float_precision):
+def decimal_number_check(request, parser, numeric_decimal, thousands, float_precision):
     # GH#31920
     value = numeric_decimal[0]
-    if thousands is None and "_" in value:
-        pytest.skip("Skip test if no thousands sep is defined and sep is in value")
+    if thousands is None and value in ("1_,", "1_234,56", "1_234,56e0"):
+        request.node.add_marker(
+            pytest.mark.xfail(reason=f"thousands={thousands} and sep is in {value}")
+        )
     df = parser.read_csv(
         StringIO(value),
         float_precision=float_precision,

pandas/tests/io/parser/test_compression.py (+6, -2)

@@ -91,15 +91,19 @@ def test_zip_error_invalid_zip(parser_and_data):

 @skip_pyarrow
 @pytest.mark.parametrize("filename", [None, "test.{ext}"])
-def test_compression(parser_and_data, compression_only, buffer, filename):
+def test_compression(request, parser_and_data, compression_only, buffer, filename):
     parser, data, expected = parser_and_data
     compress_type = compression_only

     ext = icom._compression_to_extension[compress_type]
     filename = filename if filename is None else filename.format(ext=ext)

     if filename and buffer:
-        pytest.skip("Cannot deduce compression from buffer of compressed data.")
+        request.node.add_marker(
+            pytest.mark.xfail(
+                reason="Cannot deduce compression from buffer of compressed data."
+            )
+        )

     with tm.ensure_clean(filename=filename) as path:
         tm.write_to_compressed(compress_type, path, data)

pandas/tests/io/parser/test_unsupported.py (+4, -2)

@@ -191,11 +191,13 @@ def test_close_file_handle_on_invalid_usecols(all_parsers):
     os.unlink(fname)


-def test_invalid_file_inputs(all_parsers):
+def test_invalid_file_inputs(request, all_parsers):
     # GH#45957
     parser = all_parsers
     if parser.engine == "python":
-        pytest.skip("Python engine supports lists.")
+        request.node.add_marker(
+            pytest.mark.xfail(reason=f"{parser.engine} engine supports lists.")
+        )

     with pytest.raises(ValueError, match="Invalid"):
         parser.read_csv([])

pandas/tests/io/test_sql.py (+9, -4)

@@ -1719,7 +1719,7 @@ def test_default_date_load(self):
         # MySQL SHOULD be converted.
         assert issubclass(df.DateCol.dtype.type, np.datetime64)

-    def test_datetime_with_timezone(self):
+    def test_datetime_with_timezone(self, request):
         # edge case that converts postgresql datetime with time zone types
         # to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
         # but should be more natural, so coerce to datetime64[ns] for now
@@ -1760,7 +1760,9 @@ def check(col):
         # GH11216
         df = read_sql_query("select * from types", self.conn)
         if not hasattr(df, "DateColWithTz"):
-            pytest.skip("no column with datetime with time zone")
+            request.node.add_marker(
+                pytest.mark.xfail(reason="no column with datetime with time zone")
+            )

         # this is parsed on Travis (linux), but not on macosx for some reason
         # even with the same versions of psycopg2 & sqlalchemy, possibly a
@@ -1772,7 +1774,9 @@ def check(col):
             "select * from types", self.conn, parse_dates=["DateColWithTz"]
         )
         if not hasattr(df, "DateColWithTz"):
-            pytest.skip("no column with datetime with time zone")
+            request.node.add_marker(
+                pytest.mark.xfail(reason="no column with datetime with time zone")
+            )
         col = df.DateColWithTz
         assert is_datetime64tz_dtype(col.dtype)
         assert str(col.dt.tz) == "UTC"
@@ -2275,8 +2279,9 @@ def test_get_engine_auto_error_message(self):


 class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
+    @pytest.mark.xfail(reason="Nested transactions rollbacks don't work with Pandas")
     def test_transactions(self):
-        pytest.skip("Nested transactions rollbacks don't work with Pandas")
+        super().test_transactions()


 class _TestSQLiteAlchemy:
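The _TestSQLAlchemyConn change above shows a third variant: an inherited test that used to skip itself is overridden with an unconditional xfail marker and simply delegates to super(), so the known failure keeps running and stays visible in reports. A rough sketch of that shape (the class names, connection attribute, and reason string are made up for illustration):

import pytest


class TestBase:
    connection = "direct"

    def test_transaction_rollback(self):
        # Stand-in for the inherited test body; passes for the base setup.
        assert self.connection == "direct"


class TestViaConnection(TestBase):
    connection = "through-connection"

    # Mirrors the _TestSQLAlchemyConn.test_transactions pattern: mark the
    # override as xfail and call the parent test instead of skipping it.
    @pytest.mark.xfail(reason="rollback not supported in this configuration")
    def test_transaction_rollback(self):
        super().test_transaction_rollback()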

pandas/tests/series/indexing/test_setitem.py (-8)

@@ -759,21 +759,13 @@ def test_series_where(self, obj, key, expected, val, is_inplace):
         self._check_inplace(is_inplace, orig, arr, obj)

     def test_index_where(self, obj, key, expected, val):
-        if obj.dtype.kind == "c" or expected.dtype.kind == "c":
-            # TODO(Index[complex]): Should become unreachable
-            pytest.skip("test not applicable for this dtype")
-
         mask = np.zeros(obj.shape, dtype=bool)
         mask[key] = True

         res = Index(obj).where(~mask, val)
         tm.assert_index_equal(res, Index(expected, dtype=expected.dtype))

     def test_index_putmask(self, obj, key, expected, val):
-        if obj.dtype.kind == "c" or expected.dtype.kind == "c":
-            # TODO(Index[complex]): Should become unreachable
-            pytest.skip("test not applicable for this dtype")
-
         mask = np.zeros(obj.shape, dtype=bool)
         mask[key] = True

pandas/tests/series/test_arithmetic.py (+4, -2)

@@ -823,11 +823,13 @@ def test_series_inplace_ops(self, dtype1, dtype2, dtype_expected, dtype_mul):
         tm.assert_series_equal(ser1, expected)


-def test_none_comparison(series_with_simple_index):
+def test_none_comparison(request, series_with_simple_index):
     series = series_with_simple_index

     if len(series) < 1:
-        pytest.skip("Test doesn't make sense on empty data")
+        request.node.add_marker(
+            pytest.mark.xfail(reason="Test doesn't make sense on empty data")
+        )

     # bug brought up by #1079
     # changed from TypeError in 0.17.0

pandas/tests/series/test_constructors.py (+1, -28)

@@ -801,31 +801,6 @@ def test_constructor_floating_data_int_dtype(self, frame_or_series):
         obj = frame_or_series(list(arr), dtype="i8")
         tm.assert_equal(obj, expected)

-    @td.skip_if_no("dask")
-    def test_construct_dask_float_array_int_dtype_match_ndarray(self):
-        # GH#40110 make sure we treat a float-dtype dask array with the same
-        # rules we would for an ndarray
-        import dask.dataframe as dd
-
-        arr = np.array([1, 2.5, 3])
-        darr = dd.from_array(arr)
-
-        res = Series(darr)
-        expected = Series(arr)
-        tm.assert_series_equal(res, expected)
-
-        res = Series(darr, dtype="i8")
-        expected = Series(arr, dtype="i8")
-        tm.assert_series_equal(res, expected)
-
-        msg = "In a future version, passing float-dtype values containing NaN"
-        arr[2] = np.nan
-        with tm.assert_produces_warning(FutureWarning, match=msg):
-            res = Series(darr, dtype="i8")
-        with tm.assert_produces_warning(FutureWarning, match=msg):
-            expected = Series(arr, dtype="i8")
-        tm.assert_series_equal(res, expected)
-
     def test_constructor_coerce_float_fail(self, any_int_numpy_dtype):
         # see gh-15832
         # Updated: make sure we treat this list the same as we would treat
@@ -1989,9 +1964,7 @@ def test_numpy_array(input_dict, expected):
     tm.assert_numpy_array_equal(result, expected)


-@pytest.mark.skipif(
-    not np_version_under1p19, reason="check failure on numpy below 1.19"
-)
+@pytest.mark.xfail(not np_version_under1p19, reason="check failure on numpy below 1.19")
 def test_numpy_array_np_v1p19():
     with pytest.raises(KeyError, match="0"):
         np.array([Series({1: 1})])

pandas/tests/strings/test_extract.py (+2, -2)

@@ -174,13 +174,13 @@ def test_extract_expand_capture_groups(any_string_dtype):
     tm.assert_frame_equal(result, expected)


-def test_extract_expand_capture_groups_index(index, any_string_dtype):
+def test_extract_expand_capture_groups_index(request, index, any_string_dtype):
     # https://github.com/pandas-dev/pandas/issues/6348
     # not passing index to the extractor
     data = ["A1", "B2", "C"]

     if len(index) < len(data):
-        pytest.skip("Index too short")
+        request.node.add_marker(pytest.mark.xfail(reason="Index too short."))

     index = index[: len(data)]
     s = Series(data, index=index, dtype=any_string_dtype)

pandas/tests/test_downstream.py (+32, -3)

@@ -11,7 +11,10 @@
 import pandas.util._test_decorators as td

 import pandas as pd
-from pandas import DataFrame
+from pandas import (
+    DataFrame,
+    Series,
+)
 import pandas._testing as tm

 # geopandas, xarray, fsspec, fastparquet all produce these
@@ -70,7 +73,7 @@ def test_dask_ufunc():
         import dask.array as da
         import dask.dataframe as dd

-        s = pd.Series([1.5, 2.3, 3.7, 4.0])
+        s = Series([1.5, 2.3, 3.7, 4.0])
         ds = dd.from_pandas(s, npartitions=2)

         result = da.fix(ds).compute()
@@ -80,6 +83,32 @@ def test_dask_ufunc():
         pd.set_option("compute.use_numexpr", olduse)


+@td.skip_if_no("dask")
+def test_construct_dask_float_array_int_dtype_match_ndarray():
+    # GH#40110 make sure we treat a float-dtype dask array with the same
+    # rules we would for an ndarray
+    import dask.dataframe as dd
+
+    arr = np.array([1, 2.5, 3])
+    darr = dd.from_array(arr)
+
+    res = Series(darr)
+    expected = Series(arr)
+    tm.assert_series_equal(res, expected)
+
+    res = Series(darr, dtype="i8")
+    expected = Series(arr, dtype="i8")
+    tm.assert_series_equal(res, expected)
+
+    msg = "In a future version, passing float-dtype values containing NaN"
+    arr[2] = np.nan
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        res = Series(darr, dtype="i8")
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        expected = Series(arr, dtype="i8")
+    tm.assert_series_equal(res, expected)
+
+
 def test_xarray(df):

     xarray = import_module("xarray")  # noqa:F841
@@ -224,7 +253,7 @@ def test_torch_frame_construction(using_array_manager):
     if not using_array_manager:
         assert np.shares_memory(df, val_tensor)

-    ser = pd.Series(val_tensor[0])
+    ser = Series(val_tensor[0])
     assert np.shares_memory(ser, val_tensor)


pandas/tests/tools/test_to_datetime.py (+3, -3)

@@ -315,7 +315,7 @@ def test_to_datetime_format_microsecond(self, cache):
     def test_to_datetime_format_time(self, cache, value, format, dt):
         assert to_datetime(value, format=format, cache=cache) == dt

-    @td.skip_if_has_locale
+    @td.skip_if_not_us_locale
     def test_to_datetime_with_non_exact(self, cache):
         # GH 10834
         # 8904
@@ -1738,7 +1738,7 @@ def test_to_datetime_with_space_in_series(self, cache):
         result_ignore = to_datetime(ser, errors="ignore", cache=cache)
         tm.assert_series_equal(result_ignore, ser)

-    @td.skip_if_has_locale
+    @td.skip_if_not_us_locale
     def test_to_datetime_with_apply(self, cache):
         # this is only locale tested with US/None locales
         # GH 5195
@@ -1748,7 +1748,7 @@ def test_to_datetime_with_apply(self, cache):
         result = td.apply(to_datetime, format="%b %y", cache=cache)
         tm.assert_series_equal(result, expected)

-    @td.skip_if_has_locale
+    @td.skip_if_not_us_locale
     def test_to_datetime_with_apply_with_empty_str(self, cache):
         # this is only locale tested with US/None locales
         # GH 5195
