Skip to content

TST: skip -> xfail #46387

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
3 commits merged into the base branch on Mar 17, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pandas/tests/frame/methods/test_between_time.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@


class TestBetweenTime:
@td.skip_if_has_locale
@td.skip_if_not_us_locale
def test_between_time_formats(self, frame_or_series):
# GH#11818
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
Expand Down
9 changes: 6 additions & 3 deletions pandas/tests/groupby/test_quantile.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,13 @@
],
)
@pytest.mark.parametrize("q", [0, 0.25, 0.5, 0.75, 1])
def test_quantile(interpolation, a_vals, b_vals, q):
def test_quantile(interpolation, a_vals, b_vals, q, request):
if interpolation == "nearest" and q == 0.5 and b_vals == [4, 3, 2, 1]:
pytest.skip(
"Unclear numpy expectation for nearest result with equidistant data"
request.node.add_marker(
pytest.mark.xfail(
reason="Unclear numpy expectation for nearest "
"result with equidistant data"
)
)

a_expected = pd.Series(a_vals).quantile(q, interpolation=interpolation)
Expand Down
4 changes: 2 additions & 2 deletions pandas/tests/io/formats/test_series_info.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ def test_info_shows_dtypes():
assert name in res


@pytest.mark.skipif(PYPY, reason="on PyPy deep=True doesn't change result")
@pytest.mark.xfail(PYPY, reason="on PyPy deep=True doesn't change result")
def test_info_memory_usage_deep_not_pypy():
s_with_object_index = Series({"a": [1]}, index=["foo"])
assert s_with_object_index.memory_usage(
Expand All @@ -127,7 +127,7 @@ def test_info_memory_usage_deep_not_pypy():
assert s_object.memory_usage(deep=True) > s_object.memory_usage()


@pytest.mark.skipif(not PYPY, reason="on PyPy deep=True does not change result")
@pytest.mark.xfail(not PYPY, reason="on PyPy deep=True does not change result")
def test_info_memory_usage_deep_pypy():
s_with_object_index = Series({"a": [1]}, index=["foo"])
assert s_with_object_index.memory_usage(
Expand Down
6 changes: 4 additions & 2 deletions pandas/tests/io/json/test_ujson.py
Original file line number Diff line number Diff line change
Expand Up @@ -945,9 +945,11 @@ def test_array_numpy_labelled(self):


class TestPandasJSONTests:
def test_dataframe(self, orient, numpy):
def test_dataframe(self, request, orient, numpy):
if orient == "records" and numpy:
pytest.skip("Not idiomatic pandas")
request.node.add_marker(
pytest.mark.xfail(reason=f"Not idiomatic pandas if orient={orient}")
)

dtype = get_int32_compat_dtype(numpy, orient)

Expand Down
9 changes: 7 additions & 2 deletions pandas/tests/io/parser/common/test_read_errors.py
Original file line number Diff line number Diff line change
Expand Up @@ -241,11 +241,16 @@ def test_null_byte_char(all_parsers):


@td.check_file_leaks
def test_open_file(all_parsers):
def test_open_file(request, all_parsers):
# GH 39024
parser = all_parsers
if parser.engine == "c":
pytest.skip("'c' engine does not support sep=None with delim_whitespace=False")
request.node.add_marker(
pytest.mark.xfail(
reason=f"{parser.engine} engine does not support sep=None "
f"with delim_whitespace=False"
)
)

with tm.ensure_clean() as path:
file = Path(path)
Expand Down
24 changes: 16 additions & 8 deletions pandas/tests/io/parser/dtypes/test_dtypes_basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -191,31 +191,39 @@ def test_delimiter_with_usecols_and_parse_dates(all_parsers):


@pytest.mark.parametrize("thousands", ["_", None])
def test_decimal_and_exponential(python_parser_only, numeric_decimal, thousands):
def test_decimal_and_exponential(
request, python_parser_only, numeric_decimal, thousands
):
# GH#31920
decimal_number_check(python_parser_only, numeric_decimal, thousands, None)
decimal_number_check(request, python_parser_only, numeric_decimal, thousands, None)


@pytest.mark.parametrize("thousands", ["_", None])
@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
def test_1000_sep_decimal_float_precision(
c_parser_only, numeric_decimal, float_precision, thousands
request, c_parser_only, numeric_decimal, float_precision, thousands
):
# test decimal and thousand sep handling in across 'float_precision'
# parsers
decimal_number_check(c_parser_only, numeric_decimal, thousands, float_precision)
decimal_number_check(
request, c_parser_only, numeric_decimal, thousands, float_precision
)
text, value = numeric_decimal
text = " " + text + " "
if isinstance(value, str): # the negative cases (parse as text)
value = " " + value + " "
decimal_number_check(c_parser_only, (text, value), thousands, float_precision)
decimal_number_check(
request, c_parser_only, (text, value), thousands, float_precision
)


def decimal_number_check(parser, numeric_decimal, thousands, float_precision):
def decimal_number_check(request, parser, numeric_decimal, thousands, float_precision):
# GH#31920
value = numeric_decimal[0]
if thousands is None and "_" in value:
pytest.skip("Skip test if no thousands sep is defined and sep is in value")
if thousands is None and value in ("1_,", "1_234,56", "1_234,56e0"):
request.node.add_marker(
pytest.mark.xfail(reason=f"thousands={thousands} and sep is in {value}")
)
df = parser.read_csv(
StringIO(value),
float_precision=float_precision,
Expand Down
8 changes: 6 additions & 2 deletions pandas/tests/io/parser/test_compression.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,15 +91,19 @@ def test_zip_error_invalid_zip(parser_and_data):

@skip_pyarrow
@pytest.mark.parametrize("filename", [None, "test.{ext}"])
def test_compression(parser_and_data, compression_only, buffer, filename):
def test_compression(request, parser_and_data, compression_only, buffer, filename):
parser, data, expected = parser_and_data
compress_type = compression_only

ext = icom._compression_to_extension[compress_type]
filename = filename if filename is None else filename.format(ext=ext)

if filename and buffer:
pytest.skip("Cannot deduce compression from buffer of compressed data.")
request.node.add_marker(
pytest.mark.xfail(
reason="Cannot deduce compression from buffer of compressed data."
)
)

with tm.ensure_clean(filename=filename) as path:
tm.write_to_compressed(compress_type, path, data)
Expand Down
6 changes: 4 additions & 2 deletions pandas/tests/io/parser/test_unsupported.py
Original file line number Diff line number Diff line change
Expand Up @@ -191,11 +191,13 @@ def test_close_file_handle_on_invalid_usecols(all_parsers):
os.unlink(fname)


def test_invalid_file_inputs(all_parsers):
def test_invalid_file_inputs(request, all_parsers):
# GH#45957
parser = all_parsers
if parser.engine == "python":
pytest.skip("Python engine supports lists.")
request.node.add_marker(
pytest.mark.xfail(reason=f"{parser.engine} engine supports lists.")
)

with pytest.raises(ValueError, match="Invalid"):
parser.read_csv([])
13 changes: 9 additions & 4 deletions pandas/tests/io/test_sql.py
Original file line number Diff line number Diff line change
Expand Up @@ -1719,7 +1719,7 @@ def test_default_date_load(self):
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)

def test_datetime_with_timezone(self):
def test_datetime_with_timezone(self, request):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
Expand Down Expand Up @@ -1760,7 +1760,9 @@ def check(col):
# GH11216
df = read_sql_query("select * from types", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
request.node.add_marker(
pytest.mark.xfail(reason="no column with datetime with time zone")
)

# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
Expand All @@ -1772,7 +1774,9 @@ def check(col):
"select * from types", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
request.node.add_marker(
pytest.mark.xfail(reason="no column with datetime with time zone")
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
Expand Down Expand Up @@ -2275,8 +2279,9 @@ def test_get_engine_auto_error_message(self):


class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
@pytest.mark.xfail(reason="Nested transactions rollbacks don't work with Pandas")
def test_transactions(self):
pytest.skip("Nested transactions rollbacks don't work with Pandas")
super().test_transactions()


class _TestSQLiteAlchemy:
Expand Down
8 changes: 0 additions & 8 deletions pandas/tests/series/indexing/test_setitem.py
Original file line number Diff line number Diff line change
Expand Up @@ -759,21 +759,13 @@ def test_series_where(self, obj, key, expected, val, is_inplace):
self._check_inplace(is_inplace, orig, arr, obj)

def test_index_where(self, obj, key, expected, val):
if obj.dtype.kind == "c" or expected.dtype.kind == "c":
# TODO(Index[complex]): Should become unreachable
pytest.skip("test not applicable for this dtype")

mask = np.zeros(obj.shape, dtype=bool)
mask[key] = True

res = Index(obj).where(~mask, val)
tm.assert_index_equal(res, Index(expected, dtype=expected.dtype))

def test_index_putmask(self, obj, key, expected, val):
if obj.dtype.kind == "c" or expected.dtype.kind == "c":
# TODO(Index[complex]): Should become unreachable
pytest.skip("test not applicable for this dtype")

mask = np.zeros(obj.shape, dtype=bool)
mask[key] = True

Expand Down
6 changes: 4 additions & 2 deletions pandas/tests/series/test_arithmetic.py
Original file line number Diff line number Diff line change
Expand Up @@ -823,11 +823,13 @@ def test_series_inplace_ops(self, dtype1, dtype2, dtype_expected, dtype_mul):
tm.assert_series_equal(ser1, expected)


def test_none_comparison(series_with_simple_index):
def test_none_comparison(request, series_with_simple_index):
series = series_with_simple_index

if len(series) < 1:
pytest.skip("Test doesn't make sense on empty data")
request.node.add_marker(
pytest.mark.xfail(reason="Test doesn't make sense on empty data")
)

# bug brought up by #1079
# changed from TypeError in 0.17.0
Expand Down
29 changes: 1 addition & 28 deletions pandas/tests/series/test_constructors.py
Original file line number Diff line number Diff line change
Expand Up @@ -801,31 +801,6 @@ def test_constructor_floating_data_int_dtype(self, frame_or_series):
obj = frame_or_series(list(arr), dtype="i8")
tm.assert_equal(obj, expected)

@td.skip_if_no("dask")
def test_construct_dask_float_array_int_dtype_match_ndarray(self):
    # GH#40110 make sure we treat a float-dtype dask array with the same
    # rules we would for an ndarray
    import dask.dataframe as dd

    arr = np.array([1, 2.5, 3])
    darr = dd.from_array(arr)

    # With no dtype, Series construction from the dask array must match
    # construction from the equivalent ndarray.
    res = Series(darr)
    expected = Series(arr)
    tm.assert_series_equal(res, expected)

    # An explicit integer dtype must also behave identically for the
    # dask-backed and ndarray-backed inputs.
    res = Series(darr, dtype="i8")
    expected = Series(arr, dtype="i8")
    tm.assert_series_equal(res, expected)

    # Once the data contains NaN, the float->int cast is deprecated and
    # must raise the same FutureWarning for both input kinds.
    # NOTE(review): darr was built from arr before the NaN assignment —
    # this relies on dask sharing arr's underlying buffer; confirm.
    msg = "In a future version, passing float-dtype values containing NaN"
    arr[2] = np.nan
    with tm.assert_produces_warning(FutureWarning, match=msg):
        res = Series(darr, dtype="i8")
    with tm.assert_produces_warning(FutureWarning, match=msg):
        expected = Series(arr, dtype="i8")
    tm.assert_series_equal(res, expected)

def test_constructor_coerce_float_fail(self, any_int_numpy_dtype):
# see gh-15832
# Updated: make sure we treat this list the same as we would treat
Expand Down Expand Up @@ -1989,9 +1964,7 @@ def test_numpy_array(input_dict, expected):
tm.assert_numpy_array_equal(result, expected)


@pytest.mark.skipif(
not np_version_under1p19, reason="check failure on numpy below 1.19"
)
@pytest.mark.xfail(not np_version_under1p19, reason="check failure on numpy below 1.19")
def test_numpy_array_np_v1p19():
with pytest.raises(KeyError, match="0"):
np.array([Series({1: 1})])
4 changes: 2 additions & 2 deletions pandas/tests/strings/test_extract.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,13 +174,13 @@ def test_extract_expand_capture_groups(any_string_dtype):
tm.assert_frame_equal(result, expected)


def test_extract_expand_capture_groups_index(index, any_string_dtype):
def test_extract_expand_capture_groups_index(request, index, any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/6348
# not passing index to the extractor
data = ["A1", "B2", "C"]

if len(index) < len(data):
pytest.skip("Index too short")
request.node.add_marker(pytest.mark.xfail(reason="Index too short."))

index = index[: len(data)]
s = Series(data, index=index, dtype=any_string_dtype)
Expand Down
35 changes: 32 additions & 3 deletions pandas/tests/test_downstream.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,10 @@
import pandas.util._test_decorators as td

import pandas as pd
from pandas import DataFrame
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm

# geopandas, xarray, fsspec, fastparquet all produce these
Expand Down Expand Up @@ -70,7 +73,7 @@ def test_dask_ufunc():
import dask.array as da
import dask.dataframe as dd

s = pd.Series([1.5, 2.3, 3.7, 4.0])
s = Series([1.5, 2.3, 3.7, 4.0])
ds = dd.from_pandas(s, npartitions=2)

result = da.fix(ds).compute()
Expand All @@ -80,6 +83,32 @@ def test_dask_ufunc():
pd.set_option("compute.use_numexpr", olduse)


@td.skip_if_no("dask")
def test_construct_dask_float_array_int_dtype_match_ndarray():
    # GH#40110 make sure we treat a float-dtype dask array with the same
    # rules we would for an ndarray
    import dask.dataframe as dd

    arr = np.array([1, 2.5, 3])
    darr = dd.from_array(arr)

    # With no dtype, Series construction from the dask array must match
    # construction from the equivalent ndarray.
    res = Series(darr)
    expected = Series(arr)
    tm.assert_series_equal(res, expected)

    # An explicit integer dtype must also behave identically for the
    # dask-backed and ndarray-backed inputs.
    res = Series(darr, dtype="i8")
    expected = Series(arr, dtype="i8")
    tm.assert_series_equal(res, expected)

    # Once the data contains NaN, the float->int cast is deprecated and
    # must raise the same FutureWarning for both input kinds.
    # NOTE(review): darr was built from arr before the NaN assignment —
    # this relies on dask sharing arr's underlying buffer; confirm.
    msg = "In a future version, passing float-dtype values containing NaN"
    arr[2] = np.nan
    with tm.assert_produces_warning(FutureWarning, match=msg):
        res = Series(darr, dtype="i8")
    with tm.assert_produces_warning(FutureWarning, match=msg):
        expected = Series(arr, dtype="i8")
    tm.assert_series_equal(res, expected)


def test_xarray(df):

xarray = import_module("xarray") # noqa:F841
Expand Down Expand Up @@ -224,7 +253,7 @@ def test_torch_frame_construction(using_array_manager):
if not using_array_manager:
assert np.shares_memory(df, val_tensor)

ser = pd.Series(val_tensor[0])
ser = Series(val_tensor[0])
assert np.shares_memory(ser, val_tensor)


Expand Down
6 changes: 3 additions & 3 deletions pandas/tests/tools/test_to_datetime.py
Original file line number Diff line number Diff line change
Expand Up @@ -315,7 +315,7 @@ def test_to_datetime_format_microsecond(self, cache):
def test_to_datetime_format_time(self, cache, value, format, dt):
assert to_datetime(value, format=format, cache=cache) == dt

@td.skip_if_has_locale
@td.skip_if_not_us_locale
def test_to_datetime_with_non_exact(self, cache):
# GH 10834
# 8904
Expand Down Expand Up @@ -1738,7 +1738,7 @@ def test_to_datetime_with_space_in_series(self, cache):
result_ignore = to_datetime(ser, errors="ignore", cache=cache)
tm.assert_series_equal(result_ignore, ser)

@td.skip_if_has_locale
@td.skip_if_not_us_locale
def test_to_datetime_with_apply(self, cache):
# this is only locale tested with US/None locales
# GH 5195
Expand All @@ -1748,7 +1748,7 @@ def test_to_datetime_with_apply(self, cache):
result = td.apply(to_datetime, format="%b %y", cache=cache)
tm.assert_series_equal(result, expected)

@td.skip_if_has_locale
@td.skip_if_not_us_locale
def test_to_datetime_with_apply_with_empty_str(self, cache):
# this is only locale tested with US/None locales
# GH 5195
Expand Down
Loading