Commit bc0c1d3

CLN: TODOs/noqas/xfails (#44586)
1 parent d5b958a commit bc0c1d3

17 files changed: +64 -90 lines changed

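Most of the diff below replaces bare `# noqa` comments with rule-scoped ones, drops stale TODOs and unused locals, and tightens or removes xfail markers. As a reference for the flake8 codes that recur throughout (F401 unused import, F841 unused local variable, E711 comparison to None), here is a minimal illustrative sketch; the function and variable names are made up and are not part of the commit:

# A bare "# noqa" silences every check on a line; "# noqa:<code>" silences only
# the named rule and leaves the rest of flake8 active.
try:
    import pandas._testing as tm
except ImportError:
    # F401: module imported but unused in this file (kept as a compatibility re-export)
    import pandas.util.testing as tm  # noqa:F401


def none_mask():
    ser = tm.makeTimeSeries()
    # F841: local variable assigned but never used (it would only be referenced
    # indirectly, e.g. from inside a query string evaluated elsewhere)
    threshold = 0  # noqa:F841
    # E711: comparison to None -- intended because Series overloads "==" elementwise
    return ser == None  # noqa:E711
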
asv_bench/benchmarks/pandas_vb_common.py  (+1 -1)

@@ -17,7 +17,7 @@
 try:
     import pandas._testing as tm
 except ImportError:
-    import pandas.util.testing as tm  # noqa
+    import pandas.util.testing as tm  # noqa:F401


 numeric_dtypes = [

pandas/core/tools/datetimes.py  (+1 -1)

@@ -28,7 +28,7 @@
     nat_strings,
     parsing,
 )
-from pandas._libs.tslibs.parsing import (  # noqa
+from pandas._libs.tslibs.parsing import (  # noqa:F401
     DateParseError,
     format_is_iso,
     guess_datetime_format,

pandas/io/parquet.py  (+1 -1)

@@ -151,7 +151,7 @@ def __init__(self):
         import pyarrow.parquet

         # import utils to register the pyarrow extension types
-        import pandas.core.arrays._arrow_utils  # noqa
+        import pandas.core.arrays._arrow_utils  # noqa:F401

         self.api = pyarrow

pandas/tests/arrays/categorical/test_constructors.py  (+3 -3)

@@ -236,14 +236,14 @@ def test_constructor(self):
         # - when the first is an integer dtype and the second is not
         # - when the resulting codes are all -1/NaN
         with tm.assert_produces_warning(None):
-            c_old = Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"])
+            Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"])

         with tm.assert_produces_warning(None):
-            c_old = Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5])  # noqa
+            Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5])

         # the next one are from the old docs
         with tm.assert_produces_warning(None):
-            c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])  # noqa
+            Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])
         cat = Categorical([1, 2], categories=[1, 2, 3])

         # this is a legitimate constructor

pandas/tests/arrays/sparse/test_libsparse.py  (+2 -3)

@@ -460,11 +460,10 @@ def test_check_integrity(self):
         lengths = []

         # 0-length OK
-        # TODO: index variables are not used...is that right?
-        index = BlockIndex(0, locs, lengths)
+        BlockIndex(0, locs, lengths)

         # also OK even though empty
-        index = BlockIndex(1, locs, lengths)  # noqa
+        BlockIndex(1, locs, lengths)

         msg = "Block 0 extends beyond end"
         with pytest.raises(ValueError, match=msg):

pandas/tests/extension/base/dim2.py  (+4 -6)

@@ -4,6 +4,7 @@
 import numpy as np
 import pytest

+from pandas._libs.missing import is_matching_na
 from pandas.compat import (
     IS64,
     is_platform_windows,
@@ -175,7 +176,7 @@ def test_reductions_2d_axis_none(self, data, method, request):
             assert type(err_result) == type(err_expected)
             return

-        assert result == expected  # TODO: or matching NA
+        assert is_matching_na(result, expected) or result == expected

     @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
     def test_reductions_2d_axis0(self, data, method, request):
@@ -254,8 +255,5 @@ def test_reductions_2d_axis1(self, data, method, request):
         # not necessarily type/dtype-preserving, so weaker assertions
         assert result.shape == (1,)
         expected_scalar = getattr(data, method)()
-        if pd.isna(result[0]):
-            # TODO: require matching NA
-            assert pd.isna(expected_scalar), expected_scalar
-        else:
-            assert result[0] == expected_scalar
+        res = result[0]
+        assert is_matching_na(res, expected_scalar) or res == expected_scalar

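For reference, `is_matching_na` (imported above from `pandas._libs.missing`) returns True only when both arguments are the same kind of missing value, so the `or result == expected` clause still handles ordinary equal values. A small sketch of the behavior relied on here (my reading of the helper, not part of the diff):

import numpy as np
import pandas as pd

from pandas._libs.missing import is_matching_na

assert is_matching_na(np.nan, np.nan)      # same NA kind -> True
assert is_matching_na(pd.NaT, pd.NaT)      # same NA kind -> True
assert not is_matching_na(np.nan, pd.NaT)  # different NA kinds -> False
assert not is_matching_na(np.nan, 1.0)     # NA vs. a real value -> False
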
pandas/tests/groupby/aggregate/test_aggregate.py  (+0 -2)

@@ -57,8 +57,6 @@ def test_agg_must_agg(df):


 def test_agg_ser_multi_key(df):
-    # TODO(wesm): unused
-    ser = df.C  # noqa

     f = lambda x: x.sum()
     results = df.C.groupby([df.A, df.B]).aggregate(f)

pandas/tests/indexing/test_coercion.py  (+3 -4)

@@ -780,7 +780,6 @@ def test_where_index_datetime(self, fill_val):

         self._assert_where_conversion(obj, cond, values, exp, exp_dtype)

-    @pytest.mark.xfail(reason="GH 22839: do not ignore timezone, must be object")
     def test_where_index_datetime64tz(self):
         fill_val = pd.Timestamp("2012-01-01", tz="US/Eastern")
         exp_dtype = object
@@ -795,9 +794,9 @@ def test_where_index_datetime64tz(self):
         assert obj.dtype == "datetime64[ns]"
         cond = pd.Index([True, False, True, False])

-        msg = "Index\\(\\.\\.\\.\\) must be called with a collection of some kind"
-        with pytest.raises(TypeError, match=msg):
-            obj.where(cond, fill_val)
+        res = obj.where(cond, fill_val)
+        expected = pd.Index([obj[0], fill_val, obj[2], fill_val], dtype=object)
+        tm.assert_index_equal(res, expected)

         values = pd.Index(pd.date_range(fill_val, periods=4))
         exp = pd.Index(

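The deleted xfail and the new assertions encode the behavior GH 22839 asked for: `where` on a tz-naive DatetimeIndex with a tz-aware fill value now casts to object instead of raising. A condensed sketch (the index values here are illustrative):

import pandas as pd

obj = pd.Index(pd.date_range("2011-01-01", periods=4))  # tz-naive datetime64[ns]
fill_val = pd.Timestamp("2012-01-01", tz="US/Eastern")
cond = pd.Index([True, False, True, False])

res = obj.where(cond, fill_val)
# Naive and tz-aware timestamps cannot share one datetime64 dtype, so the
# result holds both as objects rather than ignoring the timezone.
assert res.dtype == object
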
pandas/tests/io/pytables/test_append.py  (+1 -3)

@@ -896,9 +896,7 @@ def test_append_to_multiple_dropna(setup_path):
         tm.assert_index_equal(store.select("df1").index, store.select("df2").index)


-@pytest.mark.xfail(
-    run=False, reason="append_to_multiple_dropna_false is not raising as failed"
-)
+@pytest.mark.xfail(reason="append_to_multiple_dropna_false is not raising as failed")
 def test_append_to_multiple_dropna_false(setup_path):
     df1 = tm.makeTimeDataFrame()
     df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)

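Dropping `run=False` changes how pytest treats the test: the body is now executed on every run and merely expected to fail, instead of being skipped outright and reported as an unconditional xfail. A generic illustration (not pandas-specific):

import pytest


@pytest.mark.xfail(reason="executed every run; reported as xfail only if it actually fails")
def test_runs_but_is_expected_to_fail():
    assert 1 == 2


@pytest.mark.xfail(run=False, reason="never executed; for tests that would hang or crash")
def test_is_never_executed():
    raise SystemExit("unreachable")
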
pandas/tests/io/pytables/test_select.py  (+6 -6)

@@ -265,7 +265,7 @@ def test_select_dtypes(setup_path):
         expected = df[df["A"] > 0]

         store.append("df", df, data_columns=True)
-        np_zero = np.float64(0)  # noqa
+        np_zero = np.float64(0)  # noqa:F841
         result = store.select("df", where=["A>np_zero"])
         tm.assert_frame_equal(expected, result)

@@ -683,17 +683,17 @@ def test_frame_select_complex2(setup_path):
     expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")

     # scope with list like
-    l = selection.index.tolist()  # noqa
+    l0 = selection.index.tolist()  # noqa:F841
     store = HDFStore(hh)
-    result = store.select("df", where="l1=l")
+    result = store.select("df", where="l1=l0")
     tm.assert_frame_equal(result, expected)
     store.close()

-    result = read_hdf(hh, "df", where="l1=l")
+    result = read_hdf(hh, "df", where="l1=l0")
     tm.assert_frame_equal(result, expected)

     # index
-    index = selection.index  # noqa
+    index = selection.index  # noqa:F841
     result = read_hdf(hh, "df", where="l1=index")
     tm.assert_frame_equal(result, expected)

@@ -928,7 +928,7 @@ def test_query_compare_column_type(setup_path):
     with ensure_clean_store(setup_path) as store:
         store.append("test", df, format="table", data_columns=True)

-        ts = Timestamp("2014-01-01")  # noqa
+        ts = Timestamp("2014-01-01")  # noqa:F841
         result = store.select("test", where="real_date > ts")
         expected = df.loc[[1], :]
         tm.assert_frame_equal(expected, result)

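The renamed locals (`l0`, `index`, `ts`, `np_zero`) still look unused to flake8, hence `# noqa:F841`: `HDFStore.select` and `read_hdf` resolve names that appear inside a `where` string from the calling frame, so the only reference is inside a string literal. A small sketch of that pattern (the file name is illustrative and PyTables must be installed):

import numpy as np
import pandas as pd

df = pd.DataFrame({"A": np.arange(-2, 3)})

with pd.HDFStore("scope_demo.h5") as store:
    store.append("df", df, data_columns=True)

    # Referenced only inside the query string below, so flake8 flags it as unused.
    np_zero = np.float64(0)  # noqa:F841
    result = store.select("df", where=["A>np_zero"])

assert (result["A"] > 0).all()
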
pandas/tests/io/pytables/test_store.py  (+3 -1)

@@ -533,7 +533,9 @@ def test_same_name_scoping(setup_path):
         result = store.select("df", "index>datetime.datetime(2013,1,5)")
         tm.assert_frame_equal(result, expected)

-        from datetime import datetime  # noqa
+        # changes what 'datetime' points to in the namespace where
+        # 'select' does the lookup
+        from datetime import datetime  # noqa:F401

         # technically an error, but allow it
         result = store.select("df", "index>datetime.datetime(2013,1,5)")

pandas/tests/io/test_sql.py  (+1 -1)

@@ -1251,7 +1251,7 @@ def test_database_uri_string(self, test_frame1):
         # in sqlalchemy.create_engine -> test passing of this error to user
         try:
             # the rest of this test depends on pg8000's being absent
-            import pg8000  # noqa
+            import pg8000  # noqa:F401

             pytest.skip("pg8000 is installed")
         except ImportError:

pandas/tests/libs/test_join.py  (+4 -4)

@@ -112,8 +112,8 @@ def test_cython_right_outer_join(self):
         exp_rs = exp_rs.take(exp_ri)
         exp_rs[exp_ri == -1] = -1

-        tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
-        tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
+        tm.assert_numpy_array_equal(ls, exp_ls)
+        tm.assert_numpy_array_equal(rs, exp_rs)

     def test_cython_inner_join(self):
         left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.intp)
@@ -134,8 +134,8 @@ def test_cython_inner_join(self):
         exp_rs = exp_rs.take(exp_ri)
         exp_rs[exp_ri == -1] = -1

-        tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
-        tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
+        tm.assert_numpy_array_equal(ls, exp_ls)
+        tm.assert_numpy_array_equal(rs, exp_rs)


 @pytest.mark.parametrize("readonly", [True, False])

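`tm.assert_numpy_array_equal` compares dtypes by default (`check_dtype=True`), so dropping the keyword makes these assertions strictly tighter, presumably because both sides are now intp indexers. A tiny sketch of the default behavior:

import numpy as np

import pandas._testing as tm

left = np.array([0, 1, 2], dtype=np.intp)
right = np.array([0, 1, 2], dtype=np.intp)

# Passes: values and dtypes both match. With mismatched dtypes the default
# check would raise, which is exactly the stricter behavior the test now wants.
tm.assert_numpy_array_equal(left, right)
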
pandas/tests/plotting/test_boxplot_method.py  (+0 -1)

@@ -93,7 +93,6 @@ def test_boxplot_return_type_none(self):

     def test_boxplot_return_type_legacy(self):
         # API change in https://github.com/pandas-dev/pandas/pull/7096
-        import matplotlib as mpl  # noqa

         df = DataFrame(
             np.random.randn(6, 4),

pandas/tests/series/test_arithmetic.py  (+17 -42)

@@ -96,10 +96,10 @@ def _constructor(self):

     def test_flex_add_scalar_fill_value(self):
         # GH12723
-        s = Series([0, 1, np.nan, 3, 4, 5])
+        ser = Series([0, 1, np.nan, 3, 4, 5])

-        exp = s.fillna(0).add(2)
-        res = s.add(2, fill_value=0)
+        exp = ser.fillna(0).add(2)
+        res = ser.add(2, fill_value=0)
         tm.assert_series_equal(res, exp)

     pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
@@ -226,12 +226,12 @@ def test_add_na_handling(self):
         from datetime import date
         from decimal import Decimal

-        s = Series(
+        ser = Series(
             [Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
         )

-        result = s + s.shift(1)
-        result2 = s.shift(1) + s
+        result = ser + ser.shift(1)
+        result2 = ser.shift(1) + ser
         assert isna(result[0])
         assert isna(result2[0])

@@ -399,15 +399,12 @@ def test_ser_flex_cmp_return_dtypes_empty(self, opname):
         expected = np.dtype("bool")
         assert result == expected

-    @pytest.mark.parametrize(
-        "op",
-        [operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
-    )
     @pytest.mark.parametrize(
         "names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
     )
-    def test_ser_cmp_result_names(self, names, op):
+    def test_ser_cmp_result_names(self, names, comparison_op):
         # datetime64 dtype
+        op = comparison_op
         dti = date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
         ser = Series(dti).rename(names[1])
         result = op(ser, dti)
@@ -583,9 +580,10 @@ def test_comparison_tuples(self):
         expected = Series([False, False])
         tm.assert_series_equal(result, expected)

-        s = Series([frozenset([1]), frozenset([1, 2])])
+    def test_comparison_frozenset(self):
+        ser = Series([frozenset([1]), frozenset([1, 2])])

-        result = s == frozenset([1])
+        result = ser == frozenset([1])
         expected = Series([True, False])
         tm.assert_series_equal(result, expected)

@@ -649,8 +647,8 @@ def test_comp_ops_df_compat(self, left, right, frame_or_series):

     def test_compare_series_interval_keyword(self):
         # GH#25338
-        s = Series(["IntervalA", "IntervalB", "IntervalC"])
-        result = s == "IntervalA"
+        ser = Series(["IntervalA", "IntervalB", "IntervalC"])
+        result = ser == "IntervalA"
         expected = Series([True, False, False])
         tm.assert_series_equal(result, expected)

@@ -662,19 +660,6 @@ def test_compare_series_interval_keyword(self):


 class TestTimeSeriesArithmetic:
-    # TODO: De-duplicate with test below
-    def test_series_add_tz_mismatch_converts_to_utc_duplicate(self):
-        rng = date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
-        ser = Series(np.random.randn(len(rng)), index=rng)
-
-        ts_moscow = ser.tz_convert("Europe/Moscow")
-
-        result = ser + ts_moscow
-        assert result.index.tz is pytz.utc
-
-        result = ts_moscow + ser
-        assert result.index.tz is pytz.utc
-
     def test_series_add_tz_mismatch_converts_to_utc(self):
         rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")

@@ -697,16 +682,6 @@ def test_series_add_tz_mismatch_converts_to_utc(self):
         assert result.index.tz == pytz.UTC
         tm.assert_series_equal(result, expected)

-    # TODO: redundant with test_series_add_tz_mismatch_converts_to_utc?
-    def test_series_arithmetic_mismatched_tzs_convert_to_utc(self):
-        base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
-        idx1 = base.tz_convert("Asia/Tokyo")[:2]
-        idx2 = base.tz_convert("US/Eastern")[1:]
-
-        res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2)
-        expected = Series([np.nan, 3, np.nan], index=base)
-        tm.assert_series_equal(res, expected)
-
     def test_series_add_aware_naive_raises(self):
         rng = date_range("1/1/2011", periods=10, freq="H")
         ser = Series(np.random.randn(len(rng)), index=rng)
@@ -871,20 +846,20 @@ def test_none_comparison(series_with_simple_index):
     series.iloc[0] = np.nan

     # noinspection PyComparisonWithNone
-    result = series == None  # noqa
+    result = series == None  # noqa:E711
     assert not result.iat[0]
     assert not result.iat[1]

     # noinspection PyComparisonWithNone
-    result = series != None  # noqa
+    result = series != None  # noqa:E711
     assert result.iat[0]
     assert result.iat[1]

-    result = None == series  # noqa
+    result = None == series  # noqa:E711
     assert not result.iat[0]
     assert not result.iat[1]

-    result = None != series  # noqa
+    result = None != series  # noqa:E711
     assert result.iat[0]
     assert result.iat[1]

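Two notes on the changes above. First, `comparison_op` is a fixture from pandas' shared conftest that parametrizes over the six comparison operators, replacing the local `@pytest.mark.parametrize("op", ...)` list; roughly equivalent to this sketch (not the actual conftest definition):

import operator

import pytest


@pytest.fixture(
    params=[operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt]
)
def comparison_op(request):
    return request.param

Second, the `== None` / `!= None` comparisons in test_none_comparison are deliberate: they exercise pandas' elementwise comparison against None rather than an identity check, which is exactly what E711 normally flags. For example:

import numpy as np
import pandas as pd

ser = pd.Series([np.nan, "b"])

result = ser == None  # noqa:E711
assert not result.any()  # None never compares equal, not even to missing values

result = ser != None  # noqa:E711
assert result.all()
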
pandas/tests/test_downstream.py  (+10 -6)

@@ -32,8 +32,8 @@ def df():
 @pytest.mark.filterwarnings("ignore:.*64Index is deprecated:FutureWarning")
 def test_dask(df):

-    toolz = import_module("toolz")  # noqa
-    dask = import_module("dask")  # noqa
+    toolz = import_module("toolz")  # noqa:F841
+    dask = import_module("dask")  # noqa:F841

     import dask.dataframe as dd

@@ -44,7 +44,7 @@ def test_dask(df):

 def test_xarray(df):

-    xarray = import_module("xarray")  # noqa
+    xarray = import_module("xarray")  # noqa:F841

     assert df.to_xarray() is not None

@@ -109,7 +109,7 @@ def test_statsmodels():
 @pytest.mark.filterwarnings("ignore:can't:ImportWarning")
 def test_scikit_learn(df):

-    sklearn = import_module("sklearn")  # noqa
+    sklearn = import_module("sklearn")  # noqa:F841
     from sklearn import (
         datasets,
         svm,
@@ -133,10 +133,14 @@ def test_seaborn():

 def test_pandas_gbq(df):

-    pandas_gbq = import_module("pandas_gbq")  # noqa
+    pandas_gbq = import_module("pandas_gbq")  # noqa:F841


-@pytest.mark.xfail(reason="0.8.1 tries to import urlencode from pd.io.common")
+@pytest.mark.xfail(
+    raises=ValueError,
+    reason="The Quandl API key must be provided either through the api_key "
+    "variable or through the environmental variable QUANDL_API_KEY",
+)
 @tm.network
 def test_pandas_datareader():

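The rewritten xfail also pins the exception type: with `raises=ValueError`, a failure caused by any other exception is reported as a genuine failure instead of being absorbed as an expected one. A generic sketch of that pytest behavior:

import pytest


@pytest.mark.xfail(raises=ValueError, reason="only the missing-API-key error is expected")
def test_expected_failure_mode():
    raise ValueError("The API key must be provided")

If this test raised, say, a TypeError instead, pytest would report it as failed rather than xfailed.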