Skip to content

Commit 7e90686

Browse files
Sarthak Vineet Kumar, jbrockmendel
Sarthak Vineet Kumar
authored and committed
CLN removing trailing commas (pandas-dev#36101)
1 parent 7b2d437 commit 7e90686

File tree

7 files changed

+13
-18
lines changed

7 files changed

+13
-18
lines changed

pandas/tests/io/test_sql.py

-3
Original file line numberDiff line numberDiff line change
@@ -2349,9 +2349,6 @@ def date_format(dt):
23492349

23502350

23512351
def format_query(sql, *args):
2352-
"""
2353-
2354-
"""
23552352
processed_args = []
23562353
for arg in args:
23572354
if isinstance(arg, float) and isna(arg):

pandas/tests/io/test_stata.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -1153,7 +1153,7 @@ def test_read_chunks_117(
11531153
from_frame = parsed.iloc[pos : pos + chunksize, :].copy()
11541154
from_frame = self._convert_categorical(from_frame)
11551155
tm.assert_frame_equal(
1156-
from_frame, chunk, check_dtype=False, check_datetimelike_compat=True,
1156+
from_frame, chunk, check_dtype=False, check_datetimelike_compat=True
11571157
)
11581158

11591159
pos += chunksize
@@ -1251,7 +1251,7 @@ def test_read_chunks_115(
12511251
from_frame = parsed.iloc[pos : pos + chunksize, :].copy()
12521252
from_frame = self._convert_categorical(from_frame)
12531253
tm.assert_frame_equal(
1254-
from_frame, chunk, check_dtype=False, check_datetimelike_compat=True,
1254+
from_frame, chunk, check_dtype=False, check_datetimelike_compat=True
12551255
)
12561256

12571257
pos += chunksize

pandas/tests/plotting/test_frame.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -1321,7 +1321,7 @@ def test_scatter_with_c_column_name_with_colors(self, cmap):
13211321

13221322
def test_plot_scatter_with_s(self):
13231323
# this refers to GH 32904
1324-
df = DataFrame(np.random.random((10, 3)) * 100, columns=["a", "b", "c"],)
1324+
df = DataFrame(np.random.random((10, 3)) * 100, columns=["a", "b", "c"])
13251325

13261326
ax = df.plot.scatter(x="a", y="b", s="c")
13271327
tm.assert_numpy_array_equal(df["c"].values, right=ax.collections[0].get_sizes())
@@ -1716,7 +1716,7 @@ def test_hist_df(self):
17161716
def test_hist_weights(self, weights):
17171717
# GH 33173
17181718
np.random.seed(0)
1719-
df = pd.DataFrame(dict(zip(["A", "B"], np.random.randn(2, 100,))))
1719+
df = pd.DataFrame(dict(zip(["A", "B"], np.random.randn(2, 100))))
17201720

17211721
ax1 = _check_plot_works(df.plot, kind="hist", weights=weights)
17221722
ax2 = _check_plot_works(df.plot, kind="hist")

pandas/tests/resample/test_datetime_index.py

+4-6
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,7 @@ def test_resample_integerarray():
124124

125125
result = ts.resample("3T").mean()
126126
expected = Series(
127-
[1, 4, 7], index=pd.date_range("1/1/2000", periods=3, freq="3T"), dtype="Int64",
127+
[1, 4, 7], index=pd.date_range("1/1/2000", periods=3, freq="3T"), dtype="Int64"
128128
)
129129
tm.assert_series_equal(result, expected)
130130

@@ -764,7 +764,7 @@ def test_resample_origin():
764764

765765

766766
@pytest.mark.parametrize(
767-
"origin", ["invalid_value", "epch", "startday", "startt", "2000-30-30", object()],
767+
"origin", ["invalid_value", "epch", "startday", "startt", "2000-30-30", object()]
768768
)
769769
def test_resample_bad_origin(origin):
770770
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
@@ -777,9 +777,7 @@ def test_resample_bad_origin(origin):
777777
ts.resample("5min", origin=origin)
778778

779779

780-
@pytest.mark.parametrize(
781-
"offset", ["invalid_value", "12dayys", "2000-30-30", object()],
782-
)
780+
@pytest.mark.parametrize("offset", ["invalid_value", "12dayys", "2000-30-30", object()])
783781
def test_resample_bad_offset(offset):
784782
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
785783
ts = Series(np.random.randn(len(rng)), index=rng)
@@ -1595,7 +1593,7 @@ def test_downsample_dst_at_midnight():
15951593
"America/Havana", ambiguous=True
15961594
)
15971595
dti = pd.DatetimeIndex(dti, freq="D")
1598-
expected = DataFrame([7.5, 28.0, 44.5], index=dti,)
1596+
expected = DataFrame([7.5, 28.0, 44.5], index=dti)
15991597
tm.assert_frame_equal(result, expected)
16001598

16011599

pandas/tests/reshape/merge/test_merge_index_as_string.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ def df2():
2929

3030
@pytest.fixture(params=[[], ["outer"], ["outer", "inner"]])
3131
def left_df(request, df1):
32-
""" Construct left test DataFrame with specified levels
32+
"""Construct left test DataFrame with specified levels
3333
(any of 'outer', 'inner', and 'v1')
3434
"""
3535
levels = request.param
@@ -41,7 +41,7 @@ def left_df(request, df1):
4141

4242
@pytest.fixture(params=[[], ["outer"], ["outer", "inner"]])
4343
def right_df(request, df2):
44-
""" Construct right test DataFrame with specified levels
44+
"""Construct right test DataFrame with specified levels
4545
(any of 'outer', 'inner', and 'v2')
4646
"""
4747
levels = request.param

pandas/tests/reshape/test_crosstab.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -354,7 +354,7 @@ def test_crosstab_normalize(self):
354354
crosstab(df.a, df.b, normalize="columns"),
355355
)
356356
tm.assert_frame_equal(
357-
crosstab(df.a, df.b, normalize=0), crosstab(df.a, df.b, normalize="index"),
357+
crosstab(df.a, df.b, normalize=0), crosstab(df.a, df.b, normalize="index")
358358
)
359359

360360
row_normal_margins = DataFrame(
@@ -377,7 +377,7 @@ def test_crosstab_normalize(self):
377377
crosstab(df.a, df.b, normalize="index", margins=True), row_normal_margins
378378
)
379379
tm.assert_frame_equal(
380-
crosstab(df.a, df.b, normalize="columns", margins=True), col_normal_margins,
380+
crosstab(df.a, df.b, normalize="columns", margins=True), col_normal_margins
381381
)
382382
tm.assert_frame_equal(
383383
crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins

pandas/tests/reshape/test_get_dummies.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,7 @@ def test_get_dummies_unicode(self, sparse):
161161
s = [e, eacute, eacute]
162162
res = get_dummies(s, prefix="letter", sparse=sparse)
163163
exp = DataFrame(
164-
{"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8,
164+
{"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8
165165
)
166166
if sparse:
167167
exp = exp.apply(SparseArray, fill_value=0)

0 commit comments

Comments
 (0)