Commit 0bde569

jbrockmendel authored and jreback committed
CLN: Prune unnecessary internals (#27685)
1 parent 52362bf commit 0bde569

File tree: 14 files changed, +48 -66 lines

pandas/core/groupby/generic.py

+1 -1

@@ -671,7 +671,7 @@ def _transform_item_by_item(self, obj, wrapper):
             except Exception:
                 pass

-        if len(output) == 0:  # pragma: no cover
+        if len(output) == 0:
             raise TypeError("Transform function invalid for data types")

         columns = obj.columns
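Note: the only change here is dropping the `# pragma: no cover` marker, which tells coverage.py to exclude that line from its report; removing it means the test suite is now expected to exercise the branch. A minimal, self-contained illustration of the surrounding pattern (toy function, not pandas code):

def transform_all(funcs, value):
    # Collect results from every function that succeeds, mirroring the
    # "try each column, swallow failures" loop in _transform_item_by_item.
    output = {}
    for name, func in funcs.items():
        try:
            output[name] = func(value)
        except Exception:
            pass

    # With "# pragma: no cover" removed, coverage now counts this branch
    # instead of silently excluding it from the report.
    if len(output) == 0:
        raise TypeError("Transform function invalid for data types")
    return output

For example, transform_all({"sq": lambda x: x ** 2}, 3) returns {"sq": 9}, while transform_all({}, 3) raises the TypeError.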

pandas/core/groupby/groupby.py

+3 -3

@@ -1206,7 +1206,7 @@ def mean(self, *args, **kwargs):
             )
         except GroupByError:
             raise
-        except Exception:  # pragma: no cover
+        except Exception:
             with _group_selection_context(self):
                 f = lambda x: x.mean(axis=self.axis, **kwargs)
                 return self._python_agg_general(f)
@@ -1232,7 +1232,7 @@ def median(self, **kwargs):
             )
         except GroupByError:
             raise
-        except Exception:  # pragma: no cover
+        except Exception:

             def f(x):
                 if isinstance(x, np.ndarray):
@@ -2470,7 +2470,7 @@ def groupby(obj, by, **kwds):
         from pandas.core.groupby.generic import DataFrameGroupBy

         klass = DataFrameGroupBy
-    else:  # pragma: no cover
+    else:
         raise TypeError("invalid type: {}".format(obj))

     return klass(obj, by, **kwds)
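The last hunk keeps the dispatch-by-type shape of the module-level groupby() helper while letting the else branch count toward coverage. A rough sketch of that pattern, with string stand-ins for the real SeriesGroupBy/DataFrameGroupBy classes:

import pandas as pd


def dispatch_groupby(obj, by):
    # Sketch of the dispatch in pandas.core.groupby.groupby.groupby():
    # pick a grouper "class" based on the type of obj, otherwise raise.
    if isinstance(obj, pd.Series):
        klass = "SeriesGroupBy"      # stand-in for the real class
    elif isinstance(obj, pd.DataFrame):
        klass = "DataFrameGroupBy"   # stand-in for the real class
    else:
        # No longer excluded from coverage, so a test can hit it directly.
        raise TypeError("invalid type: {}".format(obj))
    return klass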

pandas/core/internals/blocks.py

+3 -5

@@ -760,7 +760,7 @@ def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
         values[mask] = na_rep
         return values

-    # block actions ####
+    # block actions #
     def copy(self, deep=True):
         """ copy constructor """
         values = self.values
@@ -1538,16 +1538,14 @@ def quantile(self, qs, interpolation="linear", axis=0):
             ).reshape(len(values), len(qs))
         else:
             # asarray needed for Sparse, see GH#24600
-            # Note: we use self.values below instead of values because the
-            # `asi8` conversion above will behave differently under `isna`
-            mask = np.asarray(isna(self.values))
+            mask = np.asarray(isna(values))
             result = nanpercentile(
                 values,
                 np.array(qs) * 100,
                 axis=axis,
                 na_value=self.fill_value,
                 mask=mask,
-                ndim=self.ndim,
+                ndim=values.ndim,
                 interpolation=interpolation,
             )
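The deleted comment described a workaround that is no longer needed: the mask is now computed from the same values array that is handed to nanpercentile, and the ndim comes from that array as well. Conceptually (a rough numpy-only sketch, not the internal nanpercentile signature):

import numpy as np

# Toy 2-D block of values with missing entries.
values = np.array([[1.0, np.nan, 3.0],
                   [4.0, 5.0, np.nan]])
qs = [0.25, 0.5, 0.75]

# Mask derived from the same array the percentiles are computed on,
# mirroring `mask = np.asarray(isna(values))` in the new code.
mask = np.isnan(values)

# Per-row percentiles ignoring masked entries.
result = np.array(
    [np.percentile(row[~m], np.array(qs) * 100) for row, m in zip(values, mask)]
)
print(result.shape)  # (2, 3): len(values) rows by len(qs) quantiles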

pandas/core/internals/managers.py

-2

@@ -975,8 +975,6 @@ def iget(self, i):
         """
         block = self.blocks[self._blknos[i]]
         values = block.iget(self._blklocs[i])
-        if values.ndim != 1:
-            return values

         # shortcut for select a single-dim from a 2-dim BM
         return SingleBlockManager(
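The deleted branch guarded a values.ndim != 1 case that can no longer occur: taking a single slot out of a 2-D block yields a 1-D array, so iget can unconditionally wrap the result in a SingleBlockManager. A small numpy illustration of that invariant (toy arrays, not the Block API):

import numpy as np

# A 2-D block of values, as stored in block layout.
block_values = np.arange(12).reshape(3, 4)

# What block.iget(i) conceptually does: take one row of the 2-D block.
values = block_values[1]

# The removed early return relied on this never failing.
assert values.ndim == 1
print(values)  # [4 5 6 7]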

pandas/io/clipboards.py

+1 -1

@@ -121,7 +121,7 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs):  # pragma: no cover
             return
         except TypeError:
             warnings.warn(
-                "to_clipboard in excel mode requires a single " "character separator."
+                "to_clipboard in excel mode requires a single character separator."
             )
     elif sep is not None:
         warnings.warn("to_clipboard with excel=False ignores the sep argument")
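Most of the remaining hunks in this commit apply the same cleanup: adjacent string literals, left over from earlier line wrapping, are joined into a single literal. Python concatenates adjacent literals at compile time, so the change is purely cosmetic, as this quick check shows:

# Implicit concatenation of adjacent literals happens at compile time,
# so both spellings produce exactly the same message string.
split_form = "to_clipboard in excel mode requires a single " "character separator."
joined_form = "to_clipboard in excel mode requires a single character separator."
assert split_form == joined_form

# The joined form is easier to grep for and avoids accidental double or
# missing spaces at the seam between the two fragments.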

pandas/io/excel/_base.py

+4 -6

@@ -297,7 +297,7 @@ def read_excel(
     for arg in ("sheet", "sheetname", "parse_cols"):
         if arg in kwds:
             raise TypeError(
-                "read_excel() got an unexpected keyword argument " "`{}`".format(arg)
+                "read_excel() got an unexpected keyword argument `{}`".format(arg)
             )

     if not isinstance(io, ExcelFile):
@@ -353,7 +353,7 @@ def __init__(self, filepath_or_buffer):
             self.book = self.load_workbook(filepath_or_buffer)
         else:
             raise ValueError(
-                "Must explicitly set engine if not passing in" " buffer or path for io."
+                "Must explicitly set engine if not passing in buffer or path for io."
             )

     @property
@@ -713,9 +713,7 @@ def _get_sheet_name(self, sheet_name):
         if sheet_name is None:
             sheet_name = self.cur_sheet
         if sheet_name is None:  # pragma: no cover
-            raise ValueError(
-                "Must pass explicit sheet_name or set " "cur_sheet property"
-            )
+            raise ValueError("Must pass explicit sheet_name or set cur_sheet property")
         return sheet_name

     def _value_with_fmt(self, val):
@@ -851,7 +849,7 @@ def parse(
         """
         if "chunksize" in kwds:
             raise NotImplementedError(
-                "chunksize keyword of read_excel " "is not implemented"
+                "chunksize keyword of read_excel is not implemented"
             )

         return self._reader.parse(

pandas/io/feather_format.py

+1 -1

@@ -53,7 +53,7 @@ def to_feather(df, path):

     if df.index.name is not None:
         raise ValueError(
-            "feather does not serialize index meta-data on a " "default index"
+            "feather does not serialize index meta-data on a default index"
         )

     # validate columns

pandas/io/formats/csvs.py

+2 -4

@@ -96,9 +96,7 @@ def __init__(
         # validate mi options
         if self.has_mi_columns:
             if cols is not None:
-                raise TypeError(
-                    "cannot specify cols with a MultiIndex on the " "columns"
-                )
+                raise TypeError("cannot specify cols with a MultiIndex on the columns")

         if cols is not None:
             if isinstance(cols, ABCIndexClass):
@@ -158,7 +156,7 @@ def save(self):
         """
         # GH21227 internal compression is not used when file-like passed.
         if self.compression and hasattr(self.path_or_buf, "write"):
-            msg = "compression has no effect when passing file-like " "object as input."
+            msg = "compression has no effect when passing file-like object as input."
             warnings.warn(msg, RuntimeWarning, stacklevel=2)

         # when zip compression is called.

pandas/io/formats/format.py

+4 -6

@@ -2,9 +2,10 @@
 Internal module for formatting output data in csv, html,
 and latex files. This module also applies to display formatting.
 """
-
+import decimal
 from functools import partial
 from io import StringIO
+import math
 import re
 from shutil import get_terminal_size
 from typing import (
@@ -862,7 +863,7 @@ def to_latex(
             with codecs.open(self.buf, "w", encoding=encoding) as f:
                 latex_renderer.write_result(f)
         else:
-            raise TypeError("buf is not a file name and it has no write " "method")
+            raise TypeError("buf is not a file name and it has no write method")

     def _format_col(self, i: int) -> List[str]:
         frame = self.tr_frame
@@ -907,7 +908,7 @@ def to_html(
             with open(self.buf, "w") as f:
                 buffer_put_lines(f, html)
         else:
-            raise TypeError("buf is not a file name and it has no write " " method")
+            raise TypeError("buf is not a file name and it has no write method")

     def _get_formatted_column_labels(self, frame: "DataFrame") -> List[List[str]]:
         from pandas.core.index import _sparsify
@@ -1782,9 +1783,6 @@ def __call__(self, num: Union[int, float]) -> str:

         @return: engineering formatted string
         """
-        import decimal
-        import math
-
         dnum = decimal.Decimal(str(num))

         if decimal.Decimal.is_nan(dnum):
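Besides joining split string literals (and fixing the accidental double space in the to_html message), these hunks hoist decimal and math from function-local imports inside EngFormatter.__call__ up to module level, so the import machinery runs once at module import rather than on every formatted value. A toy sketch of the pattern, not the real EngFormatter:

import decimal  # previously imported inside the method on every call
import math


def eng_exponent(num):
    """Toy stand-in: nearest engineering exponent (a multiple of 3)."""
    dnum = decimal.Decimal(str(num))
    if dnum == 0 or decimal.Decimal.is_nan(dnum):
        return 0
    return 3 * int(math.floor(math.log10(abs(dnum)) / 3))


print(eng_exponent(12_500))  # 3  -> 12.5E+03
print(eng_exponent(0.004))   # -3 -> 4.0E-03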

pandas/io/parsers.py

+11 -13

@@ -687,7 +687,7 @@ def parser_f(
 read_csv = Appender(
     _doc_read_csv_and_table.format(
         func_name="read_csv",
-        summary=("Read a comma-separated values (csv) file " "into DataFrame."),
+        summary=("Read a comma-separated values (csv) file into DataFrame."),
         _default_sep="','",
     )
 )(read_csv)
@@ -770,7 +770,7 @@ def read_fwf(
     if colspecs is None and widths is None:
         raise ValueError("Must specify either colspecs or widths")
     elif colspecs not in (None, "infer") and widths is not None:
-        raise ValueError("You must specify only one of 'widths' and " "'colspecs'")
+        raise ValueError("You must specify only one of 'widths' and 'colspecs'")

     # Compute 'colspecs' from 'widths', if specified.
     if widths is not None:
@@ -901,9 +901,7 @@ def _get_options_with_defaults(self, engine):

             # see gh-12935
             if argname == "mangle_dupe_cols" and not value:
-                raise ValueError(
-                    "Setting mangle_dupe_cols=False is " "not supported yet"
-                )
+                raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
             else:
                 options[argname] = value

@@ -942,7 +940,7 @@ def _check_file_or_buffer(self, f, engine):
         # needs to have that attribute ("next" for Python 2.x, "__next__"
         # for Python 3.x)
         if engine != "c" and not hasattr(f, next_attr):
-            msg = "The 'python' engine cannot iterate " "through this file buffer."
+            msg = "The 'python' engine cannot iterate through this file buffer."
             raise ValueError(msg)

         return engine
@@ -959,7 +957,7 @@ def _clean_options(self, options, engine):
         # C engine not supported yet
         if engine == "c":
             if options["skipfooter"] > 0:
-                fallback_reason = "the 'c' engine does not support" " skipfooter"
+                fallback_reason = "the 'c' engine does not support skipfooter"
                 engine = "python"

         encoding = sys.getfilesystemencoding() or "utf-8"
@@ -1397,11 +1395,11 @@ def __init__(self, kwds):
                 raise ValueError("header must be integer or list of integers")
             if kwds.get("usecols"):
                 raise ValueError(
-                    "cannot specify usecols when " "specifying a multi-index header"
+                    "cannot specify usecols when specifying a multi-index header"
                 )
             if kwds.get("names"):
                 raise ValueError(
-                    "cannot specify names when " "specifying a multi-index header"
+                    "cannot specify names when specifying a multi-index header"
                 )

         # validate index_col that only contains integers
@@ -1611,7 +1609,7 @@ def _get_name(icol):

            if col_names is None:
                raise ValueError(
-                    ("Must supply column order to use {icol!s} " "as index").format(
+                    ("Must supply column order to use {icol!s} as index").format(
                        icol=icol
                    )
                )
@@ -2379,7 +2377,7 @@ def _make_reader(self, f):
         if sep is None or len(sep) == 1:
             if self.lineterminator:
                 raise ValueError(
-                    "Custom line terminators not supported in " "python parser (yet)"
+                    "Custom line terminators not supported in python parser (yet)"
                 )

             class MyDialect(csv.Dialect):
@@ -2662,7 +2660,7 @@ def _infer_columns(self):
                         "number of header fields in the file"
                     )
                 if len(columns) > 1:
-                    raise TypeError("Cannot pass names with multi-index " "columns")
+                    raise TypeError("Cannot pass names with multi-index columns")

                 if self.usecols is not None:
                     # Set _use_cols. We don't store columns because they are
@@ -2727,7 +2725,7 @@ def _handle_usecols(self, columns, usecols_key):
         elif any(isinstance(u, str) for u in self.usecols):
             if len(columns) > 1:
                 raise ValueError(
-                    "If using multiple headers, usecols must " "be integers."
+                    "If using multiple headers, usecols must be integers."
                 )
             col_indices = []

pandas/io/pytables.py

+9 -11

@@ -366,7 +366,7 @@ def read_hdf(path_or_buf, key=None, mode="r", **kwargs):
         path_or_buf = _stringify_path(path_or_buf)
         if not isinstance(path_or_buf, str):
             raise NotImplementedError(
-                "Support for generic buffers has not " "been implemented."
+                "Support for generic buffers has not been implemented."
             )
         try:
             exists = os.path.exists(path_or_buf)
@@ -1047,7 +1047,7 @@ def append(
         """
         if columns is not None:
             raise TypeError(
-                "columns is not a supported keyword in append, " "try data_columns"
+                "columns is not a supported keyword in append, try data_columns"
             )

         if dropna is None:
@@ -2161,7 +2161,7 @@ def set_atom(
                 # which is an error

                 raise TypeError(
-                    "too many timezones in this block, create separate " "data columns"
+                    "too many timezones in this block, create separate data columns"
                 )
             elif inferred_type == "unicode":
                 raise TypeError("[unicode] is not implemented as a table column")
@@ -2338,9 +2338,7 @@ def validate_attr(self, append):
         if append:
             existing_fields = getattr(self.attrs, self.kind_attr, None)
             if existing_fields is not None and existing_fields != list(self.values):
-                raise ValueError(
-                    "appended items do not match existing items" " in table!"
-                )
+                raise ValueError("appended items do not match existing items in table!")

             existing_dtype = getattr(self.attrs, self.dtype_attr, None)
             if existing_dtype is not None and existing_dtype != self.dtype:
@@ -2834,7 +2832,7 @@ def write_multi_index(self, key, index):
             # write the level
             if is_extension_type(lev):
                 raise NotImplementedError(
-                    "Saving a MultiIndex with an " "extension dtype is not supported."
+                    "Saving a MultiIndex with an extension dtype is not supported."
                 )
             level_key = "{key}_level{idx}".format(key=key, idx=i)
             conv_level = _convert_index(
@@ -3079,7 +3077,7 @@ def validate_read(self, kwargs):
         kwargs = super().validate_read(kwargs)
         if "start" in kwargs or "stop" in kwargs:
             raise NotImplementedError(
-                "start and/or stop are not supported " "in fixed Sparse reading"
+                "start and/or stop are not supported in fixed Sparse reading"
             )
         return kwargs

@@ -3376,7 +3374,7 @@ def validate_multiindex(self, obj):
             return obj.reset_index(), levels
         except ValueError:
             raise ValueError(
-                "duplicate names/columns in the multi-index when " "storing as a table"
+                "duplicate names/columns in the multi-index when storing as a table"
            )

     @property
@@ -4081,7 +4079,7 @@ def read_column(self, column, where=None, start=None, stop=None):
             return False

         if where is not None:
-            raise TypeError("read_column does not currently accept a where " "clause")
+            raise TypeError("read_column does not currently accept a where clause")

         # find the axes
         for a in self.axes:
@@ -4990,7 +4988,7 @@ def __init__(self, table, where=None, start=None, stop=None):
                     self.stop is not None and (where >= self.stop).any()
                 ):
                     raise ValueError(
-                        "where must have index locations >= start and " "< stop"
+                        "where must have index locations >= start and < stop"
                     )
                 self.coordinates = where
pandas/io/sas/sas_xport.py

+1 -1

@@ -26,7 +26,7 @@
     "000000000000000000000000000000 "
 )
 _correct_header1 = (
-    "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!" "000000000000000001600000000"
+    "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000"
 )
 _correct_header2 = (
     "HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!"
