Skip to content

Commit 12a0dc4

Browse files
jbrockmendel and datapythonista
authored and committed
STYLE: Avoid using backslash to continue code to the next line (#23073)
1 parent e6277be commit 12a0dc4

23 files changed

+106
-102
lines changed

pandas/compat/numpy/__init__.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -60,8 +60,8 @@ def np_array_datetime64_compat(arr, *args, **kwargs):
6060
if not _np_version_under1p11:
6161

6262
# is_list_like
63-
if hasattr(arr, '__iter__') and not \
64-
isinstance(arr, string_and_binary_types):
63+
if (hasattr(arr, '__iter__') and
64+
not isinstance(arr, string_and_binary_types)):
6565
arr = [tz_replacer(s) for s in arr]
6666
else:
6767
arr = tz_replacer(arr)

pandas/core/algorithms.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -275,8 +275,8 @@ def match(to_match, values, na_sentinel=-1):
275275
# replace but return a numpy array
276276
# use a Series because it handles dtype conversions properly
277277
from pandas import Series
278-
result = Series(result.ravel()).replace(-1, na_sentinel).values.\
279-
reshape(result.shape)
278+
result = Series(result.ravel()).replace(-1, na_sentinel)
279+
result = result.values.reshape(result.shape)
280280

281281
return result
282282

pandas/core/base.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -395,8 +395,8 @@ def nested_renaming_depr(level=4):
395395

396396
elif isinstance(obj, ABCSeries):
397397
nested_renaming_depr()
398-
elif isinstance(obj, ABCDataFrame) and \
399-
k not in obj.columns:
398+
elif (isinstance(obj, ABCDataFrame) and
399+
k not in obj.columns):
400400
raise KeyError(
401401
"Column '{col}' does not exist!".format(col=k))
402402

pandas/core/generic.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -5651,8 +5651,8 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
56515651
# fill in 2d chunks
56525652
result = {col: s.fillna(method=method, value=value)
56535653
for col, s in self.iteritems()}
5654-
new_obj = self._constructor.\
5655-
from_dict(result).__finalize__(self)
5654+
prelim_obj = self._constructor.from_dict(result)
5655+
new_obj = prelim_obj.__finalize__(self)
56565656
new_data = new_obj._data
56575657

56585658
else:

pandas/core/groupby/generic.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -1027,8 +1027,9 @@ def nunique(self, dropna=True):
10271027
try:
10281028
sorter = np.lexsort((val, ids))
10291029
except TypeError: # catches object dtypes
1030-
assert val.dtype == object, \
1031-
'val.dtype must be object, got %s' % val.dtype
1030+
msg = ('val.dtype must be object, got {dtype}'
1031+
.format(dtype=val.dtype))
1032+
assert val.dtype == object, msg
10321033
val, _ = algorithms.factorize(val, sort=False)
10331034
sorter = np.lexsort((val, ids))
10341035
_isna = lambda a: a == -1

pandas/core/groupby/groupby.py

+8-6
Original file line numberDiff line numberDiff line change
@@ -578,8 +578,8 @@ def wrapper(*args, **kwargs):
578578
# a little trickery for aggregation functions that need an axis
579579
# argument
580580
kwargs_with_axis = kwargs.copy()
581-
if 'axis' not in kwargs_with_axis or \
582-
kwargs_with_axis['axis'] is None:
581+
if ('axis' not in kwargs_with_axis or
582+
kwargs_with_axis['axis'] is None):
583583
kwargs_with_axis['axis'] = self.axis
584584

585585
def curried_with_axis(x):
@@ -1490,8 +1490,10 @@ def nth(self, n, dropna=None):
14901490
self._set_group_selection()
14911491

14921492
if not dropna:
1493-
mask = np.in1d(self._cumcount_array(), nth_values) | \
1494-
np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)
1493+
mask_left = np.in1d(self._cumcount_array(), nth_values)
1494+
mask_right = np.in1d(self._cumcount_array(ascending=False) + 1,
1495+
-nth_values)
1496+
mask = mask_left | mask_right
14951497

14961498
out = self._selected_obj[mask]
14971499
if not self.as_index:
@@ -1552,8 +1554,8 @@ def nth(self, n, dropna=None):
15521554
result.loc[mask] = np.nan
15531555

15541556
# reset/reindex to the original groups
1555-
if len(self.obj) == len(dropped) or \
1556-
len(result) == len(self.grouper.result_index):
1557+
if (len(self.obj) == len(dropped) or
1558+
len(result) == len(self.grouper.result_index)):
15571559
result.index = self.grouper.result_index
15581560
else:
15591561
result = result.reindex(self.grouper.result_index)

pandas/core/groupby/grouper.py

+14-14
Original file line numberDiff line numberDiff line change
@@ -157,8 +157,8 @@ def _set_grouper(self, obj, sort=False):
157157
if self.key is not None:
158158
key = self.key
159159
# The 'on' is already defined
160-
if getattr(self.grouper, 'name', None) == key and \
161-
isinstance(obj, ABCSeries):
160+
if (getattr(self.grouper, 'name', None) == key and
161+
isinstance(obj, ABCSeries)):
162162
ax = self._grouper.take(obj.index)
163163
else:
164164
if key not in obj._info_axis:
@@ -530,9 +530,9 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
530530
except Exception:
531531
all_in_columns_index = False
532532

533-
if not any_callable and not all_in_columns_index and \
534-
not any_arraylike and not any_groupers and \
535-
match_axis_length and level is None:
533+
if (not any_callable and not all_in_columns_index and
534+
not any_arraylike and not any_groupers and
535+
match_axis_length and level is None):
536536
keys = [com.asarray_tuplesafe(keys)]
537537

538538
if isinstance(level, (tuple, list)):
@@ -593,15 +593,15 @@ def is_in_obj(gpr):
593593

594594
# create the Grouping
595595
# allow us to passing the actual Grouping as the gpr
596-
ping = Grouping(group_axis,
597-
gpr,
598-
obj=obj,
599-
name=name,
600-
level=level,
601-
sort=sort,
602-
observed=observed,
603-
in_axis=in_axis) \
604-
if not isinstance(gpr, Grouping) else gpr
596+
ping = (Grouping(group_axis,
597+
gpr,
598+
obj=obj,
599+
name=name,
600+
level=level,
601+
sort=sort,
602+
observed=observed,
603+
in_axis=in_axis)
604+
if not isinstance(gpr, Grouping) else gpr)
605605

606606
groupings.append(ping)
607607

pandas/core/groupby/ops.py

+5-4
Original file line numberDiff line numberDiff line change
@@ -521,8 +521,8 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1,
521521
result = result.astype('float64')
522522
result[mask] = np.nan
523523

524-
if kind == 'aggregate' and \
525-
self._filter_empty_groups and not counts.all():
524+
if (kind == 'aggregate' and
525+
self._filter_empty_groups and not counts.all()):
526526
if result.ndim == 2:
527527
try:
528528
result = lib.row_bool_subset(
@@ -743,8 +743,9 @@ def group_info(self):
743743
else:
744744
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
745745

746-
return comp_ids.astype('int64', copy=False), \
747-
obs_group_ids.astype('int64', copy=False), ngroups
746+
return (comp_ids.astype('int64', copy=False),
747+
obs_group_ids.astype('int64', copy=False),
748+
ngroups)
748749

749750
@cache_readonly
750751
def ngroups(self):

pandas/core/indexing.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -1836,8 +1836,8 @@ def _get_partial_string_timestamp_match_key(self, key, labels):
18361836
"""Translate any partial string timestamp matches in key, returning the
18371837
new key (GH 10331)"""
18381838
if isinstance(labels, MultiIndex):
1839-
if isinstance(key, compat.string_types) and \
1840-
labels.levels[0].is_all_dates:
1839+
if (isinstance(key, compat.string_types) and
1840+
labels.levels[0].is_all_dates):
18411841
# Convert key '2016-01-01' to
18421842
# ('2016-01-01'[, slice(None, None, None)]+)
18431843
key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))
@@ -1847,8 +1847,8 @@ def _get_partial_string_timestamp_match_key(self, key, labels):
18471847
# (..., slice('2016-01-01', '2016-01-01', None), ...)
18481848
new_key = []
18491849
for i, component in enumerate(key):
1850-
if isinstance(component, compat.string_types) and \
1851-
labels.levels[i].is_all_dates:
1850+
if (isinstance(component, compat.string_types) and
1851+
labels.levels[i].is_all_dates):
18521852
new_key.append(slice(component, component, None))
18531853
else:
18541854
new_key.append(component)

pandas/core/internals/concat.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -184,8 +184,8 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
184184
if len(values) and values[0] is None:
185185
fill_value = None
186186

187-
if getattr(self.block, 'is_datetimetz', False) or \
188-
is_datetimetz(empty_dtype):
187+
if (getattr(self.block, 'is_datetimetz', False) or
188+
is_datetimetz(empty_dtype)):
189189
if self.block is None:
190190
array = empty_dtype.construct_array_type()
191191
missing_arr = array([fill_value], dtype=empty_dtype)

pandas/core/reshape/pivot.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -140,8 +140,8 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
140140
margins_name=margins_name, fill_value=fill_value)
141141

142142
# discard the top level
143-
if values_passed and not values_multi and not table.empty and \
144-
(table.columns.nlevels > 1):
143+
if (values_passed and not values_multi and not table.empty and
144+
(table.columns.nlevels > 1)):
145145
table = table[values[0]]
146146

147147
if len(index) == 0 and len(columns) > 0:

pandas/core/reshape/reshape.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -745,9 +745,8 @@ def check_len(item, name):
745745

746746
if is_list_like(item):
747747
if not len(item) == data_to_encode.shape[1]:
748-
len_msg = \
749-
len_msg.format(name=name, len_item=len(item),
750-
len_enc=data_to_encode.shape[1])
748+
len_msg = len_msg.format(name=name, len_item=len(item),
749+
len_enc=data_to_encode.shape[1])
751750
raise ValueError(len_msg)
752751

753752
check_len(prefix, 'prefix')

pandas/core/tools/datetimes.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -724,8 +724,9 @@ def calc_with_mask(carg, mask):
724724
result = np.empty(carg.shape, dtype='M8[ns]')
725725
iresult = result.view('i8')
726726
iresult[~mask] = tslibs.iNaT
727-
result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)). \
728-
astype('M8[ns]')
727+
728+
masked_result = calc(carg[mask].astype(np.float64).astype(np.int64))
729+
result[mask] = masked_result.astype('M8[ns]')
729730
return result
730731

731732
# try intlike / strings that are ints

pandas/core/window.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -98,11 +98,11 @@ def is_freq_type(self):
9898
def validate(self):
9999
if self.center is not None and not is_bool(self.center):
100100
raise ValueError("center must be a boolean")
101-
if self.min_periods is not None and not \
102-
is_integer(self.min_periods):
101+
if (self.min_periods is not None and
102+
not is_integer(self.min_periods)):
103103
raise ValueError("min_periods must be an integer")
104-
if self.closed is not None and self.closed not in \
105-
['right', 'both', 'left', 'neither']:
104+
if (self.closed is not None and
105+
self.closed not in ['right', 'both', 'left', 'neither']):
106106
raise ValueError("closed must be 'right', 'left', 'both' or "
107107
"'neither'")
108108

pandas/io/common.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -430,8 +430,8 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None,
430430
handles.append(f)
431431

432432
# in Python 3, convert BytesIO or fileobjects passed with an encoding
433-
if compat.PY3 and is_text and\
434-
(compression or isinstance(f, need_text_wrapping)):
433+
if (compat.PY3 and is_text and
434+
(compression or isinstance(f, need_text_wrapping))):
435435
from io import TextIOWrapper
436436
f = TextIOWrapper(f, encoding=encoding)
437437
handles.append(f)

pandas/io/excel.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -1755,14 +1755,14 @@ def convert(cls, style_dict, num_format_str=None):
17551755
props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted',
17561756
'thick', 'double', 'hair', 'mediumDashed',
17571757
'dashDot', 'mediumDashDot', 'dashDotDot',
1758-
'mediumDashDotDot', 'slantDashDot'].\
1759-
index(props[k])
1758+
'mediumDashDotDot',
1759+
'slantDashDot'].index(props[k])
17601760
except ValueError:
17611761
props[k] = 2
17621762

17631763
if isinstance(props.get('font_script'), string_types):
1764-
props['font_script'] = ['baseline', 'superscript', 'subscript'].\
1765-
index(props['font_script'])
1764+
props['font_script'] = ['baseline', 'superscript',
1765+
'subscript'].index(props['font_script'])
17661766

17671767
if isinstance(props.get('underline'), string_types):
17681768
props['underline'] = {'none': 0, 'single': 1, 'double': 2,

pandas/io/formats/style.py

+3-4
Original file line numberDiff line numberDiff line change
@@ -14,10 +14,9 @@
1414
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
1515
)
1616
except ImportError:
17-
msg = "pandas.Styler requires jinja2. "\
18-
"Please install with `conda install Jinja2`\n"\
19-
"or `pip install Jinja2`"
20-
raise ImportError(msg)
17+
raise ImportError("pandas.Styler requires jinja2. "
18+
"Please install with `conda install Jinja2`\n"
19+
"or `pip install Jinja2`")
2120

2221
from pandas.core.dtypes.common import is_float, is_string_like
2322

pandas/io/formats/terminal.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -40,9 +40,8 @@ def get_terminal_size():
4040
if tuple_xy is None:
4141
tuple_xy = _get_terminal_size_tput()
4242
# needed for window's python in cygwin's xterm!
43-
if current_os == 'Linux' or \
44-
current_os == 'Darwin' or \
45-
current_os.startswith('CYGWIN'):
43+
if (current_os == 'Linux' or current_os == 'Darwin' or
44+
current_os.startswith('CYGWIN')):
4645
tuple_xy = _get_terminal_size_linux()
4746
if tuple_xy is None:
4847
tuple_xy = (80, 25) # default value

pandas/io/json/normalize.py

+4-5
Original file line numberDiff line numberDiff line change
@@ -250,11 +250,10 @@ def _recursive_extract(data, path, seen_meta, level=0):
250250
if errors == 'ignore':
251251
meta_val = np.nan
252252
else:
253-
raise \
254-
KeyError("Try running with "
255-
"errors='ignore' as key "
256-
"{err} is not always present"
257-
.format(err=e))
253+
raise KeyError("Try running with "
254+
"errors='ignore' as key "
255+
"{err} is not always present"
256+
.format(err=e))
258257
meta_vals[key].append(meta_val)
259258

260259
records.extend(recs)

pandas/io/parsers.py

+14-14
Original file line numberDiff line numberDiff line change
@@ -883,26 +883,26 @@ def _clean_options(self, options, engine):
883883
# C engine not supported yet
884884
if engine == 'c':
885885
if options['skipfooter'] > 0:
886-
fallback_reason = "the 'c' engine does not support"\
887-
" skipfooter"
886+
fallback_reason = ("the 'c' engine does not support"
887+
" skipfooter")
888888
engine = 'python'
889889

890890
encoding = sys.getfilesystemencoding() or 'utf-8'
891891
if sep is None and not delim_whitespace:
892892
if engine == 'c':
893-
fallback_reason = "the 'c' engine does not support"\
894-
" sep=None with delim_whitespace=False"
893+
fallback_reason = ("the 'c' engine does not support"
894+
" sep=None with delim_whitespace=False")
895895
engine = 'python'
896896
elif sep is not None and len(sep) > 1:
897897
if engine == 'c' and sep == r'\s+':
898898
result['delim_whitespace'] = True
899899
del result['delimiter']
900900
elif engine not in ('python', 'python-fwf'):
901901
# wait until regex engine integrated
902-
fallback_reason = "the 'c' engine does not support"\
903-
" regex separators (separators > 1 char and"\
904-
r" different from '\s+' are"\
905-
" interpreted as regex)"
902+
fallback_reason = ("the 'c' engine does not support"
903+
" regex separators (separators > 1 char and"
904+
r" different from '\s+' are"
905+
" interpreted as regex)")
906906
engine = 'python'
907907
elif delim_whitespace:
908908
if 'python' in engine:
@@ -915,10 +915,10 @@ def _clean_options(self, options, engine):
915915
except UnicodeDecodeError:
916916
encodeable = False
917917
if not encodeable and engine not in ('python', 'python-fwf'):
918-
fallback_reason = "the separator encoded in {encoding}" \
919-
" is > 1 char long, and the 'c' engine" \
920-
" does not support such separators".format(
921-
encoding=encoding)
918+
fallback_reason = ("the separator encoded in {encoding}"
919+
" is > 1 char long, and the 'c' engine"
920+
" does not support such separators"
921+
.format(encoding=encoding))
922922
engine = 'python'
923923

924924
quotechar = options['quotechar']
@@ -3203,8 +3203,8 @@ def _clean_index_names(columns, index_col):
32033203
index_names.append(name)
32043204

32053205
# hack
3206-
if isinstance(index_names[0], compat.string_types)\
3207-
and 'Unnamed' in index_names[0]:
3206+
if (isinstance(index_names[0], compat.string_types) and
3207+
'Unnamed' in index_names[0]):
32083208
index_names[0] = None
32093209

32103210
return index_names, columns, index_col

pandas/io/pytables.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -1804,8 +1804,8 @@ def validate_metadata(self, handler):
18041804
if self.meta == 'category':
18051805
new_metadata = self.metadata
18061806
cur_metadata = handler.read_metadata(self.cname)
1807-
if new_metadata is not None and cur_metadata is not None \
1808-
and not array_equivalent(new_metadata, cur_metadata):
1807+
if (new_metadata is not None and cur_metadata is not None and
1808+
not array_equivalent(new_metadata, cur_metadata)):
18091809
raise ValueError("cannot append a categorical with "
18101810
"different categories to the existing")
18111811

0 commit comments

Comments
 (0)