Commit 0fce522

CLN: flake8 warnings in pandas/tools/*.py
1 parent 85134cd · commit 0fce522

File tree

5 files changed (+248, -173 lines)

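The changes below are style-only cleanups for flake8: long lines are wrapped to fit the line-length limit and a comment banner is reformatted, with no behavior change intended. As a rough way to re-run the check locally (not part of this commit; it assumes flake8 is installed and uses its legacy Python API), something like the following could be run from the repository root:

# Hypothetical reproduction sketch (not from the commit): lint the touched
# modules and report how many flake8 warnings remain.
from flake8.api import legacy as flake8

style_guide = flake8.get_style_guide()
report = style_guide.check_files(['pandas/tools/merge.py',
                                  'pandas/tools/pivot.py'])
print('remaining flake8 warnings:', report.total_errors)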

pandas/tools/merge.py  (+38, -26)
@@ -226,8 +226,8 @@ def _indicator_pre_merge(self, left, right):

        for i in ['_left_indicator', '_right_indicator']:
            if i in columns:
-                raise ValueError(
-                    "Cannot use `indicator=True` option when data contains a column named {}".format(i))
+                raise ValueError("Cannot use `indicator=True` option when "
+                                 "data contains a column named {}".format(i))
        if self.indicator_name in columns:
            raise ValueError(
                "Cannot use name of an existing column for indicator column")
@@ -248,14 +248,15 @@ def _indicator_post_merge(self, result):
        result['_left_indicator'] = result['_left_indicator'].fillna(0)
        result['_right_indicator'] = result['_right_indicator'].fillna(0)

-        result[self.indicator_name] = Categorical(
-            (result['_left_indicator'] + result['_right_indicator']), categories=[1, 2, 3])
-        result[self.indicator_name] = result[self.indicator_name].cat.rename_categories(
-            ['left_only', 'right_only', 'both'])
-
-        result = result.drop(
-            labels=['_left_indicator', '_right_indicator'], axis=1)
+        result[self.indicator_name] = Categorical((result['_left_indicator'] +
+                                                   result['_right_indicator']),
+                                                  categories=[1, 2, 3])
+        result[self.indicator_name] = (
+            result[self.indicator_name]
+            .cat.rename_categories(['left_only', 'right_only', 'both']))

+        result = result.drop(labels=['_left_indicator', '_right_indicator'],
+                             axis=1)
        return result

    def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
@@ -280,8 +281,9 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
                        continue

                    right_na_indexer = right_indexer.take(na_indexer)
-                    result.iloc[na_indexer, key_indexer] = com.take_1d(self.right_join_keys[i],
-                                                                       right_na_indexer)
+                    result.iloc[na_indexer, key_indexer] = (
+                        com.take_1d(self.right_join_keys[i],
+                                    right_na_indexer))
                elif name in self.right:
                    if len(self.right) == 0:
                        continue
@@ -291,8 +293,9 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
                        continue

                    left_na_indexer = left_indexer.take(na_indexer)
-                    result.iloc[na_indexer, key_indexer] = com.take_1d(self.left_join_keys[i],
-                                                                       left_na_indexer)
+                    result.iloc[na_indexer, key_indexer] = (
+                        com.take_1d(self.left_join_keys[i],
+                                    left_na_indexer))
            elif left_indexer is not None \
                    and isinstance(self.left_join_keys[i], np.ndarray):

@@ -761,12 +764,13 @@ def _get_join_keys(llab, rlab, shape, sort):

    return _get_join_keys(llab, rlab, shape, sort)

-#----------------------------------------------------------------------
+# ---------------------------------------------------------------------
# Concatenate DataFrame objects


def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
-           keys=None, levels=None, names=None, verify_integrity=False, copy=True):
+           keys=None, levels=None, names=None, verify_integrity=False,
+           copy=True):
    """
    Concatenate pandas objects along a particular axis with optional set logic
    along the other axes. Can also add a layer of hierarchical indexing on the
@@ -897,7 +901,8 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
            # if we have not multi-index possibiltes
            df = DataFrame([obj.shape for obj in objs]).sum(1)
            non_empties = df[df != 0]
-            if len(non_empties) and (keys is None and names is None and levels is None and join_axes is None):
+            if (len(non_empties) and (keys is None and names is None and
+                                      levels is None and join_axes is None)):
                objs = [objs[i] for i in non_empties.index]
                sample = objs[0]

@@ -967,19 +972,23 @@ def get_result(self):
            if self.axis == 0:
                new_data = com._concat_compat([x._values for x in self.objs])
                name = com._consensus_name_attr(self.objs)
-                return Series(new_data, index=self.new_axes[0], name=name).__finalize__(self, method='concat')
+                return (Series(new_data, index=self.new_axes[0], name=name)
+                        .__finalize__(self, method='concat'))

            # combine as columns in a frame
            else:
                data = dict(zip(range(len(self.objs)), self.objs))
                index, columns = self.new_axes
                tmpdf = DataFrame(data, index=index)
-                # checks if the column variable already stores valid column names (because set via the 'key' argument
-                # in the 'concat' function call. If that's not the case, use
-                # the series names as column names
-                if columns.equals(Index(np.arange(len(self.objs)))) and not self.ignore_index:
-                    columns = np.array(
-                        [data[i].name for i in range(len(data))], dtype='object')
+                # checks if the column variable already stores valid column
+                # names (because set via the 'key' argument in the 'concat'
+                # function call. If that's not the case, use the series names
+                # as column names
+                if (columns.equals(Index(np.arange(len(self.objs)))) and
+                        not self.ignore_index):
+                    columns = np.array([data[i].name
+                                        for i in range(len(data))],
+                                       dtype='object')
                indexer = isnull(columns)
                if indexer.any():
                    columns[indexer] = np.arange(len(indexer[indexer]))
@@ -1004,11 +1013,13 @@ def get_result(self):
                mgrs_indexers.append((obj._data, indexers))

            new_data = concatenate_block_managers(
-                mgrs_indexers, self.new_axes, concat_axis=self.axis, copy=self.copy)
+                mgrs_indexers, self.new_axes,
+                concat_axis=self.axis, copy=self.copy)
            if not self.copy:
                new_data._consolidate_inplace()

-            return self.objs[0]._from_axes(new_data, self.new_axes).__finalize__(self, method='concat')
+            return (self.objs[0]._from_axes(new_data, self.new_axes)
+                    .__finalize__(self, method='concat'))

    def _get_result_dim(self):
        if self._is_series and self.axis == 1:
@@ -1214,7 +1225,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):


def _should_fill(lname, rname):
-    if not isinstance(lname, compat.string_types) or not isinstance(rname, compat.string_types):
+    if (not isinstance(lname, compat.string_types) or
+            not isinstance(rname, compat.string_types)):
        return True
    return lname == rname

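The merge.py edits lean on two Python features, so the wrapped code is equivalent to the original: adjacent string literals are concatenated by the parser, and an expression inside parentheses may continue across lines. A small standalone illustration of both (not from the commit):

# Illustration only, not part of the commit.

# Adjacent string literals are joined at parse time, so the reworded raise in
# _indicator_pre_merge builds exactly the same message as before.
col = '_left_indicator'
one_line = "Cannot use `indicator=True` option when data contains a column named {}".format(col)
wrapped = ("Cannot use `indicator=True` option when "
           "data contains a column named {}".format(col))
assert one_line == wrapped

# Parentheses let a chained expression continue on the next line without a
# backslash, as in _indicator_post_merge and get_result above.
label = ("  left_only  "
         .strip()
         .upper())
assert label == 'LEFT_ONLY'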

pandas/tools/pivot.py  (+12, -8)
@@ -24,12 +24,14 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
    ----------
    data : DataFrame
    values : column to aggregate, optional
-    index : a column, Grouper, array which has the same length as data, or list of them.
-        Keys to group by on the pivot table index.
-        If an array is passed, it is being used as the same manner as column values.
-    columns : a column, Grouper, array which has the same length as data, or list of them.
-        Keys to group by on the pivot table column.
-        If an array is passed, it is being used as the same manner as column values.
+    index : a column, Grouper, array which has the same length as data, or list
+        of them.
+        Keys to group by on the pivot table index. If an array is passed, it
+        is being used as the same manner as column values.
+    columns : a column, Grouper, array which has the same length as data, or
+        list of them.
+        Keys to group by on the pivot table column. If an array is passed, it
+        is being used as the same manner as column values.
    aggfunc : function, default numpy.mean, or list of functions
        If list of functions passed, the resulting pivot table will have
        hierarchical columns whose top level are the function names (inferred
@@ -78,7 +80,8 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
        pieces = []
        keys = []
        for func in aggfunc:
-            table = pivot_table(data, values=values, index=index, columns=columns,
+            table = pivot_table(data, values=values, index=index,
+                                columns=columns,
                                fill_value=fill_value, aggfunc=func,
                                margins=margins)
            pieces.append(table)
@@ -350,7 +353,8 @@ def _all_key():
def _convert_by(by):
    if by is None:
        by = []
-    elif (np.isscalar(by) or isinstance(by, (np.ndarray, Index, Series, Grouper))
+    elif (np.isscalar(by) or isinstance(by, (np.ndarray, Index,
+                                             Series, Grouper))
          or hasattr(by, '__call__')):
        by = [by]
    else:
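
The _convert_by change is the same wrapping pattern applied to a boolean test: once the whole condition sits inside parentheses it can be broken across lines freely. A simplified, hypothetical sketch of that shape (it drops the pandas-specific Index, Series and Grouper types the real function also accepts):

import numpy as np

def convert_by_sketch(by):
    # Simplified stand-in for pivot.py's _convert_by, illustrating the
    # parenthesized multi-line condition; not the real pandas function.
    if by is None:
        return []
    elif (np.isscalar(by) or isinstance(by, np.ndarray)
            or hasattr(by, '__call__')):
        return [by]
    else:
        return list(by)

print(convert_by_sketch('a'))         # ['a']
print(convert_by_sketch(['a', 'b']))  # ['a', 'b']
print(convert_by_sketch(None))        # []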
