CLN: PEP8 cleanup of the io module #5663

Merged: 1 commit, Dec 9, 2013
4 changes: 3 additions & 1 deletion pandas/computation/align.py
@@ -152,7 +152,9 @@ def _align_core(terms):
                                 copy=False)
 
                 # need to fill if we have a bool dtype/array
-                if isinstance(ti, (np.ndarray, pd.Series)) and ti.dtype == object and pd.lib.is_bool_array(ti.values):
+                if (isinstance(ti, (np.ndarray, pd.Series))
+                        and ti.dtype == object
+                        and pd.lib.is_bool_array(ti.values)):
                     r = f(fill_value=True)
                 else:
                     r = f()
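The hunk above shows the pattern applied throughout this PR: an over-long condition is wrapped in parentheses and broken before each boolean operator, instead of spilling past 79 columns or using backslash continuations. A minimal, self-contained sketch of the convention (illustrative names only, not taken from the pandas source):

    import numpy as np

    arr = np.array([True, False, True], dtype=object)

    # PEP8-friendly continuation: the parentheses carry the line break, and
    # the expression is split before each "and" so no line exceeds 79 columns.
    if (isinstance(arr, np.ndarray)
            and arr.dtype == object
            and all(isinstance(x, bool) for x in arr)):
        print("object-dtype array holding only booleans")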
15 changes: 9 additions & 6 deletions pandas/computation/expr.py
@@ -512,18 +512,21 @@ def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
         res = op(lhs, rhs)
 
         if self.engine != 'pytables':
-            if (res.op in _cmp_ops_syms and getattr(lhs,'is_datetime',False) or getattr(rhs,'is_datetime',False)):
-                # all date ops must be done in python bc numexpr doesn't work well
-                # with NaT
+            if (res.op in _cmp_ops_syms
+                    and getattr(lhs, 'is_datetime', False)
+                    or getattr(rhs, 'is_datetime', False)):
+                # all date ops must be done in python bc numexpr doesn't work
+                # well with NaT
                 return self._possibly_eval(res, self.binary_ops)
 
         if res.op in eval_in_python:
             # "in"/"not in" ops are always evaluated in python
             return self._possibly_eval(res, eval_in_python)
         elif self.engine != 'pytables':
-            if (getattr(lhs,'return_type',None) == object or getattr(rhs,'return_type',None) == object):
-                # evaluate "==" and "!=" in python if either of our operands has an
-                # object return type
+            if (getattr(lhs, 'return_type', None) == object
+                    or getattr(rhs, 'return_type', None) == object):
+                # evaluate "==" and "!=" in python if either of our operands
+                # has an object return type
                 return self._possibly_eval(res, eval_in_python +
                                            maybe_eval_in_python)
         return res
3 changes: 2 additions & 1 deletion pandas/computation/tests/test_eval.py
@@ -1022,7 +1022,8 @@ def check_performance_warning_for_poor_alignment(self, engine, parser):
 
     def test_performance_warning_for_poor_alignment(self):
         for engine, parser in ENGINES_PARSERS:
-            yield self.check_performance_warning_for_poor_alignment, engine, parser
+            yield (self.check_performance_warning_for_poor_alignment, engine,
+                   parser)
 
 
 #------------------------------------
40 changes: 21 additions & 19 deletions pandas/core/format.py
@@ -264,8 +264,8 @@ class DataFrameFormatter(TableFormatter):
     def __init__(self, frame, buf=None, columns=None, col_space=None,
                  header=True, index=True, na_rep='NaN', formatters=None,
                  justify=None, float_format=None, sparsify=None,
-                 index_names=True, line_width=None, max_rows=None, max_cols=None,
-                 show_dimensions=False, **kwds):
+                 index_names=True, line_width=None, max_rows=None,
+                 max_cols=None, show_dimensions=False, **kwds):
         self.frame = frame
         self.buf = buf if buf is not None else StringIO()
         self.show_index_names = index_names
@@ -284,7 +284,8 @@ def __init__(self, frame, buf=None, columns=None, col_space=None,
         self.line_width = line_width
         self.max_rows = max_rows
         self.max_cols = max_cols
-        self.max_rows_displayed = min(max_rows or len(self.frame),len(self.frame))
+        self.max_rows_displayed = min(max_rows or len(self.frame),
+                                      len(self.frame))
         self.show_dimensions = show_dimensions
 
         if justify is None:
@@ -330,7 +331,8 @@ def _to_str_columns(self):
                                        *(_strlen(x) for x in cheader))
 
                     fmt_values = _make_fixed_width(fmt_values, self.justify,
-                                                   minimum=max_colwidth, truncated=truncate_v)
+                                                   minimum=max_colwidth,
+                                                   truncated=truncate_v)
 
                     max_len = max(np.max([_strlen(x) for x in fmt_values]),
                                   max_colwidth)
@@ -349,8 +351,8 @@ def _to_str_columns(self):
         if self.index:
             strcols.insert(0, str_index)
         if truncate_h:
-            strcols.append(([''] * len(str_columns[-1])) \
-                + (['...'] * min(len(self.frame), self.max_rows)) )
+            strcols.append(([''] * len(str_columns[-1]))
+                           + (['...'] * min(len(self.frame), self.max_rows)))
 
         return strcols
 
@@ -382,8 +384,8 @@ def to_string(self, force_unicode=None):
             self.buf.writelines(text)
 
         if self.show_dimensions:
-            self.buf.write("\n\n[%d rows x %d columns]" \
-                           % (len(frame), len(frame.columns)) )
+            self.buf.write("\n\n[%d rows x %d columns]"
+                           % (len(frame), len(frame.columns)))
 
     def _join_multiline(self, *strcols):
         lwidth = self.line_width
@@ -484,10 +486,11 @@ def write(buf, frame, column_format, strcols):
 
     def _format_col(self, i):
         formatter = self._get_formatter(i)
-        return format_array((self.frame.iloc[:self.max_rows_displayed,i]).get_values(),
-                            formatter, float_format=self.float_format,
-                            na_rep=self.na_rep,
-                            space=self.col_space)
+        return format_array(
+            (self.frame.iloc[:self.max_rows_displayed, i]).get_values(),
+            formatter, float_format=self.float_format, na_rep=self.na_rep,
+            space=self.col_space
+        )
 
     def to_html(self, classes=None):
         """
@@ -679,8 +682,6 @@ def write_result(self, buf):
                                      'not %s') % type(self.classes))
             _classes.extend(self.classes)
 
-
-
         self.write('<table border="1" class="%s">' % ' '.join(_classes),
                    indent)
 
@@ -698,9 +699,9 @@ def write_result(self, buf):
 
         self.write('</table>', indent)
         if self.fmt.show_dimensions:
-            by = chr(215) if compat.PY3 else unichr(215) # ×
+            by = chr(215) if compat.PY3 else unichr(215)  # ×
             self.write(u('<p>%d rows %s %d columns</p>') %
-                       (len(frame), by, len(frame.columns)) )
+                       (len(frame), by, len(frame.columns)))
         _put_lines(buf, self.elements)
 
     def _write_header(self, indent):
@@ -783,8 +784,9 @@ def _column_header():
                               align=align)
 
         if self.fmt.has_index_names:
-            row = [x if x is not None else '' for x in self.frame.index.names] \
-                + [''] * min(len(self.columns), self.max_cols)
+            row = [
+                x if x is not None else '' for x in self.frame.index.names
+            ] + [''] * min(len(self.columns), self.max_cols)
             self.write_tr(row, indent, self.indent_delta, header=True)
 
         indent -= self.indent_delta
@@ -851,7 +853,7 @@ def _write_hierarchical_rows(self, fmt_values, indent):
         truncate = (len(frame) > self.max_rows)
 
         idx_values = frame.index[:nrows].format(sparsify=False, adjoin=False,
-                                               names=False)
+                                                names=False)
         idx_values = lzip(*idx_values)
 
         if self.fmt.sparsify:
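Several hunks in this PR replace a trailing backslash with a line break carried by the existing parentheses, and wrap long strings using adjacent string literals, which Python concatenates at compile time. A small standalone illustration (the values are made up, not from the PR):

    rows, cols = 3, 4

    # The parentheses alone are enough to continue the expression onto the
    # next line; no backslash is needed.
    dims = ("\n\n[%d rows x %d columns]"
            % (rows, cols))

    # Two string literals side by side are concatenated into one string.
    err = ('Cannot set a frame with no defined index '
           'and a value that cannot be converted to a '
           'Series')

    print(dims)
    print(err)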
22 changes: 13 additions & 9 deletions pandas/core/frame.py
@@ -432,8 +432,9 @@ def _repr_fits_horizontal_(self, ignore_width=False):
     def _info_repr(self):
         """True if the repr should show the info view."""
         info_repr_option = (get_option("display.large_repr") == "info")
-        return info_repr_option and not \
-            (self._repr_fits_horizontal_() and self._repr_fits_vertical_())
+        return info_repr_option and not (
+            self._repr_fits_horizontal_() and self._repr_fits_vertical_()
+        )
 
     def __unicode__(self):
         """
@@ -486,8 +487,7 @@ def _repr_html_(self):
             return ('<div style="max-height:1000px;'
                     'max-width:1500px;overflow:auto;">\n' +
                     self.to_html(max_rows=max_rows, max_cols=max_cols,
-                                 show_dimensions=True) \
-                + '\n</div>')
+                                 show_dimensions=True) + '\n</div>')
         else:
             return None
 
@@ -1283,7 +1283,8 @@ def to_string(self, buf=None, columns=None, col_space=None, colSpace=None,
                                            index_names=index_names,
                                            header=header, index=index,
                                            line_width=line_width,
-                                           max_rows=max_rows, max_cols=max_cols,
+                                           max_rows=max_rows,
+                                           max_cols=max_cols,
                                            show_dimensions=show_dimensions)
         formatter.to_string()
 
@@ -1310,7 +1311,8 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
         escape : boolean, default True
             Convert the characters <, >, and & to HTML-safe sequences.=
         max_rows : int, optional
-            Maximum number of rows to show before truncating. If None, show all.
+            Maximum number of rows to show before truncating. If None, show
+            all.
         max_cols : int, optional
             Maximum number of columns to show before truncating. If None, show
             all.
@@ -1336,7 +1338,8 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
                                            header=header, index=index,
                                            bold_rows=bold_rows,
                                            escape=escape,
-                                           max_rows=max_rows, max_cols=max_cols,
+                                           max_rows=max_rows,
+                                           max_cols=max_cols,
                                            show_dimensions=show_dimensions)
         formatter.to_html(classes=classes)
 
@@ -1904,7 +1907,8 @@ def _ensure_valid_index(self, value):
 
             if not isinstance(value, Series):
                 raise ValueError('Cannot set a frame with no defined index '
-                                 'and a value that cannot be converted to a Series')
+                                 'and a value that cannot be converted to a '
+                                 'Series')
             self._data.set_axis(1, value.index.copy(), check_axis=False)
 
     def _set_item(self, key, value):
@@ -4597,7 +4601,7 @@ def extract_index(data):
 
 
 def _prep_ndarray(values, copy=True):
-    if not isinstance(values, (np.ndarray,Series)):
+    if not isinstance(values, (np.ndarray, Series)):
         if len(values) == 0:
             return np.empty((0, 0), dtype=object)
 
6 changes: 3 additions & 3 deletions pandas/core/generic.py
@@ -42,8 +42,8 @@ def is_dictlike(x):
 
 def _single_replace(self, to_replace, method, inplace, limit):
     if self.ndim != 1:
-        raise TypeError('cannot replace {0} with method {1} on a {2}'.format(to_replace,
-                                                                             method,type(self).__name__))
+        raise TypeError('cannot replace {0} with method {1} on a {2}'
+                        .format(to_replace, method, type(self).__name__))
 
     orig_dtype = self.dtype
     result = self if inplace else self.copy()
@@ -2047,7 +2047,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
         # passing a single value that is scalar like
         # when value is None (GH5319), for compat
         if not is_dictlike(to_replace) and not is_dictlike(regex):
-            to_replace = [ to_replace ]
+            to_replace = [to_replace]
 
         if isinstance(to_replace, (tuple, list)):
             return _single_replace(self, to_replace, method, inplace,
17 changes: 11 additions & 6 deletions pandas/core/groupby.py
@@ -649,9 +649,9 @@ def _index_with_as_index(self, b):
         original = self.obj.index
         gp = self.grouper
         levels = chain((gp.levels[i][gp.labels[i][b]]
-                       for i in range(len(gp.groupings))),
-                      (original.get_level_values(i)[b]
-                       for i in range(original.nlevels)))
+                        for i in range(len(gp.groupings))),
+                       (original.get_level_values(i)[b]
+                        for i in range(original.nlevels)))
         new = MultiIndex.from_arrays(list(levels))
         new.names = gp.names + original.names
         return new
@@ -2161,7 +2161,6 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
         else:
             key_index = Index(keys, name=key_names[0])
 
-
         # make Nones an empty object
         if com._count_not_none(*values) != len(values):
             v = None
@@ -2170,14 +2169,20 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
                     break
             if v is None:
                 return DataFrame()
-            values = [ x if x is not None else v._constructor(**v._construct_axes_dict()) for x in values ]
+            values = [
+                x if x is not None else
+                v._constructor(**v._construct_axes_dict())
+                for x in values
+            ]
 
         v = values[0]
 
         if isinstance(v, (np.ndarray, Series)):
             if isinstance(v, Series):
                 applied_index = self.obj._get_axis(self.axis)
-                all_indexed_same = _all_indexes_same([x.index for x in values ])
+                all_indexed_same = _all_indexes_same([
+                    x.index for x in values
+                ])
                 singular_series = (len(values) == 1 and
                                    applied_index.nlevels == 1)
 
4 changes: 3 additions & 1 deletion pandas/core/indexing.py
@@ -830,7 +830,9 @@ def _reindex(keys, level=None):
 
                     # see GH5553, make sure we use the right indexer
                     new_indexer = np.arange(len(indexer))
-                    new_indexer[cur_indexer] = np.arange(len(result._get_axis(axis)))
+                    new_indexer[cur_indexer] = np.arange(
+                        len(result._get_axis(axis))
+                    )
                     new_indexer[missing_indexer] = -1
 
                     # we have a non_unique selector, need to use the original
5 changes: 4 additions & 1 deletion pandas/core/internals.py
@@ -3480,7 +3480,10 @@ def _delete_from_block(self, i, item):
         super(SingleBlockManager, self)._delete_from_block(i, item)
 
         # reset our state
-        self._block = self.blocks[0] if len(self.blocks) else make_block(np.array([],dtype=self._block.dtype),[],[])
+        self._block = (
+            self.blocks[0] if len(self.blocks) else
+            make_block(np.array([], dtype=self._block.dtype), [], [])
+        )
         self._values = self._block.values
 
     def get_slice(self, slobj, raise_on_error=False):
6 changes: 4 additions & 2 deletions pandas/core/reshape.py
@@ -786,6 +786,7 @@ def lreshape(data, groups, dropna=True, label=None):
 
     return DataFrame(mdata, columns=id_cols + pivot_cols)
 
+
 def wide_to_long(df, stubnames, i, j):
     """
     Wide panel to long format. Less flexible but more user-friendly than melt.
@@ -848,8 +849,8 @@ def get_var_names(df, regex):
 
     def melt_stub(df, stub, i, j):
         varnames = get_var_names(df, "^"+stub)
-        newdf = melt(df, id_vars=i, value_vars=varnames,
-                     value_name=stub, var_name=j)
+        newdf = melt(df, id_vars=i, value_vars=varnames, value_name=stub,
+                     var_name=j)
         newdf_j = newdf[j].str.replace(stub, "")
         try:
             newdf_j = newdf_j.astype(int)
@@ -870,6 +871,7 @@ def melt_stub(df, stub, i, j):
     newdf = newdf.merge(new, how="outer", on=id_vars + [j], copy=False)
     return newdf.set_index([i, j])
 
+
 def convert_dummies(data, cat_variables, prefix_sep='_'):
     """
     Compute DataFrame with specified columns converted to dummy variables (0 /
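Two of the hunks above, like the pandas/io hunks that follow, only add the second blank line that PEP 8 (E302) expects between top-level definitions. A tiny illustration with hypothetical function names:

    def first_top_level_function():
        return "a single blank line before the next def would trigger E302"


    def second_top_level_function():
        return "two blank lines separate top-level definitions"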
1 change: 1 addition & 0 deletions pandas/io/auth.py
@@ -117,6 +117,7 @@ def init_service(http):
     """
     return gapi.build('analytics', 'v3', http=http)
 
+
 def reset_default_token_store():
     import os
     os.remove(DEFAULT_TOKEN_FILE)
9 changes: 6 additions & 3 deletions pandas/io/clipboard.py
@@ -2,6 +2,7 @@
 from pandas import compat, get_option, DataFrame
 from pandas.compat import StringIO
 
+
 def read_clipboard(**kwargs):  # pragma: no cover
     """
     Read text from clipboard and pass to read_table. See read_table for the
@@ -20,7 +21,10 @@ def read_clipboard(**kwargs):  # pragma: no cover
     # try to decode (if needed on PY3)
     if compat.PY3:
         try:
-            text = compat.bytes_to_str(text,encoding=kwargs.get('encoding') or get_option('display.encoding'))
+            text = compat.bytes_to_str(
+                text, encoding=(kwargs.get('encoding') or
+                                get_option('display.encoding'))
+            )
         except:
             pass
     return read_table(StringIO(text), **kwargs)
@@ -58,7 +62,7 @@ def to_clipboard(obj, excel=None, sep=None, **kwargs):  # pragma: no cover
             if sep is None:
                 sep = '\t'
             buf = StringIO()
-            obj.to_csv(buf,sep=sep, **kwargs)
+            obj.to_csv(buf, sep=sep, **kwargs)
             clipboard_set(buf.getvalue())
             return
         except:
@@ -70,4 +74,3 @@ def to_clipboard(obj, excel=None, sep=None, **kwargs):  # pragma: no cover
     else:
         objstr = str(obj)
     clipboard_set(objstr)
-
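The read_clipboard hunk above keeps the existing encoding fallback, just reformatted: an explicit 'encoding' keyword wins, otherwise the 'or' falls through to the display.encoding option. A standalone sketch of the same fallback logic (the helper name and default are made up for illustration):

    def pick_encoding(kwargs, default='utf-8'):
        # 'or' short-circuits: a missing or None 'encoding' falls back to the
        # default, mirroring kwargs.get('encoding') or get_option(...).
        return kwargs.get('encoding') or default

    print(pick_encoding({}))                        # utf-8
    print(pick_encoding({'encoding': 'latin-1'}))   # latin-1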