Skip to content

CLN: catch Exception in fewer places, assorted cleanups #28276

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
2 commits merged into the base branch on Sep 4, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion ci/code_checks.sh
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ if [[ -z "$CHECK" || "$CHECK" == "code" ]]; then
import sys
import pandas

blacklist = {'bs4', 'gcsfs', 'html5lib', 'ipython', 'jinja2' 'hypothesis',
blacklist = {'bs4', 'gcsfs', 'html5lib', 'ipython', 'jinja2', 'hypothesis',
'lxml', 'numexpr', 'openpyxl', 'py', 'pytest', 's3fs', 'scipy',
'tables', 'xlrd', 'xlsxwriter', 'xlwt'}
mods = blacklist & set(m.split('.')[0] for m in sys.modules)
Expand Down
8 changes: 4 additions & 4 deletions pandas/_libs/lib.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,7 @@ def fast_unique_multiple(list arrays, sort: bool=True):
if sort is None:
try:
uniques.sort()
except Exception:
except TypeError:
# TODO: RuntimeWarning?
pass

Expand Down Expand Up @@ -264,7 +264,7 @@ def fast_unique_multiple_list(lists: list, sort: bool=True) -> list:
if sort:
try:
uniques.sort()
except Exception:
except TypeError:
pass

return uniques
Expand Down Expand Up @@ -304,7 +304,7 @@ def fast_unique_multiple_list_gen(object gen, bint sort=True):
if sort:
try:
uniques.sort()
except Exception:
except TypeError:
pass

return uniques
Expand Down Expand Up @@ -1410,7 +1410,7 @@ def infer_datetimelike_array(arr: object) -> object:
try:
array_to_datetime(objs, errors='raise')
return 'datetime'
except:
except (ValueError, TypeError):
pass

# we are *not* going to infer from strings
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,7 @@ def try_sort(iterable):
listed = list(iterable)
try:
return sorted(listed)
except Exception:
except TypeError:
return listed


Expand Down
6 changes: 4 additions & 2 deletions pandas/core/groupby/grouper.py
Original file line number Diff line number Diff line change
Expand Up @@ -583,9 +583,11 @@ def _get_grouper(
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not _is_label_like(key):
items = obj._data.items
try:
obj._data.items.get_loc(key)
except Exception:
items.get_loc(key)
except (KeyError, TypeError):
# TypeError shows up here if we pass e.g. Int64Index
return False

return True
Expand Down
30 changes: 5 additions & 25 deletions pandas/core/groupby/ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -615,14 +615,9 @@ def _aggregate(
is_datetimelike,
min_count=-1,
):
if values.ndim > 3:
if values.ndim > 2:
# punting for now
raise NotImplementedError("number of dimensions is currently limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):

chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids, min_count)
raise NotImplementedError("number of dimensions is currently limited to 2")
else:
agg_func(result, counts, values, comp_ids, min_count)

Expand All @@ -640,20 +635,9 @@ def _transform(
):

comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
if values.ndim > 2:
# punting for now
raise NotImplementedError("number of dimensions is currently limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):

transform_func(
result[:, :, i],
values,
comp_ids,
ngroups,
is_datetimelike,
**kwargs
)
raise NotImplementedError("number of dimensions is currently limited to 2")
else:
transform_func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs)

Expand Down Expand Up @@ -932,11 +916,7 @@ def _chop(self, sdata, slice_obj):
class FrameSplitter(DataSplitter):
def fast_apply(self, f, names):
# must return keys::list, values::list, mutated::bool
try:
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
except Exception:
# fails when all -1
return [], True
starts, ends = lib.generate_slices(self.slabels, self.ngroups)

sdata = self._get_sorted_data()
return libreduction.apply_frame_axis0(sdata, f, names, starts, ends)
Expand Down
7 changes: 2 additions & 5 deletions pandas/core/ops/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -698,10 +698,7 @@ def na_op(x, y):

return result

def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
def wrapper(self, other):

res_name = get_op_result_name(self, other)
other = lib.item_from_zerodim(other)
Expand Down Expand Up @@ -1104,7 +1101,7 @@ def f(self, other):
# straight boolean comparisons we want to allow all columns
# (regardless of dtype to pass thru) See #4537 for discussion.
res = self._combine_const(other, func)
return res.fillna(True).astype(bool)
return res

f.__name__ = op_name

Expand Down
1 change: 1 addition & 0 deletions pandas/tests/test_downstream.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,7 @@ def _getitem_tuple(self, tup):

# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
@pytest.mark.filterwarnings("ignore:RangeIndex.* is deprecated:DeprecationWarning")
def test_pyarrow(df):

pyarrow = import_module("pyarrow") # noqa
Expand Down