Skip to content

ENH/CLN: give all AssertionErrors and nose.SkipTest raises an informative message #3730

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Sep 28, 2013
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 26 additions & 10 deletions pandas/core/panel.py
Original file line number Diff line number Diff line change
Expand Up @@ -528,9 +528,14 @@ def get_value(self, *args):
-------
value : scalar value
"""
nargs = len(args)
nreq = self._AXIS_LEN

# require an arg for each axis
if not ((len(args) == self._AXIS_LEN)):
raise AssertionError()
if nargs != nreq:
raise TypeError('There must be an argument for each axis, you gave'
' {0} args, but {1} are required'.format(nargs,
nreq))

# hm, two layers to the onion
frame = self._get_item_cache(args[0])
Expand All @@ -554,8 +559,13 @@ def set_value(self, *args):
otherwise a new object
"""
# require an arg for each axis and the value
if not ((len(args) == self._AXIS_LEN + 1)):
raise AssertionError()
nargs = len(args)
nreq = self._AXIS_LEN + 1

if nargs != nreq:
raise TypeError('There must be an argument for each axis plus the '
'value provided, you gave {0} args, but {1} are '
'required'.format(nargs, nreq))

try:
frame = self._get_item_cache(args[0])
Expand Down Expand Up @@ -592,8 +602,10 @@ def __setitem__(self, key, value):
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
if not ((value.shape == shape[1:])):
raise AssertionError()
if value.shape != shape[1:]:
raise ValueError('shape of value must be {0}, shape of given '
'object was {1}'.format(shape[1:],
value.shape))
mat = np.asarray(value)
elif np.isscalar(value):
dtype, value = _infer_dtype_from_scalar(value)
Expand Down Expand Up @@ -1144,8 +1156,9 @@ def _extract_axes(self, data, axes, **kwargs):
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
return dict([(self._AXIS_SLICEMAP[i], a) for i, a
in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)])
return dict([(self._AXIS_SLICEMAP[i], a)
for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN -
len(axes):], axes)])

@staticmethod
def _prep_ndarray(self, values, copy=True):
Expand All @@ -1157,8 +1170,11 @@ def _prep_ndarray(self, values, copy=True):
else:
if copy:
values = values.copy()
if not ((values.ndim == self._AXIS_LEN)):
raise AssertionError()
if values.ndim != self._AXIS_LEN:
raise ValueError("The number of dimensions required is {0}, "
"but the number of dimensions of the "
"ndarray given was {1}".format(self._AXIS_LEN,
values.ndim))
return values

@staticmethod
Expand Down
18 changes: 11 additions & 7 deletions pandas/core/series.py
Original file line number Diff line number Diff line change
Expand Up @@ -1299,9 +1299,6 @@ def __unicode__(self):
dtype=True)
else:
result = u('Series([], dtype: %s)') % self.dtype

if not (isinstance(result, compat.text_type)):
raise AssertionError()
return result

def _tidy_repr(self, max_vals=20):
Expand Down Expand Up @@ -1377,7 +1374,9 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None,

# catch contract violations
if not isinstance(the_repr, compat.text_type):
raise AssertionError("expected unicode string")
raise AssertionError("result must be of type unicode, type"
" of result is {0!r}"
"".format(the_repr.__class__.__name__))

if buf is None:
return the_repr
Expand All @@ -1397,11 +1396,16 @@ def _get_repr(
"""

formatter = fmt.SeriesFormatter(self, name=name, header=print_header,
length=length, dtype=dtype, na_rep=na_rep,
length=length, dtype=dtype,
na_rep=na_rep,
float_format=float_format)
result = formatter.to_string()
if not (isinstance(result, compat.text_type)):
raise AssertionError()

# TODO: following check prob. not neces.
if not isinstance(result, compat.text_type):
raise AssertionError("result must be of type unicode, type"
" of result is {0!r}"
"".format(result.__class__.__name__))
return result

def __iter__(self):
Expand Down
18 changes: 11 additions & 7 deletions pandas/io/date_converters.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
"""This module is designed for community supported date conversion functions"""
from pandas.compat import range
from pandas.compat import range, map
import numpy as np
import pandas.lib as lib

Expand Down Expand Up @@ -47,12 +47,16 @@ def _maybe_cast(arr):


def _check_columns(cols):
if not ((len(cols) > 0)):
raise AssertionError()
if not len(cols):
raise AssertionError("There must be at least 1 column")
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This looks more like a ValueError or TypeError than an AssertionError...depends on what calls it.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I can't get this to trigger from user facing code ... it's only called when you pass both a custom date_parser and parse_dates is a list. Leaving this and the one below as is.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

sounds good. Might be nice to add a # pragma: no cover so it's clear why it's not tested. :)

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

let me run coverage and see what comes up

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

okay, not a big deal at all. I have no opinion whether it's necessary to unittest exceptions on an internal function.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yeah i abandoned that


N = len(cols[0])
for c in cols[1:]:
if not ((len(c) == N)):
raise AssertionError()
head, tail = cols[0], cols[1:]

N = len(head)

for i, n in enumerate(map(len, tail)):
if n != N:
raise AssertionError('All columns must have the same length: {0}; '
'column {1} has length {2}'.format(N, i, n))

return N
31 changes: 20 additions & 11 deletions pandas/io/parsers.py
Original file line number Diff line number Diff line change
Expand Up @@ -552,8 +552,10 @@ def _clean_options(self, options, engine):

# type conversion-related
if converters is not None:
if not (isinstance(converters, dict)):
raise AssertionError()
if not isinstance(converters, dict):
raise TypeError('Type converters must be a dict or'
' subclass, input was '
'a {0!r}'.format(type(converters).__name__))
else:
converters = {}

Expand Down Expand Up @@ -631,6 +633,7 @@ def get_chunk(self, size=None):
size = self.chunksize
return self.read(nrows=size)


def _is_index_col(col):
return col is not None and col is not False

Expand Down Expand Up @@ -1174,6 +1177,7 @@ def TextParser(*args, **kwds):
kwds['engine'] = 'python'
return TextFileReader(*args, **kwds)


# delimiter=None, dialect=None, names=None, header=0,
# index_col=None,
# na_values=None,
Expand Down Expand Up @@ -1653,8 +1657,8 @@ def _rows_to_cols(self, content):
if self._implicit_index:
col_len += len(self.index_col)

if not ((self.skip_footer >= 0)):
raise AssertionError()
if self.skip_footer < 0:
raise ValueError('skip footer cannot be negative')

if col_len != zip_len and self.index_col is not False:
i = 0
Expand Down Expand Up @@ -1883,6 +1887,7 @@ def _clean_na_values(na_values, keep_default_na=True):

return na_values, na_fvalues


def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
return None, columns, index_col
Expand Down Expand Up @@ -1941,6 +1946,7 @@ def _floatify_na_values(na_values):
pass
return result


def _stringify_na_values(na_values):
""" return a stringified and numeric for these values """
result = []
Expand All @@ -1965,6 +1971,7 @@ def _stringify_na_values(na_values):
pass
return set(result)


def _get_na_values(col, na_values, na_fvalues):
if isinstance(na_values, dict):
if col in na_values:
Expand Down Expand Up @@ -2014,15 +2021,17 @@ def __init__(self, f, colspecs, filler, thousands=None, encoding=None):
encoding = get_option('display.encoding')
self.encoding = encoding

if not ( isinstance(colspecs, (tuple, list))):
raise AssertionError()
if not isinstance(colspecs, (tuple, list)):
raise TypeError("column specifications must be a list or tuple, "
"input was a %r" % type(colspecs).__name__)

for colspec in colspecs:
if not ( isinstance(colspec, (tuple, list)) and
len(colspec) == 2 and
isinstance(colspec[0], int) and
isinstance(colspec[1], int) ):
raise AssertionError()
if not (isinstance(colspec, (tuple, list)) and
len(colspec) == 2 and
isinstance(colspec[0], int) and
isinstance(colspec[1], int)):
raise TypeError('Each column specification must be '
'2 element tuple or list of integers')

def next(self):
line = next(self.f)
Expand Down
22 changes: 10 additions & 12 deletions pandas/io/tests/test_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,13 @@
from numpy.testing import assert_array_equal


def _skip_if_no_lxml():
    """Raise ``nose.SkipTest`` unless the optional ``lxml`` package imports.

    Shared guard for test classes whose fixtures need lxml-backed HTML
    parsing; called from ``setUpClass`` so the whole class is skipped at once.
    """
    try:
        import lxml  # noqa: F401 -- imported only to probe availability
    except ImportError:
        raise nose.SkipTest("no lxml")


def assert_n_failed_equals_n_null_columns(wngs, obj, cls=SymbolWarning):
all_nan_cols = pd.Series(dict((k, pd.isnull(v).all()) for k, v in
compat.iteritems(obj)))
Expand Down Expand Up @@ -88,10 +95,7 @@ def test_get_multi2(self):
class TestYahoo(unittest.TestCase):
@classmethod
def setUpClass(cls):
try:
import lxml
except ImportError:
raise nose.SkipTest
_skip_if_no_lxml()

@network
def test_yahoo(self):
Expand Down Expand Up @@ -210,10 +214,7 @@ def test_get_date_ret_index(self):
class TestYahooOptions(unittest.TestCase):
@classmethod
def setUpClass(cls):
try:
import lxml
except ImportError:
raise nose.SkipTest
_skip_if_no_lxml()

# aapl has monthlies
cls.aapl = web.Options('aapl', 'yahoo')
Expand Down Expand Up @@ -272,10 +273,7 @@ def test_get_put_data(self):
class TestOptionsWarnings(unittest.TestCase):
@classmethod
def setUpClass(cls):
try:
import lxml
except ImportError:
raise nose.SkipTest
_skip_if_no_lxml()

with assert_produces_warning(FutureWarning):
cls.aapl = web.Options('aapl')
Expand Down
9 changes: 5 additions & 4 deletions pandas/io/tests/test_ga.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from pandas.io.auth import AuthenticationConfigError, reset_token_store
from pandas.io import auth
except ImportError:
raise nose.SkipTest
raise nose.SkipTest("need httplib2 and auth libs")

class TestGoogle(unittest.TestCase):

Expand Down Expand Up @@ -68,7 +68,7 @@ def test_getdata(self):
assert_frame_equal(df, df2)

except AuthenticationConfigError:
raise nose.SkipTest
raise nose.SkipTest("authentication error")

@slow
@with_connectivity_check("http://www.google.com")
Expand Down Expand Up @@ -96,7 +96,7 @@ def test_iterator(self):
assert (df2.index > df1.index).all()

except AuthenticationConfigError:
raise nose.SkipTest
raise nose.SkipTest("authentication error")

@slow
@with_connectivity_check("http://www.google.com")
Expand Down Expand Up @@ -150,7 +150,8 @@ def test_segment(self):
assert 'pageviewsPerVisit' in df

except AuthenticationConfigError:
raise nose.SkipTest
raise nose.SkipTest("authentication error")


if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
Expand Down
18 changes: 6 additions & 12 deletions pandas/io/tests/test_json/test_pandas.py
Original file line number Diff line number Diff line change
Expand Up @@ -567,17 +567,11 @@ def test_round_trip_exception_(self):
assert_frame_equal(result.reindex(index=df.index,columns=df.columns),df)

@network
@slow
def test_url(self):
try:

url = 'https://api.github.com/repos/pydata/pandas/issues?per_page=5'
result = read_json(url,convert_dates=True)
for c in ['created_at','closed_at','updated_at']:
self.assert_(result[c].dtype == 'datetime64[ns]')

url = 'http://search.twitter.com/search.json?q=pandas%20python'
result = read_json(url)
url = 'https://api.github.com/repos/pydata/pandas/issues?per_page=5'
result = read_json(url,convert_dates=True)
for c in ['created_at','closed_at','updated_at']:
self.assert_(result[c].dtype == 'datetime64[ns]')

except URLError:
raise nose.SkipTest
url = 'http://search.twitter.com/search.json?q=pandas%20python'
result = read_json(url)
5 changes: 3 additions & 2 deletions pandas/io/tests/test_json/test_ujson.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
def _skip_if_python_ver(skip_major, skip_minor=None):
major, minor = sys.version_info[:2]
if major == skip_major and (skip_minor is None or minor == skip_minor):
raise nose.SkipTest
raise nose.SkipTest("skipping Python version %d.%d" % (major, minor))

json_unicode = (json.dumps if sys.version_info[0] >= 3
else partial(json.dumps, encoding="utf-8"))
Expand Down Expand Up @@ -363,7 +363,8 @@ def test_nat(self):
def test_npy_nat(self):
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < '1.7.0':
raise nose.SkipTest
raise nose.SkipTest("numpy version < 1.7.0, is "
"{0}".format(np.__version__))

input = np.datetime64('NaT')
assert ujson.encode(input) == 'null', "Expected null"
Expand Down
Loading