From 80fc82eb56d8bf789f908dc1d45b83e33b5bd456 Mon Sep 17 00:00:00 2001
From: DSM
Date: Mon, 16 Jun 2014 22:20:38 -0400
Subject: [PATCH] Fix indentation: switch to four spaces. (GH6643)

---
 doc/make.py                           |   8 +-
 pandas/computation/tests/test_eval.py |   2 +-
 pandas/core/format.py                 |  24 +-
 pandas/core/generic.py                |   8 +-
 pandas/core/groupby.py                |   6 +-
 pandas/core/indexing.py               |  16 +-
 pandas/core/internals.py              |   4 +-
 pandas/core/series.py                 |   2 +-
 pandas/io/excel.py                    |   8 +-
 pandas/tests/test_common.py           |   2 +-
 pandas/tests/test_frame.py            | 362 +++++++++++++-------------
 pandas/tests/test_generic.py          |   4 +-
 pandas/tseries/converter.py           |   2 +-
 pandas/util/testing.py                |   4 +-
 14 files changed, 226 insertions(+), 226 deletions(-)

diff --git a/doc/make.py b/doc/make.py
index 8a92654d1378b..4367ac91396bb 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -77,10 +77,10 @@ def upload_prev(ver, doc_root='./'):
         raise SystemExit('Upload PDF to %s from %s failed' % (ver, doc_root))
 
 def build_pandas():
-        os.chdir('..')
-        os.system('python setup.py clean')
-        os.system('python setup.py build_ext --inplace')
-        os.chdir('doc')
+    os.chdir('..')
+    os.system('python setup.py clean')
+    os.system('python setup.py build_ext --inplace')
+    os.chdir('doc')
 
 def build_prev(ver):
     if os.system('git checkout v%s' % ver) != 1:
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index e22b6218a2227..5489893df06b9 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -819,7 +819,7 @@ def testit(r_idx_type, c_idx_type, index_name):
         with warnings.catch_warnings(record=True):
             warnings.simplefilter('always', RuntimeWarning)
             for r_idx_type, c_idx_type, index_name in args:
-                    testit(r_idx_type, c_idx_type, index_name)
+                testit(r_idx_type, c_idx_type, index_name)
 
     def test_basic_frame_series_alignment(self):
         for engine, parser in ENGINES_PARSERS:
diff --git a/pandas/core/format.py b/pandas/core/format.py
index c2f439877ca00..b11b2e7270271 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -539,18 +539,18 @@ def write(buf, frame, column_format, strcols, longtable=False):
             buf.write('\\bottomrule\n')
             buf.write('\\endlastfoot\n')
         if self.escape:
-                crow = [(x.replace('\\', '\\textbackslash') # escape backslashes first
-                         .replace('_', '\\_')
-                         .replace('%', '\\%')
-                         .replace('$', '\\$')
-                         .replace('#', '\\#')
-                         .replace('{', '\\{')
-                         .replace('}', '\\}')
-                         .replace('~', '\\textasciitilde')
-                         .replace('^', '\\textasciicircum')
-                         .replace('&', '\\&') if x else '{}') for x in row]
+            crow = [(x.replace('\\', '\\textbackslash') # escape backslashes first
+                     .replace('_', '\\_')
+                     .replace('%', '\\%')
+                     .replace('$', '\\$')
+                     .replace('#', '\\#')
+                     .replace('{', '\\{')
+                     .replace('}', '\\}')
+                     .replace('~', '\\textasciitilde')
+                     .replace('^', '\\textasciicircum')
+                     .replace('&', '\\&') if x else '{}') for x in row]
         else:
-                crow = [x if x else '{}' for x in row]
+            crow = [x if x else '{}' for x in row]
 
         buf.write(' & '.join(crow))
         buf.write(' \\\\\n')
@@ -2104,7 +2104,7 @@ def detect_console_encoding():
 
     # when all else fails. this will usually be "ascii"
     if not encoding or 'ascii' in encoding.lower():
-            encoding = sys.getdefaultencoding()
+        encoding = sys.getdefaultencoding()
 
     # GH3360, save the reported defencoding at import time
    # MPL backends may change it. Make available for debugging.
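The first format.py hunk above re-indents a replace chain that is easier to read outside the diff markers: each LaTeX special character is substituted in turn, and backslashes are handled first so that the backslashes introduced by later substitutions are not themselves re-escaped. A minimal standalone sketch of the idiom (escape_latex and ESCAPES are illustrative names, not pandas API):

    ESCAPES = [('\\', '\\textbackslash'), ('_', '\\_'), ('%', '\\%'),
               ('$', '\\$'), ('#', '\\#'), ('{', '\\{'), ('}', '\\}'),
               ('~', '\\textasciitilde'), ('^', '\\textasciicircum'),
               ('&', '\\&')]

    def escape_latex(cell):
        # backslashes first, exactly as in the chain above
        for char, repl in ESCAPES:
            cell = cell.replace(char, repl)
        return cell if cell else '{}'  # empty cells become an empty LaTeX group

    print(escape_latex('50% of a_b'))  # 50\% of a\_b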
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index f486d48b58651..cf9ff8abff3ef 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3598,10 +3598,10 @@ def pretty_name(x):
         return '%.1f%%' % x
 
     def describe_numeric_1d(series, percentiles):
-            return ([series.count(), series.mean(), series.std(),
-                     series.min()] +
-                    [series.quantile(x) for x in percentiles] +
-                    [series.max()])
+        return ([series.count(), series.mean(), series.std(),
+                 series.min()] +
+                [series.quantile(x) for x in percentiles] +
+                [series.max()])
 
     def describe_categorical_1d(data):
         names = ['count', 'unique']
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index dc8b7f3bccc2a..4d3927428cef2 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2218,9 +2218,9 @@ def transform(self, func, *args, **kwargs):
 
             # may need to astype
             try:
-                    common_type = np.common_type(np.array(res), result)
-                    if common_type != result.dtype:
-                        result = result.astype(common_type)
+                common_type = np.common_type(np.array(res), result)
+                if common_type != result.dtype:
+                    result = result.astype(common_type)
             except:
                 pass
 
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index c4550a18492cb..bfff85ac4712c 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1131,13 +1131,13 @@ def _getitem_axis(self, key, axis=0, validate_iterable=False):
         raise NotImplementedError()
 
     def _getbool_axis(self, key, axis=0):
-            labels = self.obj._get_axis(axis)
-            key = _check_bool_indexer(labels, key)
-            inds, = key.nonzero()
-            try:
-                return self.obj.take(inds, axis=axis, convert=False)
-            except Exception as detail:
-                raise self._exception(detail)
+        labels = self.obj._get_axis(axis)
+        key = _check_bool_indexer(labels, key)
+        inds, = key.nonzero()
+        try:
+            return self.obj.take(inds, axis=axis, convert=False)
+        except Exception as detail:
+            raise self._exception(detail)
 
     def _get_slice_axis(self, slice_obj, axis=0):
         """ this is pretty simple as we just have to deal with labels """
@@ -1193,7 +1193,7 @@ def _has_valid_type(self, key, axis):
                 )
 
         elif com._is_bool_indexer(key):
-                return True
+            return True
 
         elif _is_list_like(key):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 105c0c3985cc1..75ec53c95869a 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -824,7 +824,7 @@ def eval(self, func, other, raise_on_error=True, try_cast=False):
         is_transposed = False
         if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
             if values.ndim != other.ndim:
-                    is_transposed = True
+                is_transposed = True
             else:
                 if values.shape == other.shape[::-1]:
                     is_transposed = True
@@ -2981,7 +2981,7 @@ def _is_indexed_like(self, other):
     def equals(self, other):
         self_axes, other_axes = self.axes, other.axes
         if len(self_axes) != len(other_axes):
-                return False
+            return False
         if not all (ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
             return False
         self._consolidate_inplace()
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b66b74a011c4d..bdad1f9e5561b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -672,7 +672,7 @@ def _set_with(self, key, value):
             else:
                 return self._set_values(key, value)
         elif key_type == 'boolean':
-                self._set_values(key.astype(np.bool_), value)
+            self._set_values(key.astype(np.bool_), value)
         else:
             self._set_labels(key, value)
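The groupby.transform hunk above re-indents a small upcasting step worth seeing in isolation: np.common_type finds a dtype that both the function's result and the preallocated output can safely be cast to. A standalone sketch with invented example values (res and result are stand-ins for the variables in the hunk):

    import numpy as np

    res = np.array([1.5, 2.5])   # what the transform function returned (float64)
    result = np.array([1, 2])    # preallocated output (int64)

    common_type = np.common_type(res, result)
    if common_type != result.dtype:
        # upcast so the float values are not truncated into the int array
        result = result.astype(common_type)
    print(result.dtype)  # float64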
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 6372d83f50051..67107ee20b336 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -291,10 +291,10 @@ def _parse_excel(self, sheetname=0, header=0, skiprows=None, skip_footer=0,
                         year = (value.timetuple())[0:3]
                         if ((not epoch1904 and year == (1899, 12, 31)) or
                                 (epoch1904 and year == (1904, 1, 1))):
-                                value = datetime.time(value.hour,
-                                                      value.minute,
-                                                      value.second,
-                                                      value.microsecond)
+                            value = datetime.time(value.hour,
+                                                  value.minute,
+                                                  value.second,
+                                                  value.microsecond)
                         else:
                             # Use the xlrd <= 0.9.2 date handling.
                             dt = xldate.xldate_as_tuple(value, epoch1904)
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 58338a47d9465..a52be0ee6a82e 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -75,7 +75,7 @@ def test_isnull():
     # series
     for s in [tm.makeFloatSeries(),tm.makeStringSeries(),
               tm.makeObjectSeries(),tm.makeTimeSeries(),tm.makePeriodSeries()]:
-            assert(isinstance(isnull(s), Series))
+        assert(isinstance(isnull(s), Series))
 
     # frame
     for df in [tm.makeTimeDataFrame(),tm.makePeriodFrame(),tm.makeMixedDataFrame()]:
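The excel.py hunk above handles an xlrd quirk: a time-only cell comes back as a datetime sitting on the workbook's epoch date, 1899-12-31 (or 1904-01-01 for 1904-epoch workbooks), so the placeholder date is stripped off and only the time of day is kept. A condensed sketch (maybe_time is an invented helper name, not pandas API):

    import datetime

    def maybe_time(value, epoch1904=False):
        epoch = (1904, 1, 1) if epoch1904 else (1899, 12, 31)
        if value.timetuple()[0:3] == epoch:
            # only the time of day is meaningful; drop the placeholder date
            return datetime.time(value.hour, value.minute,
                                 value.second, value.microsecond)
        return value

    print(maybe_time(datetime.datetime(1899, 12, 31, 9, 30)))  # 09:30:00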
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 4ebf3dd99a105..ea3dafa07715b 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -5563,63 +5563,63 @@ def test_to_csv_from_csv(self):
 
         pname = '__tmp_to_csv_from_csv__'
         with ensure_clean(pname) as path:
-                self.frame['A'][:5] = nan
-
-                self.frame.to_csv(path)
-                self.frame.to_csv(path, columns=['A', 'B'])
-                self.frame.to_csv(path, header=False)
-                self.frame.to_csv(path, index=False)
-
-                # test roundtrip
-                self.tsframe.to_csv(path)
-                recons = DataFrame.from_csv(path)
-
-                assert_frame_equal(self.tsframe, recons)
-
-                self.tsframe.to_csv(path, index_label='index')
-                recons = DataFrame.from_csv(path, index_col=None)
-                assert(len(recons.columns) == len(self.tsframe.columns) + 1)
-
-                # no index
-                self.tsframe.to_csv(path, index=False)
-                recons = DataFrame.from_csv(path, index_col=None)
-                assert_almost_equal(self.tsframe.values, recons.values)
-
-                # corner case
-                dm = DataFrame({'s1': Series(lrange(3), lrange(3)),
-                                's2': Series(lrange(2), lrange(2))})
-                dm.to_csv(path)
-                recons = DataFrame.from_csv(path)
-                assert_frame_equal(dm, recons)
+            self.frame['A'][:5] = nan
+
+            self.frame.to_csv(path)
+            self.frame.to_csv(path, columns=['A', 'B'])
+            self.frame.to_csv(path, header=False)
+            self.frame.to_csv(path, index=False)
+
+            # test roundtrip
+            self.tsframe.to_csv(path)
+            recons = DataFrame.from_csv(path)
+
+            assert_frame_equal(self.tsframe, recons)
+
+            self.tsframe.to_csv(path, index_label='index')
+            recons = DataFrame.from_csv(path, index_col=None)
+            assert(len(recons.columns) == len(self.tsframe.columns) + 1)
+
+            # no index
+            self.tsframe.to_csv(path, index=False)
+            recons = DataFrame.from_csv(path, index_col=None)
+            assert_almost_equal(self.tsframe.values, recons.values)
+
+            # corner case
+            dm = DataFrame({'s1': Series(lrange(3), lrange(3)),
+                            's2': Series(lrange(2), lrange(2))})
+            dm.to_csv(path)
+            recons = DataFrame.from_csv(path)
+            assert_frame_equal(dm, recons)
 
         with ensure_clean(pname) as path:
-                # duplicate index
-                df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],
-                               columns=['x', 'y', 'z'])
-                df.to_csv(path)
-                result = DataFrame.from_csv(path)
-                assert_frame_equal(result, df)
-
-                midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])
-                df = DataFrame(np.random.randn(3, 3), index=midx,
-                               columns=['x', 'y', 'z'])
-                df.to_csv(path)
-                result = DataFrame.from_csv(path, index_col=[0, 1, 2],
-                                             parse_dates=False)
-                assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ?
-
-                # column aliases
-                col_aliases = Index(['AA', 'X', 'Y', 'Z'])
-                self.frame2.to_csv(path, header=col_aliases)
-                rs = DataFrame.from_csv(path)
-                xp = self.frame2.copy()
-                xp.columns = col_aliases
-
-                assert_frame_equal(xp, rs)
-
-                self.assertRaises(ValueError, self.frame2.to_csv, path,
-                                  header=['AA', 'X'])
+            # duplicate index
+            df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],
+                           columns=['x', 'y', 'z'])
+            df.to_csv(path)
+            result = DataFrame.from_csv(path)
+            assert_frame_equal(result, df)
+
+            midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])
+            df = DataFrame(np.random.randn(3, 3), index=midx,
+                           columns=['x', 'y', 'z'])
+            df.to_csv(path)
+            result = DataFrame.from_csv(path, index_col=[0, 1, 2],
+                                         parse_dates=False)
+            assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ?
+
+            # column aliases
+            col_aliases = Index(['AA', 'X', 'Y', 'Z'])
+            self.frame2.to_csv(path, header=col_aliases)
+            rs = DataFrame.from_csv(path)
+            xp = self.frame2.copy()
+            xp.columns = col_aliases
+
+            assert_frame_equal(xp, rs)
+
+            self.assertRaises(ValueError, self.frame2.to_csv, path,
+                              header=['AA', 'X'])
 
         with ensure_clean(pname) as path:
             import pandas as pd
@@ -5682,12 +5682,12 @@ def _check_df(df,cols=None):
                 rs_c.columns = df.columns.take(indexer)
 
                 for c in cols:
-                        obj_df = df[c]
-                        obj_rs = rs_c[c]
-                        if isinstance(obj_df,Series):
-                            assert_series_equal(obj_df,obj_rs)
-                        else:
-                            assert_frame_equal(obj_df,obj_rs,check_names=False)
+                    obj_df = df[c]
+                    obj_rs = rs_c[c]
+                    if isinstance(obj_df,Series):
+                        assert_series_equal(obj_df,obj_rs)
+                    else:
+                        assert_frame_equal(obj_df,obj_rs,check_names=False)
 
             # wrote in the same order
             else:
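Nearly every test_frame.py hunk in this patch follows the same write/read-back/compare pattern, so it is worth seeing once without the diff noise. A condensed sketch against the pandas 0.14-era API used here (DataFrame.from_csv was later superseded by read_csv; ensure_clean yields a temporary path that is removed on exit):

    import numpy as np
    from pandas import DataFrame
    from pandas.util.testing import ensure_clean, assert_frame_equal

    df = DataFrame(np.random.randn(3, 3), columns=['x', 'y', 'z'])
    with ensure_clean('__roundtrip__.csv') as path:
        df.to_csv(path)                      # write
        recons = DataFrame.from_csv(path)    # read back
        assert_frame_equal(df, recons)       # compare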
@@ -5713,80 +5713,80 @@ def test_to_csv_moar(self):
 
         def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None,
                      dupe_col=False):
-                kwargs = dict(parse_dates=False)
-                if cnlvl:
-                    if rnlvl is not None:
-                        kwargs['index_col'] = lrange(rnlvl)
-                    kwargs['header'] = lrange(cnlvl)
-                    with ensure_clean(path) as path:
-                        df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False)
-                        recons = DataFrame.from_csv(path,tupleize_cols=False,**kwargs)
-                else:
-                    kwargs['header'] = 0
-                    with ensure_clean(path) as path:
-                        df.to_csv(path,encoding='utf8',chunksize=chunksize)
-                        recons = DataFrame.from_csv(path,**kwargs)
-
-                def _to_uni(x):
-                    if not isinstance(x, compat.text_type):
-                        return x.decode('utf8')
-                    return x
-                if dupe_col:
-                    # read_Csv disambiguates the columns by
-                    # labeling them dupe.1,dupe.2, etc'. monkey patch columns
-                    recons.columns = df.columns
-                if rnlvl and not cnlvl:
-                    delta_lvl = [recons.icol(i).values for i in range(rnlvl-1)]
-                    ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl)
-                    recons.index = ix
-                    recons = recons.iloc[:,rnlvl-1:]
-
-                type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O')
-                if r_dtype:
-                    if r_dtype == 'u': # unicode
-                        r_dtype='O'
-                        recons.index = np.array(lmap(_to_uni,recons.index),
-                                                dtype=r_dtype)
-                        df.index = np.array(lmap(_to_uni,df.index),dtype=r_dtype)
-                    if r_dtype == 'dt': # unicode
-                        r_dtype='O'
-                        recons.index = np.array(lmap(Timestamp,recons.index),
-                                                dtype=r_dtype)
-                        df.index = np.array(lmap(Timestamp,df.index),dtype=r_dtype)
-                    elif r_dtype == 'p':
-                        r_dtype='O'
-                        recons.index = np.array(list(map(Timestamp,
-                                                         recons.index.to_datetime())),
-                                                dtype=r_dtype)
-                        df.index = np.array(list(map(Timestamp,
-                                                     df.index.to_datetime())),
-                                            dtype=r_dtype)
-                    else:
-                        r_dtype= type_map.get(r_dtype)
-                        recons.index = np.array(recons.index,dtype=r_dtype )
-                        df.index = np.array(df.index,dtype=r_dtype )
-                if c_dtype:
-                    if c_dtype == 'u':
-                        c_dtype='O'
-                        recons.columns = np.array(lmap(_to_uni,recons.columns),
-                                                  dtype=c_dtype)
-                        df.columns = np.array(lmap(_to_uni,df.columns),dtype=c_dtype )
-                    elif c_dtype == 'dt':
-                        c_dtype='O'
-                        recons.columns = np.array(lmap(Timestamp,recons.columns), dtype=c_dtype )
-                        df.columns = np.array(lmap(Timestamp,df.columns),dtype=c_dtype)
-                    elif c_dtype == 'p':
-                        c_dtype='O'
-                        recons.columns = np.array(lmap(Timestamp,recons.columns.to_datetime()),
-                                                  dtype=c_dtype)
-                        df.columns = np.array(lmap(Timestamp,df.columns.to_datetime()),dtype=c_dtype )
-                    else:
-                        c_dtype= type_map.get(c_dtype)
-                        recons.columns = np.array(recons.columns,dtype=c_dtype )
-                        df.columns = np.array(df.columns,dtype=c_dtype )
+            kwargs = dict(parse_dates=False)
+            if cnlvl:
+                if rnlvl is not None:
+                    kwargs['index_col'] = lrange(rnlvl)
+                kwargs['header'] = lrange(cnlvl)
+                with ensure_clean(path) as path:
+                    df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False)
+                    recons = DataFrame.from_csv(path,tupleize_cols=False,**kwargs)
+            else:
+                kwargs['header'] = 0
+                with ensure_clean(path) as path:
+                    df.to_csv(path,encoding='utf8',chunksize=chunksize)
+                    recons = DataFrame.from_csv(path,**kwargs)
+
+            def _to_uni(x):
+                if not isinstance(x, compat.text_type):
+                    return x.decode('utf8')
+                return x
+            if dupe_col:
+                # read_Csv disambiguates the columns by
+                # labeling them dupe.1,dupe.2, etc'. monkey patch columns
+                recons.columns = df.columns
+            if rnlvl and not cnlvl:
+                delta_lvl = [recons.icol(i).values for i in range(rnlvl-1)]
+                ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl)
+                recons.index = ix
+                recons = recons.iloc[:,rnlvl-1:]
+
+            type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O')
+            if r_dtype:
+                if r_dtype == 'u': # unicode
+                    r_dtype='O'
+                    recons.index = np.array(lmap(_to_uni,recons.index),
+                                            dtype=r_dtype)
+                    df.index = np.array(lmap(_to_uni,df.index),dtype=r_dtype)
+                if r_dtype == 'dt': # unicode
+                    r_dtype='O'
+                    recons.index = np.array(lmap(Timestamp,recons.index),
+                                            dtype=r_dtype)
+                    df.index = np.array(lmap(Timestamp,df.index),dtype=r_dtype)
+                elif r_dtype == 'p':
+                    r_dtype='O'
+                    recons.index = np.array(list(map(Timestamp,
+                                                     recons.index.to_datetime())),
+                                            dtype=r_dtype)
+                    df.index = np.array(list(map(Timestamp,
+                                                 df.index.to_datetime())),
+                                        dtype=r_dtype)
+                else:
+                    r_dtype= type_map.get(r_dtype)
+                    recons.index = np.array(recons.index,dtype=r_dtype )
+                    df.index = np.array(df.index,dtype=r_dtype )
+            if c_dtype:
+                if c_dtype == 'u':
+                    c_dtype='O'
+                    recons.columns = np.array(lmap(_to_uni,recons.columns),
+                                              dtype=c_dtype)
+                    df.columns = np.array(lmap(_to_uni,df.columns),dtype=c_dtype )
+                elif c_dtype == 'dt':
+                    c_dtype='O'
+                    recons.columns = np.array(lmap(Timestamp,recons.columns), dtype=c_dtype )
+                    df.columns = np.array(lmap(Timestamp,df.columns),dtype=c_dtype)
+                elif c_dtype == 'p':
+                    c_dtype='O'
+                    recons.columns = np.array(lmap(Timestamp,recons.columns.to_datetime()),
+                                              dtype=c_dtype)
+                    df.columns = np.array(lmap(Timestamp,df.columns.to_datetime()),dtype=c_dtype )
+                else:
+                    c_dtype= type_map.get(c_dtype)
+                    recons.columns = np.array(recons.columns,dtype=c_dtype )
+                    df.columns = np.array(df.columns,dtype=c_dtype )
 
-                assert_frame_equal(df,recons,check_names=False,check_less_precise=True)
+            assert_frame_equal(df,recons,check_names=False,check_less_precise=True)
 
         N = 100
         chunksize=1000
@@ -5794,16 +5794,16 @@ def _to_uni(x):
         # GH3437
         from pandas import NaT
         def make_dtnat_arr(n,nnat=None):
-                if nnat is None:
-                    nnat= int(n*0.1) # 10%
-                s=list(date_range('2000',freq='5min',periods=n))
-                if nnat:
-                    for i in np.random.randint(0,len(s),nnat):
-                        s[i] = NaT
-                    i = np.random.randint(100)
-                    s[-i] = NaT
-                    s[i] = NaT
-                return s
+            if nnat is None:
+                nnat= int(n*0.1) # 10%
+            s=list(date_range('2000',freq='5min',periods=n))
+            if nnat:
+                for i in np.random.randint(0,len(s),nnat):
+                    s[i] = NaT
+                i = np.random.randint(100)
+                s[-i] = NaT
+                s[i] = NaT
+            return s
 
         # N=35000
         s1=make_dtnat_arr(chunksize+5)
@@ -5879,11 +5879,11 @@ def test_to_csv_from_csv_w_some_infs(self):
         self.frame['H'] = self.frame.index.map(f)
 
         with ensure_clean() as path:
-                self.frame.to_csv(path)
-                recons = DataFrame.from_csv(path)
-
-                assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
-                assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
+            self.frame.to_csv(path)
+            recons = DataFrame.from_csv(path)
+
+            assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
+            assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
 
 
     def test_to_csv_from_csv_w_all_infs(self):
@@ -5938,37 +5938,37 @@ def test_to_csv_multiindex(self):
 
         with ensure_clean(pname) as path:
 
-                frame.to_csv(path, header=False)
-                frame.to_csv(path, columns=['A', 'B'])
-
-                # round trip
-                frame.to_csv(path)
-                df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False)
-
-                assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name
-                self.assertEqual(frame.index.names, df.index.names)
-                self.frame.index = old_index # needed if setUP becomes a classmethod
-
-                # try multiindex with dates
-                tsframe = self.tsframe
-                old_index = tsframe.index
-                new_index = [old_index, np.arange(len(old_index))]
-                tsframe.index = MultiIndex.from_arrays(new_index)
-
-                tsframe.to_csv(path, index_label=['time', 'foo'])
-                recons = DataFrame.from_csv(path, index_col=[0, 1])
-                assert_frame_equal(tsframe, recons, check_names=False) # TODO to_csv drops column name
-
-                # do not load index
-                tsframe.to_csv(path)
-                recons = DataFrame.from_csv(path, index_col=None)
-                np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2)
-
-                # no index
-                tsframe.to_csv(path, index=False)
-                recons = DataFrame.from_csv(path, index_col=None)
-                assert_almost_equal(recons.values, self.tsframe.values)
-                self.tsframe.index = old_index # needed if setUP becomes classmethod
+            frame.to_csv(path, header=False)
+            frame.to_csv(path, columns=['A', 'B'])
+
+            # round trip
+            frame.to_csv(path)
+            df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False)
+
+            assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name
+            self.assertEqual(frame.index.names, df.index.names)
+            self.frame.index = old_index # needed if setUP becomes a classmethod
+
+            # try multiindex with dates
+            tsframe = self.tsframe
+            old_index = tsframe.index
+            new_index = [old_index, np.arange(len(old_index))]
+            tsframe.index = MultiIndex.from_arrays(new_index)
+
+            tsframe.to_csv(path, index_label=['time', 'foo'])
+            recons = DataFrame.from_csv(path, index_col=[0, 1])
+            assert_frame_equal(tsframe, recons, check_names=False) # TODO to_csv drops column name
+
+            # do not load index
+            tsframe.to_csv(path)
+            recons = DataFrame.from_csv(path, index_col=None)
+            np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2)
+
+            # no index
+            tsframe.to_csv(path, index=False)
+            recons = DataFrame.from_csv(path, index_col=None)
+            assert_almost_equal(recons.values, self.tsframe.values)
+            self.tsframe.index = old_index # needed if setUP becomes classmethod
 
         with ensure_clean(pname) as path:
             # GH3571, GH1651, GH3141
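make_dtnat_arr in the @@ -5794 hunk above salts a datetime list with NaT so the roundtrip tests also cover missing timestamps. The salting step in isolation (counts are approximate because np.random.randint can repeat an index):

    import numpy as np
    from pandas import NaT, date_range

    s = list(date_range('2000', freq='5min', periods=10))
    for i in np.random.randint(0, len(s), 2):
        s[i] = NaT
    print(sum(x is NaT for x in s))  # at most 2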
@@ -6077,8 +6077,8 @@ def test_to_csv_float32_nanrep(self):
             df.to_csv(path, na_rep=999)
 
             with open(path) as f:
-                    lines = f.readlines()
-                    self.assertEqual(lines[1].split(',')[2], '999')
+                lines = f.readlines()
+                self.assertEqual(lines[1].split(',')[2], '999')
 
 
     def test_to_csv_withcommas(self):
@@ -6149,7 +6149,7 @@ def test_to_csv_dups_cols(self):
 
             # date cols
             for i in ['0.4','1.4','2.4']:
-                    result[i] = to_datetime(result[i])
+                result[i] = to_datetime(result[i])
 
             result.columns = df.columns
             assert_frame_equal(result,df)
@@ -12079,7 +12079,7 @@ def test_columns_with_dups(self):
 
         # testing iget
         for i in range(len(df.columns)):
-                df.iloc[:,i]
+            df.iloc[:,i]
 
         # dup columns across dtype GH 2079/2194
         vals = [[1, -1, 2.], [2, -2, 3.]]
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 5f07acf25582f..3cf4cb8bc5809 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -413,8 +413,8 @@ def test_nonzero_single_element(self):
 
         # single non-bool are an error
         for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
-                self.assertRaises(ValueError, lambda : bool(s))
-                self.assertRaises(ValueError, lambda : s.bool())
+            self.assertRaises(ValueError, lambda : bool(s))
+            self.assertRaises(ValueError, lambda : s.bool())
 
     def test_metadata_propagation_indiv(self):
         # check that the metadata matches up on the resulting ops
diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py
index b9939976fded8..80ac97ee60617 100644
--- a/pandas/tseries/converter.py
+++ b/pandas/tseries/converter.py
@@ -513,7 +513,7 @@ def _daily_finder(vmin, vmax, freq):
     def first_label(label_flags):
         if (label_flags[0] == 0) and (label_flags.size > 1) and \
                 ((vmin_orig % 1) > 0.0):
-                return label_flags[1]
+            return label_flags[1]
         else:
             return label_flags[0]
 
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 380116fc5aab5..15f11954fd022 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -94,7 +94,7 @@ def assert_numpy_array_equal(self, np_array, assert_equal):
     If the expected array includes `np.nan` use `assert_numpy_array_equivalent(...)`.
     """
    if np.array_equal(np_array, assert_equal):
-            return
+        return
    raise AssertionError('{0} is not equal to {1}.'.format(np_array, assert_equal))
 
 def assert_numpy_array_equivalent(self, np_array, assert_equal):
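The assert helper in the hunk above exists because np.array_equal treats NaN as unequal to itself; the assert_numpy_array_equivalent variant re-indented below handles expected arrays containing np.nan. A sketch of the distinction (nan_aware_equal is a hypothetical stand-in for pandas' array_equivalent, which also copes with non-float and object arrays):

    import numpy as np

    a = np.array([1.0, np.nan])
    print(np.array_equal(a, a.copy()))  # False: nan != nan

    def nan_aware_equal(left, right):
        left, right = np.asarray(left), np.asarray(right)
        if left.shape != right.shape:
            return False
        # equal where values match, or where both sides are NaN
        both_nan = np.isnan(left) & np.isnan(right)
        return bool(np.all((left == right) | both_nan))

    print(nan_aware_equal(a, a.copy()))  # True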
""" if array_equivalent(np_array, assert_equal): - return + return raise AssertionError('{0} is not equivalent to {1}.'.format(np_array, assert_equal)) def assertIs(self, first, second, msg=''):