 from numpy.testing import assert_array_equal
 import numpy.ma.mrecords as mrecords

-import pandas as pan
 import pandas.core.nanops as nanops
 import pandas.core.common as com
 import pandas.core.format as fmt
 import pandas.core.datetools as datetools
-from pandas.core.api import (DataFrame, Index, Series, notnull, isnull,
-                             MultiIndex, DatetimeIndex, Timestamp)
-from pandas import date_range
+from pandas import (DataFrame, Index, Series, notnull, isnull,
+                    MultiIndex, DatetimeIndex, Timestamp, date_range, read_csv)
 import pandas as pd
-from pandas.io.parsers import read_csv
 from pandas.parser import CParserError
 from pandas.util.misc import is_little_endian

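Aside, not part of the patch: this hunk drops the `pan` alias and the indirect imports from the internal `pandas.core.api` and `pandas.io.parsers` modules in favour of the public top-level namespace. A minimal sketch of the resulting import pattern, using only names that appear later in this diff:

import pandas as pd
from pandas import DataFrame, Timestamp, date_range, read_csv

idx = date_range("2012-01-01", periods=2)      # replaces the old pan.date_range(...)
df = DataFrame({"a": [1.0, 2.0]}, index=idx)
combined = pd.concat([df, df], axis=1)         # replaces the old pan.concat(...)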
@@ -3740,7 +3737,7 @@ def test_to_dict(self):
     def test_to_records_dt64(self):
         df = DataFrame([["one", "two", "three"],
                         ["four", "five", "six"]],
-                       index=pan.date_range("2012-01-01", "2012-01-02"))
+                       index=date_range("2012-01-01", "2012-01-02"))
         self.assertEqual(df.to_records()['index'][0], df.index[0])

         rs = df.to_records(convert_datetime64=False)
@@ -5883,7 +5880,7 @@ def create_cols(name):
         #### this is a bug in read_csv right now ####
         #df_dt.ix[30:50,1:3] = np.nan

-        df = pan.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)
+        df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)

         # dtype
         dtypes = dict()
@@ -5893,7 +5890,7 @@ def create_cols(name):

         with ensure_clean() as filename:
             df.to_csv(filename)
-            rs = pan.read_csv(filename, index_col=0, dtype=dtypes, parse_dates=create_cols('date'))
+            rs = read_csv(filename, index_col=0, dtype=dtypes, parse_dates=create_cols('date'))
             assert_frame_equal(rs, df)

     def test_to_csv_dups_cols(self):
@@ -5911,7 +5908,7 @@ def test_to_csv_dups_cols(self):
         df_bool = DataFrame(True,index=df_float.index,columns=lrange(3))
         df_object = DataFrame('foo',index=df_float.index,columns=lrange(3))
         df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=lrange(3))
-        df = pan.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1, ignore_index=True)
+        df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1, ignore_index=True)

         cols = []
         for i in range(5):
@@ -5955,7 +5952,7 @@ def test_to_csv_chunking(self):
         for chunksize in [10000,50000,100000]:
             with ensure_clean() as filename:
                 aa.to_csv(filename,chunksize=chunksize)
-                rs = pan.read_csv(filename,index_col=0)
+                rs = read_csv(filename,index_col=0)
                 assert_frame_equal(rs, aa)

     def test_to_csv_bug(self):
@@ -5966,7 +5963,7 @@ def test_to_csv_bug(self):
         with ensure_clean() as path:
             newdf.to_csv(path)

-            recons = pan.read_csv(path, index_col=0)
+            recons = read_csv(path, index_col=0)
             assert_frame_equal(recons, newdf, check_names=False)  # don't check_names as t != 1

     def test_to_csv_unicode(self):
@@ -5975,11 +5972,11 @@ def test_to_csv_unicode(self):
         with ensure_clean() as path:

             df.to_csv(path, encoding='UTF-8')
-            df2 = pan.read_csv(path, index_col=0, encoding='UTF-8')
+            df2 = read_csv(path, index_col=0, encoding='UTF-8')
             assert_frame_equal(df, df2)

             df.to_csv(path, encoding='UTF-8', index=False)
-            df2 = pan.read_csv(path, index_col=None, encoding='UTF-8')
+            df2 = read_csv(path, index_col=None, encoding='UTF-8')
             assert_frame_equal(df, df2)

     def test_to_csv_unicode_index_col(self):
@@ -5993,14 +5990,14 @@ def test_to_csv_unicode_index_col(self):
         df.to_csv(buf, encoding='UTF-8')
         buf.seek(0)

-        df2 = pan.read_csv(buf, index_col=0, encoding='UTF-8')
+        df2 = read_csv(buf, index_col=0, encoding='UTF-8')
         assert_frame_equal(df, df2)

     def test_to_csv_stringio(self):
         buf = StringIO()
         self.frame.to_csv(buf)
         buf.seek(0)
-        recons = pan.read_csv(buf, index_col=0)
+        recons = read_csv(buf, index_col=0)
         assert_frame_equal(recons, self.frame, check_names=False)  # TODO to_csv drops column name

     def test_to_csv_float_format(self):
@@ -6013,7 +6010,7 @@ def test_to_csv_float_format(self):

             df.to_csv(filename, float_format='%.2f')

-            rs = pan.read_csv(filename, index_col=0)
+            rs = read_csv(filename, index_col=0)
             xp = DataFrame([[0.12, 0.23, 0.57],
                             [12.32, 123123.20, 321321.20]],
                            index=['A', 'B'], columns=['X', 'Y', 'Z'])
@@ -6359,7 +6356,7 @@ def test_asfreq_datetimeindex(self):
         tm.assert_isinstance(ts.index, DatetimeIndex)

     def test_at_time_between_time_datetimeindex(self):
-        index = pan.date_range("2012-01-01", "2012-01-05", freq='30min')
+        index = date_range("2012-01-01", "2012-01-05", freq='30min')
         df = DataFrame(randn(len(index), 5), index=index)
         akey = time(12, 0, 0)
         bkey = slice(time(13, 0, 0), time(14, 0, 0))
@@ -8009,12 +8006,11 @@ def test_replace_with_dict_with_bool_keys(self):
         df.replace({'asdf': 'asdb', True: 'yes'})

     def test_combine_multiple_frames_dtypes(self):
-        from pandas import concat

         # GH 2759
         A = DataFrame(data=np.ones((10, 2)), columns=['foo', 'bar'], dtype=np.float64)
         B = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
-        results = concat((A, B), axis=1).get_dtype_counts()
+        results = pd.concat((A, B), axis=1).get_dtype_counts()
         expected = Series(dict( float64 = 2, float32 = 2 ))
         assert_series_equal(results,expected)

@@ -8994,6 +8990,14 @@ def test_shift(self):
         assertRaisesRegexp(ValueError, 'does not match PeriodIndex freq',
                            ps.shift, freq='D')

+
+        # shift other axis
+        # GH 6371
+        df = DataFrame(np.random.rand(10,5))
+        expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)
+        result = df.shift(1,axis=1)
+        assert_frame_equal(result,expected)
+
     def test_shift_bool(self):
         df = DataFrame({'high': [True, False],
                         'low': [False, False]})
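Aside, not part of the patch: the hunk above adds a regression test for GH 6371, which shifts a DataFrame along the column axis. A minimal sketch of the behaviour it pins down, assuming a pandas version in which DataFrame.shift accepts axis=1:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.rand(10, 5))
result = df.shift(1, axis=1)

# The first column becomes all-NaN and each remaining column takes the values
# of the column to its left; the column labels themselves are unchanged.
assert result.iloc[:, 0].isnull().all()
assert np.allclose(result.iloc[:, 1:].values, df.iloc[:, :-1].values)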
@@ -11339,7 +11343,7 @@ def test_columns_with_dups(self):
         df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns)
         df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns)
         df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns)
-        df = pan.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)
+        df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)

         result = df._data._set_ref_locs()
         self.assertEqual(len(result), len(df.columns))