@@ -1211,11 +1211,11 @@ def __init__(self,
         var_cols = ["east_var", "north_var", "up_var"]
         cov_cols = ["east_north_cov", "east_up_cov", "north_up_cov"]
         all_cols = data_cols + var_cols + cov_cols
-        time = pd.read_csv(self._path, delim_whitespace=True, header=None,
+        time = pd.read_csv(self._path, delimiter=r"\s+", header=None,
                            usecols=[11, 12, 13, 14, 15, 16],
                            names=["year", "month", "day", "hour", "minute", "second"])
         time = pd.to_datetime(time).to_frame(name="time")
-        data = pd.read_csv(self._path, delim_whitespace=True, header=None,
+        data = pd.read_csv(self._path, delimiter=r"\s+", header=None,
                            usecols=[1, 2, 3, 4, 5, 6, 7, 8, 9],
                            names=all_cols)
         # compute covariance from correlation, still in meters
@@ -1347,7 +1347,7 @@ def __init__(self,
         else:
             raise ValueError(f"'data_unit' needs to be 'mm' or 'm', got {data_unit}.")
         # load data and check for some warnings
-        df = pd.read_csv(self._path, delim_whitespace=True,
+        df = pd.read_csv(self._path, delimiter=r"\s+",
                          usecols=[0, 3] + list(range(6, 13)) + list(range(14, 20)))
         if show_warnings and len(df['site'].unique()) > 1:
             warn(f"Timeseries file {self._path} contains multiple site codes: "
@@ -1541,18 +1541,18 @@ def __init__(self,
                     ).with_traceback(e.__traceback__) from e
                 # load data into pandas
                 f.seek(0)
-                df = pd.read_csv(f, delim_whitespace=True,
+                df = pd.read_csv(f, delimiter=r"\s+",
                                  names=["site", "sec-J2000", "___e-ref(m)", "___n-ref(m)",
                                         "___v-ref(m)", "sig_e(m)", "sig_n(m)", "sig_v(m)"],
                                  usecols=[0, 1] + list(range(8, 11)) + list(range(14, 17)))
         # if the path is a .kenv.gz file, we only need to extract the single file
         elif pathobj.match("*.kenv.gz"):
             with gzip.open(self._path, mode="r") as f:
-                df = pd.read_csv(f, delim_whitespace=True,
+                df = pd.read_csv(f, delimiter=r"\s+",
                                  usecols=[0, 1] + list(range(8, 11)) + list(range(14, 17)))
         # in all other cases, try loading directly
         else:
-            df = pd.read_csv(self._path, delim_whitespace=True,
+            df = pd.read_csv(self._path, delimiter=r"\s+",
                              usecols=[0, 1] + list(range(8, 11)) + list(range(14, 17)))
         # check for duplicate sites
         if show_warnings and len(df['site'].unique()) > 1:
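
Note: pandas deprecated the delim_whitespace keyword in version 2.2, slating it
for removal in 3.0; passing a regex separator via delimiter=r"\s+" (an alias of
sep) is the documented replacement and likewise splits on any run of
whitespace. A minimal sketch of the equivalence, using made-up sample data
rather than the repository's file formats:

    import io
    import pandas as pd

    # Illustrative whitespace-delimited sample (values invented for the demo)
    sample = io.StringIO("SITE  1.0  2.0  3.0\nSITE  4.0  5.0  6.0\n")

    # Deprecated spelling, warns since pandas 2.2:
    #   df = pd.read_csv(sample, delim_whitespace=True, header=None)
    # Forward-compatible equivalent used throughout this commit:
    df = pd.read_csv(sample, delimiter=r"\s+", header=None,
                     names=["site", "east", "north", "up"])
    print(df)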