Commit 93f53e5 (parent: 3d01e28)
2 files changed (+8 -8 lines)

disstans/timeseries.py (+6 -6)
@@ -1211,11 +1211,11 @@ def __init__(self,
         var_cols = ["east_var", "north_var", "up_var"]
         cov_cols = ["east_north_cov", "east_up_cov", "north_up_cov"]
         all_cols = data_cols + var_cols + cov_cols
-        time = pd.read_csv(self._path, delim_whitespace=True, header=None,
+        time = pd.read_csv(self._path, delimiter=r"\s+", header=None,
                            usecols=[11, 12, 13, 14, 15, 16],
                            names=["year", "month", "day", "hour", "minute", "second"])
         time = pd.to_datetime(time).to_frame(name="time")
-        data = pd.read_csv(self._path, delim_whitespace=True, header=None,
+        data = pd.read_csv(self._path, delimiter=r"\s+", header=None,
                            usecols=[1, 2, 3, 4, 5, 6, 7, 8, 9],
                            names=all_cols)
         # compute covariance from correlation, still in meters
@@ -1347,7 +1347,7 @@ def __init__(self,
         else:
             raise ValueError(f"'data_unit' needs to be 'mm' or 'm', got {data_unit}.")
         # load data and check for some warnings
-        df = pd.read_csv(self._path, delim_whitespace=True,
+        df = pd.read_csv(self._path, delimiter=r"\s+",
                          usecols=[0, 3] + list(range(6, 13)) + list(range(14, 20)))
         if show_warnings and len(df['site'].unique()) > 1:
             warn(f"Timeseries file {self._path} contains multiple site codes: "
@@ -1541,18 +1541,18 @@ def __init__(self,
                 ).with_traceback(e.__traceback__) from e
             # load data into pandas
             f.seek(0)
-            df = pd.read_csv(f, delim_whitespace=True,
+            df = pd.read_csv(f, delimiter=r"\s+",
                              names=["site", "sec-J2000", "___e-ref(m)", "___n-ref(m)",
                                     "___v-ref(m)", "sig_e(m)", "sig_n(m)", "sig_v(m)"],
                              usecols=[0, 1] + list(range(8, 11)) + list(range(14, 17)))
         # if the path is a .kenv.gz file, we only need to extract the single file
         elif pathobj.match("*.kenv.gz"):
             with gzip.open(self._path, mode="r") as f:
-                df = pd.read_csv(f, delim_whitespace=True,
+                df = pd.read_csv(f, delimiter=r"\s+",
                                  usecols=[0, 1] + list(range(8, 11)) + list(range(14, 17)))
         # in all other cases, try loading directly
         else:
-            df = pd.read_csv(self._path, delim_whitespace=True,
+            df = pd.read_csv(self._path, delimiter=r"\s+",
                              usecols=[0, 1] + list(range(8, 11)) + list(range(14, 17)))
         # check for duplicate sites
         if show_warnings and len(df['site'].unique()) > 1:

disstans/tools.py (+2 -2)
@@ -998,7 +998,7 @@ def get_sta_url(sta, year, doy, date):
     except error.HTTPError as e:
         raise RuntimeError("Failed to download the station list from "
                            f"{station_list_url}.").with_traceback(e.__traceback__) from e
-    stations = pd.read_csv(station_list_path, delim_whitespace=True, usecols=list(range(11)),
+    stations = pd.read_csv(station_list_path, delimiter=r"\s+", usecols=list(range(11)),
                            parse_dates=[7, 8, 9])
     # subset according to station_list_or_bbox
     if all([isinstance(site, str) for site in station_list_or_bbox]):
@@ -1261,7 +1261,7 @@ def parse_unr_steps(filepath: str,
     # load the file
     col_names = ["station", "time", "code", "type", "distance", "magnitude", "usgsid"]
     # (for earthquake events, the "type" column is actually the "threshold" column)
-    raw = pd.read_csv(filepath, names=col_names, delim_whitespace=True)
+    raw = pd.read_csv(filepath, names=col_names, delimiter=r"\s+")
     # we now have a locale-dependent time column in the non-standard format yymmmdd
     # (%y%b%d in strptime language) which we need to convert in a hard-coded way, because we
     # shouldn't change the locale temporarily as it affects the entire system
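
Note (not part of the commit itself): pandas deprecated the delim_whitespace keyword of read_csv in version 2.2 in favor of a whitespace-regex separator, which is presumably what motivated this change. The sketch below, using hypothetical sample data rather than code from this repository, shows that the two spellings parse identically:

import io

import pandas as pd

# Hypothetical whitespace-delimited sample standing in for the GNSS
# timeseries files parsed above.
raw = "P123  1.0  2.0\nP123  3.0  4.0\n"

# Old spelling, deprecated since pandas 2.2 (emits a FutureWarning there):
# df = pd.read_csv(io.StringIO(raw), delim_whitespace=True, header=None)

# New spelling used by this commit: 'delimiter' is an alias of 'sep', and
# the raw-string regex r"\s+" matches any run of whitespace, so leading
# spaces and repeated separators are handled the same way as before.
df = pd.read_csv(io.StringIO(raw), delimiter=r"\s+", header=None,
                 names=["site", "east", "north"])
print(df)

Per the pandas documentation, multi-character regex separators normally force the slower Python parsing engine, but r"\s+" is the documented exception, so this change should not affect parsing speed.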
