Skip to content

Commit ffa4d38

Browse files
committed
Update time offset for more recent pandas
Update time offsets to reflect changes in more recent versions of pandas (pandas-dev/pandas#56346).
1 parent 46a92b9 commit ffa4d38

File tree

2 files changed

+12
-12
lines changed

2 files changed

+12
-12
lines changed

intense/gauge.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -150,7 +150,7 @@ def write(self, directory: str):
150150

151151
def monthly_max(self) -> pd.Series:
152152
"""Returns the monthly maximum value from masked"""
153-
return self.masked.groupby(pd.TimeGrouper('M')).max()
153+
return self.masked.groupby(pd.TimeGrouper('ME')).max()
154154

155155

156156
def read_intense(path_or_stream: Union[str, IO], only_metadata: bool = False) -> Gauge:
@@ -213,14 +213,14 @@ def read_intense(path_or_stream: Union[str, IO], only_metadata: bool = False) ->
213213
data = pd.Series(data,
214214
pd.date_range(start=datetime.strptime(metadata['start datetime'], '%Y%m%d%H'),
215215
end=datetime.strptime(metadata['end datetime'], '%Y%m%d%H'),
216-
freq=metadata['new timestep'][:-2] + 'H'),
216+
freq=metadata['new timestep'][:-2] + 'h'),
217217
dtype=float)
218218
except: # Modification adds extra hour at end of series to accommodate nan value
219219
# Drop nan alternative: (keeps all series same length)
220220
data = pd.Series(data[:-1],
221221
pd.date_range(start=datetime.strptime(metadata['start datetime'], '%Y%m%d%H'),
222222
end=datetime.strptime(metadata['end datetime'], '%Y%m%d%H'),
223-
freq=metadata['new timestep'][:-2] + 'H'),
223+
freq=metadata['new timestep'][:-2] + 'h'),
224224
dtype=float)
225225

226226
gauge = Gauge(station_id=metadata['station id'],

intense/qc.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -238,8 +238,8 @@ def check_percentiles(self) -> Tuple[List[int], List[int]]:
238238
Returns:
239239
Years where 95th and 99th percentiles are zero
240240
"""
241-
perc95 = self.gauge.data.groupby(pd.Grouper(freq='A')).quantile(.95)
242-
perc99 = self.gauge.data.groupby(pd.Grouper(freq='A')).quantile(.99)
241+
perc95 = self.gauge.data.groupby(pd.Grouper(freq='YE')).quantile(.95)
242+
perc99 = self.gauge.data.groupby(pd.Grouper(freq='YE')).quantile(.99)
243243

244244
return [d.year for d in list(perc95[perc95 == 0].index)], [d.year for d in list(perc99[perc99 == 0].index)]
245245

@@ -249,9 +249,9 @@ def check_k_largest(self) -> Tuple[List[int], List[int], List[int]]:
249249
Returns:
250250
The first, five and ten largest rainfall values
251251
"""
252-
k1 = self.gauge.data.groupby(pd.Grouper(freq='A')).nlargest(n=1).min(level=0)
253-
k5 = self.gauge.data.groupby(pd.Grouper(freq='A')).nlargest(n=5).min(level=0)
254-
k10 = self.gauge.data.groupby(pd.Grouper(freq='A')).nlargest(n=10).min(level=0)
252+
k1 = self.gauge.data.groupby(pd.Grouper(freq='YE')).nlargest(n=1).min(level=0)
253+
k5 = self.gauge.data.groupby(pd.Grouper(freq='YE')).nlargest(n=5).min(level=0)
254+
k10 = self.gauge.data.groupby(pd.Grouper(freq='YE')).nlargest(n=10).min(level=0)
255255

256256
return [d.year for d in list(k1[k1 == 0].index)], \
257257
[d.year for d in list(k5[k5 == 0].index)], \
@@ -468,7 +468,7 @@ def r99ptot_check_annual(self) -> List[int]:
468468
"D").sum() # this changes depending on which version of pandas youre using. o.14 requires how agument,
469469
# later requires .sum
470470

471-
perc99 = daily_ts.groupby(pd.Grouper(freq='A')).quantile(.99)
471+
perc99 = daily_ts.groupby(pd.Grouper(freq='YE')).quantile(.99)
472472
py = list(perc99.index.year)
473473
pv = list(perc99)
474474
p_dict = {}
@@ -478,7 +478,7 @@ def r99ptot_check_annual(self) -> List[int]:
478478
daily_df["year"] = daily_df.index.year
479479
daily_df["p99"] = daily_df.apply(lambda row: p_dict[row.year], axis=1)
480480
daily_df["filtered"] = daily_df.daily.where(daily_df.daily >= daily_df.p99)
481-
perc99_tot = daily_df.groupby(pd.Grouper(freq='A')).sum()
481+
perc99_tot = daily_df.groupby(pd.Grouper(freq='YE')).sum()
482482
tots = list(perc99_tot.filtered)
483483
checks = [utils.day_check(t, p_max, p_max_filled) for t in tots]
484484

@@ -497,7 +497,7 @@ def prcptot_check_annual(self) -> List[int]:
497497
p_max, p_max_filled = utils.get_etccdi_value(self.etcdii_data, 'PRCPTOT', self.gauge.longitude, self.gauge.latitude)
498498

499499
if np.isfinite(p_max) or np.isfinite(p_max_filled):
500-
ann_tots = self.gauge.data.groupby(pd.Grouper(freq='A')).sum()
500+
ann_tots = self.gauge.data.groupby(pd.Grouper(freq='YE')).sum()
501501
tots = list(ann_tots)
502502
checks = [utils.day_check(t, p_max, p_max_filled) for t in tots]
503503
else:
@@ -1362,7 +1362,7 @@ def read_intense_qc(path_or_stream: Union[IO, str], only_metadata: bool = False)
13621362
data = np.array(data)
13631363
data = pd.DataFrame(data, pd.date_range(start=datetime.strptime(metadata['start datetime'], '%Y%m%d%H'),
13641364
end=datetime.strptime(metadata['end datetime'], '%Y%m%d%H'),
1365-
freq=metadata['new timestep'][:-2] + 'H'), dtype=float,
1365+
freq=metadata['new timestep'][:-2] + 'h'), dtype=float,
13661366
columns=["vals", "hourly_neighbours", "hourly_neighbours_dry", "daily_neighbours",
13671367
"daily_neighbours_dry", "monthly_neighbours", "world_record", "Rx1day",
13681368
"CWD", "CDD", "daily_accumualtions", "monthly_accumulations",

0 commit comments

Comments
 (0)