@@ -238,8 +238,8 @@ def check_percentiles(self) -> Tuple[List[int], List[int]]:
        Returns:
            Years where 95th and 99th percentiles are zero
        """
-        perc95 = self.gauge.data.groupby(pd.Grouper(freq='A')).quantile(.95)
-        perc99 = self.gauge.data.groupby(pd.Grouper(freq='A')).quantile(.99)
+        perc95 = self.gauge.data.groupby(pd.Grouper(freq='YE')).quantile(.95)
+        perc99 = self.gauge.data.groupby(pd.Grouper(freq='YE')).quantile(.99)

        return [d.year for d in list(perc95[perc95 == 0].index)], [d.year for d in list(perc99[perc99 == 0].index)]

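The change above swaps pandas' deprecated annual alias 'A' for 'YE' (year end). A minimal sketch of the same grouping pattern, assuming pandas >= 2.2 and using a made-up hourly series in place of self.gauge.data:

```python
import numpy as np
import pandas as pd

# Hypothetical hourly rainfall series standing in for self.gauge.data.
idx = pd.date_range("2000-01-01", "2002-12-31 23:00", freq="h")
rain = pd.Series(np.random.default_rng(0).gamma(0.1, 2.0, size=len(idx)), index=idx)

# 'YE' (year end) is the pandas >= 2.2 spelling of the deprecated 'A' alias;
# both group the series into calendar years ending 31 December.
perc95 = rain.groupby(pd.Grouper(freq="YE")).quantile(.95)
perc99 = rain.groupby(pd.Grouper(freq="YE")).quantile(.99)

# Years whose annual percentile is exactly zero, as returned by check_percentiles.
print([d.year for d in perc95[perc95 == 0].index],
      [d.year for d in perc99[perc99 == 0].index])
```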
@@ -249,9 +249,9 @@ def check_k_largest(self) -> Tuple[List[int], List[int], List[int]]:
        Returns:
            The first, five and ten largest rainfall values
        """
-        k1 = self.gauge.data.groupby(pd.Grouper(freq='A')).nlargest(n=1).min(level=0)
-        k5 = self.gauge.data.groupby(pd.Grouper(freq='A')).nlargest(n=5).min(level=0)
-        k10 = self.gauge.data.groupby(pd.Grouper(freq='A')).nlargest(n=10).min(level=0)
+        k1 = self.gauge.data.groupby(pd.Grouper(freq='YE')).nlargest(n=1).min(level=0)
+        k5 = self.gauge.data.groupby(pd.Grouper(freq='YE')).nlargest(n=5).min(level=0)
+        k10 = self.gauge.data.groupby(pd.Grouper(freq='YE')).nlargest(n=10).min(level=0)

        return [d.year for d in list(k1[k1 == 0].index)], \
            [d.year for d in list(k5[k5 == 0].index)], \
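These lines also call Series.min(level=0), and the level argument to reductions was removed in pandas 2.0, so on the same pandas version that provides 'YE' the equivalent spelling is groupby(level=0).min(). A hedged sketch of that pattern, not part of the commit, reusing the same kind of hypothetical series:

```python
import numpy as np
import pandas as pd

# Same kind of hypothetical hourly series as in the sketch above.
idx = pd.date_range("2000-01-01", "2002-12-31 23:00", freq="h")
rain = pd.Series(np.random.default_rng(0).gamma(0.1, 2.0, size=len(idx)), index=idx)

# nlargest() per 'YE' group yields a (year end, timestamp) MultiIndex, so the
# minimum over level 0 is the k-th largest value recorded in each year.
# Series.min(level=0), as used in the diff, is gone in pandas >= 2.0;
# groupby(level=0).min() is the equivalent spelling.
k5 = rain.groupby(pd.Grouper(freq="YE")).nlargest(n=5).groupby(level=0).min()
print([d.year for d in k5[k5 == 0].index])  # years whose 5th-largest value is zero
```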
@@ -468,7 +468,7 @@ def r99ptot_check_annual(self) -> List[int]:
            "D").sum()  # this changes depending on which version of pandas you're using. 0.14 requires how argument,
        # later requires .sum

-        perc99 = daily_ts.groupby(pd.Grouper(freq='A')).quantile(.99)
+        perc99 = daily_ts.groupby(pd.Grouper(freq='YE')).quantile(.99)
        py = list(perc99.index.year)
        pv = list(perc99)
        p_dict = {}
@@ -478,7 +478,7 @@ def r99ptot_check_annual(self) -> List[int]:
        daily_df["year"] = daily_df.index.year
        daily_df["p99"] = daily_df.apply(lambda row: p_dict[row.year], axis=1)
        daily_df["filtered"] = daily_df.daily.where(daily_df.daily >= daily_df.p99)
-        perc99_tot = daily_df.groupby(pd.Grouper(freq='A')).sum()
+        perc99_tot = daily_df.groupby(pd.Grouper(freq='YE')).sum()
        tots = list(perc99_tot.filtered)
        checks = [utils.day_check(t, p_max, p_max_filled) for t in tots]

@@ -497,7 +497,7 @@ def prcptot_check_annual(self) -> List[int]:
        p_max, p_max_filled = utils.get_etccdi_value(self.etcdii_data, 'PRCPTOT', self.gauge.longitude, self.gauge.latitude)

        if np.isfinite(p_max) or np.isfinite(p_max_filled):
-            ann_tots = self.gauge.data.groupby(pd.Grouper(freq='A')).sum()
+            ann_tots = self.gauge.data.groupby(pd.Grouper(freq='YE')).sum()
            tots = list(ann_tots)
            checks = [utils.day_check(t, p_max, p_max_filled) for t in tots]
        else:
@@ -1362,7 +1362,7 @@ def read_intense_qc(path_or_stream: Union[IO, str], only_metadata: bool = False)
    data = np.array(data)
    data = pd.DataFrame(data, pd.date_range(start=datetime.strptime(metadata['start datetime'], '%Y%m%d%H'),
                                            end=datetime.strptime(metadata['end datetime'], '%Y%m%d%H'),
-                                           freq=metadata['new timestep'][:-2] + 'H'), dtype=float,
+                                           freq=metadata['new timestep'][:-2] + 'h'), dtype=float,
                              columns=["vals", "hourly_neighbours", "hourly_neighbours_dry", "daily_neighbours",
                                       "daily_neighbours_dry", "monthly_neighbours", "world_record", "Rx1day",
                                       "CWD", "CDD", "daily_accumualtions", "monthly_accumulations",