@@ -5,7 +5,7 @@ forecasters <- list(
   c("flatline_fc", flatline_fc)
 )
 for (forecaster in forecasters) {
-  test_that(forecaster[[1]], {
+  test_that(paste(forecaster[[1]], "gets the date and columns right"), {
     jhu <- epipredict::case_death_rate_subset %>%
       dplyr::filter(time_value >= as.Date("2021-12-01"))
     # the as_of for this is wildly far in the future
@@ -19,8 +19,50 @@ for (forecaster in forecasters) {
       res$target_end_date ==
         as.Date("2022-01-01")
     ))
+  })
+
+  test_that(paste(forecaster[[1]], "deals with no as_of"), {
+    jhu <- epipredict::case_death_rate_subset %>%
+      dplyr::filter(time_value >= as.Date("2021-12-01"))
+    # what if we have no as_of date? assume they mean the last available data
+    attributes(jhu)$metadata$as_of <- NULL
+    expect_no_error(res <- forecaster[[2]](jhu, "case_rate", c("death_rate"), 2L))
+    expect_equal(res$target_end_date %>% unique, max(jhu$time_value) + 2)
+  })
+
+  test_that(paste(forecaster[[1]], "handles last second NA's"), {
+    # if the last entries are NA, we should still predict
+    # TODO: currently this checks that we DON'T predict
+    jhu <- epipredict::case_death_rate_subset %>%
+      dplyr::filter(time_value >= as.Date("2021-12-01"))
+    geo_values <- jhu$geo_value %>% unique()
+    one_day_nas <- tibble(
+      geo_value = geo_values,
+      time_value = as.Date("2022-01-01"),
+      case_rate = NA,
+      death_rate = runif(length(geo_values))
+    )
+    second_day_nas <- one_day_nas %>%
+      mutate(time_value = as.Date("2022-01-02"))
+    jhu_nad <- jhu %>%
+      as_tibble() %>%
+      bind_rows(one_day_nas, second_day_nas) %>%
+      as_epi_df()
+    attributes(jhu_nad)$metadata$as_of <- max(jhu_nad$time_value) + 3
+    expect_no_error(nas_forecast <- forecaster[[2]](jhu_nad, "case_rate", c("death_rate")))
+    # TODO: this shouldn't actually be null, it should be a bit further delayed
+    expect_equal(nrow(nas_forecast), 0)
+  })
+
+  # ################################
   # any forecaster specific tests
   if (forecaster[[1]] == "scaled_pop") {
+    test_that(paste(forecaster[[1]], "scaled and unscaled don't make the same predictions"), {
+      jhu <- epipredict::case_death_rate_subset %>%
+        dplyr::filter(time_value >= as.Date("2021-12-01"))
+      # the as_of for this is wildly far in the future
+      attributes(jhu)$metadata$as_of <- max(jhu$time_value) + 3
+      res <- forecaster[[2]](jhu, "case_rate", c("death_rate"), -2L)
     # confirm scaling produces different results
     res_unscaled <- forecaster[[2]](jhu,
       "case_rate",
@@ -35,10 +77,17 @@ for (forecaster in forecasters) {
     ) %>%
       mutate(equal = value.unscaled == value.scaled) %>%
       summarize(all(equal)) %>% pull(`all(equal)`))
+    })
   }
   # TODO confirming that it produces exactly the same result as arx_forecaster
   # test case where extra_sources is "empty"
   # test case where the epi_df is empty
+  test_that(paste(forecaster[[1]], "deals with an empty epi_df"), {
+    jhu <- epipredict::case_death_rate_subset %>%
+      dplyr::filter(time_value >= as.Date("2021-12-01"))
+    # the as_of for this is wildly far in the future
+    attributes(jhu)$metadata$as_of <- max(jhu$time_value) + 3
+    res <- forecaster[[2]](jhu, "case_rate", c("death_rate")) # reference forecast for column names
     null_jhu <- jhu %>% filter(time_value < as.Date("0009-01-01"))
     expect_no_error(null_res <- forecaster[[2]](null_jhu, "case_rate", c("death_rate")))
     expect_identical(names(null_res), names(res))
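For readers of this diff, the tests assume `forecasters` is a list of name/function pairs, of which only the tail is visible in the first hunk. A minimal sketch of that assumed structure follows; the `scaled_pop` and `flatline_fc` names come from the tests above, while the argument names `epi_df`, `outcome`, `extra_sources`, and `ahead` are illustrative guesses at the forecaster signature rather than the package's documented interface.

# Assumed shape of the list the loop iterates over: each entry pairs a label
# with a forecaster function, so forecaster[[1]] is the label used in the
# test descriptions and forecaster[[2]] is the function under test, called as
# forecaster[[2]](epi_df, outcome, extra_sources, ahead).
forecasters <- list(
  c("scaled_pop", scaled_pop),  # assumed entry; "scaled_pop" is referenced in the tests
  c("flatline_fc", flatline_fc) # shown as context in the first hunk
)
for (forecaster in forecasters) {
  # shared tests run here for every forecaster, followed by
  # forecaster-specific tests keyed on forecaster[[1]]
}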