Skip to content

Commit 92e8513

Browse files
authored
Merge pull request #294 from cmu-delphi/release/7.2.0
Release 7.2.0
2 parents b919543 + 77b27c3 commit 92e8513

File tree

10 files changed

+139
-12
lines changed

10 files changed

+139
-12
lines changed

.bumpversion.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
[bumpversion]
2-
current_version = 7.1.0
2+
current_version = 7.2.0
33
commit = False
44
tag = False
55

.github/workflows/create_release.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ jobs:
1111
runs-on: ubuntu-latest
1212
steps:
1313
- name: Check out code
14-
uses: actions/checkout@v2
14+
uses: actions/checkout@v3
1515
with:
1616
ref: main
1717
ssh-key: ${{ secrets.CMU_DELPHI_DEPLOY_MACHINE_SSH }}

.github/workflows/release_main.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ jobs:
2020
runs-on: ubuntu-latest
2121
steps:
2222
- name: Check out code
23-
uses: actions/checkout@v2
23+
uses: actions/checkout@v3
2424
- name: Set up Python 3.8
2525
uses: actions/setup-python@v2
2626
with:
@@ -49,7 +49,7 @@ jobs:
4949
runs-on: ubuntu-latest
5050
steps:
5151
- name: Check out code
52-
uses: actions/checkout@v2
52+
uses: actions/checkout@v3
5353
with:
5454
ref: dev
5555
fetch-depth: 0

DESCRIPTION

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
Package: forecasteval
22
Title: Forecast Evaluation Dashboard
3-
Version: 7.1.0
3+
Version: 7.2.0
44
Authors@R: c(person("Kate", "Harwood", role = "aut"),
55
person("Chris", "Scott", role = "ctb"),
6-
person("Jed", "Grabman", role = "ctb")),
6+
person("Jed", "Grabman", role = "ctb"),
77
person("Nat", "DeFries", email= "[email protected]", role = c("aut", "cre")))
88
Description: This app collects and scores COVID-19 forecasts submitted to the CDC, and displays the results in an RShiny dashboard.
99
License: MIT License, Copyright (c) 2021 Delphi contributors

Makefile

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,15 @@ PWD=$(shell pwd)
55
S3_URL=https://forecast-eval.s3.us-east-2.amazonaws.com
66
S3_BUCKET=s3://forecast-eval
77

8+
# Change `imageTag` during `make` call via `make <command> imageTag=<tag name>`
9+
#
10+
# `imageTag` specifies the tag to be used for the production dashboard Docker
11+
# image. If building from `main`, it should be `latest`. If building from
12+
# `dev`, it should be `dev`. The default value used here is meant to prevent
13+
# the actual `latest` and `dev` images in the image repository from being
14+
# accidentally overwritten.
15+
imageTag=local
16+
817
build: build_dashboard
918

1019
# Build a docker image suitable for running the scoring pipeline
@@ -66,10 +75,14 @@ build_dashboard_dev: pull_data
6675
start_dashboard: build_dashboard_dev
6776
docker run --rm -p 3838:80 ghcr.io/cmu-delphi/forecast-eval:latest
6877

69-
# Build a docker image for production use
78+
# Build a docker image for production use. Currently this isn't used anywhere,
79+
# but could be useful if we need to manually build a docker image for
80+
# production.
7081
build_dashboard: pull_data
7182
docker build --no-cache=true --pull -t ghcr.io/cmu-delphi/forecast-eval:$(imageTag) -f devops/Dockerfile .
7283

73-
# Push a production docker image to the image repository
84+
# Push a production docker image to the image repository. Currently this isn't
85+
# used anywhere, but could be useful if we need to manually release a docker
86+
# image for production.
7487
deploy_dashboard: build_dashboard
7588
docker push ghcr.io/cmu-delphi/forecast-eval:$(imageTag)

app/R/exportScores.R

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ exportScoresUI <- function(id = "exportScores") {
55
)
66
}
77

8-
createExportScoresDataFrame <- function(scoreDf, targetVariable, scoreType, forecasters, loc, coverageInterval) {
8+
createExportScoresDataFrame <- function(scoreDf, targetVariable, scoreType, forecasters, loc, coverageInterval, filterDate) {
99
scoreDf <- filter(
1010
scoreDf[[targetVariable]],
1111
forecaster %chin% forecasters
@@ -16,7 +16,7 @@ createExportScoresDataFrame <- function(scoreDf, targetVariable, scoreType, fore
1616
if (targetVariable == "Hospitalizations") {
1717
scoreDf <- filterHospitalizationsAheads(scoreDf)
1818
}
19-
scoreDf <- filterOverAllLocations(scoreDf, scoreType)
19+
scoreDf <- filterOverAllLocations(scoreDf, scoreType, filterDate = filterDate)
2020
return(scoreDf[[1]])
2121
} else {
2222
scoreDf <- filter(scoreDf, geo_value == tolower(loc))

app/assets/about.md

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -127,3 +127,18 @@ stateCases = tryCatch(
127127
```
128128

129129

130+
131+
##### Forecasts with actuals
132+
133+
If you are interested in getting the forecasts paired with the corresponding actual values (if you were e.g. testing different evaluation methods), that can be found in [the Amazon S3 bucket](https://forecast-eval.s3.us-east-2.amazonaws.com/) in 3 zip files.
134+
These files are static, generated using [the aggregation script](https://raw.githubusercontent.com/cmu-delphi/forecast-eval/main/app/assets/forecastsWithActuals.R) with the forecast and actual data available as of June 12, 2023. The latest forecast date available for each target signal is:
135+
136+
* [cases](https://forecast-eval.s3.us-east-2.amazonaws.com/cases.zip): 2023-02-13
137+
* [hospitalizations](https://forecast-eval.s3.us-east-2.amazonaws.com/hospitalizations.zip):
138+
* 1 week: 2023-06-05
139+
* 2 week: 2023-06-05
140+
* 3 week: 2023-06-05
141+
* 4 week: 2023-06-05
142+
* [deaths](https://forecast-eval.s3.us-east-2.amazonaws.com/deaths.zip): 2023-03-06
143+
144+
If the S3 bucket is down, these files are also available on [Delphi's file-hosting site](https://www.cmu.edu/delphi-web/forecast-eval-scores).

app/assets/forecastsWithActuals.R

Lines changed: 98 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,98 @@
library(dplyr)
library(tidyr)
library(aws.s3)

Sys.setenv("AWS_DEFAULT_REGION" = "us-east-2")

# Handle to the public forecast-eval S3 bucket. On failure we keep the error
# condition object (instead of stopping) so the per-object reader below can
# still run best-effort, but we now emit a warning so the failure is visible.
s3bucket <- tryCatch(
  {
    get_bucket(bucket = "forecast-eval")
  },
  error = function(e) {
    warning(
      "Could not connect to the forecast-eval S3 bucket: ",
      conditionMessage(e),
      call. = FALSE
    )
    e
  }
)

# Read one RDS object from the bucket.
#
# name: object key in the bucket, e.g. "score_cards_state_cases.rds".
# Returns the deserialized object on success; on failure, warns and returns
# the error condition (preserving the original best-effort behavior, but no
# longer silently).
readbucket <- function(name) {
  tryCatch(
    {
      s3readRDS(object = name, bucket = s3bucket)
    },
    error = function(e) {
      warning(
        "Failed to read '", name, "' from S3: ", conditionMessage(e),
        call. = FALSE
      )
      e
    }
  )
}
# Score cards supply the observed ("actual") values; each signal is stored as
# separate nation- and state-level objects, combined here into one frame.
read_scores <- function(signal) {
  bind_rows(
    readbucket(sprintf("score_cards_nation_%s.rds", signal)),
    readbucket(sprintf("score_cards_state_%s.rds", signal))
  )
}

cases <- read_scores("cases")
deaths <- read_scores("deaths")
hosp <- read_scores("hospitalizations")

# The big one: predictions from all forecasters
pred <- readbucket("predictions_cards.rds")
# Pair prediction quantiles with observed values for one target signal.
#
# pred_df:  long-format predictions (one row per quantile) for one signal
# score_df: score cards providing the observed `actual` values
# Returns a wide data frame with one `forecast_<quantile>` column per quantile
# plus the matching `actual` column. Prints a sanity check that the join did
# not introduce extra missing actuals.
join_with_actuals <- function(pred_df, score_df) {
  wide <- pred_df %>%
    mutate(signal = NULL, data_source = NULL, incidence_period = NULL) %>%
    pivot_wider(
      names_from = quantile,
      values_from = value,
      names_prefix = "forecast_"
    )

  actuals <- score_df %>%
    select(ahead, geo_value, forecaster, forecast_date, target_end_date, actual)

  # Natural join on the shared key columns (ahead, geo_value, forecaster,
  # forecast_date, target_end_date), as in the original per-signal code.
  joined <- left_join(wide, actuals)

  # Sanity check: NA counts of `actual` should match before and after joining.
  cat(
    "NA actuals preserved:",
    sum(is.na(actuals$actual)) == sum(is.na(joined$actual)),
    "\n"
  )
  joined
}

# Cases
joined_cases <- join_with_actuals(
  filter(pred, signal == "confirmed_incidence_num"), cases
)
write.csv(joined_cases, "cases.csv")

# Deaths
joined_deaths <- join_with_actuals(
  filter(pred, signal == "deaths_incidence_num"), deaths
)
write.csv(joined_deaths, "deaths.csv")
# Hospitalizations: process in 4 weekly chunks of `ahead` values, since
# pivoting the full signal at once runs into memory errors.
pred_hosp <- actual_hosp <- joined_hosp <- vector(mode = "list", length = 4)
for (k in seq_len(4)) {
  cat(k, "... ")
  # Aheads covered by week k: 1-7, 8-14, 15-21, 22-28.
  days <- (k - 1) * 7 + 1:7

  pred_hosp[[k]] <- pred %>%
    filter(signal == "confirmed_admissions_covid_1d", ahead %in% days) %>%
    mutate(signal = NULL, data_source = NULL, incidence_period = NULL) %>%
    pivot_wider(
      names_from = quantile,
      values_from = value,
      names_prefix = "forecast_"
    )

  actual_hosp[[k]] <- hosp %>%
    filter(ahead %in% days) %>%
    select(ahead, geo_value, forecaster, forecast_date, target_end_date, actual)

  joined_hosp[[k]] <- left_join(pred_hosp[[k]], actual_hosp[[k]])

  # Sanity check: joining should not change the number of missing actuals.
  # BUG FIX: the original used `$act`, relying on partial matching — on
  # tibbles (which pivot_wider/left_join produce) `$act` returns NULL with a
  # warning, so both sums were over NULL and the check was vacuously TRUE.
  cat(sum(is.na(actual_hosp[[k]]$actual)) == sum(is.na(joined_hosp[[k]]$actual)))
  write.csv(joined_hosp[[k]], sprintf("hospitalizations_%iwk.csv", k))
}

app/global.R

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ library(tsibble)
1010
library(covidcast)
1111
library(data.table)
1212

13-
appVersion <- "7.1.0"
13+
appVersion <- "7.2.0"
1414

1515
COVERAGE_INTERVALS <- c("10", "20", "30", "40", "50", "60", "70", "80", "90", "95", "98")
1616
CASES_DEATHS_TARGET_DAY <- "Saturday"

app/server.R

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -991,7 +991,8 @@ server <- function(input, output, session) {
991991
"exportScores", shiny::reactive(generateExportFilename(input)),
992992
shiny::reactive(createExportScoresDataFrame(
993993
df_list, input$targetVariable, input$scoreType, input$forecasters,
994-
input$location, input$coverageInterval
994+
input$location, input$coverageInterval,
995+
filterDate = dataCreationDate
995996
))
996997
)
997998
}

0 commit comments

Comments
 (0)