|
2 | 2 | from os.path import join, basename
|
3 | 3 |
|
4 | 4 | import pandas as pd
|
| 5 | +import numpy as np |
| 6 | +from delphi_jhu.run import add_nancodes |
| 7 | +from delphi_utils import Nans |
5 | 8 |
|
6 | 9 |
|
| 10 | +def _non_ignored_files_set(directory): |
| 11 | + """List all files in a directory not preceded by a '.' and store them in a set.""" |
| 12 | + out = {fname for fname in listdir(directory) if not basename(fname).startswith(".")} |
| 13 | + return out |
| 14 | + |
class TestRun:
    """End-to-end checks on the JHU indicator's CSV output and nancode logic."""

    def test_output_files_exist(self, run_as_module):
        """Exactly the expected date/geo/metric CSVs are written to ./receiving."""
        csv_files = _non_ignored_files_set("receiving")

        dates = [d.strftime("%Y%m%d") for d in pd.date_range("20200303", "20200310")]
        geos = ["county", "hrr", "msa", "state", "hhs", "nation"]
        metrics = [
            f"{event}{smoothing}_{window}_{stat}"
            for event in ["confirmed", "deaths"]
            for smoothing in ["", "_7dav"]
            for window in ["incidence", "cumulative"]
            for stat in ["num", "prop"]
        ]
        # 7-day averages of cumulative counts are never produced by the pipeline.
        expected_files = {
            f"{date}_{geo}_{metric}.csv"
            for date in dates
            for geo in geos
            for metric in metrics
            if not ("7dav" in metric and "cumulative" in metric)
        }

        assert csv_files == expected_files

    def test_output_file_format(self, run_as_module):
        """Output CSVs carry the standard value columns plus the three missing-* nancode columns."""
        df = pd.read_csv(join("receiving", "20200310_state_confirmed_cumulative_num.csv"))
        expected_columns = [
            "geo_id",
            "val",
            "se",
            "sample_size",
            "missing_val",
            "missing_se",
            "missing_sample_size",
        ]
        assert (df.columns.values == expected_columns).all()

    @staticmethod
    def _nancodes_frame(val, missing_val=None, missing_se=None, missing_sample_size=None):
        """Build the 8-row (timestamp, geo_id)-indexed fixture frame used by test_add_nancodes.

        Column insertion order matters for assert_frame_equal, so the missing_*
        columns are appended after val/se/sample_size, and only when supplied.
        """
        data = {
            "timestamp": pd.date_range("20200321", "20200328"),
            "geo_id": ["01017", "01043", "01061", "01103", "02282", "72001", "31000", "49000"],
            "val": val,
            "se": [np.nan] * 8,
            "sample_size": [np.nan] * 8,
        }
        if missing_val is not None:
            data["missing_val"] = missing_val
            data["missing_se"] = missing_se
            data["missing_sample_size"] = missing_sample_size
        return pd.DataFrame(data).set_index(["timestamp", "geo_id"])

    def test_add_nancodes(self):
        """add_nancodes annotates NaN values with the correct Nans enum codes."""
        # Unsmoothed signal: present values are NOT_MISSING; the Alaska (02282)
        # and Puerto Rico (72001) style exceptions and trailing NaN get
        # REGION_EXCEPTION / OTHER codes respectively.
        val = [0.1, 0.2, 0.3, 0.4, 0.5, np.nan, 0.7, np.nan]
        df = self._nancodes_frame(val)
        expected_df = self._nancodes_frame(
            val,
            missing_val=[Nans.NOT_MISSING] * 5 + [Nans.REGION_EXCEPTION, Nans.NOT_MISSING, Nans.OTHER],
            missing_se=[Nans.NOT_APPLICABLE] * 8,
            missing_sample_size=[Nans.NOT_APPLICABLE] * 8,
        )

        pd.testing.assert_frame_equal(add_nancodes(df, "deaths", "county", None), expected_df)

        # Seven-day-average signal: the leading NaNs (not enough history to
        # average) are coded CENSORED instead of NOT_MISSING.
        val2 = [np.nan] * 6 + [0.7, np.nan]
        df2 = self._nancodes_frame(val2)
        expected_df2 = self._nancodes_frame(
            val2,
            missing_val=[Nans.CENSORED] * 5 + [Nans.REGION_EXCEPTION, Nans.NOT_MISSING, Nans.OTHER],
            missing_se=[Nans.NOT_APPLICABLE] * 8,
            missing_sample_size=[Nans.NOT_APPLICABLE] * 8,
        )

        pd.testing.assert_frame_equal(add_nancodes(df2, "deaths", "county", "seven_day_average"), expected_df2)
0 commit comments