From c590e56e7efb9a48140535674c3307061c221207 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Klara=20R=C3=B6hrl?= Date: Tue, 18 May 2021 16:03:58 +0200 Subject: [PATCH] Implement an empirically more founded version of time-variant sensitivity of rapid tests. (#127) --- docs/source/reference_guides/epi_params.rst | 20 ++ src/sid/covid_epi_params.csv | 7 +- src/sid/rapid_tests.py | 66 +++---- tests/test_params.csv | 7 +- tests/test_rapid_tests.py | 191 +++++++++++--------- 5 files changed, 167 insertions(+), 124 deletions(-) diff --git a/docs/source/reference_guides/epi_params.rst b/docs/source/reference_guides/epi_params.rst index 3ef6e6e9..3c80168e 100644 --- a/docs/source/reference_guides/epi_params.rst +++ b/docs/source/reference_guides/epi_params.rst @@ -92,6 +92,26 @@ distribution: +Rapid Tests +------------ + +To calibrate the sensitivity of rapid tests we rely on +`this study `_ by Smith et al. (2021). + +From this we define five periods: + +1. Before onset of infectiousness, the sensitivity is 0.35 as reported in figure 2. +2. On the day that infectiousness starts, the sensitivity is 0.88 as reported in table 2. +3. While a person is infectious, the sensitivity is 0.92 as reported in figure 2. +4. After infectiousness stops, the sensitivity is 0.5 until 10 days after the start of + infectiousness as can be conluded from figure 2 and table 2. +5. If it has been more than 10 days since the onset of infectiousness, rapid tests are + negative. + +For the specificity we assume 99.4% which is in line with +`this meta study `_ by Brümmer et al. (2021). 
+ + Immunity Countdown ------------------ diff --git a/src/sid/covid_epi_params.csv b/src/sid/covid_epi_params.csv index 838ca938..d5aaed62 100644 --- a/src/sid/covid_epi_params.csv +++ b/src/sid/covid_epi_params.csv @@ -128,7 +128,8 @@ cd_knows_infectious_false,all,-1,1 cd_is_immune_by_vaccine,all,-1,0.25 cd_is_immune_by_vaccine,all,14,0.35 cd_is_immune_by_vaccine,all,21,0.4 -rapid_test,sensitivity,0,0.6 -rapid_test,sensitivity,-1,0.9 -rapid_test,sensitivity,-2,0.961 rapid_test,specificity,specificity,0.994 +rapid_test,sensitivity,pre-infectious,0.35 +rapid_test,sensitivity,start_infectious,0.88 +rapid_test,sensitivity,while_infectious,0.95 +rapid_test,sensitivity,after_infectious,0.5 diff --git a/src/sid/rapid_tests.py b/src/sid/rapid_tests.py index 1bf12645..10c3eb8c 100644 --- a/src/sid/rapid_tests.py +++ b/src/sid/rapid_tests.py @@ -111,58 +111,50 @@ def _sample_test_outcome(states, receives_rapid_test, params, seed): np.random.seed(next(seed)) is_tested_positive = pd.Series(index=states.index, data=False) - receives_test_and_is_infectious = states["infectious"] & receives_rapid_test with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="indexing past lexsort depth may impact performance." 
) sensitivity_params = params.loc[("rapid_test", "sensitivity"), "value"] + + infected = states["cd_infectious_true"] >= -10 + receives_test_and_is_infected = infected & receives_rapid_test sensitivity = _create_sensitivity( - states, + states=states[receives_test_and_is_infected], sensitivity_params=sensitivity_params, - receives_test_and_is_infectious=receives_test_and_is_infectious, ) is_truly_positive = boolean_choices(sensitivity) + is_tested_positive.loc[receives_test_and_is_infected] = is_truly_positive specificity = params.loc[("rapid_test", "specificity", "specificity"), "value"] - receives_test_and_is_not_infectious = ~states["infectious"] & receives_rapid_test - is_falsely_positive = boolean_choices( - np.full(receives_test_and_is_not_infectious.sum(), 1 - specificity) - ) - - is_tested_positive.loc[receives_test_and_is_infectious] = is_truly_positive - is_tested_positive.loc[receives_test_and_is_not_infectious] = is_falsely_positive + uninfected_test_receivers = ~infected & receives_rapid_test + p_false_positive = np.full(uninfected_test_receivers.sum(), 1 - specificity) + is_falsely_positive = boolean_choices(p_false_positive) + is_tested_positive.loc[uninfected_test_receivers] = is_falsely_positive return is_tested_positive -def _create_sensitivity(states, sensitivity_params, receives_test_and_is_infectious): - """Create the sensitivity for each individual. - - Args: - sensitivity_params (pandas.Series): The index are the values of - `cd_infectious_true` if there is more than one entry. In that case the - sensitivity depends on the time since infectiousness started. Missing values - for `cd_infectious_true` are filled with the value of the most negative - index. For example, if you want to have a sensitivity of 50% on the day an - individual becomes infectious and 90% on every later day, you would specify - as index [0, -1] and [0.5, 0.9] as values. 
-            If there is only one entry, the index must be ["sensitivity"] and this
-            sensitivity is used for all infectious individuals.
-
-    """
-    if len(sensitivity_params) == 1:
-        sensitivity_value = sensitivity_params["sensitivity"]
-        sensitivity = np.full(receives_test_and_is_infectious.sum(), sensitivity_value)
-    else:
-        last_sensitivity_value = sensitivity_params[sensitivity_params.index.min()]
-        # It is irrelevant what value is set for people who are not infectious (yet)
-        # as they are removed through receives_test_and_is_infectious.
-        sensitivity = pd.Series(last_sensitivity_value, index=states.index)
-        for day_since_infectious, sensitivity_value in sensitivity_params.items():
-            at_current_day = states["cd_infectious_true"] == day_since_infectious
-            sensitivity[at_current_day] = sensitivity_value
-        sensitivity = sensitivity[receives_test_and_is_infectious]
+def _create_sensitivity(states, sensitivity_params):
+    """Create the period-dependent rapid test sensitivity for each individual."""
+    sensitivity = pd.Series(np.nan, index=states.index)
+    p_pos_preinfectious = sensitivity_params.loc["pre-infectious"]
+    p_pos_start_infectious = sensitivity_params.loc["start_infectious"]
+    p_pos_while_infectious = sensitivity_params.loc["while_infectious"]
+    p_pos_after_infectious = sensitivity_params.loc["after_infectious"]
+
+    sensitivity[states["cd_infectious_true"] > 0] = p_pos_preinfectious
+    sensitivity[states["infectious"]] = p_pos_while_infectious
+    sensitivity[states["cd_infectious_true"] == 0] = p_pos_start_infectious
+    within_10_days = states["cd_infectious_true"].between(-10, 0)
+    sensitivity[~states["infectious"] & within_10_days] = p_pos_after_infectious
+    if sensitivity.isnull().any():
+        raise ValueError(
+            "There are NaN left in the person-dependent sensitivity. "
+            "The likeliest explanation is that _create_sensitivity was called "
+            "with uninfected individuals (i.e. with individuals where "
+            "`cd_infectious_true` < -10)."
+ ) return sensitivity diff --git a/tests/test_params.csv b/tests/test_params.csv index 16bcedec..b223be9e 100644 --- a/tests/test_params.csv +++ b/tests/test_params.csv @@ -16,5 +16,8 @@ cd_knows_immune_false,all,-1,1,, cd_knows_infectious_false,all,-1,1,, cd_is_immune_by_vaccine,all,-1,1,, known_cases_multiplier,known_cases_multiplier,known_cases_multiplier,1.1,, -rapid_test,sensitivity,sensitivity,0.961 -rapid_test,specificity,specificity,0.994 +rapid_test,specificity,specificity,0.994,, +rapid_test,sensitivity,pre-infectious,0.35,, +rapid_test,sensitivity,start_infectious,0.88,, +rapid_test,sensitivity,while_infectious,0.95,, +rapid_test,sensitivity,after_infectious,0.5,, diff --git a/tests/test_rapid_tests.py b/tests/test_rapid_tests.py index 5c5cc1fd..79a44d62 100644 --- a/tests/test_rapid_tests.py +++ b/tests/test_rapid_tests.py @@ -141,84 +141,100 @@ def test_compute_who_reveives_rapid_tests_raises_error(initial_states, params): ) -@pytest.mark.unit -def test_sample_test_outcome_with_sensitivity(params): - n_individuals = 1_000_000 - - receives_rapid_test = np.ones(n_individuals).astype(bool) - states = pd.DataFrame({"infectious": receives_rapid_test}) - - is_tested_positive = _sample_test_outcome( - states, receives_rapid_test, params, itertools.count() +@pytest.fixture() +def rapid_test_states(): + np.random.seed(84845) + group_size = 250_000 + + uninfected = pd.DataFrame() + uninfected["cd_infectious_true"] = np.random.choice( + [-12, -15, -18], size=group_size, replace=True ) + uninfected["infectious"] = False - sensitivity = params.loc[("rapid_test", "sensitivity", "sensitivity"), "value"] - assert np.isclose(is_tested_positive.mean(), sensitivity, atol=1e-3) - + pre_infectious = pd.DataFrame() + pre_infectious["cd_infectious_true"] = np.random.choice([2, 1, 3], size=group_size) + pre_infectious["infectious"] = False -@pytest.mark.unit -def test_sample_test_outcome_with_sensitivity_time_dependent(params): - n_individuals = 10_000_000 + start_infectious 
= pd.DataFrame() + start_infectious["cd_infectious_true"] = [0] * group_size + start_infectious["infectious"] = True - cd_infectious_true = np.random.choice([1, 0, -1, -2], n_individuals, True) - states = pd.DataFrame( - { - "infectious": cd_infectious_true <= 0, - "cd_infectious_true": cd_infectious_true, - } - ) - receives_rapid_test = np.ones(n_individuals).astype(bool) - receives_rapid_test[:1000] = False + while_infectious = pd.DataFrame() + while_infectious["cd_infectious_true"] = np.random.choice([-1, -3], size=group_size) + while_infectious["infectious"] = True - params = params.drop([("rapid_test", "sensitivity")]) - params.loc[("rapid_test", "sensitivity", 0)] = 0.5 - params.loc[("rapid_test", "sensitivity", -1)] = 0.9 + after_infectious = pd.DataFrame() + after_infectious["cd_infectious_true"] = np.random.choice([-4, -8], size=group_size) + after_infectious["infectious"] = False - is_tested_positive = _sample_test_outcome( - states, receives_rapid_test, params, itertools.count() - ) - assert not is_tested_positive[:1000].any() - expected_share_false_positive = ( - 1 - params.loc[("rapid_test", "specificity", "specificity"), "value"] - ) - res_share_false_positive = is_tested_positive[ - states["cd_infectious_true"] > 0 & receives_rapid_test - ].mean() - assert np.isclose( - expected_share_false_positive, res_share_false_positive, atol=1e-3 - ) - - first_day_share_positive = is_tested_positive[ - states["cd_infectious_true"] == 0 & receives_rapid_test - ].mean() - assert np.isclose( - first_day_share_positive, - 0.5, - atol=1e-3, - ) - later_share_positive = is_tested_positive[ - states["cd_infectious_true"] < 0 & receives_rapid_test - ].mean() - assert np.isclose( - later_share_positive, - 0.9, - atol=1e-3, + states = pd.concat( + [ + uninfected, + pre_infectious, + start_infectious, + while_infectious, + after_infectious, + ], + axis=0, ) + return states @pytest.mark.unit -def test_sample_test_outcome_with_specificity(params): - n_individuals = 
1_000_000 - - receives_rapid_test = np.ones(n_individuals).astype(bool) - states = pd.DataFrame({"infectious": np.zeros(n_individuals).astype(bool)}) +def test_sample_test_outcome(rapid_test_states, params): + states = rapid_test_states + receives_rapid_test = np.random.choice( + [True, False], size=len(states), p=[0.8, 0.2] + ) is_tested_positive = _sample_test_outcome( - states, receives_rapid_test, params, itertools.count() + states=states, + receives_rapid_test=receives_rapid_test, + params=params, + seed=itertools.count(), ) + # not to be tested + assert not is_tested_positive[~receives_rapid_test].any() + + # uninfected + tested_uninfected = receives_rapid_test & (states["cd_infectious_true"] < -10) + uninfected_share_positive = is_tested_positive[tested_uninfected].mean() specificity = params.loc[("rapid_test", "specificity", "specificity"), "value"] - assert np.isclose(is_tested_positive.mean(), 1 - specificity, atol=1e-3) + assert np.isclose(1 - uninfected_share_positive, specificity, atol=1e-2) + + # preinfectious + sensitivity = params.loc[("rapid_test", "sensitivity", "pre-infectious"), "value"] + tested_preinfectious = receives_rapid_test & (states["cd_infectious_true"] > 0) + preinfectious_share_positive = is_tested_positive[tested_preinfectious].mean() + assert np.isclose(sensitivity, preinfectious_share_positive, atol=1e-2) + + # first day of infectiousness + sensitivity = params.loc[("rapid_test", "sensitivity", "start_infectious"), "value"] + tested_start_infectious = receives_rapid_test & (states["cd_infectious_true"] == 0) + start_infectious_share_positive = is_tested_positive[tested_start_infectious].mean() + assert np.isclose(sensitivity, start_infectious_share_positive, atol=1e-2) + + # while infectious + sensitivity = params.loc[("rapid_test", "sensitivity", "while_infectious"), "value"] + tested_while_infectious = receives_rapid_test & ( + states["infectious"] & (states["cd_infectious_true"] < 0) + ) + while_infectious_share_positive = 
is_tested_positive[tested_while_infectious].mean()
+    assert np.isclose(sensitivity, while_infectious_share_positive, atol=1e-2)
+
+    # after infectious
+    sensitivity = params.loc[("rapid_test", "sensitivity", "after_infectious"), "value"]
+    tested_after_infectious = (
+        receives_rapid_test
+        & ~states["infectious"]
+        & (states["cd_infectious_true"] < 0)
+        & (states["cd_infectious_true"] > -10)
+    )
+
+    after_infectious_share_positive = is_tested_positive[tested_after_infectious].mean()
+    assert np.isclose(sensitivity, after_infectious_share_positive, atol=1e-2)
 
 
@@ -246,39 +262,50 @@ def test_update_states_with_rapid_tests_outcomes():
 
 
 @pytest.mark.unit
-def test_create_sensitivity_time_dependent():
+def test_create_sensitivity():
     states = pd.DataFrame(
-        [
+        columns=["infectious", "cd_infectious_true"],
+        data=[
             [False, 2],  # not infectious yet
             [True, 0],  # first day of infectiousness
             [True, -1],  # 2nd day of infectiousness
-            [True, -2],  # not getting tested
-            [True, -4],  # after latest specified infectiousness
             [False, -5],  # not infectious anymore
         ],
-        columns=["infectious", "cd_infectious_true"],
     )
-    params = pd.Series([0.5, 0.7, 0.9], index=[0, -1, -2])
-    receives_test_and_is_infectious = pd.Series([False, True, True, False, True, False])
-    result = _create_sensitivity(states, params, receives_test_and_is_infectious)
-    expected = pd.Series([0.5, 0.7, 0.9], index=[1, 2, 4])
-    assert result.equals(expected)
+    sensitivity_params = pd.Series(
+        [0.35, 0.88, 0.95, 0.5],
+        index=[
+            "pre-infectious",
+            "start_infectious",
+            "while_infectious",
+            "after_infectious",
+        ],
+    )
+    result = _create_sensitivity(states, sensitivity_params)
+    expected = pd.Series([0.35, 0.88, 0.95, 0.5])
+    assert result.equals(expected)
 
 
 @pytest.mark.unit
-def test_create_sensitivity():
+def test_create_sensitivity_raises_nan_error():
     states = pd.DataFrame(
-        [
+        columns=["infectious", "cd_infectious_true"],
+        data=[
             [False, 2],  # not infectious yet
             [True, 0],  # first day of 
infectiousness [True, -1], # 2nd day of infectiousness - [True, -2], # not getting tested - [True, -4], # after latest specified infectiousness [False, -5], # not infectious anymore + [False, -12], # recovered + ], + ) + sensitivity_params = pd.Series( + [0.35, 0.88, 0.95, 0.5], + index=[ + "pre-infectious", + "start_infectious", + "while_infectious", + "after_infectious", ], - columns=["infectious", "cd_infectious_true"], ) - params = pd.Series([0.5], index=["sensitivity"]) - receives_test_and_is_infectious = pd.Series([False, True, True, False, True, False]) - result = _create_sensitivity(states, params, receives_test_and_is_infectious) - assert (result == 0.5).all() + with pytest.raises(ValueError, match="NaN left in the"): + _create_sensitivity(states, sensitivity_params)