Skip to content

Commit

Permalink
AMMOD-D23-00192R2 (#384)
Browse files Browse the repository at this point in the history
  • Loading branch information
twallema authored Jun 14, 2023
1 parent 3f5586e commit dc22e9d
Show file tree
Hide file tree
Showing 37 changed files with 1,873 additions and 3,856 deletions.

This file was deleted.

Large diffs are not rendered by default.

This file was deleted.

Large diffs are not rendered by default.

This file was deleted.

This file was deleted.

45 changes: 19 additions & 26 deletions notebooks/calibration/calibrate_BASE-COVID19_SEIQRD_hybrid_vacc.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
"""

__author__ = "Tijs Alleman"
__copyright__ = "Copyright (c) 2021 by T.W. Alleman, BIOMATH, Ghent University. All Rights Reserved."
__copyright__ = "Copyright (c) 2023 by T.W. Alleman, BIOSPACE, Ghent University. All Rights Reserved."

############################
## Load required packages ##
Expand Down Expand Up @@ -121,7 +121,6 @@
stochastic=True, distinguish_day_type=True)

tau = 0.5

# Deterministic
model.parameters['beta'] = 0.027 # R0 = 3.31 --> https://pubmed.ncbi.nlm.nih.gov/32498136/
warmup = 0# 39 # Start 5 Feb. 2020: day of first detected COVID-19 infectee in Belgium
Expand Down Expand Up @@ -176,27 +175,23 @@
## Global PSO optimization ##
#############################

# transmission
#pars1 = ['beta',]
#bounds1=((0.001,0.080),)
# Effectivity parameters
pars2 = ['eff_work', 'mentality','k']
bounds2=((0.05,0.95),(0.01,0.99),(1e3,1e4))
# Social contact
pars1 = ['eff_work', 'mentality','k']
bounds1=((0.05,0.95),(0.01,0.99),(1e3,1e4))
# Variants
pars3 = ['K_inf',]
# Must supply the bounds
bounds3 = ((1.20,1.60),(1.60,2.40))
pars2 = ['K_inf',]
bounds2 = ((1.20,1.60),(1.60,2.40))
# Seasonality
pars4 = ['amplitude',]
bounds4 = ((0,0.40),)
pars3 = ['amplitude',]
bounds3 = ((0,0.40),)
# New hospprop
pars5 = ['f_h',]
bounds5 = ((0,1),)
pars4 = ['f_h',]
bounds4 = ((0,1),)
# Join them together
pars = pars2 + pars3 + pars4 + pars5
bounds = bounds2 + bounds3 + bounds4 + bounds5
pars = pars1 + pars2 + pars3 + pars4
bounds = bounds1 + bounds2 + bounds3 + bounds4
# Define labels
labels = ['$\Omega_{work}$', '$M$', 'k', '$K_{inf, abc}$', '$K_{inf, \\delta}$', '$A$', '$f_h$']
labels = ['$\Omega$', '$\Psi$', 'k', '$K_{inf, abc}$', '$K_{inf, \\delta}$', '$A$', '$f_h$']
# Setup objective function without priors and with negative weights
objective_function = log_posterior_probability(model,pars,bounds,data,states,log_likelihood_fnc,log_likelihood_fnc_args,labels=labels)

Expand All @@ -208,7 +203,7 @@
#out = pso.optimize(objective_function, bounds, kwargs={'simulation_kwargs':{'warmup': warmup}},
# swarmsize=multiplier_pso*processes, max_iter=n_pso, processes=processes, debug=True)[0]
# A good guess
theta = [0.45, 0.56, 3140, 1.50, 1.90, 0.225, 0.60] # 2023-02-24
theta = [0.50, 0.56, 5000, 1.40, 1.90, 0.225, 0.60]

# Nelder-mead
#step = len(bounds)*[0.05,]
Expand Down Expand Up @@ -264,21 +259,19 @@
print('\n2) Markov Chain Monte Carlo sampling\n')

# Perturbate PSO Estimate
# pars1 = ['beta',]
#pert1 = [0.01,]
# pars2 = ['eff_schools', 'eff_work', 'eff_rest', 'mentality', 'eff_home']
pert2 = [0.05, 0.05, 0.05]
pert1 = [0.05, 0.05, 0.05]
# pars3 = ['K_inf_abc','K_inf_delta']
pert3 = [0.05, 0.05]
pert2 = [0.05, 0.05]
# pars4 = ['amplitude']
pert4 = [0.05,]
pert3 = [0.05,]
# pars5 = ['f_h']
pert5 = [0.05,]
pert4 = [0.05,]
# Setup prior functions and arguments
log_prior_prob_fnc = len(bounds)*[log_prior_uniform,]
log_prior_prob_fnc_args = bounds
# Add them together and perturbate
pert = pert2 + pert3 + pert4 + pert5
pert = pert1 + pert2 + pert3 + pert4
ndim, nwalkers, pos = perturbate_theta(theta, pert, multiplier=multiplier_mcmc, bounds=log_prior_prob_fnc_args, verbose=False)

######################
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
"""

__author__ = " Tijs W. Alleman, Michiel Rollier"
__copyright__ = "Copyright (c) 2022 by T.W. Alleman, BIOMATH, Ghent University. All Rights Reserved."
__copyright__ = "Copyright (c) 2023 by T.W. Alleman, BIOSPACE, Ghent University. All Rights Reserved."

############################
## Load required packages ##
Expand Down Expand Up @@ -139,7 +139,7 @@
model, BASE_samples_dict, initN = initialize_COVID19_SEIQRD_spatial_hybrid_vacc(age_stratification_size=age_stratification_size, agg=agg,
start_date=start_calibration.strftime("%Y-%m-%d"), stochastic=True,
distinguish_day_type=True)
tau = 0.75
tau = 0.50

if agg == 'arr':
    # Switch to the provincial initN
Expand Down Expand Up @@ -218,26 +218,23 @@
## Global PSO optimization ##
#############################

# transmission
#pars1 = ['beta_R', 'beta_U', 'beta_M']
#bounds1=((0.01,0.070),(0.01,0.070),(0.01,0.070))
# Social inertia
# Effectivity parameters
pars2 = ['eff_work', 'k', 'mentality', 'summer_rescaling_F', 'summer_rescaling_W', 'summer_rescaling_B']
bounds2=((0,2),(1e2,1e4),(0,1),(0,5),(0,5),(0,5))
pars1 = ['eff_work', 'mentality', 'k', 'summer_rescaling_F', 'summer_rescaling_W', 'summer_rescaling_B']
bounds1=((0,2),(0,1),(1e2,1e4),(0,5),(0,5),(0,5))
# Variants
pars3 = ['K_inf',]
bounds3 = ((1.20, 1.60),(1.50,2.20))
pars2 = ['K_inf',]
bounds2 = ((1.20, 1.60),(1.50,2.20))
# Seasonality
pars4 = ['amplitude',]
bounds4 = ((0,0.40),)
pars3 = ['amplitude',]
bounds3 = ((0,0.40),)
# Change in hospitalisation propensity
pars5 = ['f_h',]
bounds5 = ((0,1),)
pars4 = ['f_h',]
bounds4 = ((0,1),)
# Join them together
pars = pars2 + pars3 + pars4 + pars5
bounds = bounds2 + bounds3 + bounds4 + bounds5
labels = ['$\\Omega_{work}$', '$k$', '$M$', '$M_{summer, F}$', '$M_{summer, W}$', '$M_{summer, B}$', '$K_{inf, abc}$', '$K_{inf,\\delta}$', '$A$', '$f_h$']
pars = pars1 + pars2 + pars3 + pars4
bounds = bounds1 + bounds2 + bounds3 + bounds4
labels = ['$\\Omega$', '$\Psi$', '$k$', '$\Psi_{F}$', '$\Psi_{W}$', '$\Psi_{B}$', '$K_{inf, abc}$', '$K_{inf,\\delta}$', '$A$', '$f_h$']
# Setup objective function with uniform priors
objective_function = log_posterior_probability(model,pars,bounds,data,states,log_likelihood_fnc,log_likelihood_fnc_args,labels=labels, aggregation_function=aggregate_Brussels_Brabant_DataArray)

Expand All @@ -249,7 +246,7 @@
# out = pso.optimize(objective_function, bounds, kwargs={'simulation_kwargs':{'warmup': 0}},
# swarmsize=multiplier_pso*processes, maxiter=n_pso, processes=processes, debug=True)[0]
# A good guess
theta = [0.4, 4850, 0.644, 0.70, 0.45, 1, 1.3, 1.65, 0.21, 0.7]
theta = [0.5, 0.56, 5000, 0.50, 0.4, 0.7, 1.4, 1.9, 0.225, 0.6]

#######################################
## Visualize fits on multiple levels ##
Expand Down Expand Up @@ -323,18 +320,16 @@

# Perturbate PSO estimate by a certain maximal *fraction* in order to start every chain with a different initial condition
# Generally, the less certain we are of a value, the higher the perturbation fraction
# pars1 = ['beta_R', 'beta_U', 'beta_M']
#pert1=[0.05, 0.05, 0.05]
# pars2 = ['eff_work', 'eff_rest', 'k', 'mentality', 'summer_rescaling_F', 'summer_rescaling_W', 'summer_rescaling_B']
pert2=[0.20, 0.20, 0.20, 0.20, 0.20, 0.20]
# pars3 = ['K_inf_abc', 'K_inf_delta']
pert3 = [0.05, 0.05]
# pars4 = ['amplitude']
pert4 = [0.20,]
# pars5 = ['f_h']
pert5 = [0.20,]
#pars1=['eff_work', 'mentality', 'k', 'summer_rescaling_F', 'summer_rescaling_W', 'summer_rescaling_B']
pert1=[0.20, 0.20, 0.20, 0.20, 0.20, 0.20]
# pars2 = ['K_inf_abc', 'K_inf_delta']
pert2 = [0.05, 0.05]
# pars3 = ['amplitude']
pert3 = [0.20,]
# pars4 = ['f_h']
pert4 = [0.20,]
# Add them together
pert = pert2 + pert3 + pert4 + pert5
pert = pert1 + pert2 + pert3 + pert4
# Use perturbation function
ndim, nwalkers, pos = perturbate_theta(theta, pert, multiplier=multiplier_mcmc, bounds=bounds, verbose=False)

Expand Down
13 changes: 10 additions & 3 deletions notebooks/calibration/emcee_sampler_to_dictionary.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,13 @@

# Remove calibrated parameters from the settings
del settings['calibrated_parameters_shapes']
# Print values
# for k,v in samples_dict.items():
# if k != 'K_inf':
# print(f'{k}: mean: {np.mean(v)}, CI: {np.quantile(v, 0.025)}-{np.quantile(v, 0.975)}')
# else:
# for K_inf in v:
# print(f'{k}: mean: {np.mean(K_inf)}, CI: {np.quantile(K_inf, 0.025)}-{np.quantile(K_inf, 0.975)}')
# Append settings to samples dictionary
samples_dict.update(settings)
# Remove settings .json
Expand Down Expand Up @@ -142,17 +149,17 @@


CORNER_KWARGS = dict(
smooth=0.90,
smooth=1,
label_kwargs=dict(fontsize=24),
title_kwargs=dict(fontsize=24),
title_kwargs=dict(fontsize=14),
quantiles=[0.05, 0.95],
levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9 / 2.)),
plot_density=True,
plot_datapoints=False,
fill_contours=True,
show_titles=True,
max_n_ticks=3,
title_fmt=".2E",
title_fmt=".2F",
range=range_lst
)

Expand Down

Large diffs are not rendered by default.

Loading

0 comments on commit dc22e9d

Please sign in to comment.