Changes from all commits (74 commits)
cfe6911  skyler update (Dec 1, 2024)
6879ba0  skyler original file (Dec 1, 2024)
c485438  Reproduce the oscillatory derivative (Dec 4, 2024)
86d0164  Reproduce the oscillatory derivative with fast run time (Dec 4, 2024)
3f23767  there is some small oscillation in the simulated true trajectory. The… (Dec 4, 2024)
fda8d02  The m matrix at the edge time points seem to be the problem. See atta… (Dec 4, 2024)
86ef121  The problem has been identified to be in the M-matrix. (Dec 4, 2024)
6706de7  add back jitter (Dec 4, 2024)
5ad676b  add flag for use_fourier_prior (Dec 5, 2024)
f4c986d  allow phi_exo (Dec 5, 2024)
0ea423f  allow manual phi specification (Dec 5, 2024)
005a8dc  Qualitatively, the TensorFlow probability result does not agree with … (Dec 5, 2024)
082aea9  add trace plot (Dec 5, 2024)
11920f5  add skyler data generation (Dec 5, 2024)
b4ef836  seir simulation with log error (Dec 5, 2024)
057adfe  log scale verified to be correct (Dec 5, 2024)
78f1fba  use 1.0 as N (Dec 6, 2024)
012b3c1  log error on SIR 3-component doesn't work (Dec 6, 2024)
803069f  refactor name. not working (Dec 6, 2024)
2616790  log using eir seems working (Dec 6, 2024)
002c7bc  low noise case works (Dec 6, 2024)
d9bc949  15% multiplicative noise for the SEIR model 3-component fully observe… (Dec 6, 2024)
80e1032  15% multiplicative noise for the SEIR model 3-component fully observe… (Dec 6, 2024)
7d8c947  prediction is working (Dec 6, 2024)
1f53ea7  missing component runs but not performing well (Dec 6, 2024)
a430d9d  results are different every time, suggesting MCMC not converged yet (Dec 6, 2024)
28749e1  add MLE that runs (Dec 7, 2024)
84141c0  add MLE to see identifiability problem (Dec 7, 2024)
48fce8f  print out loss so that I can compare different init value (Dec 7, 2024)
8ab4ce2  hessian approximation is not correct (Dec 7, 2024)
d5a7ac0  add mcmc (Dec 7, 2024)
4a0eb5a  add mcmc, it is working (Dec 7, 2024)
e832f08  mcmc and UQ using MLE likelihood (Dec 7, 2024)
33188b2  visualization labels (Dec 7, 2024)
8f165d3  UQ is under-estimated (Dec 7, 2024)
fb034ad  simplify plotting. more samples (Dec 7, 2024)
71f9218  R0 reproduction number (Dec 7, 2024)
c7f0d40  simulation for repetition (Dec 7, 2024)
eeee4d9  save results with command line argument (Dec 7, 2024)
8d33fe2  file update for repetition (Dec 8, 2024)
ff1bd79  draft evaluation script (Dec 9, 2024)
f0aa4fb  evaluation script corrected (Dec 9, 2024)
1d1198e  add evaluation and generate tables and figures (Dec 9, 2024)
15a11db  add visualizations (Dec 9, 2024)
789b712  add visualizations (Dec 9, 2024)
d45ff3a  add visualizations (Dec 9, 2024)
e4c3e8f  ready to generate figures and tables (Dec 9, 2024)
a00ce9d  add coverage (Dec 9, 2024)
bff7607  add R0 (Dec 9, 2024)
6943177  6k early termination works the best (Dec 11, 2024)
b34422d  try lmbda = 1000.0; remove sir file (Dec 11, 2024)
6b5de7d  output pictures (Dec 12, 2024)
55b80c0  visualization update (Dec 15, 2024)
9282af0  R0 = beta / (gamma) (Dec 15, 2024)
951bed6  add pickle for plot data (Dec 17, 2024)
60291b2  overall chart that is working (Dec 18, 2024)
23821c1  two types of visualizations (Dec 18, 2024)
b7a707a  rmse on original scale (Dec 19, 2024)
4f99a7b  add peak timing and intensity in MAGI (Dec 19, 2024)
a322c6a  bug fix (Dec 19, 2024)
0501e12  add name (Dec 19, 2024)
b2249ba  bug fix (Dec 19, 2024)
3b22c30  results forecast (Dec 19, 2024)
1a2c3ea  bug fix (Dec 19, 2024)
03afd9e  bug fix (Dec 19, 2024)
1314a22  add RMSPE (Dec 19, 2024)
1d5c7c8  final visualizations (Dec 23, 2024)
e65e26f  new data simulation under covid-19 setting (Dec 24, 2024)
cf8edca  phi2 init optim change (Dec 24, 2024)
48a7a78  magi to use new data (Dec 24, 2024)
c3297d9  output path (Dec 24, 2024)
dee7d44  bug in file path for SEIR MAGI (Dec 24, 2024)
94b8e28  move data (Dec 27, 2024)
3f8c75d  add prediction rmse (Dec 27, 2024)
289 changes: 289 additions & 0 deletions 110424_slide11_4-components.ipynb (large diff not rendered by default)
525 changes: 525 additions & 0 deletions 111824_slide3_first-two-forecasting-steps.ipynb (large diff not rendered by default)
288 changes: 288 additions & 0 deletions 111824_slide6_sinusoidal-in-sample.ipynb (large diff not rendered by default)
289 changes: 289 additions & 0 deletions 112924_missing-component_good-cubic-init.ipynb (large diff not rendered by default)
329 changes: 329 additions & 0 deletions 112924_slide15_oscillatory-gp-derivatives.ipynb (large diff not rendered by default)
191 changes: 191 additions & 0 deletions Data Generation.ipynb
@@ -0,0 +1,191 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "dea6bfd2",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"from IPython.display import clear_output\n",
"from scipy.integrate import solve_ivp"
]
},
{
"cell_type": "markdown",
"id": "12e528e1",
"metadata": {},
"source": [
"# SIR Model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "889995b4",
"metadata": {},
"outputs": [],
"source": [
"'''\n",
"Showcase Samples:\n",
"1. (S_0, I_0, R_0) = (9900, 100, 0) + (b, g) = (1.5, 0.75).\n",
"2. (S_0, I_0, R_0) = (9900, 100, 0) + (b, g) = (3.5, 1.75).\n",
"\n",
"Standard Settings:\n",
"1. 20x noisy observations per unit time with discretization 1.\n",
"2. {0.5, 2, 8} interval lengths - we'll generate datasets to 20.00.\n",
"3. {0.05, 0.15} alpha noise level multipliers.\n",
"4. 10x randomized seeds.\n",
"'''\n",
"# generate separate datasets for each possible combination, using maximum precision\n",
"for seed in range(10):\n",
" for alpha in [0.05, 0.15]:\n",
" \n",
" # two possible combinations of params (beta, gamma)\n",
" for params in [(1.5, 0.75), (3.5, 1.75)]:\n",
" \n",
" # set a seed for reproducibility\n",
" np.random.seed(seed)\n",
"\n",
" # what are our parameters + population size?\n",
" b, g, N = params[0], params[1], 10000\n",
"\n",
" # encode ODEs for solve_ivp data-generation processes\n",
" def SIR(t, y):\n",
"\n",
" # unpack y\n",
" S, I, R = tuple(y)\n",
"\n",
" # dS/dt = -b*S*I/N; dI/dt = b*I*S/N - g*I; dR/dt = g*I\n",
" dSdt = -b*S*I/N\n",
" dIdt = +b*S*I/N - g*I\n",
" dRdt = +g*I\n",
"\n",
" # return only the derivatives\n",
" return np.array([dSdt, dIdt, dRdt])\n",
" \n",
" # generate our data\n",
" t_start, t_end = 0.0, 20.0\n",
" t_steps = np.linspace(start=t_start, stop=t_end, num=20001)\n",
" SIR_init = np.array([9900, 100, 0])\n",
" X = solve_ivp(fun=SIR, t_span=(t_start, t_end), y0=SIR_init, \n",
" t_eval=t_steps, atol=1e-10, rtol=1e-10).y.T\n",
"\n",
" # compute appropriate noise levels based on alpha choice\n",
" sigmas = alpha * (X.max(axis=0) - X.min(axis=0))\n",
" \n",
" # create a copy of X + add the noise\n",
" X_noised = X.copy()\n",
" X_noised += np.random.normal(loc=0.0, scale=sigmas, size=X.shape)\n",
" \n",
" # save time, X_true, X_noised as a .csv for our gold-standard dataset\n",
" data = np.hstack([t_steps.reshape(-1, 1), X_noised, X])\n",
" cols = [\"t\", \"S_obs\", \"I_obs\", \"R_obs\", \"S_true\", \"I_true\", \"R_true\"]\n",
" \n",
" # save this dataset to our gold-standard datasets list\n",
" df = pd.DataFrame(data=data, columns=cols)\n",
" df.to_csv(f\"data/SIR_beta={b}_gamma={g}_alpha={alpha}_seed={seed}.csv\", index=False)"
]
},
{
"cell_type": "markdown",
"id": "ecf5e40f",
"metadata": {},
"source": [
"# Lorenz Model"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "9de8613c",
"metadata": {},
"outputs": [],
"source": [
"'''\n",
"Showcase Samples:\n",
"1. (X_0, Y_0, Z_0) = (5, 5, 5) + (beta, rho, sigma) = (8/3, 28.0, 10.0).\n",
"2. (X_0, Y_0, Z_0) = (5, 5, 5) + (beta, rho, sigma) = (8/3, 6.0, 10.0).\n",
"\n",
"Standard Settings:\n",
"1. 20x noisy observations per unit time with discretization 1.\n",
"2. {0.5, 2, 8} interval lengths - we'll generate datasets to 20.00.\n",
"3. {0.05, 0.15} alpha noise level multipliers.\n",
"4. 10x randomized seeds.\n",
"'''\n",
"# generate separate datasets for each possible combination, using maximum precision\n",
"for seed in range(10):\n",
" for alpha in [0.05, 0.15]:\n",
" \n",
" # two possible combinations of params (beta, gamma)\n",
" for params in [(8/3, 28.0, 10.0), (8/3, 6.0, 10.0)]:\n",
" \n",
" # set a seed for reproducibility\n",
" np.random.seed(seed)\n",
"\n",
" # what are our parameters?\n",
" beta, rho, sigma = params\n",
"\n",
" # encode ODEs for solve_ivp data-generation processes\n",
" def lorenz(t, y):\n",
"\n",
" # unpack y\n",
" X, Y, Z = tuple(y)\n",
"\n",
" # dXdt = sigma * (Y-X); dYdt = x(rho - z) - y; dZdt = xy - beta*z\n",
" dXdt = sigma * (Y-X)\n",
" dYdt = (X * (rho - Z)) - Y\n",
" dZdt = (X*Y) - (beta*Z)\n",
"\n",
" # return only the derivatives\n",
" return np.array([dXdt, dYdt, dZdt])\n",
" \n",
" # generate our data\n",
" t_start, t_end = 0.0, 20.0\n",
" t_steps = np.linspace(start=t_start, stop=t_end, num=20001)\n",
" y_init = np.array([5.0, 5.0, 5.0])\n",
" X = solve_ivp(fun=lorenz, t_span=(t_start, t_end), y0=y_init, \n",
" t_eval=t_steps, atol=1e-10, rtol=1e-10).y.T\n",
" \n",
" # compute appropriate noise levels based on alpha choice\n",
" sigmas = alpha * (X.max(axis=0) - X.min(axis=0))\n",
" \n",
" # create a copy of X + add the noise\n",
" X_noised = X.copy()\n",
" X_noised += np.random.normal(loc=0.0, scale=sigmas, size=X.shape)\n",
" \n",
" # save time, X_true, X_noised as a .csv for our gold-standard dataset\n",
" data = np.hstack([t_steps.reshape(-1, 1), X_noised, X])\n",
" cols = [\"t\", \"X_obs\", \"Y_obs\", \"Z_obs\", \"X_true\", \"Y_true\", \"Z_true\"]\n",
" \n",
" # save this dataset to our gold-standard datasets list\n",
" df = pd.DataFrame(data=data, columns=cols)\n",
" df.to_csv(f\"data/LORENZ_rho={rho}_alpha={alpha}_seed={seed}.csv\", index=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.10 (Afterburner)",
"language": "python",
"name": "afterburner"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
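For quick inspection of the generated files, here is a minimal sketch (not part of the diff) that loads one of the CSVs written by the SIR cell above. The file name is hypothetical and simply follows the f-string pattern in that cell (beta=1.5, gamma=0.75, alpha=0.05, seed=0); any generated combination works.

import pandas as pd
import matplotlib.pyplot as plt

# Hypothetical file name following the SIR cell's naming pattern
df = pd.read_csv("data/SIR_beta=1.5_gamma=0.75_alpha=0.05_seed=0.csv")

# Columns are t, S_obs, I_obs, R_obs, S_true, I_true, R_true.
# Plot the true I trajectory against a thinned set of noisy observations.
plt.plot(df["t"], df["I_true"], label="I (true)")
plt.scatter(df["t"][::500], df["I_obs"][::500], s=5, alpha=0.5, label="I (observed)")
plt.xlabel("t")
plt.ylabel("infected count")
plt.legend()
plt.show()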