|
97 | 97 | }
|
98 | 98 | ],
|
99 | 99 | "source": [
|
100 |
| - "#For reproducibility\n", |
| 100 | + "# For reproducibility\n", |
101 | 101 | "np.random.seed(19920908)\n",
|
102 | 102 | "\n",
|
103 |
| - "def freefall(y,t,p):\n", |
| 103 | + "def freefall(y, t, p):\n", |
104 | 104 | " \n",
|
105 | 105 | " return 2.0*p[1] - p[0]*y[0]\n",
|
106 | 106 | "\n",
|
107 |
| - "#Times for observation\n", |
| 107 | + "# Times for observation\n", |
108 | 108 | "times = np.arange(0,10,0.5)\n",
|
109 | 109 | "gamma,g, y0, sigma = 0.4, 9.8, -2, 2\n",
|
110 |
| - "y = odeint(freefall, t = times, y0 = y0, args = tuple([[gamma,g]]))\n", |
| 110 | + "y = odeint(freefall, t=times, y0=y, args=tuple([[gamma,g]]))\n", |
111 | 111 | "yobs = np.random.normal(y,2)\n",
|
112 | 112 | "\n",
|
113 |
| - "fig, ax = plt.subplots(dpi = 120)\n", |
114 |
| - "plt.plot(times,yobs, label = 'observed speed', linestyle = 'dashed', marker = 'o', color='red')\n", |
115 |
| - "plt.plot(times,y, label = 'True speed', color ='k', alpha = 0.5)\n", |
| 113 | + "fig, ax = plt.subplots(dpi=120)\n", |
| 114 | + "plt.plot(times,yobs, label='observed speed', linestyle='dashed', marker='o', color='red')\n", |
| 115 | + "plt.plot(times,y, label='True speed', color='k', alpha=0.5)\n", |
116 | 116 | "plt.legend()\n",
|
117 | 117 | "plt.xlabel('Time (Seconds)')\n",
|
118 | 118 | "plt.ylabel(r'$y(t)$');\n",
|
|
159 | 159 | }
|
160 | 160 | ],
|
161 | 161 | "source": [
|
162 |
| - "ode_model = DifferentialEquation(func = freefall,\n", |
163 |
| - " t0 = 0,\n", |
164 |
| - " times = times,\n", |
| 162 | + "ode_model = DifferentialEquation(func=freefall,\n", |
| 163 | + " t0=0,\n", |
| 164 | + " times=times,\n", |
165 | 165 | " n_odeparams=2, \n",
|
166 |
| - " n_states = 1)\n", |
| 166 | + " n_states=1)\n", |
167 | 167 | "\n",
|
168 | 168 | "with pm.Model() as model:\n",
|
169 | 169 | " \n",
|
170 |
| - " sigma= pm.HalfCauchy('sigma',1)\n", |
| 170 | + " sigma = pm.HalfCauchy('sigma',1)\n", |
171 | 171 | " \n",
|
172 | 172 | " gamma = pm.Lognormal('gamma',0,1)\n",
|
173 | 173 | " \n",
|
174 |
| - " #If we know one of the parameter values, we can simply pass the value.\n", |
175 |
| - " #No need to specify a prior.\n", |
176 |
| - " ode_solution = ode_model(odeparams = [gamma, 9.8], y0 = [0]).reshape(yobs.shape)\n", |
| 174 | + " # If we know one of the parameter values, we can simply pass the value.\n", |
| 175 | + " # No need to specify a prior.\n", |
| 176 | + " ode_solution = ode_model(odeparams=[gamma, 9.8], y0=[0]).reshape(yobs.shape)\n", |
177 | 177 | " \n",
|
178 |
| - " Y = pm.Normal('Y', mu = ode_solution, sd = sigma, observed = yobs)\n", |
| 178 | + " Y = pm.Normal('Y', mu=ode_solution, sd=sigma, observed=yobs)\n", |
179 | 179 | " \n",
|
180 |
| - " trace = pm.sample(2000,tune = 1000)\n", |
| 180 | + " trace = pm.sample(2000,tune=1000)\n", |
181 | 181 | " prior = pm.sample_prior_predictive()\n",
|
182 | 182 | " posterior_predictive = pm.sample_posterior_predictive(trace)\n",
|
183 | 183 | " \n",
|
184 |
| - " data = az.from_pymc3(trace = trace,\n", |
185 |
| - " prior = prior,\n", |
186 |
| - " posterior_predictive = posterior_predictive)" |
| 184 | + " data = az.from_pymc3(trace=trace, prior=prior, posterior_predictive=posterior_predictive)" |
187 | 185 | ]
|
188 | 186 | },
|
189 | 187 | {
|
|
238 | 236 | "source": [
|
239 | 237 | "with pm.Model() as model2:\n",
|
240 | 238 | " \n",
|
241 |
| - " sigma= pm.HalfCauchy('sigma',1)\n", |
| 239 | + " sigma = pm.HalfCauchy('sigma',1)\n", |
242 | 240 | " gamma = pm.Lognormal('gamma',0,1)\n",
|
243 |
| - " #A prior on the acceleration due to gravity\n", |
| 241 | + " # A prior on the acceleration due to gravity\n", |
244 | 242 | " g = pm.Lognormal('g',pm.math.log(10),2)\n",
|
245 | 243 | " \n",
|
246 |
| - " #Notice now I have passed g to the odeparams argument\n", |
247 |
| - " ode_solution = ode_model(odeparams = [gamma, g], y0 = [0]).reshape(yobs.shape)\n", |
| 244 | + " # Notice now I have passed g to the odeparams argument\n", |
| 245 | + " ode_solution = ode_model(odeparams=[gamma, g], y0=[0]).reshape(yobs.shape)\n", |
248 | 246 | " \n",
|
249 |
| - " Y = pm.Normal('Y', mu = ode_solution, sd = sigma, observed = yobs)\n", |
| 247 | + " Y = pm.Normal('Y', mu=ode_solution, sd=sigma, observed=yobs)\n", |
250 | 248 | "\n",
|
251 | 249 | " \n",
|
252 |
| - " trace = pm.sample(2000,tune = 1000, target_accept = 0.9)\n", |
| 250 | + " trace = pm.sample(2000, tune=1000, target_accept=0.9)\n", |
253 | 251 | " prior = pm.sample_prior_predictive()\n",
|
254 | 252 | " posterior_predictive = pm.sample_posterior_predictive(trace)\n",
|
255 | 253 | " \n",
|
256 |
| - " data = az.from_pymc3(trace = trace,\n", |
257 |
| - " prior = prior,\n", |
258 |
| - " posterior_predictive = posterior_predictive)" |
| 254 | + " data = az.from_pymc3(trace=trace, prior=prior, posterior_predictive=posterior_predictive)" |
259 | 255 | ]
|
260 | 256 | },
|
261 | 257 | {
|
|
316 | 312 | "source": [
|
317 | 313 | "with pm.Model() as model3:\n",
|
318 | 314 | " \n",
|
319 |
| - " sigma= pm.HalfCauchy('sigma',1)\n", |
| 315 | + " sigma = pm.HalfCauchy('sigma',1)\n", |
320 | 316 | " gamma = pm.Lognormal('gamma',0,1)\n",
|
321 | 317 | " g = pm.Lognormal('g',pm.math.log(10),2)\n",
|
322 |
| - " #Initial condition prior. We think it is at rest, but will allow for perturbations in initial velocity.\n", |
| 318 | + " # Initial condition prior. We think it is at rest, but will allow for perturbations in initial velocity.\n", |
323 | 319 | " y0 = pm.Normal('y0', 0, 2)\n",
|
324 | 320 | " \n",
|
325 |
| - " ode_solution = ode_model(odeparams = [gamma, g], y0 = [y0]).reshape(yobs.shape)\n", |
| 321 | + " ode_solution = ode_model(odeparams=[gamma, g], y0=[y0]).reshape(yobs.shape)\n", |
326 | 322 | " \n",
|
327 |
| - " Y = pm.Normal('Y', mu = ode_solution, sd = sigma, observed = yobs)\n", |
| 323 | + " Y = pm.Normal('Y', mu=ode_solution, sd=sigma, observed=yobs)\n", |
328 | 324 | "\n",
|
329 | 325 | " \n",
|
330 |
| - " trace = pm.sample(2000,tune = 1000, target_accept = 0.9)\n", |
| 326 | + " trace = pm.sample(2000,tune=1000, target_accept=0.9)\n", |
331 | 327 | " prior = pm.sample_prior_predictive()\n",
|
332 | 328 | " posterior_predictive = pm.sample_posterior_predictive(trace)\n",
|
333 | 329 | " \n",
|
334 |
| - " data = az.from_pymc3(trace = trace,\n", |
335 |
| - " prior = prior,\n", |
336 |
| - " posterior_predictive = posterior_predictive)" |
| 330 | + " data = az.from_pymc3(trace=trace, prior=prior, posterior_predictive=posterior_predictive)" |
337 | 331 | ]
|
338 | 332 | },
|
339 | 333 | {
|
|
355 | 349 | }
|
356 | 350 | ],
|
357 | 351 | "source": [
|
358 |
| - "az.plot_posterior(data, figsize = (13,3));" |
| 352 | + "az.plot_posterior(data, figsize=(13,3));" |
359 | 353 | ]
|
360 | 354 | },
|
361 | 355 | {
|
|
432 | 426 | }
|
433 | 427 | ],
|
434 | 428 | "source": [
|
435 |
| - "def SIR(y,t,p):\n", |
| 429 | + "def SIR(y, t, p):\n", |
436 | 430 | " \n",
|
437 | 431 | " ds = -p[0]*y[0]*y[1]\n",
|
438 | 432 | " di = p[0]*y[0]*y[1] - p[1]*y[1]\n",
|
439 | 433 | " \n",
|
440 |
| - " return [ds,di]\n", |
| 434 | + " return [ds, di]\n", |
441 | 435 | "\n",
|
442 | 436 | "times = np.arange(0,5,0.25)\n",
|
443 | 437 | "\n",
|
444 | 438 | "beta,gamma = 4,1.0\n",
|
445 |
| - "#Create true curves\n", |
446 |
| - "y = odeint(SIR, t = times, y0 = [0.99, 0.01], args = tuple([[beta,gamma]]), rtol=1e-8 )\n", |
447 |
| - "#Observational model. Lognormal likelihood isn't appropriate, but we'll do it anyway\n", |
448 |
| - "yobs = np.random.lognormal(mean = np.log(y[1::]), sigma = [0.2, 0.3])\n", |
| 439 | + "# Create true curves\n", |
| 440 | + "y = odeint(SIR, t=times, y0=[0.99, 0.01], args=tuple([[beta,gamma]]), rtol=1e-8 )\n", |
| 441 | + "# Observational model. Lognormal likelihood isn't appropriate, but we'll do it anyway\n", |
| 442 | + "yobs = np.random.lognormal(mean=np.log(y[1::]), sigma=[0.2, 0.3])\n", |
449 | 443 | "\n",
|
450 | 444 | "\n",
|
451 |
| - "plt.plot(times[1::],yobs, marker = 'o', linestyle = 'none')\n", |
452 |
| - "plt.plot(times, y[:,0], color = 'C0', alpha = 0.5, label = f'$S(t)$')\n", |
453 |
| - "plt.plot(times, y[:,1], color = 'C1', alpha = 0.5, label = f'$I(t)$')\n", |
| 445 | + "plt.plot(times[1::],yobs, marker='o', linestyle='none')\n", |
| 446 | + "plt.plot(times, y[:,0], color='C0', alpha=0.5, label=f'$S(t)$')\n", |
| 447 | + "plt.plot(times, y[:,1], color ='C1', alpha=0.5, label=f'$I(t)$')\n", |
454 | 448 | "plt.legend()"
|
455 | 449 | ]
|
456 | 450 | },
|
|
474 | 468 | ],
|
475 | 469 | "source": [
|
476 | 470 | "\n",
|
477 |
| - "sir_model = DifferentialEquation(func = SIR,\n", |
478 |
| - " times = np.arange(0.25, 5, 0.25), \n", |
479 |
| - " t0 = 0,\n", |
480 |
| - " n_states = 2,\n", |
481 |
| - " n_odeparams=2)\n", |
| 471 | + "sir_model = DifferentialEquation(func=SIR, \n", |
| 472 | + " times=np.arange(0.25, 5, 0.25), \n", |
| 473 | + " t0=0,\n", |
| 474 | + " n_states=2,\n", |
| 475 | + " n_odeparams=2)\n", |
482 | 476 | "\n",
|
483 | 477 | "with pm.Model() as model4:\n",
|
484 | 478 | " \n",
|
485 |
| - " sigma = pm.HalfCauchy('sigma',1, shape = 2)\n", |
| 479 | + " sigma = pm.HalfCauchy('sigma',1, shape=2)\n", |
486 | 480 | " \n",
|
487 |
| - " #R0 is bounded below by 1 because we see an epidemic has occured\n", |
488 |
| - " R0 = pm.Bound(pm.Normal, lower = 1)('R0', 2,3)\n", |
| 481 | + " # R0 is bounded below by 1 because we see an epidemic has occured\n", |
| 482 | + " R0 = pm.Bound(pm.Normal, lower=1)('R0', 2,3)\n", |
489 | 483 | " lam = pm.Lognormal('lambda',pm.math.log(2),2)\n",
|
490 | 484 | " beta = pm.Deterministic('beta', lam*R0)\n",
|
491 | 485 | "\n",
|
492 | 486 | " \n",
|
493 |
| - " sir_curves = sir_model(odeparams = [beta, lam], y0 = [0.99, 0.01]).reshape(yobs.shape)\n", |
| 487 | + " sir_curves = sir_model(odeparams=[beta, lam], y0=[0.99, 0.01]).reshape(yobs.shape)\n", |
494 | 488 | " \n",
|
495 |
| - " Y = pm.Lognormal('Y', mu = pm.math.log(sir_curves), sd = sigma, observed = yobs)\n", |
496 |
| - " trace = pm.sample(2000,tune = 1000, target_accept = 0.9)\n", |
| 489 | + " Y = pm.Lognormal('Y', mu=pm.math.log(sir_curves), sd=sigma, observed=yobs)\n", |
| 490 | + " trace = pm.sample(2000,tune=1000, target_accept=0.9)\n", |
497 | 491 | " prior = pm.sample_prior_predictive()\n",
|
498 | 492 | " posterior_predictive = pm.sample_posterior_predictive(trace)\n",
|
| 493 | + " \n", |
| 494 | + " data = az.from_pymc3(trace=trace, prior = prior, posterior_predictive = posterior_predictive)\n", |
499 | 495 | " "
|
500 | 496 | ]
|
501 | 497 | },
|
502 |
| - { |
503 |
| - "cell_type": "code", |
504 |
| - "execution_count": 11, |
505 |
| - "metadata": {}, |
506 |
| - "outputs": [], |
507 |
| - "source": [ |
508 |
| - "data = az.from_pymc3(trace = trace,\n", |
509 |
| - " prior = prior,\n", |
510 |
| - " posterior_predictive = posterior_predictive)" |
511 |
| - ] |
512 |
| - }, |
513 | 498 | {
|
514 | 499 | "cell_type": "code",
|
515 | 500 | "execution_count": 12,
|
|
529 | 514 | }
|
530 | 515 | ],
|
531 | 516 | "source": [
|
532 |
| - "az.plot_posterior(data,round_to = 2, credible_interval=0.95);" |
| 517 | + "az.plot_posterior(data,round_to=2, credible_interval=0.95);" |
533 | 518 | ]
|
534 | 519 | },
|
535 | 520 | {
|
|