### A Pluto.jl notebook ###
# v0.19.46
using Markdown
using InteractiveUtils
# ╔═╡ ef539e62-95ae-42c2-9ce5-3fd11e09a38e
using Turing
# ╔═╡ 68018998-17d7-11ed-2d5e-2b2d5f8b0301
begin
using PlutoUI
using StatsPlots
using PlutoTeachingTools
using Plots; default(fontfamily="Computer Modern", framestyle=:box) # LaTeX-style plots
using Random
using LaTeXStrings
using HypertextLiteral
using Logging
end;
# ╔═╡ ec23cd37-94df-489f-b892-24bffd755b50
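# suppress log messages at warning level and below, to keep the notebook output clean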
Logging.disable_logging(Logging.Warn);
# ╔═╡ a89d9509-7564-4b6e-bba6-0de1f3f7d64e
TableOfContents()
# ╔═╡ b879d404-343c-4de4-91e1-5dcf6031ad3f
md"[**↩ Home**](https://lf28.github.io/BayesianModelling/)
[**↪ Next Chapter**](./section5_regressions.html)
"
# ╔═╡ 82b6f501-bada-4bf3-a7a7-c325bc48e754
md"""
# Bayesian inference with `Turing.jl`
"""
# ╔═╡ 4b2fc478-b825-4758-b384-d7b7186b8e21
md"""
In the previous chapters, we have introduced some basic concepts of Bayesian inference and MCMC samplers. In a nutshell, Bayesian inference requires a full generative model which includes both prior assumptions for the unknown and the likelihood function for the observed. After the modelling step, efficient algorithms, such as MCMC, are used to compute the posterior distribution.
So far, we have implemented all of the Bayesian models and their inference algorithms manually. Writing programs from scratch requires prior programming experience and effort. Therefore, starting everything from scratch is not practical for general applied practitioners.
Fortunately, with the help of probabilistic programming languages (PPL), such as `Turing.jl` or `Stan`, one can do Bayesian inferences easily without worrying too much about the technical details. A PPL is a programming language to specify and infer general-purpose probabilistic models. They provide a high-level and intuitive-to-use way of specifying Bayesian models. Moreover, a PPL unifies the modelling and computation blocks. Bayesian computations, such as MCMC, can be (almost) carried out automatically once a model is specified.
In this chapter, we are going to learn how to use `Turing.jl`, a popular PPL implemented in Julia, to run Bayesian inferences. `Turing.jl` is very easy to use. The user writes Turing models in the same way as they would write on their paper, which makes it intuitive to use. Together with other helper packages, such as `MCMCChains.jl` (for MCMC chain summary) and `Plots.jl` (for visualisation), the packages provide an ecosystem to do full Bayesian inference.
"""
# ╔═╡ 67d93f23-899b-4ddf-83f1-5d320c23f22f
md"""
## Install Julia and `Turing.jl`
Install Julia (1.8 or greater)
* If you have not yet done so, download and install Julia by following the instructions at [https://julialang.org/downloads/](https://julialang.org/downloads/).
* Choose either the latest stable version or a long-term support version
Add relevant packages by using Julia's package manager
* Step 1: Open the Julia Command-Line
* either by double-clicking the Julia executable or
* running Julia from the command line
And you should see a command line interface like this
```
$ julia
_
_ _ _(_)_ | Documentation: https://docs.julialang.org
(_) | (_) (_) |
_ _ _| |_ __ _ | Type "?" for help, "]?" for Pkg help.
| | | | | | |/ _` | |
| | |_| | | | (_| | | Version 1.8.0 (2022-08-17)
_/ |\__'_|_|_|\__'_| | Official https://julialang.org/ release
|__/ |
julia>
```
* Step 2: Install `Turing.jl` (and other packages if needed) by using Julia's package manager
```juliarepl
julia> using Pkg
julia> Pkg.add("Turing")
```
* Step 3: you can now load `Turing` and start using it from the command line interface (not recommended; check Pluto's instructions below for details)
```juliarepl
julia> using Turing
```
"""
# ╔═╡ b2980969-462c-44fe-bb66-fb5d968fc5f6
md"""
### Run `Turing` models with Pluto
It is recommended that you run `Turing` models in your browser rather than Julia's REPL command line interface.
* Step 1: Install `Pluto.jl`;
```juliarepl
julia> using Pkg
julia> Pkg.add("Pluto")
```
* Step 2: Use Pluto
```juliarepl
julia> using Pluto
julia> Pluto.run()
```
* Step 3: To get started, you can either run this notebook (recommended) or start your own new notebook
* if you start a new notebook, you need to add `Turing` and `StatsPlots` packages first in a cell of your notebook (**not** in REPL but in the Pluto notebook)
* then check the notes below for further details.
```
begin
using Turing, StatsPlots
end
```
"""
# ╔═╡ aaeb3a9d-d655-427d-a397-62cab819e346
md"""
## `Turing.jl` basics
"""
# ╔═╡ d4a8b997-5dff-49b1-aa09-017051405790
md"""
### Model specification
In a nutshell, a model in Turing is implemented as a Julia function wrapped with a `@model` macro. Intuitively, the macro rewrites the wrapped Julia function to a probabilistic model such that downstream model operations can be carried out.
A general Turing model is listed below:
```julia
@model function my_model(data)
...
# random variable `θ` with prior distribution `prior_dist`
θ ~ prior_dist
...
# optional deterministic transformations
ϕ = fun(θ)
...
# observation `data` with likelihood distribution `likelihood_dist`
data ~ likelihood_dist
...
end
```
Two important constructs of `Turing.jl` are
* macro "`@model`": a macro that rewrites a Julia function to a probabilistic program that can be inferred later on; all model assumptions are encapsulated within the macro
* operator "`~`": the tilde operator is used to specify a variable that follows some probability distribution: e.g.
> ```θ ~ Beta(1,1); data ~ Bernoulli(θ)```
* where we assume `data` is a random draw from a Bernoulli with bias `θ`; and the bias `θ` follows a flat Beta prior
* check [`Distributions.jl`](https://juliastats.org/Distributions.jl/stable/starting/) for available probability distributions and their interfaces.
A Turing model can be specified with arbitrary Julia code. For example,
* `for`, `while` loops: can be used to specify repeated distribution assumptions,
* convenient for the i.i.d. assumption for the observed data
* `=` assignment: can be used to assign a deterministic value to a variable; e.g.
> ```μ = 0; data ~ Normal(μ, 1)```
* a Gaussian distribution with a fixed mean of zero
* note `=` is different from `~` operator; `~` is used to specify a distribution assumption for a random variable; `=` is a deterministic non-random assignment
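Putting these constructs together, below is a minimal sketch of a complete model: a hypothetical Gaussian with unknown mean and fixed unit variance (the name `simple_gaussian` is illustrative and not used elsewhere in this chapter):
```julia
@model function simple_gaussian(data)
    # prior: a vague Gaussian prior for the unknown mean
    μ ~ Normal(0, 10)
    # likelihood: each observation is an i.i.d. Gaussian draw with unit variance
    for i in eachindex(data)
        data[i] ~ Normal(μ, 1)
    end
end
```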
"""
# ╔═╡ 0e5f3c06-8e3e-4b66-a42e-78a1db358987
md"""
### Inference
Recall that MCMC provides us with a general and scalable method to draw samples from the posterior distribution which usually cannot be computed exactly:
$$\theta^{(r)} \sim p(\theta|\mathcal D)$$
Instead of writing one's own MCMC algorithms, `Turing.jl` provides an easy-to-use interface: `sample()`:
```julia
chain = sample(model, mcmc_algorithm, mc;
discard_initial = 100,
thinning=10,
chain_type=Chains
)
```
The first three compulsory arguments are
* `model`: a Turing model should be the first argument, e.g. `cf_model`
* `mcmc_algorithm`: can be one of the available MCMC samplers, such as a Hamiltonian sampler with a certain leapfrog step length and step count: `HMC(0.05, 10)`; or HMC's more user-friendly extension, the No-U-Turn sampler: `NUTS()` (which chooses the step length and count automatically)
* `mc`: how many MCMC samples to draw, e.g. `3000`
The optional keyword arguments (by Julia's convention, keyword arguments are specified after `;`) are
* `discard_initial`: discard the specified number of samples as burn-in
* `thinning`: thin the chain to reduce temporal correlations between MCMC iterations
* `chain_type`: return the samples as a `Chains` object from `MCMCChains.jl`
"""
# ╔═╡ 45a74e6a-ae59-49bd-8b41-ee1c73153f15
md"""
**Run multiple chains in parallel.** To make use of modern computers' parallel processing power, we usually simulate multiple MCMC chains at the same time. This can be done by calling
```julia
sample(model, mcmc_algorithm, parallel_type, n, n_chains)
```
where
* `parallel_type` can be e.g. `MCMCThreads()`
* `n_chains` is the number of parallel chains to simulate at the same time
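For example, the following sketch draws three chains of 2000 samples each on separate threads (`my_model` stands for any Turing model defined as above; note that Julia itself must be started with multiple threads, e.g. `julia --threads 4`, for the chains to actually run in parallel):
```julia
chain = sample(my_model, NUTS(), MCMCThreads(), 2000, 3)
```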
"""
# ╔═╡ 8fbef2d5-9591-46fc-a35e-f44a0d492748
md"""
### [Auto-differentiation](https://turing.ml/dev/docs/using-turing/autodiff) backend
"""
# ╔═╡ c8863691-ffdc-4629-b0b6-acd61d6d905f
md"""
Some MCMC algorithms such as Hamiltonian Monte Carlo (HMC) or No-U-Turn sampler (NUTS) require the gradient of the log probability density functions.
As shown in chapter 3, packages such as `ForwardDiff.jl` can automatically differentiate a Julia program. With the auto-differentiation (AD) packages serving as backends, `Turing` can compute the gradient automatically. Different backends and algorithms supported in `Turing.jl` include:
- [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl): forward-mode AD, the default backend
- [Tracker.jl](https://github.com/FluxML/Tracker.jl): reverse-mode AD
- [ReverseDiff.jl](https://github.com/JuliaDiff/ReverseDiff.jl): reverse-mode AD, has to be loaded explicitly (optional cache for some models)
- [Zygote.jl](https://github.com/FluxML/Zygote.jl): reverse-mode AD, has to be loaded explicitly
"""
# ╔═╡ fefbf8e9-8920-4555-87bc-daf2e2a231f1
md"""
By default, `Turing` uses `ForwardDiff`. One can change the AD backend by `setadbackend(:backend)`, e.g. `setadbackend(:forwarddiff)`, `setadbackend(:tracker)`, `setadbackend(:reversediff)`, or `setadbackend(:zygote)`.
As a rule of thumb, use forward-mode AD for models with few parameters (say less than 50) and reverse-mode AD for models with many parameters. If you use reverse-mode AD, in particular with Tracker or Zygote, you should avoid
loops and use vectorized operations.
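For example, to switch to one of the reverse-mode backends (a sketch following the calls above; as noted, `ReverseDiff.jl` has to be loaded explicitly):
```julia
using ReverseDiff      # must be loaded explicitly before switching
setadbackend(:reversediff)
```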
"""
# ╔═╡ f56ea0d7-c4e8-469e-9863-adc2d9c917be
md"""
## Coin-flipping revisited
Recall the coin-flipping problem in chapter one.
> A coin 🪙 is tossed 10 times, and the tossing results are recorded:
> $$\mathcal D=\{1, 1, 1, 0, 1, 0, 1, 1, 1, 0\}$$;
> i.e. seven out of the ten tosses are heads (ones). Is the coin **fair**?
"""
# ╔═╡ a86ed206-aed8-40b4-89aa-1abf0d16fbb2
md"""
**Bayesian conjugate model**
The Bayesian model starts with a generative process for the unknown (the prior) followed by a model for the observed data (the likelihood).
1. prior for the bias ``\theta``:
```math
\theta \sim \texttt{Beta}(a_0, b_0);
```
2. then a total of 10 coin tosses are drawn from the coin with bias ``\theta``; for ``n = 1,\ldots, 10``
```math
d_n \sim \texttt{Bernoulli}(\theta).
```
"""
# ╔═╡ 55f5190d-d3dc-43e4-8e4f-1f42115fb6b2
Foldable("Exact Bayesian computation with the conjugate prior model.", md"
Since a conjugate prior is used, the posterior computation is very straightforward. Applying Bayes' rule, we find the posterior is
```math
p(\theta|\mathcal D) = \texttt{Beta}(\theta; a_N, b_N) = \frac{1}{\text{B}(a_N, b_N)} \theta^{a_N-1}(1-\theta)^{b_N-1},
```
where ``a_N= a_0 + N_h`` and ``b_N = b_0 + N - N_h``; and ``N_h = \sum_i d_i`` is the total number of heads observed in the ``N`` tosses.
Applying Bayes' rule, we have
```math
\begin{align}
p(\theta|\mathcal D) &\propto p(\theta) p(\mathcal D|\theta) \\
&= \frac{1}{\text{B}(a_0, b_0)} \theta^{a_0-1}(1-\theta)^{b_0-1} \theta^{\sum_n d_n} (1-\theta)^{N-\sum_n d_n}\\
&= \theta^{a_0+ \sum_{n} d_n -1}(1-\theta)^{b_0+N-\sum_n d_n -1}\\
&= \theta^{a_N -1}(1-\theta)^{b_N -1},
\end{align}
```
where ``a_N= a_0 + N_h`` and ``b_N = b_0 + N - N_h``.
Next, we need to normalise the unnormalised posterior to find the exact posterior distribution. The normalising constant is:
$$\int_{0}^1 \theta^{a_N -1}(1-\theta)^{b_N -1} d\theta$$
We recognise the unnormalised distribution is a Beta distribution with the updated parameters ``a_N= a_0 + N_h`` and ``b_N = b_0 + N - N_h``; the normalising constant must be ``\text{B}(a_N, b_N)``. Therefore,
$$p(\theta|\mathcal D) =\text{Beta}(a_N, b_N).$$
To demonstrate the idea, we choose a noninformative flat prior ``a_0=b_0 =1``; the posterior is of a Beta form with updated counts of heads and tails:
$$p(\theta|\mathcal D) = \texttt{Beta}(8, 4).$$ The corresponding prior and updated posterior are shown below. The posterior (light orange curve) updates the flat prior (blue curve) by incorporating the likelihood. Since 7 out of the 10 tosses are heads, the posterior correctly reflects this observed information.
")
# ╔═╡ b49725d0-8239-4129-b0d6-982f998be91f
md"""
### Inference with `Turing.jl`
"""
# ╔═╡ ecf86444-1565-406c-8aa2-275ee3a44fae
md"""
**Model specification:** The coin-flipping model can be specified by `Turing.jl` as
"""
# ╔═╡ a2b84c25-bf58-411d-893b-48c8384cae04
@model function coin_flipping(data; a₀=1.0, b₀=1.0)
# Our prior belief about the probability of heads in a coin toss.
θ ~ Beta(a₀, b₀)
# each observation in `data` is an independent draw from the coin, which is Bernoulli distributed
for i in eachindex(data)
data[i] ~ Bernoulli(θ)
end
end;
# ╔═╡ f2dd09fa-9204-4f1e-80da-2f1bb185b3a8
md"""
In the above model,
* `a₀, b₀` (the prior's parameters) are treated as input arguments to allow some flexibility
* a `for` loop is used to specify the repeated independent tosses; `data` here is assumed to be an array of toss realisations (`true` or `false`)
To realise a concrete model for our problem, we only need to create ``\mathcal D`` as the required array and feed it into the Turing model.
"""
# ╔═╡ 77a9ca64-87ad-465c-bec7-fe406145de40
begin
# create the data as observed
coin_flipping_data = [true, true, true, false, true, false, true, true, true, false]
# create the model by feeding the data as observed
cf_model = coin_flipping(coin_flipping_data; a₀=1.0, b₀=1.0)
end;
# ╔═╡ 139a1d54-b018-4de4-9a9f-ec9cf429b3a1
md"""
**Inference:** To infer the model, we run an MCMC algorithm to sample from the posterior
* all random variables specified with `~` (except the observed `data`) will be sampled by the MCMC algorithm
For example, the following command draws three parallel chains of 2000 samples each with an HMC sampler, discarding the first 1000 samples of each chain as burn-in.
"""
# ╔═╡ 7d13ddd9-fb5f-4843-b1d5-176d505acd57
begin
Random.seed!(100)
cf_chain = sample(cf_model, HMC(0.05, 20), MCMCThreads(), 2000, 3; discard_initial = 1000)
end;
# ╔═╡ a2278d07-f857-4910-afa8-bf484ef0dc10
md"""
In particular, the HMC sampler has
* a leapfrog step length of ``\epsilon = 0.05``
* and a leapfrog step count of ``{T}=20``
"""
# ╔═╡ 400b77d6-dc69-4ea9-ab45-5daeaa6e4bcc
md"""**Use other MCMC samplers:** A more convenient sampling choice is No-U-Turn (NUTS) sampler [^1]:
```julia
sample(cf_model, NUTS(0.65), 2000; discard_initial = 500);
```
NUTS is a user-friendly extension of HMC
* the Hamiltonian dynamics' parameters are tuned automatically
* such that a pre-specified target acceptance rate is achieved (a rate around 0.65 is recommended)
* the user does not need to set HMC's parameters manually
Similarly, one may also choose Metropolis-Hastings:
```julia
sample(cf_model, MH(), 2000; discard_initial = 500);
```
However, HMC and NUTS are the go-to choices due to their better sampling efficiency. Check chapter 2 for a comparison between the samplers.
"""
# ╔═╡ 9f119bba-fbd2-4c3a-93c2-22567c781e5f
md"""
**Chain diagnostics:** We should always check the quality of the chain before proceeding to report the findings.
In particular, we need to visually inspect the trace plot to spot any potential divergence. The following command shows the chain trace against the MCMC iterations. The chain seems to mix well (check chapter 2 for more details about visual inspection).
"""
# ╔═╡ 4bb24996-24d0-4910-9996-83f0b34a005f
plot(cf_chain)
# ╔═╡ 6f7ecf38-3017-408e-9a8a-6d218bddc2d1
md"""
We can also check chain diagnostic statistics such as
* `rhat` ``\approx 0.99``; as a rule of thumb, `rhat` should be smaller than 1.01
* `ess` ``\approx 1920``; the effective sample size (ESS) signals the efficiency of the sampler
Both statistics indicate the sampler has mixed well.
"""
# ╔═╡ 4e9ac22f-a895-4d87-b1a9-2cd8a6da83fb
describe(cf_chain)
# ╔═╡ 5cd3314b-dff2-478f-9a1a-73545b26f797
md"""
### Compare with the exact posterior
For this toy example, we know the posterior distribution exactly:
$$p(\theta|\mathcal D) = \texttt{Beta}(8,4).$$
Therefore, we can compare the exact posterior with the approximate distribution returned by `Turing.jl`. The following plot shows the comparison. It can be observed that Turing has done a very good job of approximating the posterior.
Recall that one can use `density()` to plot the density of an MCMC chain.
"""
# ╔═╡ 5678e483-1292-440d-83de-462cd249c511
let
plot(Beta(8,4), fill=true, alpha=0.5, lw=2, label=L"\texttt{Beta}(8,4)")
density!(cf_chain, xlim=[0,1], legend=:topleft, w=2)
end
# ╔═╡ 7c6e1185-4fe4-4d2b-9b0a-f6268a75b682
md"""
## Seven scientists re-visited
"""
# ╔═╡ acbc4dcc-3352-4de0-a145-d4aa51975036
md"""
Recall the seven-scientist problem which was introduced in chapter 2.
!!! question "Seven scientist problem"
[*The question is adapted from [^1]*] Seven scientists (A, B, C, D, E, F, G) with widely-differing experimental skills measure some signal ``\mu``. You expect some of them to do accurate work (i.e. to have small observation variance ``\sigma^2``), and some of them to turn in wildly inaccurate answers (i.e. to have enormous measurement error). What is the unknown signal ``\mu``?
The seven measurements are:
| Scientists | A | B | C | D | E | F | G |
| -----------| ---|---|---|--- | ---|---|---|
| ``d_n`` | -27.020| 3.570| 8.191| 9.898| 9.603| 9.945| 10.056|
"""
# ╔═╡ f06c5735-6a4a-4a91-8517-ec57a674225a
begin
scientist_data = [-27.020, 3.57, 8.191, 9.898, 9.603, 9.945, 10.056]
μ̄ = mean(scientist_data)
end;
# ╔═╡ 782e9234-c02b-4abb-a72f-110d2fcabc33
let
ylocations = range(0, 2, length=7) .+ 0.5
plt = plot(ylim = [0., 3.0], xminorticks =5, xlabel=L"d", yticks=false, showaxis=:x, size=(620,250))
scientists = 'A':'G'
δ = 0.1
for i in 1:7
plot!([scientist_data[i]], [ylocations[i]], label="", markershape =:circle, markersize=5, markerstrokewidth=1, st=:sticks, c=1)
annotate!([scientist_data[i]].+7*(-1)^i * δ, [ylocations[i]].+ δ, scientists[i], 8)
end
vline!([μ̄], lw=2, ls=:dash, label="sample mean", legend=:topleft)
plt
end
# ╔═╡ a662232b-524b-4918-93e9-204ab908a14d
md"""
### Inference with `Turing.jl`
To reflect the fact that each scientist has different experimental skills, we modify the usual i.i.d. assumption for the data. In chapter 2, we assumed each observation is an independent realisation of the signal plus some subject-specific observation noise. That is, each scientist makes observations with his/her own noise level ``\sigma^2_n``, for ``n = 1,2,\ldots, 7``.
Here we make a small adjustment to that assumption. Instead of assuming 7 different levels of skill, we assume scientists A and B share a similar level of skill, whereas scientists C, D, E, F, and G form another group with better measurement skills. This reduces the number of observation-noise parameters from 7 to 2.
The proposed Bayesian model becomes:
```math
\begin{align}
\text{prior}: \mu &\sim \mathcal N(m_0=0, v_0=10000)\\
\lambda_1 &\sim \texttt{Gamma}(a_0=0.5, b_0=0.5)\\
\lambda_2 &\sim \texttt{Gamma}(a_0=0.5, b_0=0.5)\\
\text{likelihood}: d_1, d_2 &\sim \mathcal N(\mu, 1/\lambda_1) \\
d_n &\sim \mathcal N(\mu, 1/\lambda_2), \;\; \text{for }n = 3, \ldots, 7.
\end{align}
```
Note that the first two observations now share one observation precision while the remaining five share another. The model can be translated to `Turing.jl` easily.
"""
# ╔═╡ 4043d410-e338-45cc-a528-5afc5a23aea1
begin
@model function seven_scientist(data; m₀=0, v₀=10_000, a₀ = .5, b₀ =.5)
# prior for μ
μ ~ Normal(m₀, sqrt(v₀))
# prior for the two precisions
λ1 ~ Gamma(a₀, 1/b₀)
λ2 ~ Gamma(a₀, 1/b₀)
σ1 = sqrt(1/λ1)
σ2 = sqrt(1/λ2)
# likelihood for scientists A and B
data[1] ~ Normal(μ, σ1)
data[2] ~ Normal(μ, σ1)
# likelihood for the other scientists
for i in 3:length(data)
data[i] ~ Normal(μ, σ2)
end
end
end
# ╔═╡ 0572a686-2a09-4c9e-9f33-51a390d5e66f
md"""
After specifying the model, the inference is very straightforward. The model is initialised with the observed data; then we infer the model with a `NUTS()` sampler.
"""
# ╔═╡ 0096e848-46b0-4c85-8526-0302b03c4682
seven_scientist_model = seven_scientist(scientist_data)
# ╔═╡ 04a162c0-3c7c-4056-8f2d-48d83df3d6e7
seven_sci_chain = let
Random.seed!(100)
sample(seven_scientist_model,
NUTS(),
MCMCThreads(),
2000, #2000 iterations per chain
3; #three chains in parallel
discard_initial = 1000,
thinning=4)
end;
# ╔═╡ cea1df62-2e5a-4cac-a30b-3506ecb9b879
md"""
We can summarise the posterior by using `describe(.)` and `plot(.)`. Judging by `rhat`, `ess`, and visual inspection, the chain seems to have mixed well.
The summary statistics also tell us that a 95% credible interval for ``\mu`` is between 8.59 and 10.45.
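The interval can also be computed directly from the chain, e.g. with the sketch below, where `vec(.)` pools the draws from the three parallel chains into one vector:
```julia
quantile(vec(seven_sci_chain[:μ]), [0.025, 0.975])
```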
"""
# ╔═╡ 2af9eda0-a5f6-4b3c-b973-ea3c2ca03290
describe(seven_sci_chain)
# ╔═╡ a0671aeb-32dc-42f1-919f-a32f1c365a9a
plot(seven_sci_chain)
# ╔═╡ c266890f-da07-44be-b98f-cc129463ca6c
density(seven_sci_chain[:μ], fill=(0, 0.1), label="", xlabel=L"\mu", ylabel="density", title="Posterior "*L"p(\mu|\mathcal{D})")
# ╔═╡ 5bd72181-aa58-43b6-837e-47414c7152a1
md"""
## Predictive check with `Turing.jl`
"""
# ╔═╡ 7f16ec4f-ea2f-4990-9a6b-e473b997d786
md"""
Predictive checks are effective tools to evaluate whether the assumptions we make in the modelling stage are a good fit for the real data-generation process. The idea behind the checks is to generate simulated data using samples drawn from the prior or posterior distribution and compare them to the observed data; the two variants are accordingly called the **prior predictive check** and the **posterior predictive check**.
The data simulation process is listed below:
!!! information ""
Repeat the following many times:
1. First draw one sample from the posterior (or the prior for prior predictive check):
$$\tilde \theta \sim p(\theta|\mathcal D)$$
2. Second, conditioning on ``\tilde \theta``, simulate the pseudo observations:
$$\tilde{\mathcal D}\sim p(\mathcal D|\tilde{\theta})$$
In chapter 2, we did the simulation manually. For more complicated models, the process soon becomes tedious and overly complicated. Fortunately, `Turing.jl` provides an easy-to-use tool called `predict()` to semi-automate the process.
Recall that when `Turing.jl` carries out the sampling procedure, all random variables (specified on the left-hand side of `~`) will be sampled except those passed in as concrete observations, such as `data` in the coin-flipping example. To trigger the program to sample the observations instead, one can pass an array of `missing` values in place of the observed data. `missing` is a special data type in Julia that indicates the value of a variable is missing. For `Turing`, all variables of `missing` type that appear on the left-hand side of a `~` expression will be sampled.
To be more specific, to simulate data ``\mathcal D_{pred}`` based on a predictive distribution, one should carry out the following steps:
1. create a data array with ``N`` `missing` elements, e.g. `D_missing = Vector{Union{Missing, Bool}}(undef, N)`
2. initialise a `Turing` model with the missing data as an argument, e.g. `missing_model = turing_model(D_missing)`
3. use `predict(missing_model, chain)` to simulate predictive data, where `chain` should be MCMC samples from the posterior or prior depending on the check
"""
# ╔═╡ 15a73e90-c4b5-4cdf-bd9d-1ec4e02e0f2e
md"""
### Example: predictive checks of coin-flipping model
"""
# ╔═╡ dd24a62d-dc0b-4ea1-a9dd-a65527d08776
md"""
**Posterior predictive check.** The following block of code shows how to simulate future data based on the posterior distribution of the coin-flipping example.
"""
# ╔═╡ 35ddd253-e28c-4f30-a2a1-ef81a61a740a
begin
# initialise an array of missing types
# Union{Missing, Bool} is union type of Missing and Boolean
# D_missing_coin_flipping is an array of 10 missing elements
D_missing_coin_flipping = Vector{Union{Missing, Bool}}(undef, 10)
cf_pred_model = coin_flipping(D_missing_coin_flipping)
# D_pred as an MCMCChain object.
post_pred_chain = predict(cf_pred_model, cf_chain)
end
# ╔═╡ 5b01a0b7-affd-465d-b9f1-422d76ce6dca
md"""Remember `cf_chain` is the posterior MCMC chain sampled earlier. The prediction method will return another `MCMCChain` object of ``\mathcal D_{pred}``. For this case, it should contain 6000 simulated ``\mathcal D_{pred}`` (and each sample contains 10 tosses), since `cf_chain` were simulated with 3 parallel chains and each chain with 2000 iterations.
To summarise the simulated data, we sum each ``\mathcal D^{(r)}_{pred}`` to find the simulated ``\tilde{N}_h^{(r)}`` and use histogram to do visual check.
"""
# ╔═╡ 846d2693-b644-4cd7-af2a-5ce6b843cb7d
let
histogram(sum(Array(post_pred_chain), dims=2)[:], normed=true, xticks = 0:10, label="Posterior predictive on "*L"N_h", legend=:outerbottom, xlabel="number of heads "*L"N_h", title="Posterior predictive check with "*L"a_0=b_0=1")
vline!([7], lw=4, lc=2, label="Observed "*L"N_h")
end
# ╔═╡ e896351f-ae21-48ae-a0a2-e53e9b54cd9a
Foldable("Julia code explanation.", md"`sum(Array(post_pred_chain), dims=2)[:]`
* `Array(post_pred_chain)` casts an `MCMCChain` object to an `Array` so it can be processed further
* `sum(., dims=2)` sums the matrix along its second dimension, i.e. each row (one simulated dataset) is summed across its columns, returning an ``n \times 1`` matrix
* `[:]` flattens the result into a plain vector
")
# ╔═╡ 11989831-8179-4978-adfa-480f9a962f5f
md"""
**Prior predictive check.** To carry out a prior predictive check, one only needs to replace the posterior chain with a prior chain. To sample from the prior distribution, one can use a command like `sample(model, Prior(), 5000)`. The rest is the same as for the posterior predictive check.
"""
# ╔═╡ 8c1e3f71-bae6-41be-a496-24dceaebc672
begin
Random.seed!(100)
# sample from the prior distribution
coinflip_prior_chain = sample(cf_model, Prior(), 5000)
# simulate data based on the prior chain
prior_pred_chain = predict(cf_pred_model, coinflip_prior_chain)
# lastly, summarise and visualise the data
histogram(sum(Array(prior_pred_chain), dims=2)[:], bins = 20, xticks=0:10, normed=true, label="Prior predictive on Nₕ", legend=:outerbottom, xlabel="number of heads "*L"N_h", title="Prior predictive check with "*L"a_0=b_0=1")
vline!([7], lw=4, lc=2, label="Observed Nₕ")
end
# ╔═╡ b21c5a1f-9ba4-40f3-b58a-d9f635d36dbf
md"""
### Exercise: seven scientists
Carry out posterior predictive check on the seven-scientist problem by using `Turing.jl`. Replicate the KDE check in chapter 2 (as shown below).
"""
# ╔═╡ 15b3cfae-dc5a-4f5a-ad3a-e3f152c88e7a
md"""
!!! hint "Solution"
Simulate posterior predictive data
```julia
D_missing_scientist = Vector{Union{Missing, Float64}}(undef, 7)
seven_sci_pred_model = seven_scientist(D_missing_scientist)
D_pred_seven_sci = predict(seven_sci_pred_model, seven_sci_chain)
```
Plot KDEs of the observed and simulated.
```julia
D_pred = Array(D_pred_seven_sci)
R = 30
plt = density(scientist_data, label="Observed", lw=2, xlim=[-32,30], ylim=[0, 0.25], xlabel=L"d", ylabel="density", title="Posterior predictive check on the correct model")
for i in 1: R
density!(D_pred[i, :], label="", lw=0.4)
end
plt
```
"""
# ╔═╡ 23b3faad-daf0-4573-b8e5-175748ccbea8
md"[**↩ Home**](https://lf28.github.io/BayesianModelling/)
[**↪ Next Chapter**](./section5_regressions.html)
"
# ╔═╡ 9ac490e1-2b15-40f1-a07a-543ce9dd95be
md"""
## Appendix
"""
# ╔═╡ 5202ef8d-dcd5-4882-a18e-b1d2d4769387
begin
D_missing_scientist = Vector{Union{Missing, Float64}}(undef, 7)
seven_sci_pred_model = seven_scientist(D_missing_scientist)
D_pred_seven_sci = predict(seven_sci_pred_model, seven_sci_chain)
end;
# ╔═╡ 645b1343-6717-4920-a319-d9da7e14b29f
begin
D_pred_seven = Array(D_pred_seven_sci)
plt_seven = density(scientist_data, label="Observed", lw=2, xlim=[-32,30], ylim=[0, 0.25], xlabel=L"d", ylabel="density", title="Posterior predictive check on the seven-scientist model")
for i in 1: 30
density!(D_pred_seven[i, :], label="", lw=0.4)
end
end;
# ╔═╡ dda506eb-db21-436b-ad9f-3b95450347c7
let
plt_seven
end
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
HypertextLiteral = "ac1192a8-f4b3-4bfe-ba22-af5b92cd3ab2"
LaTeXStrings = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
PlutoTeachingTools = "661c6b06-c737-4d37-b85c-46df65de6f69"
PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd"
Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0"
[compat]
HypertextLiteral = "~0.9.4"
LaTeXStrings = "~1.3.0"
Plots = "~1.40.8"
PlutoTeachingTools = "~0.2.15"
PlutoUI = "~0.7.39"
StatsPlots = "~0.15.1"
Turing = "~0.34.1"
"""