diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml new file mode 100644 index 000000000..5e865d93a --- /dev/null +++ b/.JuliaFormatter.toml @@ -0,0 +1,21 @@ +style="blue" +format_markdown = true +import_to_using = false +# TODO +# We ignore these files because when formatting was first put in place they were being worked on. +# These ignores should be removed once the relevant PRs are merged/closed. +ignore = [ + # https://github.com/TuringLang/Turing.jl/pull/2231/files + "src/experimental/gibbs.jl", + "src/mcmc/abstractmcmc.jl", + "test/experimental/gibbs.jl", + "test/test_utils/numerical_tests.jl", + # https://github.com/TuringLang/Turing.jl/pull/2218/files + "src/mcmc/Inference.jl", + "test/mcmc/Inference.jl", + # https://github.com/TuringLang/Turing.jl/pull/1887 # Enzyme PR + "test/mcmc/Inference.jl", + "test/mcmc/hmc.jl", + "test/mcmc/sghmc.jl", + "test/runtests.jl", +] diff --git a/.github/workflows/DynamicHMC.yml b/.github/workflows/DynamicHMC.yml deleted file mode 100644 index d66c6988b..000000000 --- a/.github/workflows/DynamicHMC.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: DynamicHMC-CI - -on: - push: - branches: - - master - pull_request: - -jobs: - test: - runs-on: ${{ matrix.os }} - strategy: - matrix: - version: - - '1' - os: - - ubuntu-latest - arch: - - x64 - steps: - - uses: actions/checkout@v2 - - uses: julia-actions/setup-julia@v1 - with: - version: ${{ matrix.version }} - arch: ${{ matrix.arch }} - - uses: actions/cache@v1 - env: - cache-name: cache-artifacts - with: - path: ~/.julia/artifacts - key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} - restore-keys: | - ${{ runner.os }}-test-${{ env.cache-name }}- - ${{ runner.os }}-test- - ${{ runner.os }}- - - uses: julia-actions/julia-buildpkg@latest - - uses: julia-actions/julia-runtest@latest - with: - coverage: false - env: - STAGE: dynamichmc diff --git a/.github/workflows/Format.yml b/.github/workflows/Format.yml new file mode 100644 index 000000000..abeefa989 --- /dev/null +++ b/.github/workflows/Format.yml @@ -0,0 +1,38 @@ +name: Format + +on: + push: + branches: + - master + pull_request: + branches: + - master + merge_group: + types: [checks_requested] + +concurrency: + # Skip intermediate builds: always. + # Cancel intermediate builds: only if it is a pull request build. 
+ group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} + +jobs: + format: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@latest + with: + version: 1 + - name: Format code + run: | + using Pkg + Pkg.add(; name="JuliaFormatter", uuid="98e50ef6-434e-11e9-1051-2b60c6c9e899") + using JuliaFormatter + format("."; verbose=true) + shell: julia --color=yes {0} + - uses: reviewdog/action-suggester@v1 + if: github.event_name == 'pull_request' + with: + tool_name: JuliaFormatter + fail_on_error: true diff --git a/.github/workflows/Numerical.yml b/.github/workflows/Numerical.yml deleted file mode 100644 index 977fc86f7..000000000 --- a/.github/workflows/Numerical.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Numerical - -on: - push: - branches: - - master - pull_request: - -jobs: - test: - runs-on: ${{ matrix.os }} - strategy: - matrix: - version: - - '1' - os: - - ubuntu-latest - arch: - - x64 - steps: - - uses: actions/checkout@v2 - - uses: julia-actions/setup-julia@v1 - with: - version: ${{ matrix.version }} - arch: ${{ matrix.arch }} - - uses: actions/cache@v1 - env: - cache-name: cache-artifacts - with: - path: ~/.julia/artifacts - key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} - restore-keys: | - ${{ runner.os }}-test-${{ env.cache-name }}- - ${{ runner.os }}-test- - - uses: julia-actions/julia-buildpkg@latest - - uses: julia-actions/julia-runtest@latest - with: - coverage: false - env: - STAGE: numerical diff --git a/.github/workflows/Tests.yml b/.github/workflows/Tests.yml new file mode 100644 index 000000000..1d4a10567 --- /dev/null +++ b/.github/workflows/Tests.yml @@ -0,0 +1,87 @@ +name: Tests + +on: + push: + branches: + - master + pull_request: + +jobs: + test: + runs-on: ${{ matrix.os }} + continue-on-error: ${{ matrix.version == 'nightly' }} + + strategy: + fail-fast: false + matrix: + test-args: + # Run some of the slower test files individually. The last one catches everything + # not included in the others. + - "essential/ad.jl" + - "mcmc/gibbs.jl" + - "mcmc/hmc.jl" + - "mcmc/abstractmcmc.jl" + - "mcmc/Inference.jl" + - "experimental/gibbs.jl" + - "mcmc/ess.jl" + - "--skip essential/ad.jl mcmc/gibbs.jl mcmc/hmc.jl mcmc/abstractmcmc.jl mcmc/Inference.jl experimental/gibbs.jl mcmc/ess.jl" + version: + #- '1.7' TODO(mhauru): Temporarily disabled for Enzyme + - '1' + os: + - ubuntu-latest + #- windows-latest TODO(mhauru): Temporarily disabled for Enzyme + #- macOS-latest TODO(mhauru): Temporarily disabled for Enzyme + arch: + - x64 + #- x86 TODO(mhauru): Temporarily disabled for Enzyme + num_threads: + - 1 + - 2 + exclude: + # With Windows and macOS, only run Julia 1.7, x64, 2 threads. We just want to see + # some combination work on OSes other than Ubuntu. + - os: windows-latest + version: '1' + - os: macOS-latest + version: '1' + - os: windows-latest + arch: x86 + - os: macOS-latest + arch: x86 + - os: windows-latest + num_threads: 1 + - os: macOS-latest + num_threads: 1 + # It's sufficient to test x86 with one version of Julia and one thread. 
+ - version: '1' + arch: x86 + - num_threads: 2 + arch: x86 + + steps: + - name: Print matrix variables + run: | + echo "OS: ${{ matrix.os }}" + echo "Architecture: ${{ matrix.arch }}" + echo "Julia version: ${{ matrix.version }}" + echo "Number of threads: ${{ matrix.num_threads }}" + echo "Test arguments: ${{ matrix.test-args }}" + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v2 + with: + version: '${{ matrix.version }}' + arch: ${{ matrix.arch }} + - uses: actions/cache@v4 + env: + cache-name: cache-artifacts + with: + path: ~/.julia/artifacts + key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} + restore-keys: | + ${{ runner.os }}-test-${{ env.cache-name }}- + ${{ runner.os }}-test- + ${{ runner.os }}- + - uses: julia-actions/julia-buildpkg@latest + - name: Call Pkg.test + run: julia --color=yes --depwarn=yes --check-bounds=yes --threads=${{ matrix.num_threads }} --project=@. -e 'import Pkg; Pkg.test(; test_args=ARGS)' -- ${{ matrix.test-args }} diff --git a/.github/workflows/TuringCI.yml b/.github/workflows/TuringCI.yml deleted file mode 100644 index cc8648a7a..000000000 --- a/.github/workflows/TuringCI.yml +++ /dev/null @@ -1,56 +0,0 @@ -name: Turing-CI - -on: - push: - branches: - - master - pull_request: - -jobs: - test: - runs-on: ${{ matrix.os }} - continue-on-error: ${{ matrix.version == 'nightly' }} - strategy: - matrix: - version: - - '1' - os: - - ubuntu-latest - arch: - - x64 - num_threads: - - 1 - - 2 - steps: - - uses: actions/checkout@v2 - - uses: julia-actions/setup-julia@v1 - with: - version: ${{ matrix.version }} - arch: ${{ matrix.arch }} - - uses: actions/cache@v1 - env: - cache-name: cache-artifacts - with: - path: ~/.julia/artifacts - key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} - restore-keys: | - ${{ runner.os }}-test-${{ env.cache-name }}- - ${{ runner.os }}-test- - ${{ runner.os }}- - - uses: julia-actions/julia-buildpkg@latest - - uses: julia-actions/julia-runtest@latest - with: - coverage: ${{ matrix.version == '1.6' && matrix.os == 'ubuntu-latest' && matrix.num_threads == 1 }} - env: - JULIA_NUM_THREADS: ${{ matrix.num_threads }} - - uses: julia-actions/julia-processcoverage@v1 - if: matrix.version == '1.7' && matrix.os == 'ubuntu-latest' && matrix.num_threads == 1 - - uses: codecov/codecov-action@v1 - if: matrix.version == '1.7' && matrix.os == 'ubuntu-latest' && matrix.num_threads == 1 - with: - file: lcov.info - - uses: coverallsapp/github-action@master - if: matrix.version == '1.7' && matrix.os == 'ubuntu-latest' && matrix.num_threads == 1 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - path-to-lcov: lcov.info diff --git a/HISTORY.md b/HISTORY.md index 0b3661739..5b1cad0ed 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,91 +1,117 @@ +# Release 0.33.0 + +## Breaking changes + +The following exported functions have been removed: + + - `constrained_space` + - `get_parameter_bounds` + - `optim_objective` + - `optim_function` + - `optim_problem` + +The same functionality is now offered by the new exported functions + + - `maximum_likelihood` + - `maximum_a_posteriori` + # Release 0.30.5 -- `essential/ad.jl` is removed, `ForwardDiff` and `ReverseDiff` integrations via `LogDensityProblemsAD` are moved to `DynamicPPL` and live in corresponding package extensions. -- `LogDensityProblemsAD.ADgradient(ℓ::DynamicPPL.LogDensityFunction)` (i.e. the single argument method) is moved to `Inference` module. 
It will create `ADgradient` using the `adtype` information stored in `context` field of `ℓ`. -- `getADbackend` function is renamed to `getADType`, the interface is preserved, but packages that previously used `getADbackend` should be updated to use `getADType`. -- `TuringTag` for ForwardDiff is also removed, now `DynamicPPLTag` is defined in `DynamicPPL` package and should serve the same [purpose](https://www.stochasticlifestyle.com/improved-forwarddiff-jl-stacktraces-with-package-tags/). + - `essential/ad.jl` is removed, `ForwardDiff` and `ReverseDiff` integrations via `LogDensityProblemsAD` are moved to `DynamicPPL` and live in corresponding package extensions. + - `LogDensityProblemsAD.ADgradient(ℓ::DynamicPPL.LogDensityFunction)` (i.e. the single argument method) is moved to `Inference` module. It will create `ADgradient` using the `adtype` information stored in `context` field of `ℓ`. + - `getADbackend` function is renamed to `getADType`, the interface is preserved, but packages that previously used `getADbackend` should be updated to use `getADType`. + - `TuringTag` for ForwardDiff is also removed, now `DynamicPPLTag` is defined in `DynamicPPL` package and should serve the same [purpose](https://www.stochasticlifestyle.com/improved-forwarddiff-jl-stacktraces-with-package-tags/). # Release 0.30.0 -- [`ADTypes.jl`](https://github.com/SciML/ADTypes.jl) replaced Turing's global AD backend. Users should now specify the desired `ADType` directly in sampler constructors, e.g., `HMC(0.1, 10; adtype=AutoForwardDiff(; chunksize))`, or `HMC(0.1, 10; adtype=AutoReverseDiff(false))` (`false` indicates not to use compiled tape). -- Interface functions such as `ADBackend`, `setadbackend`, `setadsafe`, `setchunksize`, and `setrdcache` are deprecated and will be removed in a future release. -- Removed the outdated `verifygrad` function. -- Updated to a newer version of `LogDensityProblemsAD` (v1.7). + - [`ADTypes.jl`](https://github.com/SciML/ADTypes.jl) replaced Turing's global AD backend. Users should now specify the desired `ADType` directly in sampler constructors, e.g., `HMC(0.1, 10; adtype=AutoForwardDiff(; chunksize))`, or `HMC(0.1, 10; adtype=AutoReverseDiff(false))` (`false` indicates not to use compiled tape). + - Interface functions such as `ADBackend`, `setadbackend`, `setadsafe`, `setchunksize`, and `setrdcache` are deprecated and will be removed in a future release. + - Removed the outdated `verifygrad` function. + - Updated to a newer version of `LogDensityProblemsAD` (v1.7). # Release 0.12.0 -- The interface for defining new distributions with constrained support and making them compatible with `Turing` has changed. To make a custom distribution type `CustomDistribution` compatible with `Turing`, the user needs to define the method `bijector(d::CustomDistribution)` that returns an instance of type `Bijector` implementing the `Bijectors.Bijector` API. -- `~` is now thread-safe when used for observations, but not assumptions (non-observed model parameters) yet. -- There were some performance improvements in the automatic differentiation (AD) of functions in `DistributionsAD` and `Bijectors`, leading to speeds closer to and sometimes faster than Stan's. -- An `HMC` initialization bug was fixed. `HMC` initialization in Turing is now consistent with Stan's. -- Sampling from the prior is now possible using `sample`. 
-- `psample` is now deprecated in favour of `sample(model, sampler, parallel_method, n_samples, n_chains)` where `parallel_method` can be either `MCMCThreads()` or `MCMCDistributed()`. `MCMCThreads` will use your available threads to sample each chain (ensure that you have the environment variable `JULIA_NUM_THREADS` set to the number of threads you want to use), and `MCMCDistributed` will dispatch chain sampling to each available process (you can add processes with `addprocs()`). -- Turing now uses `AdvancedMH.jl` v0.5, which mostly provides behind-the-scenes restructuring. -- Custom expressions and macros can be interpolated in the `@model` definition with `$`; it is possible to use `@.` also for assumptions (non-observed model parameters) and observations. -- The macros `@varinfo`, `@logpdf`, and `@sampler` are removed. Instead, one can access the internal variables `_varinfo`, `_model`, `_sampler`, and `_context` in the `@model` definition. -- Additional constructors for `SMC` and `PG` make it easier to choose the resampling method and threshold. + - The interface for defining new distributions with constrained support and making them compatible with `Turing` has changed. To make a custom distribution type `CustomDistribution` compatible with `Turing`, the user needs to define the method `bijector(d::CustomDistribution)` that returns an instance of type `Bijector` implementing the `Bijectors.Bijector` API. + - `~` is now thread-safe when used for observations, but not assumptions (non-observed model parameters) yet. + - There were some performance improvements in the automatic differentiation (AD) of functions in `DistributionsAD` and `Bijectors`, leading to speeds closer to and sometimes faster than Stan's. + - An `HMC` initialization bug was fixed. `HMC` initialization in Turing is now consistent with Stan's. + - Sampling from the prior is now possible using `sample`. + - `psample` is now deprecated in favour of `sample(model, sampler, parallel_method, n_samples, n_chains)` where `parallel_method` can be either `MCMCThreads()` or `MCMCDistributed()`. `MCMCThreads` will use your available threads to sample each chain (ensure that you have the environment variable `JULIA_NUM_THREADS` set to the number of threads you want to use), and `MCMCDistributed` will dispatch chain sampling to each available process (you can add processes with `addprocs()`). + - Turing now uses `AdvancedMH.jl` v0.5, which mostly provides behind-the-scenes restructuring. + - Custom expressions and macros can be interpolated in the `@model` definition with `$`; it is possible to use `@.` also for assumptions (non-observed model parameters) and observations. + - The macros `@varinfo`, `@logpdf`, and `@sampler` are removed. Instead, one can access the internal variables `_varinfo`, `_model`, `_sampler`, and `_context` in the `@model` definition. + - Additional constructors for `SMC` and `PG` make it easier to choose the resampling method and threshold. 
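As a quick illustration of the `psample` replacement described in the entry above, here is a minimal sketch, assuming a toy `demo` model (the model and the chain count are purely illustrative); the `sample(model, sampler, MCMCThreads(), n_samples, n_chains)` form is the one the changelog refers to:

```julia
using Turing

# Toy model used only for illustration.
@model function demo(x)
    s² ~ InverseGamma(2, 3)
    m ~ Normal(0, sqrt(s²))
    for i in eachindex(x)
        x[i] ~ Normal(m, sqrt(s²))
    end
end

model = demo([1.5, 2.0])

# Previously: psample(model, NUTS(0.65), 1_000, 4)
# Now: pass the parallelism mode as the third argument.
# MCMCThreads() runs chains on the available Julia threads (set JULIA_NUM_THREADS),
# MCMCDistributed() dispatches chains to worker processes added with addprocs().
chains = sample(model, NUTS(0.65), MCMCThreads(), 1_000, 4)
```

With `MCMCThreads()`, full parallelism needs at least as many Julia threads as chains; with fewer threads the chains simply share them.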
# Release 0.11.0 -- Removed some extraneous imports and dependencies ([#1182](https://github.com/TuringLang/Turing.jl/pull/1182)) -- Minor backend changes to `sample` and `psample`, which now use functions defined upstream in AbstractMCMC.jl ([#1187](https://github.com/TuringLang/Turing.jl/pull/1187)) -- Fix for an AD-related crash ([#1202](https://github.com/TuringLang/Turing.jl/pull/1202)) -- StatsBase compat update to 0.33 ([#1185](https://github.com/TuringLang/Turing.jl/pull/1185)) -- Bugfix for ReverseDiff caching and memoization ([#1208](https://github.com/TuringLang/Turing.jl/pull/1208)) -- BREAKING: `VecBinomialLogit` is now removed. Also `BernoulliLogit` is added ([#1214](https://github.com/TuringLang/Turing.jl/pull/1214)) -- Bugfix for cases where dynamic models were breaking with HMC methods ([#1217](https://github.com/TuringLang/Turing.jl/pull/1217)) -- Updates to allow AdvancedHMC 0.2.23 ([#1218](https://github.com/TuringLang/Turing.jl/pull/1218)) -- Add more informative error messages for SMC ([#900](https://github.com/TuringLang/Turing.jl/pull/900)) + + - Removed some extraneous imports and dependencies ([#1182](https://github.com/TuringLang/Turing.jl/pull/1182)) + - Minor backend changes to `sample` and `psample`, which now use functions defined upstream in AbstractMCMC.jl ([#1187](https://github.com/TuringLang/Turing.jl/pull/1187)) + - Fix for an AD-related crash ([#1202](https://github.com/TuringLang/Turing.jl/pull/1202)) + - StatsBase compat update to 0.33 ([#1185](https://github.com/TuringLang/Turing.jl/pull/1185)) + - Bugfix for ReverseDiff caching and memoization ([#1208](https://github.com/TuringLang/Turing.jl/pull/1208)) + - BREAKING: `VecBinomialLogit` is now removed. Also `BernoulliLogit` is added ([#1214](https://github.com/TuringLang/Turing.jl/pull/1214)) + - Bugfix for cases where dynamic models were breaking with HMC methods ([#1217](https://github.com/TuringLang/Turing.jl/pull/1217)) + - Updates to allow AdvancedHMC 0.2.23 ([#1218](https://github.com/TuringLang/Turing.jl/pull/1218)) + - Add more informative error messages for SMC ([#900](https://github.com/TuringLang/Turing.jl/pull/900)) # Release 0.10.1 -- Fix bug where arrays with mixed integers, floats, and missing values were not being passed to the `MCMCChains.Chains` constructor properly [#1180](https://github.com/TuringLang/Turing.jl/pull/1180). + + - Fix bug where arrays with mixed integers, floats, and missing values were not being passed to the `MCMCChains.Chains` constructor properly [#1180](https://github.com/TuringLang/Turing.jl/pull/1180). # Release 0.10.0 -- Update elliptical slice sampling to use [EllipticalSliceSampling.jl](https://github.com/TuringLang/EllipticalSliceSampling.jl) on the backend. [#1145](https://github.com/TuringLang/Turing.jl/pull/1145). Nothing should change from a front-end perspective -- you can still call `sample(model, ESS(), 1000)`. -- Added default progress loggers in [#1149](https://github.com/TuringLang/Turing.jl/pull/1149). -- The symbols used to define the AD backend have changed to be the lowercase form of the package name used for AD. `forward_diff` is now `forwarddiff`, `reverse_diff` is now `tracker`, and `zygote` and `reversediff` are newly supported (see below). `forward_diff` and `reverse_diff` are deprecated and are slated to be removed. -- Turing now has experimental support for Zygote.jl ([#783](https://github.com/TuringLang/Turing.jl/pull/783)) and ReverseDiff.jl ([#1170](https://github.com/TuringLang/Turing.jl/pull/1170)) AD backends. 
Both backends are experimental, so please report any bugs you find. Zygote does not allow mutation within your model, so please be aware of this issue. You can enable Zygote with `Turing.setadbackend(:zygote)` and you can enable ReverseDiff with `Turing.setadbackend(:reversediff)`, though to use either you must import the package with `using Zygote` or `using ReverseDiff`. `for` loops are not recommended for ReverseDiff or Zygote -- see [performance tips](https://turinglang.org/dev/docs/using-turing/performancetips#special-care-for-codetrackercode-and-codezygotecode) for more information. -- Fix MH indexing bug [#1135](https://github.com/TuringLang/Turing.jl/pull/1135). -- Fix MH array sampling [#1167](https://github.com/TuringLang/Turing.jl/pull/1167). -- Fix bug in VI where the bijectors where being inverted incorrectly [#1168](https://github.com/TuringLang/Turing.jl/pull/1168). -- The Gibbs sampler handles state better by passing `Transition` structs to the local samplers ([#1169](https://github.com/TuringLang/Turing.jl/pull/1169) and [#1166](https://github.com/TuringLang/Turing.jl/pull/1166)). + + - Update elliptical slice sampling to use [EllipticalSliceSampling.jl](https://github.com/TuringLang/EllipticalSliceSampling.jl) on the backend. [#1145](https://github.com/TuringLang/Turing.jl/pull/1145). Nothing should change from a front-end perspective -- you can still call `sample(model, ESS(), 1000)`. + - Added default progress loggers in [#1149](https://github.com/TuringLang/Turing.jl/pull/1149). + - The symbols used to define the AD backend have changed to be the lowercase form of the package name used for AD. `forward_diff` is now `forwarddiff`, `reverse_diff` is now `tracker`, and `zygote` and `reversediff` are newly supported (see below). `forward_diff` and `reverse_diff` are deprecated and are slated to be removed. + - Turing now has experimental support for Zygote.jl ([#783](https://github.com/TuringLang/Turing.jl/pull/783)) and ReverseDiff.jl ([#1170](https://github.com/TuringLang/Turing.jl/pull/1170)) AD backends. Both backends are experimental, so please report any bugs you find. Zygote does not allow mutation within your model, so please be aware of this issue. You can enable Zygote with `Turing.setadbackend(:zygote)` and you can enable ReverseDiff with `Turing.setadbackend(:reversediff)`, though to use either you must import the package with `using Zygote` or `using ReverseDiff`. `for` loops are not recommended for ReverseDiff or Zygote -- see [performance tips](https://turinglang.org/dev/docs/using-turing/performancetips#special-care-for-codetrackercode-and-codezygotecode) for more information. + - Fix MH indexing bug [#1135](https://github.com/TuringLang/Turing.jl/pull/1135). + - Fix MH array sampling [#1167](https://github.com/TuringLang/Turing.jl/pull/1167). + - Fix bug in VI where the bijectors where being inverted incorrectly [#1168](https://github.com/TuringLang/Turing.jl/pull/1168). + - The Gibbs sampler handles state better by passing `Transition` structs to the local samplers ([#1169](https://github.com/TuringLang/Turing.jl/pull/1169) and [#1166](https://github.com/TuringLang/Turing.jl/pull/1166)). # Release 0.4.0-alpha -- Fix compatibility with Julia 0.6 [#341, #330, #293] -- Support of Stan interface [#343, #326] -- Fix Binomial distribution for gradients. 
[#311] -- Stochastic gradient Hamiltonian Monte Carlo [#201]; Stochastic gradient Langevin dynamics [#27] -- More particle MCMC family samplers: PIMH & PMMH [#364, #369] -- Disable adaptive resampling for CSMC [#357] -- Fix resampler for SMC [#338] -- Interactive particle MCMC [#334] -- Add type alias CSMC for PG [#333] -- Fix progress meter [#317] + + - Fix compatibility with Julia 0.6 [#341, #330, #293] + - Support of Stan interface [#343, #326] + - Fix Binomial distribution for gradients. [#311] + - Stochastic gradient Hamiltonian Monte Carlo [#201]; Stochastic gradient Langevin dynamics [#27] + - More particle MCMC family samplers: PIMH & PMMH [#364, #369] + - Disable adaptive resampling for CSMC [#357] + - Fix resampler for SMC [#338] + - Interactive particle MCMC [#334] + - Add type alias CSMC for PG [#333] + - Fix progress meter [#317] # Release 0.3 -- NUTS implementation #188 -- HMC: Transforms of ϵ for each variable #67 (replace with introducing mass matrix) -- Finish: Sampler (internal) interface design #107 -- Substantially improve performance of HMC and Gibbs #7 - - Vectorising likelihood computations #117 #255 - - Remove obsolete `randoc`, `randc`? #156 -- Support truncated distribution. #87 -- Refactoring code: Unify VarInfo, Trace, TaskLocalStorage #96 -- Refactoring code: Better gradient interface #97 + + - NUTS implementation #188 + - HMC: Transforms of ϵ for each variable #67 (replace with introducing mass matrix) + - Finish: Sampler (internal) interface design #107 + - Substantially improve performance of HMC and Gibbs #7 + - Vectorising likelihood computations #117 #255 + - Remove obsolete `randoc`, `randc`? #156 + - Support truncated distribution. #87 + - Refactoring code: Unify VarInfo, Trace, TaskLocalStorage #96 + - Refactoring code: Better gradient interface #97 # Release 0.2 -- Gibbs sampler ([#73]) -- HMC for constrained variables ([#66]; no support for varying dimensions) -- Added support for `Mamba.Chain` ([#90]): describe, plot etc. -- New interface design ([#55]), ([#104]) -- Bugfixes and general improvements (e.g. `VarInfo` [#96]) + + - Gibbs sampler ([#73]) + - HMC for constrained variables ([#66]; no support for varying dimensions) + - Added support for `Mamba.Chain` ([#90]): describe, plot etc. + - New interface design ([#55]), ([#104]) + - Bugfixes and general improvements (e.g. `VarInfo` [#96]) # Release 0.1.0 -- Initial support for Hamiltonian Monte Carlo (no support for discrete/constrained variables) -- Require Julia 0.5 -- Bugfixes and general improvements + + - Initial support for Hamiltonian Monte Carlo (no support for discrete/constrained variables) + - Require Julia 0.5 + - Bugfixes and general improvements # Release 0.0.1-0.0.4 -The initial releases of Turing. -- Particle MCMC, SMC, IS -- Implemented [copying for Julia Task](https://github.com/JuliaLang/julia/pull/15078) -- Implemented copy-on-write data structure `TArray` for Tasks + +The initial releases of Turing. 
+ + - Particle MCMC, SMC, IS + - Implemented [copying for Julia Task](https://github.com/JuliaLang/julia/pull/15078) + - Implemented copy-on-write data structure `TArray` for Tasks diff --git a/Project.toml b/Project.toml index e764f4f6c..3ee30eb95 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "Turing" uuid = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" -version = "0.32.1" +version = "0.33" [deps] ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" @@ -12,6 +12,7 @@ AdvancedPS = "576499cb-2369-40b2-a588-c64705576edc" AdvancedVI = "b5ca4192-6429-45e5-a2d9-87aec30a685c" BangBang = "198e06fe-97b7-11e9-32a5-e1d131e6ad66" Bijectors = "76274a88-744f-5084-9051-94815aaf08c4" +Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" DistributionsAD = "ced4e74d-a319-5a8a-b0ac-84af2272839c" @@ -25,6 +26,8 @@ LogDensityProblems = "6fdf6af0-433a-55f7-b3ed-c6c6e0b8df7c" LogDensityProblemsAD = "996a588d-648d-4e1f-a8f0-a84b347e47b1" MCMCChains = "c7f686f2-ff18-58e9-bc7b-31028e88f75d" NamedArrays = "86f7a689-2022-50b4-a561-43c23ac3c673" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e" OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" @@ -61,18 +64,24 @@ DistributionsAD = "0.6" DocStringExtensions = "0.8, 0.9" DynamicHMC = "3.4" DynamicPPL = "0.27.1" +Compat = "4.15.0" EllipticalSliceSampling = "0.5, 1, 2" ForwardDiff = "0.10.3" Libtask = "0.7, 0.8" +LinearAlgebra = "1" LogDensityProblems = "2" LogDensityProblemsAD = "1.7.0" MCMCChains = "5, 6" NamedArrays = "0.9, 0.10" +Optimization = "3" +OptimizationOptimJL = "0.1, 0.2, 0.3" OrderedCollections = "1" +Printf = "1" +Random = "1" Optim = "1" Reexport = "0.2, 1" Requires = "0.5, 1.0" -SciMLBase = "1.37.1, 2" +SciMLBase = "1.92.1, 2" SpecialFunctions = "0.7.2, 0.8, 0.9, 0.10, 1, 2" Statistics = "1.6" StatsAPI = "1.6" diff --git a/README.md b/README.md index fa95b3378..4ea12d52e 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,7 @@ [![Build Status](https://github.com/TuringLang/Turing.jl/workflows/Turing-CI/badge.svg)](https://github.com/TuringLang/Turing.jl/actions?query=workflow%3ATuring-CI+branch%3Amaster) [![Coverage Status](https://coveralls.io/repos/github/TuringLang/Turing.jl/badge.svg?branch=master)](https://coveralls.io/github/TuringLang/Turing.jl?branch=master) [![codecov](https://codecov.io/gh/TuringLang/Turing.jl/branch/master/graph/badge.svg?token=OiUBsnDQqf)](https://codecov.io/gh/TuringLang/Turing.jl) -[![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac) - +[![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor%27s%20Guide-blueviolet)](https://github.com/SciML/ColPrac) ## Getting Started @@ -12,7 +11,6 @@ Turing's home page, with links to everything you'll need to use Turing, is avail https://turinglang.org/docs/ - ## What's changed recently? See [releases](https://github.com/TuringLang/Turing.jl/releases). @@ -25,6 +23,5 @@ You can see the complete list on Github: https://github.com/TuringLang/Turing.jl Turing is an open source project so if you feel you have some relevant skills and are interested in contributing, please get in touch. 
See the [Contributing](https://turinglang.org/dev/docs/contributing/guide) page for details on the process. You can contribute by opening issues on Github, implementing things yourself, and making a pull request. We would also appreciate example models written using Turing. ## Issues and Discussions -Issues related to bugs and feature requests are welcome on the [issues page](https://github.com/TuringLang/Turing.jl/issues), while discussions and questions about statistical applications and theory should place on the [Discussions page](https://github.com/TuringLang/Turing.jl/discussions) or [our channel](https://julialang.slack.com/messages/turing/) (`#turing`) in the Julia Slack chat. If you do not have an invitation to Julia's Slack, you can get one by going [here](https://julialang.org/slack/). - +Issues related to bugs and feature requests are welcome on the [issues page](https://github.com/TuringLang/Turing.jl/issues), while discussions and questions about statistical applications and theory should place on the [Discussions page](https://github.com/TuringLang/Turing.jl/discussions) or [our channel](https://julialang.slack.com/messages/turing/) (`#turing`) in the Julia Slack chat. If you do not have an invitation to Julia's Slack, you can get one by going [here](https://julialang.org/slack/). diff --git a/benchmarks/benchmarks_suite.jl b/benchmarks/benchmarks_suite.jl index ef00117ee..7f8db0980 100644 --- a/benchmarks/benchmarks_suite.jl +++ b/benchmarks/benchmarks_suite.jl @@ -4,30 +4,29 @@ using LinearAlgebra const BenchmarkSuite = BenchmarkTools.BenchmarkGroup() # -# Add models to benchmarks +# Add models to benchmarks # include("models/hlr.jl") include("models/lr.jl") include("models/sv_nuts.jl") -# constrained +# constrained BenchmarkSuite["constrained"] = BenchmarkGroup(["constrained"]) data = [0, 1, 0, 1, 1, 1, 1, 1, 1, 1] - @model function constrained_test(obs) - p ~ Beta(2,2) - for i = 1:length(obs) + p ~ Beta(2, 2) + for i in 1:length(obs) obs[i] ~ Bernoulli(p) end - p + return p end - -BenchmarkSuite["constrained"]["constrained"] = @benchmarkable sample($(constrained_test(data)), $(HMC(0.01, 2)), 2000) - +BenchmarkSuite["constrained"]["constrained"] = @benchmarkable sample( + $(constrained_test(data)), $(HMC(0.01, 2)), 2000 +) ## gdemo @@ -41,9 +40,9 @@ BenchmarkSuite["gdemo"] = BenchmarkGroup(["gdemo"]) return s², m end -BenchmarkSuite["gdemo"]["hmc"] = @benchmarkable sample($(gdemo(1.5, 2.0)), $(HMC(0.01, 2)), 2000) - - +BenchmarkSuite["gdemo"]["hmc"] = @benchmarkable sample( + $(gdemo(1.5, 2.0)), $(HMC(0.01, 2)), 2000 +) ## MvNormal @@ -52,8 +51,8 @@ BenchmarkSuite["mnormal"] = BenchmarkGroup(["mnormal"]) # Define the target distribution and its gradient @model function target(dim) - Θ = Vector{Real}(undef, dim) - θ ~ MvNormal(zeros(dim), I) + Θ = Vector{Real}(undef, dim) + return θ ~ MvNormal(zeros(dim), I) end # Sampling parameter settings @@ -61,24 +60,29 @@ dim = 10 n_samples = 100_000 n_adapts = 2_000 -BenchmarkSuite["mnormal"]["hmc"] = @benchmarkable sample($(target(dim)), $(HMC(0.1, 5)), $n_samples) +BenchmarkSuite["mnormal"]["hmc"] = @benchmarkable sample( + $(target(dim)), $(HMC(0.1, 5)), $n_samples +) ## MvNormal: ForwardDiff vs ReverseDiff @model function mdemo(d, N) Θ = Vector(undef, N) - for n=1:N - Θ[n] ~ d - end + for n in 1:N + Θ[n] ~ d + end end dim2 = 250 -A = rand(Wishart(dim2, Matrix{Float64}(I, dim2, dim2))); -d = MvNormal(zeros(dim2), A) +A = rand(Wishart(dim2, Matrix{Float64}(I, dim2, dim2))); +d = MvNormal(zeros(dim2), A) # ForwardDiff 
-BenchmarkSuite["mnormal"]["forwarddiff"] = @benchmarkable sample($(mdemo(d, 1)), $(HMC(0.1, 5; adtype=AutoForwardDiff(; chunksize=0))), 5000) - +BenchmarkSuite["mnormal"]["forwarddiff"] = @benchmarkable sample( + $(mdemo(d, 1)), $(HMC(0.1, 5; adtype=AutoForwardDiff(; chunksize=0))), 5000 +) # ReverseDiff -BenchmarkSuite["mnormal"]["reversediff"] = @benchmarkable sample($(mdemo(d, 1)), $(HMC(0.1, 5; adtype=AutoReverseDiff(false))), 5000) +BenchmarkSuite["mnormal"]["reversediff"] = @benchmarkable sample( + $(mdemo(d, 1)), $(HMC(0.1, 5; adtype=AutoReverseDiff(false))), 5000 +) diff --git a/benchmarks/models/hlr.jl b/benchmarks/models/hlr.jl index 36836263f..b40c3b2c2 100644 --- a/benchmarks/models/hlr.jl +++ b/benchmarks/models/hlr.jl @@ -10,15 +10,14 @@ end x, y = readlrdata() @model function hlr_nuts(x, y, θ) + N, D = size(x) - N,D = size(x) - - σ² ~ Exponential(θ) + σ² ~ Exponential(θ) α ~ Normal(0, sqrt(σ²)) β ~ MvNormal(zeros(D), σ² * I) - for n = 1:N - y[n] ~ BinomialLogit(1, dot(x[n,:], β) + α) + for n in 1:N + y[n] ~ BinomialLogit(1, dot(x[n, :], β) + α) end end @@ -26,4 +25,6 @@ end n_samples = 10_000 # Sampling -BenchmarkSuite["nuts"]["hrl"] = @benchmarkable sample(hlr_nuts(x, y, 1/0.1), NUTS(0.65), n_samples) +BenchmarkSuite["nuts"]["hrl"] = @benchmarkable sample( + hlr_nuts(x, y, 1 / 0.1), NUTS(0.65), n_samples +) diff --git a/benchmarks/models/lr.jl b/benchmarks/models/lr.jl index c98963e79..3714a93d0 100644 --- a/benchmarks/models/lr.jl +++ b/benchmarks/models/lr.jl @@ -10,14 +10,13 @@ end X, Y = readlrdata() @model function lr_nuts(x, y, σ) - - N,D = size(x) + N, D = size(x) α ~ Normal(0, σ) β ~ MvNormal(zeros(D), σ^2 * I) - for n = 1:N - y[n] ~ BinomialLogit(1, dot(x[n,:], β) + α) + for n in 1:N + y[n] ~ BinomialLogit(1, dot(x[n, :], β) + α) end end @@ -26,5 +25,6 @@ n_samples = 1_000 n_adapts = 1_000 # Sampling -BenchmarkSuite["nuts"]["lr"] = @benchmarkable sample(lr_nuts(X, Y, 100), - NUTS(0.65), n_samples) +BenchmarkSuite["nuts"]["lr"] = @benchmarkable sample( + lr_nuts(X, Y, 100), NUTS(0.65), n_samples +) diff --git a/benchmarks/models/lr_helper.jl b/benchmarks/models/lr_helper.jl index 20f6bf3f1..83de134c8 100644 --- a/benchmarks/models/lr_helper.jl +++ b/benchmarks/models/lr_helper.jl @@ -1,10 +1,9 @@ using DelimitedFiles function readlrdata() - fname = joinpath(dirname(@__FILE__), "lr_nuts.data") z = readdlm(fname) - x = z[:,1:end-1] - y = z[:,end] .- 1 + x = z[:, 1:(end - 1)] + y = z[:, end] .- 1 return x, y end diff --git a/benchmarks/models/lr_nuts.data b/benchmarks/models/lr_nuts.data index 723307e19..6251f5932 100644 --- a/benchmarks/models/lr_nuts.data +++ b/benchmarks/models/lr_nuts.data @@ -1,1000 +1,1000 @@ - 1 6 4 12 5 5 3 4 1 67 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 48 2 60 1 3 2 2 1 22 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 4 12 4 21 1 4 3 3 1 49 3 1 2 1 1 0 0 1 0 0 1 0 1 0 1 - 1 42 2 79 1 4 3 4 2 45 3 1 2 1 1 0 0 0 0 0 0 0 0 1 1 - 1 24 3 49 1 3 3 4 4 53 3 2 2 1 1 1 0 1 0 0 0 0 0 1 2 - 4 36 2 91 5 3 3 4 4 35 3 1 2 2 1 0 0 1 0 0 0 0 1 0 1 - 4 24 2 28 3 5 3 4 2 53 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 36 2 69 1 3 3 2 3 35 3 1 1 2 1 0 1 1 0 1 0 0 0 0 1 - 4 12 2 31 4 4 1 4 1 61 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 - 2 30 4 52 1 1 4 2 3 28 3 2 1 1 1 1 0 1 0 0 1 0 0 0 2 - 2 12 2 13 1 2 2 1 3 25 3 1 1 1 1 1 0 1 0 1 0 0 0 1 2 - 1 48 2 43 1 2 2 4 2 24 3 1 1 1 1 0 0 1 0 1 0 0 0 1 2 - 2 12 2 16 1 3 2 1 3 22 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 1 24 4 12 1 5 3 4 3 60 3 2 1 1 1 1 0 1 0 0 1 0 1 0 2 - 1 15 2 14 1 3 2 4 3 28 3 1 1 1 1 1 0 1 0 1 0 0 0 1 1 - 1 24 2 13 2 3 2 2 3 32 3 1 1 1 1 0 0 1 0 0 
1 0 1 0 2 - 4 24 4 24 5 5 3 4 2 53 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 1 30 0 81 5 2 3 3 3 25 1 3 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 24 2 126 1 5 2 2 4 44 3 1 1 2 1 0 1 1 0 0 0 0 0 0 2 - 4 24 2 34 3 5 3 2 3 31 3 1 2 2 1 0 0 1 0 0 1 0 0 1 1 - 4 9 4 21 1 3 3 4 3 48 3 3 1 2 1 1 0 1 0 0 1 0 0 1 1 - 1 6 2 26 3 3 3 3 1 44 3 1 2 1 1 0 0 1 0 1 0 0 0 1 1 - 1 10 4 22 1 2 3 3 1 48 3 2 2 1 2 1 0 1 0 1 0 0 1 0 1 - 2 12 4 18 2 2 3 4 2 44 3 1 1 1 1 0 1 1 0 0 1 0 0 1 1 - 4 10 4 21 5 3 4 1 3 26 3 2 1 1 2 0 0 1 0 0 1 0 0 1 1 - 1 6 2 14 1 3 3 2 1 36 1 1 1 2 1 0 0 1 0 0 1 0 1 0 1 - 4 6 0 4 1 5 4 4 3 39 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 - 3 12 1 4 4 3 2 3 1 42 3 2 1 1 1 0 0 1 0 1 0 0 0 1 1 - 2 7 2 24 1 3 3 2 1 34 3 1 1 1 1 0 0 0 0 0 1 0 0 1 1 - 1 60 3 68 1 5 3 4 4 63 3 2 1 2 1 0 0 1 0 0 1 0 0 1 2 - 2 18 2 19 4 2 4 3 1 36 1 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 1 24 2 40 1 3 3 2 3 27 2 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 18 2 59 2 3 3 2 3 30 3 2 1 2 1 1 0 1 0 0 1 0 0 1 1 - 4 12 4 13 5 5 3 4 4 57 3 1 1 1 1 0 0 1 0 1 0 0 1 0 1 - 3 12 2 15 1 2 2 1 2 33 1 1 1 2 1 0 0 1 0 0 1 0 0 0 1 - 2 45 4 47 1 2 3 2 2 25 3 2 1 1 1 0 0 1 0 0 1 0 1 0 2 - 4 48 4 61 1 3 3 3 4 31 1 1 1 2 1 0 0 1 0 0 0 0 0 1 1 - 3 18 2 21 1 3 3 2 1 37 2 1 1 1 1 0 0 0 1 0 1 0 0 1 2 - 3 10 2 12 1 3 3 2 3 37 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 9 2 5 1 3 3 3 1 24 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 30 2 23 3 5 3 2 3 30 1 1 1 1 1 0 0 1 0 0 1 0 0 0 1 - 2 12 2 12 3 3 1 1 3 26 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 18 3 62 1 3 3 4 1 44 3 1 2 2 1 0 0 1 0 0 1 0 1 0 1 - 1 30 4 62 2 4 4 4 3 24 3 2 1 1 1 0 1 1 0 1 0 0 0 1 1 - 1 48 4 61 1 5 2 4 4 58 2 2 1 1 1 0 1 1 0 0 0 0 1 0 2 - 4 11 4 14 1 2 2 4 3 35 3 2 1 1 1 1 0 1 0 0 1 0 0 0 1 - 4 36 2 23 3 5 3 4 3 39 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 1 6 2 14 3 1 2 2 2 23 3 1 1 2 1 0 1 1 0 1 0 1 0 0 1 - 4 11 4 72 1 3 3 4 2 39 3 2 1 1 1 1 0 1 0 0 1 0 1 0 1 - 4 12 2 21 2 3 2 2 1 28 3 1 1 1 1 0 0 0 1 0 1 0 0 1 1 - 2 24 3 23 5 2 3 2 2 29 1 1 1 1 1 0 0 1 0 0 1 0 1 0 1 - 2 27 3 60 1 5 3 2 3 30 3 2 1 2 1 0 1 1 0 0 1 0 0 0 1 - 4 12 2 13 1 3 3 2 3 25 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 18 2 34 5 3 3 1 2 31 3 1 1 2 1 0 1 1 0 0 1 0 0 1 1 - 2 36 3 22 1 5 3 4 4 57 1 2 1 2 1 1 0 1 0 0 0 0 0 1 2 - 4 6 1 8 5 3 3 2 1 26 2 1 2 1 1 1 0 0 0 0 1 0 1 0 1 - 2 12 2 65 5 1 3 1 4 52 3 1 1 2 1 0 0 1 0 0 1 0 0 0 2 - 4 36 4 96 1 3 2 2 3 31 2 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 3 18 2 20 1 5 2 2 3 23 3 1 1 1 1 1 0 1 0 0 1 0 0 0 1 - 1 36 4 62 1 2 2 4 4 23 3 2 1 2 1 0 0 0 1 1 0 0 1 0 2 - 2 9 2 14 1 3 4 1 1 27 1 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 15 4 15 5 5 3 4 1 50 3 2 1 2 1 0 0 0 0 0 1 0 0 1 1 - 2 36 0 20 1 5 3 4 4 61 3 1 1 2 1 0 0 1 0 0 0 0 0 0 2 - 2 48 0 144 1 3 3 2 3 25 3 1 1 2 1 0 0 1 0 0 1 0 0 1 2 - 4 24 2 32 1 2 2 4 2 26 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 4 27 2 52 5 5 3 4 2 48 3 4 2 2 1 0 0 1 0 0 1 0 0 1 1 - 4 12 2 22 1 2 2 2 3 29 1 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 12 2 10 4 3 4 1 1 22 3 1 1 1 1 1 0 1 0 0 1 0 0 1 1 - 4 36 2 18 1 3 3 4 4 37 2 1 1 2 1 0 0 1 0 0 0 0 0 1 2 - 4 36 2 24 5 3 2 4 3 25 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 36 2 81 1 3 2 2 2 30 1 1 1 1 1 0 1 1 0 0 1 0 0 1 1 - 4 7 4 7 5 5 3 2 2 46 3 2 1 2 1 0 0 1 0 1 0 0 1 0 1 - 1 8 4 12 1 5 3 4 4 51 1 2 2 2 1 0 0 1 0 0 0 0 0 0 1 - 2 42 4 60 1 4 2 1 1 41 1 2 1 1 1 0 0 1 0 0 1 0 1 0 1 - 1 36 2 20 5 5 3 4 4 40 3 1 1 2 1 0 0 1 0 0 1 0 0 0 2 - 1 12 4 15 1 5 3 4 4 66 3 2 1 1 1 0 1 1 0 0 0 0 0 0 1 - 1 42 2 40 1 2 3 3 3 34 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 2 11 3 48 1 4 3 4 2 51 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 54 0 94 5 3 3 2 2 39 3 1 2 1 1 0 1 1 0 0 1 0 1 0 1 - 2 30 2 38 1 2 4 1 2 22 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 24 2 59 5 2 2 1 3 44 3 2 1 2 1 0 0 
1 0 0 1 0 0 1 2 - 4 15 2 12 3 5 3 3 2 47 2 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 4 18 2 16 2 3 2 4 2 24 3 1 1 1 1 0 0 1 0 1 0 0 1 0 1 - 1 24 2 18 1 5 2 4 1 58 3 1 1 2 1 0 0 0 0 0 1 0 1 0 1 - 1 10 2 23 1 5 3 4 1 52 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 - 4 12 4 14 1 3 2 2 1 29 3 2 1 2 1 0 0 0 0 0 1 0 0 0 1 - 2 18 4 13 1 2 2 1 2 27 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 36 2 126 2 3 3 4 4 47 3 1 2 2 1 0 0 1 0 0 0 0 0 1 2 - 1 18 2 22 2 4 3 3 3 30 3 1 2 2 1 1 0 1 0 0 1 0 0 0 1 - 1 12 0 11 1 4 3 3 1 28 3 2 1 1 1 0 0 1 0 0 1 0 0 1 2 - 4 12 4 6 1 5 3 4 1 56 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 1 12 4 14 1 5 3 3 1 54 3 1 1 1 1 0 1 1 0 0 1 0 0 1 1 - 4 12 4 8 5 5 2 3 2 33 1 1 2 1 1 0 0 1 0 0 1 0 1 0 2 - 3 24 4 36 5 5 3 4 4 20 3 2 1 1 1 0 0 0 1 1 0 0 0 1 1 - 2 12 2 13 4 5 3 4 1 54 3 1 1 2 1 1 0 1 0 0 1 0 0 1 1 - 2 54 0 159 1 2 3 4 4 58 3 1 1 2 1 0 0 1 0 1 0 0 0 1 2 - 4 12 4 20 5 4 2 2 3 61 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 18 2 26 2 3 3 4 3 34 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 36 4 23 1 5 3 4 1 36 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 20 3 71 5 4 3 4 2 36 1 2 2 2 1 0 1 1 0 1 0 0 0 0 1 - 4 24 2 15 2 5 4 4 1 41 3 1 1 1 1 1 0 1 0 1 0 0 1 0 1 - 2 36 2 23 1 4 3 4 3 24 3 1 1 1 1 0 0 1 0 1 0 0 0 1 1 - 4 6 3 9 1 3 2 2 1 24 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 9 4 19 1 4 3 3 3 35 3 1 1 2 1 0 0 1 0 1 0 0 0 1 1 - 4 12 2 24 5 2 4 4 3 26 3 1 1 2 1 0 1 1 0 1 0 0 0 1 1 - 2 24 4 119 1 3 3 3 3 39 3 2 2 2 1 0 0 0 1 0 1 0 0 0 2 - 4 18 1 65 1 5 3 4 4 39 1 2 2 2 1 1 0 1 0 0 1 0 0 0 2 - 2 12 2 61 1 4 3 2 3 32 3 1 1 1 1 1 0 1 0 0 1 0 0 1 1 - 1 24 2 77 5 2 2 2 2 30 3 1 1 2 2 0 0 1 0 0 1 0 0 1 1 - 2 14 2 14 3 5 4 2 1 35 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 6 3 14 2 5 1 2 3 31 1 2 2 1 1 0 0 1 0 0 1 0 0 1 1 - 3 15 2 4 1 2 2 4 2 23 3 1 1 2 1 0 0 1 0 1 0 0 0 1 1 - 2 18 2 63 1 4 3 3 1 28 3 1 1 1 1 1 0 1 0 1 0 0 1 0 1 - 4 36 4 79 1 3 2 2 1 25 2 2 1 2 1 1 0 1 0 0 1 0 0 1 2 - 1 12 2 17 3 5 4 1 1 35 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 48 4 36 5 5 3 1 1 47 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 1 42 2 72 5 4 2 3 3 30 3 1 1 2 1 0 0 1 0 0 1 0 0 0 2 - 1 10 4 21 5 2 2 3 1 27 3 2 1 1 2 0 0 0 1 1 0 0 0 1 1 - 1 33 4 43 3 3 2 4 3 23 3 2 1 1 1 0 0 1 0 0 1 0 0 1 2 - 2 12 4 24 3 4 1 3 3 36 3 1 1 2 1 1 0 1 0 0 1 0 0 0 1 - 1 21 2 18 1 3 2 2 1 25 3 2 1 2 1 0 0 1 0 0 1 0 0 1 2 - 4 24 4 39 1 5 2 2 3 41 3 2 1 2 1 0 1 1 0 1 0 0 0 0 1 - 4 12 2 18 1 3 3 2 1 24 3 1 1 1 1 0 0 1 0 1 0 0 1 0 1 - 3 10 4 8 1 5 3 4 4 63 3 2 1 2 1 1 0 1 0 0 0 0 0 1 1 - 2 18 2 19 5 2 2 3 1 27 3 1 1 1 1 0 0 1 0 1 0 0 0 1 2 - 1 12 4 21 1 3 3 2 2 30 3 2 1 1 1 1 0 1 0 0 1 0 0 1 1 - 1 12 2 7 1 3 4 2 1 40 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 - 2 12 2 6 1 3 3 2 3 30 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 2 12 4 19 1 1 3 2 3 34 3 2 1 2 1 0 1 1 0 0 1 0 0 0 1 - 1 12 4 35 1 3 2 2 1 29 3 2 1 1 1 1 0 0 1 0 1 0 0 1 2 - 2 48 2 85 5 4 2 2 3 24 3 1 1 1 1 1 0 1 0 0 1 0 0 1 1 - 1 36 3 69 1 3 3 3 2 29 2 1 1 2 1 0 0 1 0 0 1 0 0 1 2 - 4 15 2 27 1 2 3 3 2 27 1 2 1 1 1 0 0 1 0 0 1 0 1 0 1 - 4 18 2 20 1 3 3 4 4 47 1 2 1 1 1 0 0 1 0 0 0 0 0 1 1 - 4 60 2 101 2 4 2 4 1 21 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 4 12 4 12 5 5 2 2 1 38 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 - 4 27 3 86 4 3 3 2 3 27 3 2 1 1 1 0 1 1 0 0 1 0 0 1 1 - 2 12 2 8 3 3 3 3 1 66 3 1 1 1 1 0 0 1 0 0 1 0 1 0 2 - 2 15 4 27 5 4 3 2 1 35 1 3 1 2 1 0 0 0 0 0 1 0 0 1 1 - 3 12 2 19 1 3 2 2 3 44 3 1 1 2 1 0 0 1 0 1 0 0 1 0 1 - 3 6 2 7 4 2 4 2 1 27 3 1 1 1 2 1 0 1 0 0 1 1 0 0 1 - 2 36 2 48 1 2 2 1 4 30 3 1 1 2 1 0 0 1 0 0 1 0 0 0 1 - 1 27 2 34 1 3 3 2 3 27 3 1 1 1 1 0 0 1 0 0 1 0 0 0 1 - 1 18 2 25 1 3 3 2 3 22 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 4 21 4 23 1 2 2 4 2 23 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 48 1 36 2 4 3 2 3 30 3 
1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 1 6 4 9 1 5 2 4 4 39 3 2 1 2 1 1 0 1 0 0 1 0 0 1 1 - 4 12 4 7 2 4 2 3 3 51 3 2 1 2 1 1 0 1 0 0 1 0 0 1 1 - 1 36 4 54 1 3 3 2 2 28 3 2 1 1 1 0 0 0 0 0 1 0 0 1 1 - 4 18 4 16 4 5 3 4 3 46 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 6 2 13 2 5 3 4 4 42 1 1 2 2 1 0 0 1 0 0 0 0 0 1 1 - 4 10 2 19 1 3 3 4 2 38 3 1 1 2 2 0 0 1 0 0 1 0 0 1 1 - 3 36 2 58 1 3 3 1 3 24 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 24 4 78 4 5 2 4 4 29 3 1 1 1 1 0 1 1 0 1 0 0 0 1 1 - 2 24 3 70 2 4 3 4 3 36 3 1 1 2 1 0 0 1 0 1 0 0 0 0 1 - 1 12 2 13 1 3 2 4 3 20 3 1 1 1 1 0 0 1 0 1 0 0 0 1 2 - 1 9 4 13 2 5 3 4 1 48 3 2 2 1 2 0 0 0 0 0 1 0 0 1 1 - 1 12 1 3 1 5 4 1 3 45 1 1 1 1 1 0 0 1 0 0 1 0 1 0 1 - 2 24 2 35 2 4 3 3 3 38 1 2 1 2 1 1 0 1 0 0 1 0 0 1 1 - 4 6 4 19 5 3 3 2 1 34 3 2 2 1 1 0 0 1 0 0 1 0 1 0 1 - 4 24 4 29 2 5 3 4 1 36 3 1 2 2 1 0 0 1 0 0 1 0 0 1 1 - 4 18 4 11 1 2 2 1 2 30 3 2 1 1 1 1 0 1 0 0 1 0 0 1 1 - 4 15 2 13 3 4 3 3 2 36 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 10 2 73 1 1 3 4 4 70 1 1 1 2 1 1 0 1 0 0 0 0 0 0 1 - 4 36 2 9 3 5 3 4 2 36 3 1 1 1 1 1 0 1 0 0 1 0 0 1 1 - 4 6 2 30 3 3 3 2 3 32 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 1 18 2 11 1 1 2 2 3 33 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 2 11 2 16 4 2 2 1 1 20 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 24 2 40 1 4 2 4 2 25 3 1 1 2 1 0 0 1 0 1 0 0 0 1 1 - 2 24 4 19 1 5 1 4 1 31 3 2 1 2 1 0 0 1 0 0 1 0 0 1 2 - 1 15 0 10 1 5 3 3 3 33 3 2 2 1 1 1 0 1 0 1 0 0 0 1 2 - 4 12 2 8 1 3 2 1 1 26 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 24 3 21 1 1 2 2 2 34 3 1 1 2 1 0 0 1 0 0 1 0 0 0 2 - 2 8 2 14 1 3 3 2 1 33 3 1 1 1 2 0 0 0 0 0 1 0 0 1 1 - 1 21 3 34 1 2 3 1 2 26 3 2 1 1 1 0 0 1 0 0 1 0 0 1 2 - 4 30 1 75 5 1 2 1 1 53 1 1 1 2 1 0 1 1 0 0 1 0 0 0 2 - 1 12 2 26 1 3 1 1 3 42 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 1 6 4 3 3 5 3 4 3 52 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 12 2 20 1 4 3 2 3 31 3 2 2 2 1 0 0 1 0 1 0 0 0 0 1 - 1 21 4 6 1 5 3 4 1 65 3 2 1 1 1 1 0 1 0 0 1 0 0 1 1 - 4 36 3 96 1 2 1 1 3 28 3 2 1 1 1 0 0 1 0 0 1 0 0 1 2 - 2 36 3 45 1 3 1 2 1 30 2 2 1 2 1 0 0 1 0 0 1 0 0 0 2 - 1 21 1 16 5 3 3 2 2 40 3 2 2 1 1 1 0 1 0 0 1 0 1 0 2 - 4 24 4 38 4 3 3 4 1 50 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 18 4 9 1 5 3 4 3 36 1 1 2 2 1 1 0 1 0 0 1 0 0 1 2 - 4 15 4 14 1 3 3 2 2 31 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 9 1 51 1 5 2 4 4 74 1 1 2 2 1 0 1 1 0 0 0 0 0 0 2 - 2 16 4 12 1 1 3 3 3 68 3 3 1 2 1 1 0 1 0 0 0 1 0 0 1 - 1 12 2 7 2 4 4 1 2 20 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 2 18 0 32 1 3 2 4 3 33 1 2 1 2 1 0 0 1 0 0 1 0 0 1 1 - 4 24 2 46 4 3 3 3 2 54 3 3 1 2 1 0 0 1 0 0 1 0 0 0 2 - 2 48 0 38 2 4 3 4 4 34 3 1 2 1 1 0 0 1 0 0 0 0 1 0 2 - 2 27 2 39 1 3 3 2 3 36 3 1 2 2 1 0 0 1 0 0 1 0 0 1 2 - 4 6 2 21 1 4 4 2 1 29 3 1 1 1 1 0 0 1 0 1 0 0 0 1 1 - 2 45 2 30 2 3 3 4 2 21 3 1 1 1 1 0 0 0 0 1 0 0 0 1 2 - 2 9 4 15 1 5 2 3 3 34 3 2 1 2 1 0 0 1 0 0 1 0 0 0 2 - 4 6 4 14 1 3 2 1 3 28 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 12 2 10 2 2 2 4 3 27 1 4 1 1 1 0 0 1 0 1 0 0 0 1 2 - 2 24 2 28 5 5 3 4 4 36 1 1 1 2 1 0 1 1 0 0 0 0 0 1 1 - 2 18 3 43 1 5 1 3 4 40 3 1 1 2 1 0 0 1 0 0 1 0 0 0 2 - 4 9 4 9 3 5 3 2 3 52 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 - 1 12 2 12 1 3 4 3 1 27 3 1 1 1 1 1 0 1 0 0 1 0 1 0 1 - 4 27 3 51 1 4 3 4 3 26 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 1 12 2 9 1 4 4 4 2 21 3 1 1 1 1 0 0 1 0 1 0 0 0 1 2 - 4 12 4 15 1 5 3 1 1 38 3 2 2 1 1 1 0 1 0 0 1 0 1 0 1 - 1 30 4 106 1 5 3 4 4 38 3 3 2 2 1 0 1 1 0 0 0 0 0 0 1 - 4 12 4 19 1 5 3 4 1 43 3 3 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 12 4 14 1 4 3 3 2 26 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 1 24 2 66 1 3 4 2 3 21 2 1 1 1 1 0 0 1 0 0 1 0 1 0 1 - 4 12 2 14 4 4 3 2 2 55 3 1 1 1 2 0 1 1 0 0 1 0 0 1 1 - 4 9 4 31 5 3 3 2 1 33 3 2 
2 1 1 0 0 1 0 0 1 0 0 1 1 - 4 36 2 38 5 5 2 4 1 45 3 1 1 2 1 0 0 1 0 0 1 0 1 0 1 - 1 27 0 53 1 1 3 4 2 50 2 2 1 2 1 0 0 1 0 0 1 0 0 1 2 - 3 30 3 19 1 5 3 4 1 66 3 1 1 2 1 0 0 1 0 0 1 0 0 0 2 - 4 36 4 33 5 5 3 2 3 51 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 6 4 9 5 4 2 3 2 39 3 2 1 1 1 0 0 1 0 0 1 0 1 0 1 - 1 18 0 31 1 4 3 1 2 31 1 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 3 36 2 39 1 3 3 2 1 23 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 1 24 2 30 1 3 1 2 1 24 3 1 1 1 1 0 0 1 0 1 0 0 1 0 1 - 4 10 2 14 1 3 2 4 3 64 3 1 1 2 1 1 0 1 0 0 1 0 0 1 1 - 2 12 2 6 1 2 4 1 1 26 1 1 1 1 1 0 0 0 0 0 1 0 1 0 1 - 1 12 2 12 5 3 2 4 2 23 1 1 1 2 1 0 0 1 0 1 0 0 0 1 1 - 4 12 2 7 1 3 3 2 1 30 1 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 24 3 30 5 3 3 4 1 32 3 2 2 2 1 0 0 1 0 0 1 0 0 1 1 - 4 15 2 47 1 3 3 2 3 30 3 1 1 2 1 0 1 1 0 0 1 0 0 1 1 - 4 36 0 26 1 3 3 2 3 27 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 48 2 110 4 4 3 2 4 27 1 2 1 2 1 0 0 0 1 0 1 0 0 1 2 - 1 12 2 79 1 5 3 4 4 53 3 1 1 2 1 0 0 1 0 0 0 0 0 0 2 - 4 9 2 15 1 4 3 2 3 22 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 1 24 2 31 1 2 3 1 4 22 1 1 1 1 1 0 0 1 0 0 0 0 0 1 1 - 3 36 2 42 1 3 3 2 3 26 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 4 9 2 25 3 5 3 4 4 51 3 1 1 1 1 1 0 1 0 0 0 0 1 0 1 - 4 12 2 21 2 4 3 1 4 35 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 18 2 9 1 3 4 2 1 25 3 1 1 1 1 0 0 0 0 0 1 0 1 0 1 - 4 4 4 15 1 4 3 1 1 42 3 3 2 1 1 0 0 1 0 0 1 0 1 0 1 - 1 24 2 18 1 1 3 2 3 30 2 1 2 1 1 0 0 1 0 0 1 0 0 0 2 - 2 6 2 146 5 1 3 2 2 23 3 1 1 2 1 1 0 1 0 0 1 1 0 0 2 - 2 21 2 28 2 5 1 2 3 61 1 2 1 1 1 0 0 1 0 1 0 0 1 0 2 - 4 12 4 13 1 3 2 2 2 35 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 1 30 2 25 1 5 3 3 2 39 3 1 2 1 1 0 0 0 0 0 1 0 0 1 1 - 1 24 2 9 5 5 2 2 3 29 1 1 1 1 1 1 0 1 0 0 1 0 0 1 2 - 4 6 2 16 1 4 3 2 2 51 3 1 2 1 1 0 0 1 0 0 1 0 0 1 1 - 1 48 0 46 1 5 3 4 4 24 3 2 2 1 1 0 1 1 0 0 0 0 0 1 2 - 4 12 4 12 1 3 2 2 1 27 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 12 1 34 3 3 2 3 1 35 3 1 2 1 1 0 0 1 0 0 1 0 1 0 1 - 4 24 2 13 1 4 3 1 1 25 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 4 12 4 7 1 5 3 4 1 52 3 3 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 6 0 12 2 3 3 1 4 35 1 1 1 1 2 1 0 1 0 1 0 0 0 1 1 - 3 24 2 19 1 3 3 2 1 26 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 18 2 4 1 1 2 4 1 22 3 1 1 1 1 0 0 0 1 1 0 0 0 1 2 - 1 6 4 7 4 4 2 4 1 39 3 2 1 2 1 1 0 1 0 0 1 0 1 0 1 - 3 12 2 23 1 3 2 2 3 46 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 - 2 30 2 22 1 3 2 2 4 24 1 1 1 1 1 1 0 0 0 0 1 0 0 1 2 - 4 24 3 42 2 3 3 3 2 35 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 9 2 20 5 4 3 1 3 24 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 60 3 74 5 3 3 1 1 27 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 - 4 24 4 27 1 3 3 2 1 35 3 2 1 1 1 0 0 1 0 0 1 0 1 0 1 - 1 12 1 21 1 3 1 1 4 29 3 1 1 1 1 0 0 1 0 0 0 0 0 1 2 - 4 15 2 38 2 2 2 4 3 23 3 1 1 2 1 0 1 1 0 0 1 0 0 1 1 - 4 11 4 12 2 1 2 4 1 57 3 3 1 1 1 0 0 1 0 0 1 0 1 0 1 - 1 12 2 17 1 3 3 2 1 27 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 1 24 2 16 1 5 2 4 3 55 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 1 18 4 53 1 5 3 4 4 36 3 3 1 2 1 1 0 1 0 0 0 0 0 0 1 - 4 12 4 27 1 5 2 4 4 57 1 3 1 1 1 0 0 1 0 0 0 0 1 0 1 - 4 10 4 12 1 5 3 4 1 32 3 2 2 1 2 1 0 1 0 0 1 0 1 0 1 - 2 15 2 8 1 5 3 3 3 37 3 1 2 1 1 0 0 1 0 0 1 0 0 1 2 - 4 36 4 63 5 5 3 4 1 36 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 24 2 15 1 2 2 3 3 38 2 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 1 14 2 90 1 5 1 4 2 45 3 1 1 2 2 1 0 1 0 0 1 0 0 0 2 - 4 24 2 10 5 5 3 2 3 25 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 18 2 27 5 4 3 3 2 32 3 1 1 1 2 1 0 1 0 0 1 0 0 1 1 - 4 12 4 14 3 4 2 4 3 37 3 1 1 2 1 0 0 1 0 1 0 0 0 1 1 - 2 48 1 122 5 1 3 4 4 36 3 1 1 2 1 1 0 0 1 0 0 0 0 0 1 - 2 48 2 31 1 4 3 4 1 28 3 2 1 1 1 0 0 1 0 0 1 0 0 1 2 - 1 30 2 120 1 2 1 1 4 34 3 1 1 2 1 0 0 1 0 0 1 0 1 0 2 - 4 9 2 27 1 3 3 2 1 
32 3 1 2 1 1 0 0 1 0 0 1 0 0 1 1 - 4 18 4 24 1 3 2 2 3 26 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 1 12 2 13 5 5 1 4 2 49 3 1 1 2 1 0 0 1 0 0 1 0 1 0 1 - 4 6 2 46 1 2 2 4 2 32 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 4 24 2 19 2 3 3 4 3 29 3 1 1 2 1 0 0 1 0 1 0 0 0 0 1 - 4 15 4 34 4 5 3 4 4 23 3 2 1 2 1 0 1 1 0 1 0 0 0 1 1 - 4 12 2 16 1 3 3 2 1 50 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 3 18 1 14 5 4 3 4 3 49 1 1 1 1 1 0 0 1 0 0 1 0 1 0 1 - 4 15 4 15 5 5 3 4 2 63 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 24 4 39 2 2 1 2 3 37 3 1 1 2 1 1 0 1 0 0 1 0 0 1 1 - 1 47 2 107 1 2 2 1 1 35 3 1 1 2 1 1 0 1 0 0 1 0 1 0 1 - 1 48 2 48 1 4 3 3 2 26 3 1 2 1 1 0 1 1 0 0 1 0 0 1 1 - 2 48 3 76 2 1 3 4 4 31 3 1 1 2 1 0 0 1 0 0 0 0 0 0 1 - 2 12 2 11 1 3 2 4 1 49 3 2 1 2 1 0 0 0 0 0 1 0 0 1 1 - 1 24 3 10 1 2 4 4 1 48 2 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 4 12 2 11 1 3 4 2 1 26 3 1 1 2 2 0 0 1 0 0 1 0 0 1 1 - 2 36 2 94 1 2 4 4 3 28 3 1 1 2 1 0 1 1 0 1 0 0 0 0 2 - 1 24 4 64 1 5 2 4 4 44 3 2 2 2 1 0 1 1 0 0 0 0 0 0 1 - 3 42 4 48 1 5 3 4 4 56 3 1 1 1 1 0 1 1 0 0 0 0 0 1 1 - 4 48 4 76 5 5 1 2 3 46 1 2 2 1 1 0 0 1 0 0 1 0 0 0 1 - 2 48 2 100 1 2 2 2 3 26 3 1 1 2 1 0 0 1 0 0 1 0 0 1 2 - 4 12 2 47 5 2 2 4 3 20 3 1 1 1 1 0 1 1 0 1 0 0 0 1 1 - 4 10 2 13 5 5 3 2 2 45 3 1 1 1 2 1 0 0 1 0 1 0 1 0 1 - 4 18 2 25 1 3 3 4 1 43 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 - 2 21 4 27 4 4 3 2 3 32 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 - 4 6 2 7 1 1 2 4 1 54 3 1 1 2 1 1 0 1 0 0 1 1 0 0 1 - 2 36 0 38 1 3 2 1 3 42 3 1 1 2 1 0 0 1 0 0 1 0 0 1 2 - 3 24 4 13 5 4 3 2 1 37 1 2 2 1 1 1 0 1 0 0 1 0 1 0 2 - 1 10 4 10 1 4 3 3 2 49 3 2 1 2 1 1 0 0 1 0 1 0 0 1 1 - 4 48 4 101 3 3 3 2 4 44 1 1 1 1 1 1 0 1 0 0 0 0 0 1 2 - 4 6 2 15 4 3 1 2 1 33 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 30 2 48 5 4 2 4 2 24 2 1 1 1 1 0 1 1 0 1 0 0 1 0 1 - 1 12 2 7 2 2 4 3 4 33 3 1 1 2 1 0 0 1 0 0 1 0 1 0 2 - 2 8 2 12 1 3 2 4 1 24 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 2 9 2 3 1 3 4 4 1 22 3 1 1 1 1 1 0 1 0 1 0 0 1 0 1 - 2 48 2 54 5 1 3 4 4 40 1 1 1 2 1 0 0 1 0 0 0 1 0 0 1 - 4 24 2 55 2 3 3 1 3 25 2 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 3 24 2 37 1 2 2 4 3 26 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 12 2 7 1 4 4 3 3 25 1 1 1 1 1 1 0 1 0 0 1 0 1 0 2 - 3 4 2 15 5 2 3 2 1 29 3 1 2 1 2 1 0 1 0 0 1 0 1 0 1 - 1 36 1 27 1 5 3 4 3 31 1 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 1 12 2 7 1 3 3 3 2 38 3 1 2 1 1 0 0 0 0 0 1 0 1 0 1 - 2 24 2 44 5 3 2 4 2 48 3 1 1 2 1 0 0 1 0 0 1 0 1 0 1 - 4 12 4 7 1 3 3 2 3 32 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 - 1 15 3 36 1 5 2 4 2 27 3 2 1 1 1 0 0 1 0 0 1 0 1 0 1 - 2 30 4 42 1 1 4 2 3 28 3 2 1 1 1 1 0 1 0 0 1 0 0 0 2 - 1 24 2 19 1 2 1 3 2 32 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 - 1 24 2 29 1 4 3 1 4 34 3 1 1 2 1 0 1 1 0 0 0 0 0 0 1 - 1 18 2 27 4 3 3 2 3 28 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 4 18 4 10 1 3 2 3 1 36 3 2 1 1 1 1 0 1 0 0 1 0 0 1 1 - 1 8 4 34 1 4 3 4 1 39 3 2 1 1 2 1 0 1 0 0 1 0 1 0 1 - 4 12 4 58 5 5 3 4 2 49 3 1 1 2 1 0 0 1 0 1 0 0 0 1 1 - 4 24 2 15 4 4 2 3 3 34 3 1 2 2 1 1 0 1 0 0 1 0 0 1 1 - 3 36 2 45 1 5 3 2 3 31 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 6 2 11 1 5 3 4 3 28 3 1 2 1 1 0 0 1 0 0 1 0 0 1 1 - 1 24 4 66 1 1 3 4 4 75 3 2 1 2 1 0 1 1 0 0 0 0 0 0 1 - 4 18 4 19 2 3 2 2 1 30 3 2 1 1 1 0 0 1 0 0 1 0 0 1 2 - 2 60 2 74 2 2 2 2 2 24 3 1 1 1 1 1 0 1 0 0 1 0 0 0 2 - 4 48 4 116 2 3 2 4 3 24 1 2 1 1 1 0 1 1 0 1 0 0 1 0 2 - 1 24 0 41 1 5 3 4 4 23 1 2 2 1 1 0 0 1 0 1 0 0 0 1 2 - 1 6 4 34 1 3 1 4 1 44 3 1 1 2 1 0 0 1 0 1 0 0 0 0 2 - 2 13 2 21 1 2 2 4 2 23 3 1 1 1 1 0 0 0 0 0 1 0 1 0 1 - 1 15 2 13 5 3 2 2 3 24 3 1 1 1 1 0 0 1 0 1 0 0 0 1 2 - 1 24 2 42 1 3 3 4 2 28 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 - 2 10 2 15 1 3 1 2 3 31 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 - 2 24 4 57 
[data-file hunk: several hundred records of 25 space-separated integer values (24 attributes plus a 1/2 label) are removed (`-`) and the dataset is re-added in full (`+`), one record per line in the original file]
0 0 0 0 1 + 1 15 2 8 1 3 2 4 2 22 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 + 2 9 2 11 1 5 3 4 3 27 3 2 1 1 1 0 0 1 0 0 1 0 1 0 1 + 4 15 4 28 1 4 2 3 3 24 1 2 1 1 1 0 0 0 1 0 1 0 0 1 1 + 2 12 2 29 1 4 2 1 1 27 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 24 4 19 5 3 2 2 3 33 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 + 2 36 4 28 1 2 1 4 3 27 3 2 1 1 1 1 0 1 0 0 1 0 0 1 2 + 4 24 2 9 1 2 4 3 3 27 3 2 1 1 1 0 0 1 0 0 1 0 1 0 1 + 2 18 4 11 1 5 3 3 1 30 1 2 1 1 1 1 0 0 0 0 1 0 0 1 2 + 2 12 4 31 1 2 3 3 1 49 1 2 2 1 1 1 0 1 0 0 1 0 1 0 1 + 4 9 2 14 1 3 2 2 1 26 3 1 1 1 1 0 0 1 0 1 0 0 0 1 1 + 2 36 2 24 1 2 3 1 4 33 3 1 1 1 1 0 0 1 0 1 0 0 1 0 2 + 4 12 2 21 5 5 2 4 4 52 3 1 1 2 1 1 0 1 0 0 0 0 0 0 1 + 1 18 2 20 1 3 2 4 1 20 1 1 1 1 1 0 0 1 0 1 0 0 0 1 2 + 1 9 4 28 1 3 3 2 1 36 3 2 2 1 1 1 0 1 0 1 0 0 0 1 1 + 1 12 2 13 1 3 3 1 2 21 3 1 1 1 1 0 0 0 0 0 1 0 1 0 1 + 1 18 2 12 1 3 4 3 1 47 3 1 1 2 1 0 0 1 0 0 1 0 1 0 2 + 1 12 4 22 1 5 3 3 2 60 3 2 1 1 1 0 0 1 0 0 1 0 0 1 2 + 1 12 4 4 1 4 2 3 1 58 3 4 1 2 1 0 0 1 0 0 1 0 1 0 1 + 2 24 3 20 5 3 2 4 3 42 3 2 1 2 1 1 0 1 0 1 0 0 0 1 1 + 4 21 2 16 4 5 2 4 1 36 1 1 1 1 1 0 0 1 0 0 1 0 1 0 1 + 2 24 2 27 1 3 2 4 2 20 3 1 1 2 1 1 0 1 0 1 0 0 1 0 2 + 1 24 1 14 5 5 3 3 3 40 2 1 1 2 1 0 0 1 0 0 1 0 0 0 2 + 2 6 1 9 2 2 2 1 2 32 2 1 1 1 1 1 0 1 0 0 1 0 1 0 2 + 1 24 2 14 1 4 2 4 3 23 3 2 1 1 1 1 0 1 0 1 0 0 0 1 2 + 2 24 0 42 1 3 3 4 1 36 3 3 1 2 1 0 0 1 0 0 1 0 1 0 2 + 4 18 4 28 1 4 3 2 2 31 1 2 1 1 1 1 0 1 0 0 1 0 0 1 2 + 4 24 3 39 1 3 3 2 4 32 3 1 1 1 1 0 0 1 0 0 0 0 0 1 1 + 2 7 2 23 1 2 2 1 1 45 3 1 1 1 1 0 0 0 0 0 1 0 0 1 1 + 2 9 2 9 1 3 2 1 2 30 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 2 24 1 18 1 4 2 4 4 34 1 1 1 1 1 0 0 1 0 0 0 0 1 0 2 + 4 36 2 33 1 3 2 2 3 28 3 1 1 2 1 0 0 1 0 0 1 0 0 0 2 + 3 10 2 13 1 2 2 2 2 23 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 24 1 28 3 3 3 4 1 22 2 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 4 24 4 45 1 3 3 2 1 74 3 1 1 2 1 0 0 1 0 0 1 0 0 0 1 + 2 36 2 27 2 3 2 4 4 50 3 1 1 1 1 0 0 0 1 0 0 0 0 1 2 + 4 18 2 21 1 2 3 1 1 33 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 15 2 13 5 5 3 4 4 45 1 1 2 1 1 0 1 1 0 0 0 0 0 1 1 + 1 12 2 7 2 1 2 3 2 22 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 3 10 2 12 2 5 2 4 4 48 3 1 2 1 1 1 0 1 0 0 0 0 1 0 2 + 1 21 2 34 4 2 2 2 3 29 1 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 24 1 36 1 3 2 4 3 22 1 1 1 1 2 0 1 0 0 1 0 0 0 1 1 + 4 18 3 18 1 4 2 1 1 22 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 2 48 0 122 5 3 3 2 3 48 1 1 1 2 1 0 0 1 0 0 1 0 0 0 1 + 2 60 3 92 5 3 3 2 4 27 3 1 1 1 1 0 0 1 0 0 0 0 0 0 1 + 1 6 4 37 1 3 3 3 1 37 3 3 2 1 1 1 0 1 0 1 0 0 0 1 1 + 2 30 2 34 2 3 2 4 3 21 3 1 1 1 1 0 0 0 1 1 0 0 0 1 2 + 4 12 2 6 1 3 1 2 1 49 3 1 1 1 1 1 0 1 0 0 1 0 1 0 1 + 2 21 4 37 1 4 3 3 2 27 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 18 4 15 1 3 3 2 2 32 1 2 1 1 1 1 0 1 0 0 1 0 0 1 2 + 4 48 2 39 5 3 1 2 1 38 1 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 1 12 2 19 1 2 2 1 3 22 3 1 1 1 1 0 0 1 0 1 0 0 0 1 1 + 1 18 2 26 1 3 3 4 4 65 3 2 1 1 1 0 0 1 0 0 0 0 0 1 2 + 4 15 2 20 5 5 3 2 3 35 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 3 6 2 21 1 3 3 2 1 41 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 2 9 1 14 2 4 3 3 4 29 3 1 1 1 1 1 0 1 0 0 1 0 0 1 2 + 4 42 4 40 3 3 3 4 1 36 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 + 4 9 2 38 5 5 3 4 1 64 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 + 1 24 2 37 1 3 2 4 3 28 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 18 1 16 1 3 3 3 3 44 1 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 2 15 2 14 5 2 3 1 2 23 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 9 2 20 1 2 2 2 3 19 3 2 1 1 1 0 0 0 1 1 0 0 0 1 2 + 2 24 2 14 1 2 2 4 3 25 3 1 1 2 1 1 0 1 0 0 1 0 1 0 2 + 4 12 2 14 1 5 3 4 2 47 1 3 2 2 1 0 0 1 0 0 1 0 0 1 1 + 4 24 2 14 3 4 2 1 3 28 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 60 3 157 1 4 3 4 3 21 3 2 1 2 1 0 0 1 
0 0 1 0 0 1 1 + 4 12 2 15 1 2 2 3 3 34 3 1 2 1 1 0 0 1 0 0 1 0 0 1 1 + 1 42 3 44 1 4 3 2 2 26 1 2 2 2 1 0 0 1 0 0 1 0 0 1 2 + 1 18 2 8 1 1 2 1 1 27 3 1 1 1 1 0 0 1 0 0 1 1 0 0 2 + 2 15 2 13 1 5 3 4 3 38 3 2 1 1 1 0 0 1 0 0 1 0 1 0 1 + 4 15 2 46 2 3 3 2 2 40 3 1 1 2 1 0 0 1 0 0 1 0 0 0 2 + 4 24 4 19 1 4 4 2 3 33 3 2 1 2 1 0 0 0 0 0 1 0 0 1 1 + 1 18 4 19 1 4 4 1 2 32 3 2 1 2 1 0 0 1 0 0 1 0 0 0 1 + 4 36 3 80 5 2 3 4 3 27 3 2 1 2 1 0 0 1 0 1 0 0 0 1 2 + 1 30 0 46 1 3 1 2 1 32 3 2 1 1 1 0 0 0 0 0 1 0 0 1 1 + 4 12 2 14 3 3 2 2 2 26 3 1 1 1 1 1 0 1 0 0 1 0 0 1 2 + 3 24 2 9 1 4 3 3 4 38 1 1 2 1 1 1 0 1 0 0 0 0 0 1 2 + 1 12 2 7 1 3 3 4 3 40 3 1 2 1 1 0 0 1 0 1 0 0 1 0 2 + 1 48 2 75 1 4 3 1 4 50 3 1 1 2 1 0 0 1 0 0 0 0 0 0 1 + 2 12 2 19 1 3 3 2 2 37 3 1 1 1 1 0 0 1 0 0 1 0 1 0 2 + 1 24 2 23 1 5 3 1 1 45 3 1 1 1 1 1 0 0 1 0 1 0 0 1 2 + 2 36 3 81 2 5 3 4 3 42 3 4 1 2 1 1 0 1 0 0 1 0 0 0 2 + 4 24 4 23 1 4 3 3 3 35 3 2 1 2 1 0 1 1 0 0 1 0 0 1 1 + 1 14 2 40 1 1 3 4 4 22 3 1 1 1 1 1 0 1 0 0 0 0 0 1 1 + 2 12 2 9 1 5 3 4 3 41 1 1 2 1 1 1 0 1 0 0 1 0 1 0 2 + 4 48 2 102 5 4 3 3 3 37 2 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 2 30 0 42 1 3 2 1 3 28 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 2 18 4 64 1 5 3 1 4 41 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 3 12 2 13 1 3 4 4 1 23 3 1 1 1 1 0 0 1 0 1 0 0 0 1 1 + 1 12 2 9 5 3 4 2 3 23 3 1 1 1 1 1 0 1 0 0 1 0 0 1 2 + 4 21 2 22 1 5 3 2 1 50 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 2 6 3 10 1 1 3 1 2 35 2 2 1 2 1 0 0 1 0 0 1 0 0 0 1 + 3 6 4 10 1 3 2 4 2 50 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 + 4 24 4 63 1 1 3 2 4 27 1 2 1 2 1 0 0 0 1 0 1 0 0 0 1 + 2 30 1 35 4 3 3 2 3 34 2 1 2 2 1 0 0 1 0 0 1 0 0 1 1 + 4 48 1 36 1 3 2 1 1 27 2 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 12 4 48 1 5 3 4 2 43 3 2 1 2 1 1 0 0 1 1 0 0 0 1 2 + 3 30 4 30 1 5 3 4 2 47 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 24 4 41 2 3 3 3 2 27 3 2 1 2 1 0 0 1 0 0 1 0 1 0 1 + 4 36 2 57 2 4 3 2 3 31 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 + 4 60 2 104 1 5 3 4 2 42 3 1 1 2 1 1 0 1 0 0 1 0 0 0 1 + 4 6 4 21 3 3 4 2 3 24 3 1 1 1 1 1 0 1 0 0 1 0 0 1 1 + 4 21 3 26 3 2 3 2 1 41 1 1 2 1 1 0 0 1 0 0 1 0 1 0 2 + 4 30 4 45 1 4 2 4 3 26 3 1 1 2 1 0 0 1 0 1 0 0 0 0 1 + 4 24 4 52 1 5 3 4 3 33 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 2 72 2 56 2 3 4 2 3 24 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 1 24 2 24 1 5 3 4 1 64 1 1 1 1 1 0 0 1 0 1 0 0 1 0 1 + 4 18 2 15 1 2 2 1 1 26 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 6 2 15 1 2 2 2 4 56 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 12 2 23 5 3 3 4 4 37 3 1 1 2 1 0 0 1 0 0 0 0 0 1 1 + 4 15 3 15 1 3 4 3 1 33 1 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 24 4 51 1 2 4 3 4 47 3 3 1 2 1 0 0 1 0 0 0 0 0 1 1 + 2 36 3 99 2 4 3 3 2 31 3 2 2 2 1 0 0 1 0 0 1 0 1 0 1 + 4 60 2 65 5 3 3 4 4 34 3 1 2 2 1 1 0 1 0 0 0 0 0 1 1 + 3 10 4 13 5 4 3 2 2 27 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 + 2 36 3 29 2 5 3 3 4 30 3 1 1 1 1 1 0 1 0 0 0 0 0 1 1 + 4 9 2 28 2 5 3 4 3 35 3 1 1 2 1 0 0 0 1 0 1 0 0 1 1 + 1 12 2 37 4 3 3 3 2 31 3 1 2 1 1 1 0 1 0 0 1 0 0 1 1 + 1 15 4 10 1 3 1 3 2 25 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 2 15 2 26 2 3 2 2 1 25 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 + 2 24 2 29 2 2 3 1 3 29 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 6 4 47 5 2 3 3 1 44 3 2 2 1 1 1 0 1 0 0 1 0 1 0 1 + 4 24 2 23 1 4 3 2 3 28 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 4 6 2 12 3 3 3 4 2 50 3 1 1 1 1 0 1 1 0 1 0 0 0 1 1 + 2 12 2 11 1 4 3 3 1 29 3 2 1 1 2 0 0 0 0 0 1 0 0 1 1 + 4 12 4 9 1 1 2 2 2 38 3 1 1 1 1 1 0 1 0 0 1 1 0 0 1 + 4 18 4 18 1 3 3 2 3 24 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 3 15 2 19 1 5 3 4 3 40 3 1 1 2 1 0 0 1 0 1 0 0 0 0 1 + 4 12 2 11 3 3 2 4 3 29 3 1 1 1 1 0 0 1 0 1 0 0 1 0 2 + 1 48 4 63 1 5 3 4 4 46 3 2 1 2 1 0 1 1 0 0 0 0 0 1 2 + 3 24 2 14 2 5 2 2 4 47 3 1 1 
2 1 0 0 1 0 0 0 0 0 1 1 + 2 30 3 25 2 5 3 2 2 41 2 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 2 27 2 25 1 2 2 1 2 32 3 1 2 2 1 0 0 1 0 0 1 0 0 1 1 + 4 15 2 53 3 5 2 4 4 35 3 1 1 1 1 1 0 1 0 0 0 0 0 1 1 + 2 48 2 66 2 4 3 2 2 24 3 1 1 1 1 1 0 1 0 0 1 0 0 1 2 + 2 12 0 30 1 2 2 3 2 25 3 2 1 1 1 0 0 1 0 1 0 0 0 1 2 + 2 9 2 12 1 5 2 4 1 25 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 2 9 2 21 1 3 3 2 1 37 3 1 2 1 1 0 0 1 0 0 1 0 1 0 1 + 4 18 4 6 3 5 3 3 2 32 1 2 1 2 1 0 0 1 0 0 1 0 0 0 1 + 1 6 1 12 1 5 2 4 4 35 3 1 1 1 1 0 0 1 0 0 0 0 0 1 2 + 4 21 2 25 5 5 3 4 1 46 3 1 1 2 1 0 1 1 0 0 1 0 0 0 1 + 1 9 4 11 1 3 3 4 1 25 3 2 1 1 1 0 0 1 0 0 1 0 1 0 1 + 2 60 2 140 1 4 3 2 4 27 3 1 1 2 1 1 0 1 0 0 1 0 0 0 2 + 4 30 4 76 5 5 3 4 3 63 3 2 1 1 1 0 1 1 0 0 1 0 0 1 1 + 4 30 4 31 5 5 3 2 3 40 3 2 2 2 1 0 0 1 0 0 1 0 0 1 1 + 4 18 2 15 1 3 3 2 4 32 3 1 1 2 1 0 0 1 0 0 0 0 0 0 1 + 3 24 4 31 5 3 3 2 3 31 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 + 2 20 0 61 2 5 4 4 3 31 1 2 1 2 1 0 1 1 0 0 1 0 0 1 1 + 3 9 0 13 1 2 3 2 3 34 3 2 1 2 1 0 0 1 0 0 1 0 0 0 2 + 2 6 1 4 4 2 2 2 2 24 1 1 2 1 1 0 0 1 0 1 0 0 0 1 2 + 1 12 2 12 1 3 2 2 1 24 3 1 1 1 1 1 0 1 0 0 1 0 1 0 2 + 2 9 2 8 3 3 2 3 1 66 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 + 4 27 2 26 1 3 2 3 1 21 3 1 1 1 1 1 0 1 0 1 0 0 0 1 2 + 4 6 4 2 4 3 2 2 1 41 1 2 1 1 1 1 0 1 0 0 1 0 1 0 1 + 4 15 4 13 3 3 4 2 2 47 3 2 1 1 1 0 0 1 0 0 1 0 1 0 1 + 1 18 2 19 1 3 2 4 3 25 1 2 1 1 1 0 0 1 0 1 0 0 0 1 2 + 2 48 1 64 1 5 2 3 4 59 3 1 1 1 1 0 0 1 0 1 0 0 0 1 2 + 3 24 4 13 4 3 1 4 1 36 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 + 2 24 3 64 1 2 3 2 3 33 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 24 2 20 1 3 3 4 1 21 3 1 2 1 1 0 0 1 0 1 0 0 1 0 2 + 2 8 2 8 1 4 2 2 1 44 3 1 1 1 1 0 0 0 0 0 1 0 1 0 1 + 4 24 2 26 4 3 2 4 3 28 3 1 1 2 1 0 1 1 0 1 0 0 0 1 1 + 4 4 4 34 1 4 2 1 1 37 3 1 2 1 1 1 0 1 0 0 1 0 0 1 1 + 2 36 1 40 5 2 2 2 4 29 1 1 1 1 1 0 0 1 0 0 1 1 0 0 1 + 2 24 2 116 1 3 2 4 3 23 3 2 1 1 1 0 1 1 0 1 0 0 0 0 2 + 1 18 2 44 2 3 3 4 3 35 3 1 2 2 1 1 0 1 0 0 1 0 1 0 1 + 4 6 4 68 1 4 3 3 4 45 3 2 2 2 1 1 0 1 0 0 1 0 0 0 1 + 2 30 0 43 2 3 2 4 3 26 3 2 1 1 1 0 0 1 0 1 0 0 1 0 2 + 1 24 1 23 2 4 3 3 3 32 1 1 1 1 1 1 0 1 0 0 1 0 0 1 1 + 2 10 1 10 1 3 3 4 1 23 2 1 1 1 1 0 0 1 0 0 1 0 1 0 1 + 4 21 2 32 5 5 3 3 2 41 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 1 24 1 25 3 3 3 4 1 22 2 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 1 39 4 142 5 4 3 4 2 30 3 2 1 2 1 0 0 1 0 0 1 0 0 0 1 + 1 13 4 18 1 2 3 1 2 28 1 2 1 1 1 0 0 1 0 0 1 0 1 0 1 + 1 15 2 25 1 1 2 4 3 23 3 1 1 1 1 1 0 1 0 1 0 0 0 1 1 + 1 12 2 13 1 2 2 1 1 37 3 1 1 1 1 1 0 1 0 0 1 0 1 0 2 + 4 21 2 52 5 3 3 3 3 26 3 1 1 1 1 0 1 1 0 0 1 0 0 1 1 + 4 15 2 30 1 4 3 2 3 33 3 1 1 1 1 0 1 1 0 0 1 0 0 1 1 + 1 6 2 4 1 5 2 1 2 49 1 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 1 18 2 10 1 2 2 2 3 23 3 1 1 1 1 1 0 1 0 0 1 0 1 0 2 + 2 12 2 8 2 4 2 4 1 23 3 1 1 1 1 0 0 1 0 1 0 0 1 0 1 + 4 30 4 58 1 4 2 2 3 25 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 12 3 16 4 5 3 4 4 55 3 2 2 1 1 0 0 1 0 0 0 0 0 1 2 + 1 24 2 13 5 4 2 4 4 32 3 1 1 1 1 1 0 1 0 1 0 0 0 1 2 + 3 6 4 13 1 3 3 1 1 74 3 3 2 1 2 1 0 1 0 0 1 1 0 0 1 + 3 15 4 13 5 3 3 4 4 39 3 2 1 2 1 0 0 1 0 0 0 0 0 1 2 + 4 24 2 14 1 3 3 2 1 31 3 1 1 2 1 1 0 0 0 0 1 0 0 1 1 + 1 12 4 7 1 5 3 3 2 35 3 2 1 1 1 1 0 1 0 0 1 0 0 1 2 + 4 15 4 50 5 5 2 4 3 59 3 1 1 2 1 1 0 1 0 0 1 0 0 1 1 + 1 18 4 21 1 3 2 4 1 24 3 2 1 1 1 0 0 1 0 1 0 0 0 1 2 + 1 12 2 22 1 3 3 3 2 24 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 + 4 21 4 127 5 5 3 4 4 30 3 1 1 2 1 1 0 1 0 0 0 0 0 0 2 + 4 24 4 25 2 4 4 3 2 27 3 2 1 2 1 1 0 1 0 0 1 0 0 1 1 + 2 12 2 12 1 5 4 3 1 40 1 2 1 1 1 0 0 0 0 0 1 0 1 0 1 + 1 30 2 31 1 2 1 4 2 31 3 1 1 1 1 0 0 1 0 0 1 0 1 0 2 + 4 10 2 29 5 2 2 4 1 31 3 
1 1 1 1 0 1 1 0 1 0 0 0 1 1 + 2 12 4 36 1 5 3 4 3 28 3 3 1 2 1 0 0 1 0 1 0 0 0 1 1 + 4 12 4 17 1 5 3 4 1 63 3 2 1 2 1 0 0 1 0 0 1 0 1 0 1 + 1 24 2 28 5 5 2 4 1 26 3 1 1 1 1 0 1 1 0 1 0 0 0 1 1 + 1 36 4 81 1 3 2 2 4 25 3 2 1 2 1 0 0 1 0 0 1 0 0 0 2 + 4 21 4 33 1 5 3 4 3 36 3 1 1 2 1 0 1 1 0 0 1 0 0 0 1 + 4 24 4 22 2 5 3 4 2 52 1 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 3 12 4 15 3 1 3 4 4 66 1 3 1 1 1 1 0 1 0 0 0 1 0 0 1 + 1 24 2 14 5 3 2 4 1 25 3 1 1 1 1 1 0 1 0 1 0 0 0 1 2 + 4 36 4 35 1 4 3 4 3 37 3 2 1 2 1 1 0 1 0 0 1 0 0 1 1 + 1 18 2 35 1 4 2 1 1 25 3 1 1 1 1 0 0 0 0 0 1 0 0 1 1 + 4 36 4 57 4 5 3 2 3 38 3 2 1 2 1 0 1 1 0 0 1 0 0 0 1 + 2 18 2 39 1 1 2 4 3 67 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 2 39 4 49 1 4 3 2 1 25 3 2 1 1 1 0 0 0 0 0 1 0 0 1 2 + 4 24 4 19 4 5 3 4 1 60 3 1 1 2 1 1 0 1 0 0 1 0 0 1 1 + 2 12 0 14 1 3 3 2 1 31 3 1 1 2 1 0 0 1 0 0 1 0 1 0 1 + 2 12 2 8 2 2 2 2 2 23 1 1 1 1 1 1 0 1 0 0 1 0 1 0 2 + 2 20 2 65 5 1 1 4 1 60 3 1 1 2 1 0 1 1 0 0 1 0 0 0 1 + 2 18 2 19 4 3 3 2 2 35 3 1 1 2 1 0 0 1 0 0 1 0 1 0 1 + 4 22 2 27 3 5 3 4 3 40 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 48 4 28 5 5 3 3 3 38 3 2 2 2 1 0 1 1 0 0 1 0 0 1 1 + 2 48 3 62 1 5 3 4 4 50 3 1 1 1 1 0 0 1 0 0 0 0 0 1 2 + 1 40 4 60 1 3 3 3 4 27 1 1 1 2 1 0 0 1 0 0 1 0 0 1 2 + 2 21 2 12 1 5 2 4 2 39 3 1 2 1 1 0 0 1 0 0 1 0 0 1 2 + 4 24 2 63 5 5 3 4 3 41 3 1 2 2 1 0 1 1 0 0 1 0 0 0 1 + 4 6 4 12 5 3 4 2 2 27 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 3 24 2 29 1 5 1 4 4 51 3 1 1 1 1 0 0 1 0 0 0 0 0 1 1 + 4 24 2 31 3 5 3 3 4 32 3 1 1 2 1 0 0 1 0 1 0 0 0 1 1 + 4 9 2 23 2 2 2 4 2 22 3 1 1 1 1 0 0 1 0 1 0 0 0 1 1 + 1 18 2 75 5 5 3 4 2 51 3 1 2 2 1 0 1 1 0 0 0 0 0 1 2 + 4 12 4 13 1 2 2 4 2 22 3 2 1 1 1 0 0 1 0 1 0 0 1 0 1 + 4 24 3 7 5 5 4 4 3 54 3 2 1 2 1 1 0 1 0 0 1 0 0 1 1 + 2 9 2 15 5 2 3 2 1 35 3 1 1 1 1 1 0 1 0 0 1 1 0 0 1 + 4 24 4 16 1 5 3 4 4 54 3 2 2 1 1 0 0 1 0 0 0 0 0 1 1 + 2 18 4 18 1 5 2 4 1 48 1 2 1 2 1 0 0 0 0 1 0 0 1 0 1 + 1 20 4 43 1 5 2 4 2 24 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 12 4 10 5 5 3 4 3 35 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 2 12 2 75 5 1 2 2 1 24 3 1 1 1 1 1 0 1 0 1 0 1 0 0 1 + 1 36 2 93 1 4 3 1 3 24 3 1 1 2 1 1 0 1 0 0 1 0 0 1 2 + 2 6 2 6 1 2 4 3 1 26 3 1 1 1 2 0 0 1 0 0 1 0 1 0 1 + 4 12 4 9 5 5 3 4 1 65 3 4 1 1 1 0 0 1 0 0 1 0 0 1 1 + 2 42 1 93 1 1 3 2 4 55 1 1 1 2 1 0 1 1 0 0 0 0 0 0 1 + 2 15 0 18 1 2 2 1 1 26 3 2 1 1 1 1 0 1 0 1 0 1 0 0 2 + 2 8 2 9 1 2 4 2 1 26 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 2 6 2 5 1 4 4 3 1 28 1 1 1 1 1 0 0 0 0 0 1 0 1 0 1 + 1 36 4 96 1 4 3 4 3 24 3 2 1 2 1 0 1 1 0 0 1 0 0 1 2 + 1 48 2 31 1 3 3 4 3 54 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 1 48 2 39 1 4 3 4 4 46 3 1 2 1 1 1 0 1 0 0 0 0 0 1 2 + 2 36 3 74 1 3 2 2 2 54 3 1 1 1 1 1 0 1 0 1 0 0 0 1 1 + 4 6 2 13 3 3 1 4 1 62 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 6 4 16 1 4 2 2 3 24 3 2 1 2 1 0 0 1 0 1 0 0 0 1 1 + 1 36 2 159 1 1 1 3 3 43 3 1 1 1 1 0 0 0 1 0 1 0 0 0 1 + 1 18 2 13 1 3 4 3 1 26 1 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 4 12 2 11 1 3 4 2 1 27 3 2 1 2 1 1 0 1 0 0 1 0 0 1 1 + 3 12 2 30 1 3 4 1 3 24 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 36 2 27 1 5 3 2 2 41 1 1 2 1 1 0 0 1 0 0 1 0 0 1 2 + 1 8 4 7 1 5 3 4 1 47 3 2 1 1 1 1 0 1 0 0 1 0 1 0 1 + 4 18 4 38 1 2 1 2 3 35 3 2 1 2 1 0 0 1 0 0 1 0 0 0 1 + 1 21 4 16 1 5 4 3 3 30 3 2 1 2 1 1 0 1 0 0 1 0 0 1 1 + 1 18 4 40 1 5 2 4 1 33 1 3 1 2 1 1 0 1 0 1 0 0 0 1 2 + 4 18 0 42 1 3 3 2 3 36 2 2 2 1 1 0 0 1 0 0 1 0 0 1 2 + 1 36 2 83 5 5 3 4 4 47 3 1 1 1 1 0 1 1 0 0 0 0 0 1 2 + 2 48 3 67 5 3 3 4 4 38 3 1 2 2 1 0 0 1 0 0 0 0 0 1 1 + 4 24 3 24 3 3 3 2 3 44 3 2 2 2 1 0 0 1 0 0 1 0 0 1 1 + 1 18 2 12 1 2 2 3 3 23 3 1 1 2 1 1 0 1 0 1 0 0 0 1 2 + 1 45 0 118 1 5 3 4 
3 29 3 2 1 1 1 0 0 1 0 1 0 0 0 1 2 + 2 24 2 51 5 5 2 4 3 42 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 3 15 2 23 1 2 2 3 1 25 3 1 1 1 1 0 0 1 0 0 1 0 1 0 2 + 1 12 0 11 1 3 3 4 3 48 1 2 1 1 1 1 0 1 0 0 1 0 0 1 2 + 4 12 2 9 5 3 2 2 3 21 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 4 2 6 1 2 2 3 1 23 3 1 2 1 1 0 0 1 0 1 0 0 1 0 1 + 1 24 4 30 1 5 3 4 2 63 3 2 1 2 1 0 1 1 0 0 1 0 0 1 1 + 4 24 4 26 1 5 4 3 1 46 3 2 1 1 1 0 0 0 1 0 1 0 0 1 1 + 1 36 2 52 1 4 3 2 2 29 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 4 21 3 30 1 3 3 2 1 28 2 2 1 1 1 0 1 1 0 0 1 0 1 0 1 + 4 18 2 19 1 2 2 4 1 23 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 4 24 1 16 1 4 3 4 3 50 1 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 4 18 2 34 1 5 3 4 2 47 1 3 2 2 1 0 0 1 0 0 1 0 0 1 1 + 2 21 2 40 5 4 3 3 3 35 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 4 18 2 68 5 3 3 4 3 68 3 2 1 1 1 1 0 1 0 1 0 0 0 1 2 + 4 24 2 12 1 2 4 2 1 28 3 1 1 1 1 1 0 1 0 0 1 0 0 1 1 + 1 9 2 14 1 4 3 4 1 59 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 12 2 7 1 5 3 4 1 57 2 1 1 1 1 0 0 1 0 0 1 0 1 0 2 + 1 20 4 22 1 3 4 2 2 33 1 2 1 1 2 1 0 0 0 1 0 0 0 1 2 + 4 24 4 40 5 4 3 4 2 43 3 2 1 2 1 0 1 1 0 0 1 0 0 1 1 + 4 15 4 15 1 3 3 4 4 35 3 2 1 2 1 0 0 1 0 0 0 0 0 1 1 + 1 18 1 14 1 4 3 4 4 32 3 2 2 1 1 1 0 1 0 0 0 0 1 0 2 + 4 36 3 109 1 5 3 2 3 45 3 2 2 2 1 1 0 1 0 0 1 0 0 1 1 + 4 24 2 15 2 2 4 3 1 33 3 1 1 2 1 1 0 1 0 0 1 0 0 1 1 + 4 10 2 9 5 4 2 3 2 40 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 4 15 4 33 1 3 3 2 4 28 3 1 1 2 1 0 0 1 0 0 0 0 0 1 1 + 1 15 2 40 1 3 2 2 2 29 3 1 1 2 1 1 0 1 0 0 1 0 0 1 2 + 4 9 2 36 2 3 3 2 1 26 3 1 2 1 2 1 0 0 0 1 0 0 0 1 1 + 4 24 4 58 4 3 3 2 1 27 3 2 1 1 1 0 1 1 0 0 1 0 0 1 1 + 4 18 3 22 1 3 4 2 3 28 3 1 1 2 1 0 0 1 0 0 1 0 0 1 2 + 1 24 2 24 1 2 2 4 1 35 3 1 1 2 1 0 0 1 0 0 1 0 0 1 2 + 4 27 4 45 4 2 3 2 1 32 2 2 2 2 1 0 0 1 0 0 1 0 1 0 1 + 4 10 2 22 1 3 3 2 1 25 1 1 1 1 1 0 0 1 0 1 0 0 1 0 2 + 4 15 2 22 3 3 2 4 3 20 3 1 1 1 1 0 0 1 0 1 0 0 0 1 1 + 1 18 2 24 1 2 2 1 3 27 2 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 12 4 33 1 5 3 4 2 42 2 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 36 2 74 5 5 3 2 2 37 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 12 2 7 1 5 2 4 2 24 3 1 1 1 1 0 0 1 0 1 0 0 0 1 1 + 4 36 3 77 3 4 2 4 3 40 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 + 3 6 4 13 1 5 3 4 1 46 3 2 2 1 2 1 0 1 0 0 1 0 0 1 1 + 1 24 4 14 2 4 3 1 1 26 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 + 4 15 2 9 5 2 2 1 1 24 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 12 2 36 1 3 3 2 2 29 3 1 2 1 1 0 0 0 1 0 1 0 1 0 1 + 2 11 4 13 4 3 2 4 3 40 3 2 1 1 1 1 0 1 0 0 1 0 0 1 1 + 1 18 1 19 1 2 3 4 4 36 1 1 1 2 1 0 0 0 1 0 0 0 0 0 1 + 4 36 2 36 1 5 3 2 3 28 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 9 2 14 1 2 3 2 4 27 3 1 1 2 1 1 0 1 0 0 0 0 0 0 2 + 4 30 4 67 5 4 3 3 2 36 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 24 2 78 1 4 3 3 3 38 3 1 1 2 1 0 1 1 0 0 1 0 0 0 1 + 4 24 2 93 5 3 1 4 4 48 3 1 1 2 1 0 1 1 0 0 0 0 0 1 1 + 2 30 4 22 5 5 3 4 1 36 3 2 1 1 1 1 0 1 0 0 1 0 0 1 1 + 4 18 4 11 1 1 2 4 3 65 3 2 1 1 1 0 0 1 0 0 1 1 0 0 1 + 2 24 2 41 1 4 1 3 3 43 3 1 1 2 1 0 0 1 0 0 1 0 0 1 2 + 1 12 2 8 1 2 2 4 2 53 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 2 24 4 28 5 4 3 3 4 34 3 2 2 2 1 0 0 1 0 0 1 0 0 1 1 + 2 48 2 157 1 3 3 2 3 23 3 1 1 2 1 0 0 1 0 0 1 0 0 1 2 + 4 36 4 66 1 5 3 4 3 34 3 2 1 2 1 1 0 1 0 0 1 0 0 0 1 + 4 28 1 78 5 2 3 4 1 40 1 2 2 2 1 0 1 0 0 1 0 0 0 1 1 + 1 27 4 24 1 5 3 4 3 43 2 4 2 2 1 0 0 1 0 0 1 0 0 0 1 + 4 15 4 18 1 5 3 4 3 46 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 + 1 12 4 22 1 3 3 4 2 38 1 2 1 1 2 1 0 1 0 0 1 0 1 0 1 + 2 36 4 58 1 3 3 4 3 34 3 2 1 2 1 0 1 1 0 0 1 0 0 1 1 + 4 18 4 12 5 3 3 3 2 29 3 2 1 2 1 0 0 1 0 0 1 0 0 1 1 + 4 36 3 89 5 4 3 2 3 31 2 1 2 2 1 0 1 1 0 0 1 0 0 0 1 + 1 21 2 26 1 2 2 4 2 28 3 1 1 2 1 0 0 1 0 1 0 0 0 0 1 + 4 12 4 
16 4 4 2 2 2 35 3 1 1 1 2 0 0 1 0 0 1 0 0 1 1 + 4 15 2 22 5 4 2 4 1 33 1 1 1 1 1 0 0 1 0 1 0 0 1 0 1 + 1 18 2 42 1 3 3 3 3 42 3 1 1 1 1 0 0 0 1 0 1 0 0 1 2 + 1 16 4 26 1 5 3 4 2 43 1 1 1 2 1 1 0 0 0 1 0 0 0 1 2 + 4 20 4 35 5 2 1 4 1 44 3 2 1 2 1 1 0 1 0 0 1 0 0 1 1 + 4 36 4 105 5 5 3 4 4 42 3 2 1 1 1 0 1 1 0 0 0 0 0 1 1 + 4 15 2 14 5 3 4 2 1 40 3 1 1 2 1 0 0 1 0 1 0 0 0 1 1 + 4 24 2 13 1 5 3 1 1 36 3 1 1 2 1 0 0 1 0 0 1 0 0 0 1 + 1 12 2 11 1 3 3 2 1 20 3 1 2 2 1 0 0 1 0 1 0 0 0 0 1 + 1 21 2 38 5 4 3 2 1 24 3 1 1 1 2 1 0 0 1 0 1 0 1 0 1 + 2 36 2 37 5 3 4 2 3 27 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 15 3 36 1 2 2 2 2 46 3 2 1 1 1 0 1 1 0 0 1 0 1 0 1 + 2 9 2 32 5 3 2 2 1 33 3 1 1 1 1 1 0 1 0 0 1 0 1 0 1 + 4 36 3 45 1 3 2 4 1 34 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 2 24 4 47 1 2 2 4 3 25 1 1 1 1 1 0 0 1 0 0 1 0 1 0 2 + 2 30 2 30 5 5 2 4 3 25 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 11 2 21 4 5 1 2 1 28 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 1 24 1 32 1 3 3 2 2 31 3 1 1 2 1 0 0 1 0 1 0 0 0 1 2 + 2 48 0 184 1 3 2 2 2 32 1 1 1 2 2 0 0 1 0 0 1 0 0 0 2 + 4 10 2 28 2 3 3 2 1 32 3 1 2 1 1 0 1 0 1 0 1 0 0 1 1 + 1 6 2 149 1 5 3 4 4 68 1 1 1 2 1 1 0 1 0 0 1 0 0 0 2 + 1 24 2 24 2 1 1 1 2 33 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 1 24 2 33 1 5 3 2 2 39 3 1 1 2 1 0 0 1 0 1 0 0 0 0 2 + 4 18 4 18 1 3 2 2 4 28 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 48 3 127 3 4 3 1 3 37 3 1 1 2 1 0 0 1 0 0 1 0 0 0 1 + 1 9 2 14 1 2 2 4 2 22 3 1 1 1 1 0 0 1 0 1 0 0 0 1 2 + 2 12 2 20 1 4 3 4 2 30 3 1 2 2 1 1 0 1 0 1 0 0 0 1 1 + 1 24 1 69 1 2 1 1 2 55 1 1 1 2 1 0 0 1 0 0 1 0 0 1 2 + 1 12 1 7 1 2 3 2 3 46 1 2 1 2 1 1 0 1 0 0 1 0 0 1 2 + 1 18 4 10 1 2 2 4 2 21 3 1 1 1 1 0 0 1 0 1 0 0 0 1 1 + 1 48 2 103 1 4 3 4 4 39 2 3 2 2 1 0 1 1 0 0 0 0 0 1 2 + 4 30 2 19 5 5 3 4 3 58 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 1 12 3 13 1 3 3 2 1 43 3 2 2 1 1 1 0 1 0 0 1 0 1 0 1 + 1 24 2 17 1 2 3 1 2 24 3 1 1 1 2 0 0 0 1 0 1 0 1 0 1 + 2 9 2 17 1 2 2 2 3 22 3 1 1 2 1 0 0 1 0 0 1 0 0 1 2 + 4 9 4 12 1 3 3 1 1 30 3 2 1 1 1 1 0 1 0 0 1 0 0 1 1 + 4 12 4 5 3 5 3 4 2 42 3 2 2 2 1 0 0 1 0 0 1 0 0 1 1 + 1 12 2 15 1 3 2 1 3 23 1 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 2 30 3 19 2 2 3 3 4 30 2 2 1 1 1 0 0 1 0 0 1 0 0 0 2 + 3 9 2 7 1 3 2 2 1 28 3 1 1 1 1 0 0 1 0 0 1 0 1 0 2 + 2 6 2 21 1 2 4 3 3 30 3 1 1 2 1 0 0 1 0 1 0 0 0 0 1 + 2 60 2 63 1 3 3 4 4 42 3 1 1 1 1 0 0 1 0 0 0 0 0 1 2 + 4 24 4 68 5 3 3 4 2 46 3 2 2 2 1 0 1 1 0 0 1 0 0 0 1 + 4 12 2 35 5 2 3 3 2 45 3 1 2 2 1 1 0 1 0 0 1 0 0 0 1 + 4 10 2 15 1 3 3 2 1 31 3 1 2 1 2 1 0 1 0 0 1 0 1 0 1 + 4 24 2 9 5 4 3 2 3 31 2 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 4 4 4 15 1 4 3 1 1 42 3 3 2 1 1 1 0 1 0 0 1 0 1 0 1 + 1 15 2 18 1 2 2 1 2 46 3 1 1 1 1 0 0 0 0 1 0 0 0 1 1 + 2 48 0 84 3 2 2 1 3 30 3 2 1 1 1 1 0 1 0 0 1 0 0 1 1 + 1 24 1 33 3 2 3 4 4 30 3 1 2 2 1 0 0 1 0 0 0 0 0 1 2 + 4 12 2 29 5 1 3 4 4 38 3 1 1 2 1 1 0 1 0 0 1 0 0 0 1 + 4 18 2 15 1 2 4 1 2 43 3 1 2 1 1 0 0 0 1 0 1 0 1 0 2 + 4 24 2 36 2 5 3 4 3 31 3 2 1 1 1 0 0 1 0 0 1 0 0 1 2 + 2 18 4 36 1 1 4 3 3 40 3 3 2 2 1 0 0 1 0 0 1 1 0 0 1 + 1 36 3 21 1 4 3 1 3 24 3 2 1 2 1 0 0 1 0 0 1 0 0 1 2 + 2 24 2 41 3 2 2 4 3 28 3 1 1 1 1 0 1 1 0 1 0 0 0 1 2 + 4 36 2 110 1 1 2 2 3 26 3 2 1 2 1 0 0 1 0 0 1 0 0 0 2 + 1 12 2 19 1 3 2 4 2 29 3 1 1 2 1 1 0 0 0 0 1 0 0 1 1 + 1 24 4 12 4 5 2 4 2 57 3 2 1 2 1 0 0 1 0 1 0 0 0 0 1 + 3 30 4 37 5 5 3 4 2 49 2 2 1 1 1 0 0 1 0 0 1 0 1 0 1 + 2 9 4 12 1 5 3 4 1 37 3 3 1 1 1 0 0 1 0 0 1 0 1 0 1 + 1 28 2 40 1 3 3 2 3 45 3 1 1 1 1 1 0 1 0 0 1 0 1 0 2 + 2 24 2 31 2 5 3 4 4 30 3 1 1 1 1 0 0 1 0 0 0 0 0 1 1 + 4 6 4 17 1 5 4 2 1 30 3 2 1 1 1 0 0 1 0 1 0 0 0 1 1 + 2 21 3 24 1 3 1 4 2 47 3 2 1 1 1 1 0 1 0 0 1 0 0 
1 1 + 4 15 2 36 5 3 3 2 4 29 3 1 1 1 1 1 0 1 0 0 1 0 0 1 1 + 4 24 2 24 3 5 3 2 3 35 1 2 1 2 1 0 0 1 0 0 1 0 0 1 2 + 2 6 2 5 1 2 4 1 2 22 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 + 2 30 2 17 5 3 2 1 3 26 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 2 27 4 25 3 3 3 2 2 23 3 2 1 1 1 0 0 1 0 0 1 0 1 0 2 + 4 15 2 36 1 5 2 2 3 54 1 1 1 2 1 0 0 1 0 1 0 0 0 0 1 + 4 42 2 72 5 4 4 4 2 29 3 1 1 2 1 0 0 1 0 1 0 0 0 1 1 + 1 11 4 39 1 3 3 2 1 40 3 2 2 1 1 1 0 1 0 0 1 0 1 0 1 + 2 15 2 15 2 3 3 2 1 22 3 1 1 1 1 0 0 0 0 0 1 0 0 1 1 + 4 24 2 74 1 3 3 4 2 43 3 1 2 1 1 1 0 1 0 0 1 0 1 0 1 + 1 24 1 12 1 1 2 4 4 29 3 2 1 1 1 1 0 0 1 1 0 1 0 0 2 + 1 60 2 73 1 5 3 4 4 36 3 1 1 1 1 0 0 0 1 1 0 0 0 1 2 + 4 30 4 28 1 3 2 2 3 33 3 1 1 2 1 0 0 1 0 0 1 0 0 1 1 + 3 24 2 13 3 3 2 3 3 57 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 + 2 6 2 8 1 3 2 3 1 64 3 1 1 1 1 0 0 0 0 0 1 0 0 1 1 + 2 18 3 24 5 5 3 2 2 42 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 24 3 25 1 5 3 4 3 47 3 2 2 1 1 1 0 1 0 0 1 0 1 0 2 + 2 15 1 13 2 3 4 2 2 25 3 1 1 1 1 1 0 1 0 1 0 0 0 1 2 + 2 30 4 84 1 4 3 2 2 49 3 1 1 1 1 0 0 1 0 0 1 0 0 1 2 + 4 48 2 48 1 1 3 2 3 33 1 1 1 2 1 0 0 1 0 1 0 0 0 0 2 + 3 21 2 29 2 3 2 1 3 28 1 1 1 2 1 1 0 1 0 0 1 0 0 0 1 + 1 36 2 82 1 3 3 2 2 26 3 1 2 1 1 0 1 1 0 0 1 0 0 1 2 + 4 24 4 20 1 4 3 2 2 30 3 2 1 1 1 0 0 1 0 0 1 0 1 0 1 + 1 15 4 14 1 3 2 3 2 25 3 2 1 1 1 0 0 1 0 1 0 0 0 1 1 + 3 42 0 63 1 2 1 1 2 33 3 2 1 1 1 0 0 1 0 0 1 0 0 1 1 + 4 13 2 14 2 1 2 4 1 64 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 24 2 66 1 1 3 2 4 29 3 1 1 2 1 0 1 1 0 0 0 0 0 0 1 + 2 24 4 17 1 5 3 2 2 48 3 2 1 1 1 0 0 1 0 0 1 0 1 0 1 + 4 12 4 36 5 2 3 1 2 37 3 2 2 1 1 0 0 1 0 0 1 0 1 0 1 + 4 15 1 16 2 5 3 4 3 34 1 1 2 1 1 0 0 1 0 0 1 0 1 0 1 + 1 18 2 19 5 4 4 4 3 23 3 2 1 1 1 0 0 1 0 1 0 0 1 0 1 + 1 36 2 40 1 1 3 3 2 30 3 1 1 2 1 0 0 1 0 0 1 0 0 0 1 + 4 12 2 24 5 5 3 3 3 50 3 1 1 2 1 1 0 1 0 0 1 0 0 1 1 + 4 12 2 17 1 4 2 4 1 31 3 1 1 1 1 0 0 1 0 0 1 0 1 0 1 + 1 30 2 39 1 3 1 4 2 40 3 1 1 2 1 0 1 1 0 0 1 0 0 0 1 + 4 12 2 8 1 5 3 4 3 38 3 1 1 1 1 0 0 1 0 0 1 0 0 1 1 + 1 45 2 18 1 3 3 4 4 23 3 1 1 2 1 0 0 1 0 0 0 0 0 1 2 + 2 45 4 46 2 1 3 4 3 27 3 1 1 1 1 0 1 1 0 0 1 0 0 1 1 diff --git a/benchmarks/models/sv_nuts.jl b/benchmarks/models/sv_nuts.jl index c54dade69..697625dce 100644 --- a/benchmarks/models/sv_nuts.jl +++ b/benchmarks/models/sv_nuts.jl @@ -6,26 +6,25 @@ if !haskey(BenchmarkSuite, "nuts") end fname = joinpath(dirname(@__FILE__), "sv_nuts.data") -y, header = readdlm(fname, ',', header=true) +y, header = readdlm(fname, ','; header=true) # Stochastic volatility (SV) @model function sv_nuts(y, dy, ::Type{T}=Vector{Float64}) where {T} - N = size(y,1) + N = size(y, 1) - τ ~ Exponential(1/100) - ν ~ Exponential(1/100) + τ ~ Exponential(1 / 100) + ν ~ Exponential(1 / 100) s = T(undef, N) - s[1] ~ Exponential(1/100) + s[1] ~ Exponential(1 / 100) for n in 2:N - s[n] ~ Normal(log(s[n-1]), τ) + s[n] ~ Normal(log(s[n - 1]), τ) s[n] = exp(s[n]) - dy = log(y[n] / y[n-1]) / s[n] - dy ~ TDist(ν) + dy = log(y[n] / y[n - 1]) / s[n] + dy ~ TDist(ν) end end - # Sampling parameter settings n_samples = 10_000 diff --git a/docs/README.md b/docs/README.md index 2bc2ad683..7a1ad9119 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,5 +1,5 @@ -Turing's documentation in this directory is in markdown format. +Turing's documentation in this directory is in markdown format. If you want to build the doc locally, please refer to the [README](https://github.com/TuringLang/turinglang.github.io) file in [turinglang.github.io](https://github.com/TuringLang/turinglang.github.io). 
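Reviewer note: the `benchmarks/models/sv_nuts.jl` hunk above only reformats the stochastic-volatility model, so for context, here is a minimal sketch of how that benchmark model might be instantiated and sampled. It assumes the `@model` block above has been evaluated, that `sv_nuts.data` sits in the working directory, and that a plain `sample(model, NUTS(), n)` call is a fair stand-in for whatever the benchmark suite actually runs; the `0.0` placeholder for `dy` is likewise an assumption (the model overwrites `dy` internally).

```julia
using Turing, DelimitedFiles

# Read the same CSV the benchmark uses; the relative path is illustrative.
y, header = readdlm("sv_nuts.data", ','; header=true)

# `sv_nuts` is the model from the hunk above; the second argument is a placeholder.
model = sv_nuts(y, 0.0)

# Illustrative sampling call; the benchmark suite wires this up via BenchmarkSuite.
chain = sample(model, NUTS(), 10_000)
```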
-Please also visit [this repo](https://github.com/TuringLang/TuringTutorials/tree/master/tutorials) for the docs. +Please also visit [this repo](https://github.com/TuringLang/TuringTutorials/tree/master/tutorials) for the docs. diff --git a/docs/src/library/advancedhmc.md b/docs/src/library/advancedhmc.md index 5742e4cff..84f712f4d 100644 --- a/docs/src/library/advancedhmc.md +++ b/docs/src/library/advancedhmc.md @@ -22,4 +22,4 @@ Order = [:function] ```@autodocs Modules = [AdvancedHMC] Order = [:type] -``` \ No newline at end of file +``` diff --git a/docs/src/library/api.md b/docs/src/library/api.md index 53a946ceb..c598820b7 100644 --- a/docs/src/library/api.md +++ b/docs/src/library/api.md @@ -7,6 +7,7 @@ toc: true ```@meta CurrentModule = Turing ``` + ## Index ```@index diff --git a/docs/src/library/bijectors.md b/docs/src/library/bijectors.md index 9fb4eaecf..471da45fe 100644 --- a/docs/src/library/bijectors.md +++ b/docs/src/library/bijectors.md @@ -22,4 +22,4 @@ Order = [:function] ```@autodocs Modules = [Bijectors] Order = [:type] -``` \ No newline at end of file +``` diff --git a/ext/TuringDynamicHMCExt.jl b/ext/TuringDynamicHMCExt.jl index 27bca72e1..c82f237c0 100644 --- a/ext/TuringDynamicHMCExt.jl +++ b/ext/TuringDynamicHMCExt.jl @@ -3,9 +3,8 @@ module TuringDynamicHMCExt ### DynamicHMC backend - https://github.com/tpapp/DynamicHMC.jl ### - if isdefined(Base, :get_extension) - import DynamicHMC + using DynamicHMC: DynamicHMC using Turing using Turing: AbstractMCMC, Random, LogDensityProblems, DynamicPPL using Turing.Inference: ADTypes, LogDensityProblemsAD, TYPEDFIELDS @@ -13,7 +12,7 @@ else import ..DynamicHMC using ..Turing using ..Turing: AbstractMCMC, Random, LogDensityProblems, DynamicPPL - using ..Turing.Inference: ADTypes, LogDensityProblemsAD, TYPEDFIELDS + using ..Turing.Inference: ADTypes, LogDensityProblemsAD, TYPEDFIELDS end """ @@ -25,22 +24,22 @@ To use it, make sure you have DynamicHMC package (version >= 2) loaded: ```julia using DynamicHMC ``` -""" +""" struct DynamicNUTS{AD,space,T<:DynamicHMC.NUTS} <: Turing.Inference.Hamiltonian sampler::T adtype::AD end function DynamicNUTS( - spl::DynamicHMC.NUTS = DynamicHMC.NUTS(), - space::Tuple = (); - adtype::ADTypes.AbstractADType = Turing.DEFAULT_ADTYPE + spl::DynamicHMC.NUTS=DynamicHMC.NUTS(), + space::Tuple=(); + adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE, ) return DynamicNUTS{typeof(adtype),space,typeof(spl)}(spl, adtype) end Turing.externalsampler(spl::DynamicHMC.NUTS) = DynamicNUTS(spl) -DynamicPPL.getspace(::DynamicNUTS{<:Any, space}) where {space} = space +DynamicPPL.getspace(::DynamicNUTS{<:Any,space}) where {space} = space """ DynamicNUTSState @@ -59,14 +58,16 @@ struct DynamicNUTSState{L,V<:DynamicPPL.AbstractVarInfo,C,M,S} stepsize::S end -DynamicPPL.initialsampler(::DynamicPPL.Sampler{<:DynamicNUTS}) = DynamicPPL.SampleFromUniform() +function DynamicPPL.initialsampler(::DynamicPPL.Sampler{<:DynamicNUTS}) + return DynamicPPL.SampleFromUniform() +end function DynamicPPL.initialstep( rng::Random.AbstractRNG, model::DynamicPPL.Model, spl::DynamicPPL.Sampler{<:DynamicNUTS}, vi::DynamicPPL.AbstractVarInfo; - kwargs... + kwargs..., ) # Ensure that initial sample is in unconstrained space. if !DynamicPPL.islinked(vi, spl) @@ -75,15 +76,13 @@ function DynamicPPL.initialstep( end # Define log-density function. 
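Reviewer note: for readers unfamiliar with the `ext/TuringDynamicHMCExt.jl` extension being reformatted here, the hunk keeps both `DynamicNUTS` and the `Turing.externalsampler(spl::DynamicHMC.NUTS)` hook, so user code along the lines of the sketch below should continue to work. The toy model, its data, and the draw count are illustrative, not taken from the diff.

```julia
using Turing, DynamicHMC

# A toy model; the name `demo` and the observation are illustrative.
@model function demo(x)
    s ~ InverseGamma(2, 3)
    m ~ Normal(0, sqrt(s))
    x ~ Normal(m, sqrt(s))
end

# `externalsampler` wraps DynamicHMC's NUTS into the `DynamicNUTS` algorithm above.
chain = sample(demo(1.5), externalsampler(DynamicHMC.NUTS()), 1_000)
```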
- ℓ = LogDensityProblemsAD.ADgradient(Turing.LogDensityFunction(vi, model, spl, DynamicPPL.DefaultContext())) + ℓ = LogDensityProblemsAD.ADgradient( + Turing.LogDensityFunction(vi, model, spl, DynamicPPL.DefaultContext()) + ) # Perform initial step. results = DynamicHMC.mcmc_keep_warmup( - rng, - ℓ, - 0; - initialization = (q = vi[spl],), - reporter = DynamicHMC.NoProgressReport(), + rng, ℓ, 0; initialization=(q=vi[spl],), reporter=DynamicHMC.NoProgressReport() ) steps = DynamicHMC.mcmc_steps(results.sampling_logdensity, results.final_warmup_state) Q, _ = DynamicHMC.mcmc_next_step(steps, results.final_warmup_state.Q) @@ -104,18 +103,12 @@ function AbstractMCMC.step( model::DynamicPPL.Model, spl::DynamicPPL.Sampler{<:DynamicNUTS}, state::DynamicNUTSState; - kwargs... + kwargs..., ) # Compute next sample. vi = state.vi ℓ = state.logdensity - steps = DynamicHMC.mcmc_steps( - rng, - spl.alg.sampler, - state.metric, - ℓ, - state.stepsize, - ) + steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize) Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache) # Update the variables. diff --git a/ext/TuringOptimExt.jl b/ext/TuringOptimExt.jl index c30078024..3d31af6f2 100644 --- a/ext/TuringOptimExt.jl +++ b/ext/TuringOptimExt.jl @@ -1,106 +1,15 @@ module TuringOptimExt if isdefined(Base, :get_extension) - import Turing - import Turing: Distributions, DynamicPPL, ForwardDiff, NamedArrays, Printf, Accessors, Statistics, StatsAPI, StatsBase - import Optim + using Turing: Turing + import Turing: DynamicPPL, NamedArrays, Accessors, Optimisation + using Optim: Optim else import ..Turing - import ..Turing: Distributions, DynamicPPL, ForwardDiff, NamedArrays, Printf, Accessors, Statistics, StatsAPI, StatsBase + import ..Turing: DynamicPPL, NamedArrays, Accessors, Optimisation import ..Optim end -""" - ModeResult{ - V<:NamedArrays.NamedArray, - M<:NamedArrays.NamedArray, - O<:Optim.MultivariateOptimizationResults, - S<:NamedArrays.NamedArray - } - -A wrapper struct to store various results from a MAP or MLE estimation. -""" -struct ModeResult{ - V<:NamedArrays.NamedArray, - O<:Optim.MultivariateOptimizationResults, - M<:Turing.OptimLogDensity -} <: StatsBase.StatisticalModel - "A vector with the resulting point estimates." - values::V - "The stored Optim.jl results." - optim_result::O - "The final log likelihood or log joint, depending on whether `MAP` or `MLE` was run." - lp::Float64 - "The evaluation function used to calculate the output." - f::M -end -############################# -# Various StatsBase methods # -############################# - - - -function Base.show(io::IO, ::MIME"text/plain", m::ModeResult) - print(io, "ModeResult with maximized lp of ") - Printf.@printf(io, "%.2f", m.lp) - println(io) - show(io, m.values) -end - -function Base.show(io::IO, m::ModeResult) - show(io, m.values.array) -end - -function StatsBase.coeftable(m::ModeResult; level::Real=0.95) - # Get columns for coeftable. - terms = string.(StatsBase.coefnames(m)) - estimates = m.values.array[:, 1] - stderrors = StatsBase.stderror(m) - zscore = estimates ./ stderrors - p = map(z -> StatsAPI.pvalue(Distributions.Normal(), z; tail=:both), zscore) - - # Confidence interval (CI) - q = Statistics.quantile(Distributions.Normal(), (1 + level) / 2) - ci_low = estimates .- q .* stderrors - ci_high = estimates .+ q .* stderrors - - level_ = 100*level - level_percentage = isinteger(level_) ? Int(level_) : level_ - - StatsBase.CoefTable( - [estimates, stderrors, zscore, p, ci_low, ci_high], - ["Coef.", "Std. 
Error", "z", "Pr(>|z|)", "Lower $(level_percentage)%", "Upper $(level_percentage)%"], - terms) -end - -function StatsBase.informationmatrix(m::ModeResult; hessian_function=ForwardDiff.hessian, kwargs...) - # Calculate Hessian and information matrix. - - # Convert the values to their unconstrained states to make sure the - # Hessian is computed with respect to the untransformed parameters. - linked = DynamicPPL.istrans(m.f.varinfo) - if linked - m = Accessors.@set m.f.varinfo = DynamicPPL.invlink!!(m.f.varinfo, m.f.model) - end - - # Calculate the Hessian, which is the information matrix because the negative of the log likelihood was optimized - varnames = StatsBase.coefnames(m) - info = hessian_function(m.f, m.values.array[:, 1]) - - # Link it back if we invlinked it. - if linked - m = Accessors.@set m.f.varinfo = DynamicPPL.link!!(m.f.varinfo, m.f.model) - end - - return NamedArrays.NamedArray(info, (varnames, varnames)) -end - -StatsBase.coef(m::ModeResult) = m.values -StatsBase.coefnames(m::ModeResult) = names(m.values)[1] -StatsBase.params(m::ModeResult) = StatsBase.coefnames(m) -StatsBase.vcov(m::ModeResult) = inv(StatsBase.informationmatrix(m)) -StatsBase.loglikelihood(m::ModeResult) = m.lp - #################### # Optim.jl methods # #################### @@ -125,37 +34,54 @@ mle = optimize(model, MLE()) mle = optimize(model, MLE(), NelderMead()) ``` """ -function Optim.optimize(model::DynamicPPL.Model, ::Turing.MLE, options::Optim.Options=Optim.Options(); kwargs...) - ctx = Turing.OptimizationContext(DynamicPPL.LikelihoodContext()) - f = Turing.OptimLogDensity(model, ctx) +function Optim.optimize( + model::DynamicPPL.Model, + ::Optimisation.MLE, + options::Optim.Options=Optim.Options(); + kwargs..., +) + ctx = Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext()) + f = Optimisation.OptimLogDensity(model, ctx) init_vals = DynamicPPL.getparams(f) optimizer = Optim.LBFGS() return _mle_optimize(model, init_vals, optimizer, options; kwargs...) end -function Optim.optimize(model::DynamicPPL.Model, ::Turing.MLE, init_vals::AbstractArray, options::Optim.Options=Optim.Options(); kwargs...) +function Optim.optimize( + model::DynamicPPL.Model, + ::Optimisation.MLE, + init_vals::AbstractArray, + options::Optim.Options=Optim.Options(); + kwargs..., +) optimizer = Optim.LBFGS() return _mle_optimize(model, init_vals, optimizer, options; kwargs...) end -function Optim.optimize(model::DynamicPPL.Model, ::Turing.MLE, optimizer::Optim.AbstractOptimizer, options::Optim.Options=Optim.Options(); kwargs...) - ctx = Turing.OptimizationContext(DynamicPPL.LikelihoodContext()) - f = Turing.OptimLogDensity(model, ctx) +function Optim.optimize( + model::DynamicPPL.Model, + ::Optimisation.MLE, + optimizer::Optim.AbstractOptimizer, + options::Optim.Options=Optim.Options(); + kwargs..., +) + ctx = Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext()) + f = Optimisation.OptimLogDensity(model, ctx) init_vals = DynamicPPL.getparams(f) return _mle_optimize(model, init_vals, optimizer, options; kwargs...) end function Optim.optimize( model::DynamicPPL.Model, - ::Turing.MLE, + ::Optimisation.MLE, init_vals::AbstractArray, optimizer::Optim.AbstractOptimizer, options::Optim.Options=Optim.Options(); - kwargs... + kwargs..., ) return _mle_optimize(model, init_vals, optimizer, options; kwargs...) end function _mle_optimize(model::DynamicPPL.Model, args...; kwargs...) 
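Reviewer note: the `ext/TuringOptimExt.jl` hunk moves the MLE/MAP machinery into `Turing.Optimisation` but keeps the Optim.jl entry points, matching the usage shown in its docstrings. A hedged sketch of that interface follows; the toy model, data, and choice of `NelderMead` are illustrative.

```julia
using Turing, Optim

# Illustrative model and data.
@model function demo(x)
    m ~ Normal(0, 1)
    x .~ Normal(m, 1)
end

model = demo(randn(100) .+ 0.7)

mle_est = optimize(model, MLE())                      # default LBFGS, per the methods above
map_est = optimize(model, MAP(), Optim.NelderMead())  # explicit optimizer choice
```

The export list further down also adds `maximum_likelihood` and `maximum_a_posteriori`; their signatures are not part of this hunk, so they are only mentioned here rather than demonstrated.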
- ctx = Turing.OptimizationContext(DynamicPPL.LikelihoodContext()) - return _optimize(model, Turing.OptimLogDensity(model, ctx), args...; kwargs...) + ctx = Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext()) + return _optimize(model, Optimisation.OptimLogDensity(model, ctx), args...; kwargs...) end """ @@ -178,37 +104,54 @@ map_est = optimize(model, MAP()) map_est = optimize(model, MAP(), NelderMead()) ``` """ -function Optim.optimize(model::DynamicPPL.Model, ::Turing.MAP, options::Optim.Options=Optim.Options(); kwargs...) - ctx = Turing.OptimizationContext(DynamicPPL.DefaultContext()) - f = Turing.OptimLogDensity(model, ctx) +function Optim.optimize( + model::DynamicPPL.Model, + ::Optimisation.MAP, + options::Optim.Options=Optim.Options(); + kwargs..., +) + ctx = Optimisation.OptimizationContext(DynamicPPL.DefaultContext()) + f = Optimisation.OptimLogDensity(model, ctx) init_vals = DynamicPPL.getparams(f) optimizer = Optim.LBFGS() return _map_optimize(model, init_vals, optimizer, options; kwargs...) end -function Optim.optimize(model::DynamicPPL.Model, ::Turing.MAP, init_vals::AbstractArray, options::Optim.Options=Optim.Options(); kwargs...) +function Optim.optimize( + model::DynamicPPL.Model, + ::Optimisation.MAP, + init_vals::AbstractArray, + options::Optim.Options=Optim.Options(); + kwargs..., +) optimizer = Optim.LBFGS() return _map_optimize(model, init_vals, optimizer, options; kwargs...) end -function Optim.optimize(model::DynamicPPL.Model, ::Turing.MAP, optimizer::Optim.AbstractOptimizer, options::Optim.Options=Optim.Options(); kwargs...) - ctx = Turing.OptimizationContext(DynamicPPL.DefaultContext()) - f = Turing.OptimLogDensity(model, ctx) +function Optim.optimize( + model::DynamicPPL.Model, + ::Optimisation.MAP, + optimizer::Optim.AbstractOptimizer, + options::Optim.Options=Optim.Options(); + kwargs..., +) + ctx = Optimisation.OptimizationContext(DynamicPPL.DefaultContext()) + f = Optimisation.OptimLogDensity(model, ctx) init_vals = DynamicPPL.getparams(f) return _map_optimize(model, init_vals, optimizer, options; kwargs...) end function Optim.optimize( model::DynamicPPL.Model, - ::Turing.MAP, + ::Optimisation.MAP, init_vals::AbstractArray, optimizer::Optim.AbstractOptimizer, options::Optim.Options=Optim.Options(); - kwargs... + kwargs..., ) return _map_optimize(model, init_vals, optimizer, options; kwargs...) end function _map_optimize(model::DynamicPPL.Model, args...; kwargs...) - ctx = Turing.OptimizationContext(DynamicPPL.DefaultContext()) - return _optimize(model, Turing.OptimLogDensity(model, ctx), args...; kwargs...) + ctx = Optimisation.OptimizationContext(DynamicPPL.DefaultContext()) + return _optimize(model, Optimisation.OptimLogDensity(model, ctx), args...; kwargs...) end """ @@ -218,12 +161,12 @@ Estimate a mode, i.e., compute a MLE or MAP estimate. """ function _optimize( model::DynamicPPL.Model, - f::Turing.OptimLogDensity, + f::Optimisation.OptimLogDensity, init_vals::AbstractArray=DynamicPPL.getparams(f), optimizer::Optim.AbstractOptimizer=Optim.LBFGS(), options::Optim.Options=Optim.Options(), args...; - kwargs... + kwargs..., ) # Convert the initial values, since it is assumed that users provide them # in the constrained space. @@ -236,25 +179,19 @@ function _optimize( # Warn the user if the optimization did not converge. if !Optim.converged(M) - @warn "Optimization did not converge! You may need to correct your model or adjust the Optim parameters." + @warn """ + Optimization did not converge! 
You may need to correct your model or adjust the + Optim parameters. + """ end - # Get the VarInfo at the MLE/MAP point, and run the model to ensure - # correct dimensionality. + # Get the optimum in unconstrained space. `getparams` does the invlinking. f = Accessors.@set f.varinfo = DynamicPPL.unflatten(f.varinfo, M.minimizer) - f = Accessors.@set f.varinfo = DynamicPPL.invlink(f.varinfo, model) - vals = DynamicPPL.getparams(f) - f = Accessors.@set f.varinfo = DynamicPPL.link(f.varinfo, model) - - # Make one transition to get the parameter names. vns_vals_iter = Turing.Inference.getparams(model, f.varinfo) varnames = map(Symbol ∘ first, vns_vals_iter) vals = map(last, vns_vals_iter) - - # Store the parameters and their names in an array. vmat = NamedArrays.NamedArray(vals, varnames) - - return ModeResult(vmat, M, -M.minimum, f) + return Optimisation.ModeResult(vmat, M, -M.minimum, f) end end # module diff --git a/src/Turing.jl b/src/Turing.jl index 5ef60aca5..07cdac75d 100644 --- a/src/Turing.jl +++ b/src/Turing.jl @@ -5,20 +5,21 @@ using DistributionsAD, Bijectors, StatsFuns, SpecialFunctions using Statistics, LinearAlgebra using Libtask @reexport using Distributions, MCMCChains, Libtask, AbstractMCMC, Bijectors +using Compat: pkgversion -import AdvancedVI +using AdvancedVI: AdvancedVI using DynamicPPL: DynamicPPL, LogDensityFunction import DynamicPPL: getspace, NoDist, NamedDist -import LogDensityProblems -import NamedArrays -import Accessors -import StatsAPI -import StatsBase +using LogDensityProblems: LogDensityProblems +using NamedArrays: NamedArrays +using Accessors: Accessors +using StatsAPI: StatsAPI +using StatsBase: StatsBase using Accessors: Accessors -import Printf -import Random +using Printf: Printf +using Random: Random using ADTypes: ADTypes @@ -61,91 +62,85 @@ include("deprecated.jl") # to be removed in the next minor version release # Exports # ########### # `using` statements for stuff to re-export -using DynamicPPL: pointwise_loglikelihoods, generated_quantities, logprior, logjoint, condition, decondition, fix, unfix, conditioned +using DynamicPPL: + pointwise_loglikelihoods, + generated_quantities, + logprior, + logjoint, + condition, + decondition, + fix, + unfix, + conditioned using StatsBase: predict using Bijectors: ordered using OrderedCollections: OrderedDict # Turing essentials - modelling macros and inference algorithms -export @model, # modelling - @varname, - @submodel, - DynamicPPL, - - Prior, # Sampling from the prior - - MH, # classic sampling - Emcee, - ESS, - Gibbs, - GibbsConditional, - - HMC, # Hamiltonian-like sampling - SGLD, - SGHMC, - HMCDA, - NUTS, - DynamicNUTS, - ANUTS, - - PolynomialStepsize, - - IS, # particle-based sampling - SMC, - CSMC, - PG, - - vi, # variational inference - ADVI, - - sample, # inference - @logprob_str, - @prob_str, - externalsampler, - - AutoForwardDiff, # ADTypes - AutoReverseDiff, - AutoZygote, - AutoEnzyme, - AutoTracker, - AutoTapir, - - setprogress!, # debugging - - Flat, - FlatPos, - BinomialLogit, - BernoulliLogit, # Part of Distributions >= 0.25.77 - OrderedLogistic, - LogPoisson, - filldist, - arraydist, - - NamedDist, # Exports from DynamicPPL - predict, - pointwise_loglikelihoods, - elementwise_loglikelihoods, - generated_quantities, - logprior, - logjoint, - LogDensityFunction, - - condition, - decondition, - fix, - unfix, - conditioned, - OrderedDict, - - ordered, # Exports from Bijectors - - constrained_space, # optimisation interface - MAP, - MLE, - get_parameter_bounds, - optim_objective, - 
optim_function, - optim_problem +export @model, # modelling + @varname, + @submodel, + DynamicPPL, + Prior, # Sampling from the prior + MH, # classic sampling + Emcee, + ESS, + Gibbs, + GibbsConditional, + HMC, # Hamiltonian-like sampling + SGLD, + SGHMC, + HMCDA, + NUTS, + PolynomialStepsize, + IS, # particle-based sampling + SMC, + CSMC, + PG, + vi, # variational inference + ADVI, + sample, # inference + @logprob_str, + @prob_str, + externalsampler, + AutoForwardDiff, # ADTypes + AutoReverseDiff, + AutoZygote, + AutoEnzyme, + AutoTracker, + setprogress!, # debugging + Flat, + FlatPos, + BinomialLogit, + BernoulliLogit, # Part of Distributions >= 0.25.77 + OrderedLogistic, + LogPoisson, + filldist, + arraydist, + NamedDist, # Exports from DynamicPPL + predict, + pointwise_loglikelihoods, + generated_quantities, + logprior, + logjoint, + LogDensityFunction, + condition, + decondition, + fix, + unfix, + conditioned, + OrderedDict, + ordered, # Exports from Bijectors + maximum_a_posteriori, + maximum_likelihood, + # The MAP and MLE exports are only needed for the Optim.jl interface. + MAP, + MLE + +# AutoTapir is only supported by ADTypes v1.0 and above. +@static if VERSION >= v"1.10" && pkgversion(ADTypes) >= v"1" + export AutoTapir +end if !isdefined(Base, :get_extension) using Requires @@ -153,9 +148,13 @@ end function __init__() @static if !isdefined(Base, :get_extension) - @require Optim="429524aa-4258-5aef-a3af-852621145aeb" include("../ext/TuringOptimExt.jl") - @require DynamicHMC="bbc10e6e-7c05-544b-b16e-64fede858acb" include("../ext/TuringDynamicHMCExt.jl") - end + @require Optim = "429524aa-4258-5aef-a3af-852621145aeb" include( + "../ext/TuringOptimExt.jl" + ) + @require DynamicHMC = "bbc10e6e-7c05-544b-b16e-64fede858acb" include( + "../ext/TuringDynamicHMCExt.jl" + ) + end end end diff --git a/src/deprecated.jl b/src/deprecated.jl index 76f5854cf..34305d442 100644 --- a/src/deprecated.jl +++ b/src/deprecated.jl @@ -1,23 +1,39 @@ export setadbackend, setchunksize, setadsafe -function setadbackend(::Union{Symbol, Val}) - Base.depwarn("`ADBACKEND` and `setbackend` are deprecated. Please specify the chunk size directly in the sampler constructor, e.g., `HMC(0.1, 5; adtype=AutoForwardDiff())`.\n This function has no effects.", :setbackend; force=true) - nothing +function setadbackend(::Union{Symbol,Val}) + Base.depwarn( + "`ADBACKEND` and `setbackend` are deprecated. Please specify the chunk size directly in the sampler constructor, e.g., `HMC(0.1, 5; adtype=AutoForwardDiff())`.\n This function has no effects.", + :setbackend; + force=true, + ) + return nothing end function setchunksize(::Int) - Base.depwarn("`CHUNKSIZE` and `setchunksize` are deprecated. Please specify the chunk size directly in the sampler constructor, e.g., `HMC(0.1, 5; adtype=AutoForwardDiff())`.\n This function has no effects.", :setchunksize; force=true) - nothing + Base.depwarn( + "`CHUNKSIZE` and `setchunksize` are deprecated. Please specify the chunk size directly in the sampler constructor, e.g., `HMC(0.1, 5; adtype=AutoForwardDiff())`.\n This function has no effects.", + :setchunksize; + force=true, + ) + return nothing end -function setrdcache(::Union{Bool, Val}) - Base.depwarn("`RDCACHE` and `setrdcache` are deprecated. 
Please specify if you wish to use compiled tape for ReverseDiff directly in the sampler constructor, e.g., `HMC(0.1, 5; adtype=AutoReverseDiff(false))`.\n This function has no effects.", :setrdcache; force=true) - nothing +function setrdcache(::Union{Bool,Val}) + Base.depwarn( + "`RDCACHE` and `setrdcache` are deprecated. Please specify if you wish to use compiled tape for ReverseDiff directly in the sampler constructor, e.g., `HMC(0.1, 5; adtype=AutoReverseDiff(false))`.\n This function has no effects.", + :setrdcache; + force=true, + ) + return nothing end function setadsafe(::Bool) - Base.depwarn("`ADSAFE` and `setadsafe` are outdated and no longer in use.", :setadsafe; force=true) - nothing + Base.depwarn( + "`ADSAFE` and `setadsafe` are outdated and no longer in use.", + :setadsafe; + force=true, + ) + return nothing end Base.@deprecate_binding Core Essential false diff --git a/src/essential/Essential.jl b/src/essential/Essential.jl index dcad7bfbf..6df98fe94 100644 --- a/src/essential/Essential.jl +++ b/src/essential/Essential.jl @@ -11,37 +11,27 @@ using Bijectors: PDMatDistribution using AdvancedVI using StatsFuns: logsumexp, softmax @reexport using DynamicPPL -using ADTypes: ADTypes, AutoForwardDiff, AutoEnzyme, AutoTracker, AutoReverseDiff, AutoZygote +using ADTypes: + ADTypes, AutoForwardDiff, AutoEnzyme, AutoTracker, AutoReverseDiff, AutoZygote -import AdvancedPS +using AdvancedPS: AdvancedPS include("container.jl") -export @model, - @varname, - generate_observe, - translate_tilde!, - get_vars, - get_data, - get_default_values, - ParticleContainer, - Particle, - Trace, - fork, - forkr, - current_trace, - getweights, - getweight, - effectiveSampleSize, - sweep!, - ResampleWithESSThreshold, - AutoForwardDiff, - AutoEnzyme, - AutoTracker, - AutoZygote, - AutoReverseDiff, - value, - @logprob_str, - @prob_str +export @model, + @varname, + AutoEnzyme, + AutoForwardDiff, + AutoTracker, + AutoZygote, + AutoReverseDiff, + @logprob_str, + @prob_str + +# AutoTapir is only supported by ADTypes v1.0 and above. +@static if VERSION >= v"1.10" && pkgversion(ADTypes) >= v"1" + using ADTypes: AutoTapir + export AutoTapir +end end # module diff --git a/src/essential/container.jl b/src/essential/container.jl index 483270c9f..bd4e21f6b 100644 --- a/src/essential/container.jl +++ b/src/essential/container.jl @@ -1,4 +1,5 @@ -struct TracedModel{S<:AbstractSampler,V<:AbstractVarInfo,M<:Model,E<:Tuple} <: AdvancedPS.AbstractGenericModel +struct TracedModel{S<:AbstractSampler,V<:AbstractVarInfo,M<:Model,E<:Tuple} <: + AdvancedPS.AbstractGenericModel model::M sampler::S varinfo::V @@ -14,26 +15,24 @@ function TracedModel( context = SamplingContext(rng, sampler, DefaultContext()) args, kwargs = DynamicPPL.make_evaluate_args_and_kwargs(model, varinfo, context) if kwargs !== nothing && !isempty(kwargs) - error("Sampling with `$(sampler.alg)` does not support models with keyword arguments. See issue #2007 for more details.") + error( + "Sampling with `$(sampler.alg)` does not support models with keyword arguments. See issue #2007 for more details.", + ) end return TracedModel{AbstractSampler,AbstractVarInfo,Model,Tuple}( - model, - sampler, - varinfo, - (model.f, args...) + model, sampler, varinfo, (model.f, args...) 
) end function AdvancedPS.advance!( - trace::AdvancedPS.Trace{<:AdvancedPS.LibtaskModel{<:TracedModel}}, - isref::Bool=false + trace::AdvancedPS.Trace{<:AdvancedPS.LibtaskModel{<:TracedModel}}, isref::Bool=false ) # Make sure we load/reset the rng in the new replaying mechanism DynamicPPL.increment_num_produce!(trace.model.f.varinfo) isref ? AdvancedPS.load_state!(trace.rng) : AdvancedPS.save_state!(trace.rng) score = consume(trace.model.ctask) if score === nothing - return + return nothing else return score + DynamicPPL.getlogp(trace.model.f.varinfo) end @@ -54,7 +53,9 @@ function AdvancedPS.reset_logprob!(trace::TracedModel) return trace end -function AdvancedPS.update_rng!(trace::AdvancedPS.Trace{<:AdvancedPS.LibtaskModel{<:TracedModel}}) +function AdvancedPS.update_rng!( + trace::AdvancedPS.Trace{<:AdvancedPS.LibtaskModel{<:TracedModel}} +) # Extract the `args`. args = trace.model.ctask.args # From `args`, extract the `SamplingContext`, which contains the RNG. diff --git a/src/mcmc/Inference.jl b/src/mcmc/Inference.jl index 311632a3b..7a0c541da 100644 --- a/src/mcmc/Inference.jl +++ b/src/mcmc/Inference.jl @@ -38,7 +38,6 @@ import StatsBase: predict export InferenceAlgorithm, Hamiltonian, - GibbsComponent, StaticHamiltonian, AdaptiveHamiltonian, SampleFromUniform, @@ -54,7 +53,6 @@ export InferenceAlgorithm, SGHMC, HMCDA, NUTS, # Hamiltonian-like sampling - DynamicNUTS, IS, SMC, CSMC, @@ -318,7 +316,7 @@ Return a named tuple of parameters. getparams(model, t) = t.θ function getparams(model::DynamicPPL.Model, vi::DynamicPPL.VarInfo) # NOTE: In the past, `invlink(vi, model)` + `values_as(vi, OrderedDict)` was used. - # Unfortunately, using `invlink` can cause issues in scenarios where the constraints + # Unfortunately, using `invlink` can cause issues in scenarios where the constraints # of the parameters change depending on the realizations. Hence we have to use # `values_as_in_model`, which re-runs the model and extracts the parameters # as they are seen in the model, i.e. in the constrained space. Moreover, diff --git a/src/mcmc/emcee.jl b/src/mcmc/emcee.jl index 49b9aab57..4ebe49ea5 100644 --- a/src/mcmc/emcee.jl +++ b/src/mcmc/emcee.jl @@ -2,7 +2,7 @@ ### Sampler states ### -struct Emcee{space, E<:AMH.Ensemble} <: InferenceAlgorithm +struct Emcee{space,E<:AMH.Ensemble} <: InferenceAlgorithm ensemble::E end @@ -12,7 +12,7 @@ function Emcee(n_walkers::Int, stretch_length=2.0) # ensemble sampling. prop = AMH.StretchProposal(nothing, stretch_length) ensemble = AMH.Ensemble(n_walkers, prop) - return Emcee{(), typeof(ensemble)}(ensemble) + return Emcee{(),typeof(ensemble)}(ensemble) end struct EmceeState{V<:AbstractVarInfo,S} @@ -24,9 +24,9 @@ function AbstractMCMC.step( rng::Random.AbstractRNG, model::Model, spl::Sampler{<:Emcee}; - resume_from = nothing, - initial_params = nothing, - kwargs... + resume_from=nothing, + initial_params=nothing, + kwargs..., ) if resume_from !== nothing state = loadstate(resume_from) @@ -39,9 +39,8 @@ function AbstractMCMC.step( # Update the parameters if provided. 
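Reviewer note: the `src/mcmc/emcee.jl` hunk is formatting-only, but the `initial_params` check it touches is easy to trip over, so a hedged usage sketch may help. The toy model and walker count are illustrative, and the vector-of-vectors shape for `initial_params` is an assumption inferred from the per-walker length check below.

```julia
using Turing

@model function demo(x)
    m ~ Normal(0, 1)
    x ~ Normal(m, 1)
end

n_walkers = 10

# One set of initial parameters per walker (see the ArgumentError below if the count mismatches).
init = [[randn()] for _ in 1:n_walkers]
chain = sample(demo(0.5), Emcee(n_walkers, 2.0), 500; initial_params=init)
```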
if initial_params !== nothing - length(initial_params) == n || throw( - ArgumentError("initial parameters have to be specified for each walker") - ) + length(initial_params) == n || + throw(ArgumentError("initial parameters have to be specified for each walker")) vis = map(vis, initial_params) do vi, init vi = DynamicPPL.initialize_parameters!!(vi, init, spl, model) @@ -59,18 +58,14 @@ function AbstractMCMC.step( map(vis) do vi vi = DynamicPPL.link!!(vi, spl, model) AMH.Transition(vi[spl], getlogp(vi), false) - end + end, ) return transition, state end function AbstractMCMC.step( - rng::AbstractRNG, - model::Model, - spl::Sampler{<:Emcee}, - state::EmceeState; - kwargs... + rng::AbstractRNG, model::Model, spl::Sampler{<:Emcee}, state::EmceeState; kwargs... ) # Generate a log joint function. vi = state.vi @@ -98,11 +93,11 @@ function AbstractMCMC.bundle_samples( spl::Sampler{<:Emcee}, state::EmceeState, chain_type::Type{MCMCChains.Chains}; - save_state = false, - sort_chain = false, - discard_initial = 0, - thinning = 1, - kwargs... + save_state=false, + sort_chain=false, + discard_initial=0, + thinning=1, + kwargs..., ) # Convert transitions to array format. # Also retrieve the variable names. @@ -134,9 +129,9 @@ function AbstractMCMC.bundle_samples( le = getlogevidence(samples, state, spl) # Set up the info tuple. - info = (varname_to_symbol = OrderedDict(zip(varnames, varnames_symbol)),) + info = (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),) if save_state - info = merge(info, (model = model, sampler = spl, samplerstate = state)) + info = merge(info, (model=model, sampler=spl, samplerstate=state)) end # Concretize the array before giving it to MCMCChains. @@ -146,7 +141,7 @@ function AbstractMCMC.bundle_samples( chain = MCMCChains.Chains( parray, nms, - (internals = extra_params,); + (internals=extra_params,); evidence=le, info=info, start=discard_initial + 1, diff --git a/src/mcmc/ess.jl b/src/mcmc/ess.jl index a12d683f5..2910a7efd 100644 --- a/src/mcmc/ess.jl +++ b/src/mcmc/ess.jl @@ -27,11 +27,7 @@ ESS(space::Symbol) = ESS{(space,)}() # always accept in the first step function DynamicPPL.initialstep( - rng::AbstractRNG, - model::Model, - spl::Sampler{<:ESS}, - vi::AbstractVarInfo; - kwargs... + rng::AbstractRNG, model::Model, spl::Sampler{<:ESS}, vi::AbstractVarInfo; kwargs... ) # Sanity check vns = _getvns(vi, spl) @@ -47,11 +43,7 @@ function DynamicPPL.initialstep( end function AbstractMCMC.step( - rng::AbstractRNG, - model::Model, - spl::Sampler{<:ESS}, - vi::AbstractVarInfo; - kwargs... + rng::AbstractRNG, model::Model, spl::Sampler{<:ESS}, vi::AbstractVarInfo; kwargs... 
) # obtain previous sample f = vi[spl] @@ -64,7 +56,8 @@ function AbstractMCMC.step( sample, state = AbstractMCMC.step( rng, EllipticalSliceSampling.ESSModel( - ESSPrior(model, spl, vi), Turing.LogDensityFunction(vi, model, spl, DynamicPPL.DefaultContext()), + ESSPrior(model, spl, vi), + Turing.LogDensityFunction(vi, model, spl, DynamicPPL.DefaultContext()), ), EllipticalSliceSampling.ESS(), oldstate, @@ -83,10 +76,10 @@ struct ESSPrior{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo,T} sampler::S varinfo::V μ::T - - function ESSPrior{M,S,V}(model::M, sampler::S, varinfo::V) where { - M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo - } + + function ESSPrior{M,S,V}( + model::M, sampler::S, varinfo::V + ) where {M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo} vns = _getvns(varinfo, sampler) μ = mapreduce(vcat, vns[1]) do vn dist = getdist(varinfo, vn) @@ -99,9 +92,7 @@ struct ESSPrior{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo,T} end function ESSPrior(model::Model, sampler::Sampler{<:ESS}, varinfo::AbstractVarInfo) - return ESSPrior{typeof(model),typeof(sampler),typeof(varinfo)}( - model, sampler, varinfo, - ) + return ESSPrior{typeof(model),typeof(sampler),typeof(varinfo)}(model, sampler, varinfo) end # Ensure that the prior is a Gaussian distribution (checked in the constructor) @@ -124,7 +115,9 @@ end Distributions.mean(p::ESSPrior) = p.μ # Evaluate log-likelihood of proposals -const ESSLogLikelihood{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo} = Turing.LogDensityFunction{V,M,<:DynamicPPL.SamplingContext{<:S}} +const ESSLogLikelihood{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo} = Turing.LogDensityFunction{ + V,M,<:DynamicPPL.SamplingContext{<:S} +} function (ℓ::ESSLogLikelihood)(f::AbstractVector) sampler = DynamicPPL.getsampler(ℓ) @@ -133,7 +126,9 @@ function (ℓ::ESSLogLikelihood)(f::AbstractVector) return getlogp(varinfo) end -function DynamicPPL.tilde_assume(rng::Random.AbstractRNG, ctx::DefaultContext, sampler::Sampler{<:ESS}, right, vn, vi) +function DynamicPPL.tilde_assume( + rng::Random.AbstractRNG, ctx::DefaultContext, sampler::Sampler{<:ESS}, right, vn, vi +) return if inspace(vn, sampler) DynamicPPL.tilde_assume(rng, LikelihoodContext(), SampleFromPrior(), right, vn, vi) else @@ -141,19 +136,33 @@ function DynamicPPL.tilde_assume(rng::Random.AbstractRNG, ctx::DefaultContext, s end end -function DynamicPPL.tilde_observe(ctx::DefaultContext, sampler::Sampler{<:ESS}, right, left, vi) +function DynamicPPL.tilde_observe( + ctx::DefaultContext, sampler::Sampler{<:ESS}, right, left, vi +) return DynamicPPL.tilde_observe(ctx, SampleFromPrior(), right, left, vi) end -function DynamicPPL.dot_tilde_assume(rng::Random.AbstractRNG, ctx::DefaultContext, sampler::Sampler{<:ESS}, right, left, vns, vi) +function DynamicPPL.dot_tilde_assume( + rng::Random.AbstractRNG, + ctx::DefaultContext, + sampler::Sampler{<:ESS}, + right, + left, + vns, + vi, +) # TODO: Or should we do `all(Base.Fix2(inspace, sampler), vns)`? 
return if inspace(first(vns), sampler) - DynamicPPL.dot_tilde_assume(rng, LikelihoodContext(), SampleFromPrior(), right, left, vns, vi) + DynamicPPL.dot_tilde_assume( + rng, LikelihoodContext(), SampleFromPrior(), right, left, vns, vi + ) else DynamicPPL.dot_tilde_assume(rng, ctx, SampleFromPrior(), right, left, vns, vi) end end -function DynamicPPL.dot_tilde_observe(ctx::DefaultContext, sampler::Sampler{<:ESS}, right, left, vi) +function DynamicPPL.dot_tilde_observe( + ctx::DefaultContext, sampler::Sampler{<:ESS}, right, left, vi +) return DynamicPPL.dot_tilde_observe(ctx, SampleFromPrior(), right, left, vi) end diff --git a/src/mcmc/gibbs.jl b/src/mcmc/gibbs.jl index 2a7572ffa..815566437 100644 --- a/src/mcmc/gibbs.jl +++ b/src/mcmc/gibbs.jl @@ -2,7 +2,6 @@ ### Gibbs samplers / compositional samplers. ### - """ isgibbscomponent(alg) @@ -16,7 +15,7 @@ isgibbscomponent(::Hamiltonian) = true isgibbscomponent(::MH) = true isgibbscomponent(::PG) = true -const TGIBBS = Union{InferenceAlgorithm, GibbsConditional} +const TGIBBS = Union{InferenceAlgorithm,GibbsConditional} """ Gibbs(algs...) @@ -47,12 +46,15 @@ Tips: methods like Particle Gibbs. You can increase the effectiveness of particle sampling by including more particles in the particle sampler. """ -struct Gibbs{space, N, A<:NTuple{N, TGIBBS}, B<:NTuple{N, Int}} <: InferenceAlgorithm +struct Gibbs{space,N,A<:NTuple{N,TGIBBS},B<:NTuple{N,Int}} <: InferenceAlgorithm algs::A # component sampling algorithms iterations::B - function Gibbs{space, N, A, B}(algs::A, iterations::B) where {space, N, A<:NTuple{N, TGIBBS}, B<:NTuple{N, Int}} - all(isgibbscomponent, algs) || error("all algorithms have to support Gibbs sampling") - return new{space, N, A, B}(algs, iterations) + function Gibbs{space,N,A,B}( + algs::A, iterations::B + ) where {space,N,A<:NTuple{N,TGIBBS},B<:NTuple{N,Int}} + all(isgibbscomponent, algs) || + error("all algorithms have to support Gibbs sampling") + return new{space,N,A,B}(algs, iterations) end end @@ -61,19 +63,18 @@ function Gibbs(alg1::TGIBBS, algrest::Vararg{TGIBBS,N}) where {N} iterations = ntuple(Returns(1), Val(N + 1)) # obtain space for sampling algorithms space = Tuple(union(getspace.(algs)...)) - return Gibbs{space, N + 1, typeof(algs), typeof(iterations)}(algs, iterations) + return Gibbs{space,N + 1,typeof(algs),typeof(iterations)}(algs, iterations) end function Gibbs( - arg1::Tuple{<:TGIBBS,Int}, - argrest::Vararg{<:Tuple{<:TGIBBS,Int}, N}, + arg1::Tuple{<:TGIBBS,Int}, argrest::Vararg{<:Tuple{<:TGIBBS,Int},N} ) where {N} allargs = (arg1, argrest...) algs = map(first, allargs) iterations = map(last, allargs) # obtain space for sampling algorithms space = Tuple(union(getspace.(algs)...)) - return Gibbs{space, N + 1, typeof(algs), typeof(iterations)}(algs, iterations) + return Gibbs{space,N + 1,typeof(algs),typeof(iterations)}(algs, iterations) end """ @@ -110,14 +111,13 @@ Return an updated state, taking into account the variables sampled by other Gibb - `varinfo`: the variables, including the ones sampled by other Gibbs components. 
""" gibbs_state(model, sampler, state::AbstractVarInfo, varinfo::AbstractVarInfo) = varinfo -gibbs_state(model, sampler, state::PGState, varinfo::AbstractVarInfo) = PGState(varinfo, state.rng) +function gibbs_state(model, sampler, state::PGState, varinfo::AbstractVarInfo) + return PGState(varinfo, state.rng) +end # Update state in Gibbs sampling function gibbs_state( - model::Model, - spl::Sampler{<:Hamiltonian}, - state::HMCState, - varinfo::AbstractVarInfo, + model::Model, spl::Sampler{<:Hamiltonian}, state::HMCState, varinfo::AbstractVarInfo ) # Update hamiltonian θ_old = varinfo[spl] @@ -159,11 +159,7 @@ gibbs_rerun(prev_alg, ::PG) = false # Initialize the Gibbs sampler. function DynamicPPL.initialstep( - rng::AbstractRNG, - model::Model, - spl::Sampler{<:Gibbs}, - vi::AbstractVarInfo; - kwargs... + rng::AbstractRNG, model::Model, spl::Sampler{<:Gibbs}, vi::AbstractVarInfo; kwargs... ) # TODO: Technically this only works for `VarInfo` or `ThreadSafeVarInfo{<:VarInfo}`. # Should we enforce this? @@ -176,7 +172,7 @@ function DynamicPPL.initialstep( if i == 1 prev_alg = algs[end] else - prev_alg = algs[i-1] + prev_alg = algs[i - 1] end rerun = gibbs_rerun(prev_alg, alg) selector = DynamicPPL.Selector(Symbol(typeof(alg)), rerun) @@ -202,7 +198,11 @@ function DynamicPPL.initialstep( states = map(samplers) do local_spl # Recompute `vi.logp` if needed. if local_spl.selector.rerun - vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, local_spl))) + vi = last( + DynamicPPL.evaluate!!( + model, vi, DynamicPPL.SamplingContext(rng, local_spl) + ), + ) end # Compute initial state. @@ -223,11 +223,7 @@ end # Subsequent steps function AbstractMCMC.step( - rng::AbstractRNG, - model::Model, - spl::Sampler{<:Gibbs}, - state::GibbsState; - kwargs... + rng::AbstractRNG, model::Model, spl::Sampler{<:Gibbs}, state::GibbsState; kwargs... ) # Iterate through each of the samplers. vi = state.vi diff --git a/src/mcmc/gibbs_conditional.jl b/src/mcmc/gibbs_conditional.jl index 5dd1b1787..fda79315b 100644 --- a/src/mcmc/gibbs_conditional.jl +++ b/src/mcmc/gibbs_conditional.jl @@ -49,11 +49,11 @@ m = inverse_gdemo(x) sample(m, Gibbs(GibbsConditional(:λ, cond_λ), GibbsConditional(:m, cond_m)), 10) ``` """ -struct GibbsConditional{S, C} +struct GibbsConditional{S,C} conditional::C function GibbsConditional(sym::Symbol, conditional::C) where {C} - return new{sym, C}(conditional) + return new{sym,C}(conditional) end end @@ -64,7 +64,7 @@ function DynamicPPL.initialstep( model::Model, spl::Sampler{<:GibbsConditional}, vi::AbstractVarInfo; - kwargs... + kwargs..., ) return nothing, vi end @@ -74,7 +74,7 @@ function AbstractMCMC.step( model::Model, spl::Sampler{<:GibbsConditional}, vi::AbstractVarInfo; - kwargs... 
+ kwargs..., ) condvals = DynamicPPL.values_as(DynamicPPL.invlink(vi, model), NamedTuple) conddist = spl.alg.conditional(condvals) diff --git a/src/mcmc/hmc.jl b/src/mcmc/hmc.jl index c3c90eab3..f17bca2e7 100644 --- a/src/mcmc/hmc.jl +++ b/src/mcmc/hmc.jl @@ -61,13 +61,19 @@ sample(gdemo([1.5, 2]), HMC(0.1, 10), 1000) sample(gdemo([1.5, 2]), HMC(0.01, 10), 1000) ``` """ -struct HMC{AD, space, metricT <: AHMC.AbstractMetric} <: StaticHamiltonian +struct HMC{AD,space,metricT<:AHMC.AbstractMetric} <: StaticHamiltonian ϵ::Float64 # leapfrog step size n_leapfrog::Int # leapfrog step number adtype::AD end -function HMC(ϵ::Float64, n_leapfrog::Int, ::Type{metricT}, space::Tuple; adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE) where {metricT<:AHMC.AbstractMetric} +function HMC( + ϵ::Float64, + n_leapfrog::Int, + ::Type{metricT}, + space::Tuple; + adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE, +) where {metricT<:AHMC.AbstractMetric} return HMC{typeof(adtype),space,metricT}(ϵ, n_leapfrog, adtype) end function HMC( @@ -77,7 +83,7 @@ function HMC( metricT=AHMC.UnitEuclideanMetric, adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE, ) - return HMC(ϵ, n_leapfrog, metricT, space; adtype = adtype) + return HMC(ϵ, n_leapfrog, metricT, space; adtype=adtype) end DynamicPPL.initialsampler(::Sampler{<:Hamiltonian}) = SampleFromUniform() @@ -95,7 +101,7 @@ function AbstractMCMC.sample( nadapts=sampler.alg.n_adapts, discard_adapt=true, discard_initial=-1, - kwargs... + kwargs..., ) if resume_from === nothing # If `nadapts` is `-1`, then the user called a convenience @@ -114,15 +120,30 @@ function AbstractMCMC.sample( _discard_initial = discard_initial end - return AbstractMCMC.mcmcsample(rng, model, sampler, N; - chain_type=chain_type, progress=progress, - nadapts=_nadapts, discard_initial=_discard_initial, - kwargs...) + return AbstractMCMC.mcmcsample( + rng, + model, + sampler, + N; + chain_type=chain_type, + progress=progress, + nadapts=_nadapts, + discard_initial=_discard_initial, + kwargs..., + ) else return AbstractMCMC.mcmcsample( - rng, model, sampler, N; - chain_type=chain_type, initial_state=initial_state, progress=progress, - nadapts=0, discard_adapt=false, discard_initial=0, kwargs... + rng, + model, + sampler, + N; + chain_type=chain_type, + initial_state=initial_state, + progress=progress, + nadapts=0, + discard_adapt=false, + discard_initial=0, + kwargs..., ) end end @@ -134,7 +155,7 @@ function DynamicPPL.initialstep( vi_original::AbstractVarInfo; initial_params=nothing, nadapts=0, - kwargs... + kwargs..., ) # Transform the samples to unconstrained space and compute the joint log probability. vi = DynamicPPL.link(vi_original, spl, model) @@ -152,8 +173,8 @@ function DynamicPPL.initialstep( # Use the leaf-context from the `model` in case the user has # contextualized the model with something like `PriorContext` # to sample from the prior. 
- DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)) - ) + DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)), + ), ) logπ = Base.Fix1(LogDensityProblems.logdensity, ℓ) ∂logπ∂θ(x) = LogDensityProblems.logdensity_and_gradient(ℓ, x) @@ -203,9 +224,9 @@ function DynamicPPL.initialstep( # Adaptation adaptor = AHMCAdaptor(spl.alg, hamiltonian.metric; ϵ=ϵ) if spl.alg isa AdaptiveHamiltonian - hamiltonian, kernel, _ = - AHMC.adapt!(hamiltonian, kernel, adaptor, - 1, nadapts, t.z.θ, t.stat.acceptance_rate) + hamiltonian, kernel, _ = AHMC.adapt!( + hamiltonian, kernel, adaptor, 1, nadapts, t.z.θ, t.stat.acceptance_rate + ) end # Update `vi` based on acceptance @@ -229,7 +250,7 @@ function AbstractMCMC.step( spl::Sampler{<:Hamiltonian}, state::HMCState; nadapts=0, - kwargs... + kwargs..., ) # Get step size @debug "current ϵ" getstepsize(spl, state) @@ -242,9 +263,15 @@ function AbstractMCMC.step( # Adaptation i = state.i + 1 if spl.alg isa AdaptiveHamiltonian - hamiltonian, kernel, _ = - AHMC.adapt!(hamiltonian, state.kernel, state.adaptor, - i, nadapts, t.z.θ, t.stat.acceptance_rate) + hamiltonian, kernel, _ = AHMC.adapt!( + hamiltonian, + state.kernel, + state.adaptor, + i, + nadapts, + t.z.θ, + t.stat.acceptance_rate, + ) else kernel = state.kernel end @@ -269,8 +296,8 @@ function get_hamiltonian(model, spl, vi, state, n) Turing.LogDensityFunction( vi, model, - DynamicPPL.SamplingContext(spl, DynamicPPL.leafcontext(model.context)) - ) + DynamicPPL.SamplingContext(spl, DynamicPPL.leafcontext(model.context)), + ), ) ℓπ = Base.Fix1(LogDensityProblems.logdensity, ℓ) ∂ℓπ∂θ = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ℓ) @@ -308,15 +335,23 @@ Hoffman, Matthew D., and Andrew Gelman. "The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo." Journal of Machine Learning Research 15, no. 1 (2014): 1593-1623. """ -struct HMCDA{AD, space, metricT <: AHMC.AbstractMetric} <: AdaptiveHamiltonian - n_adapts :: Int # number of samples with adaption for ϵ - δ :: Float64 # target accept rate - λ :: Float64 # target leapfrog length - ϵ :: Float64 # (initial) step size +struct HMCDA{AD,space,metricT<:AHMC.AbstractMetric} <: AdaptiveHamiltonian + n_adapts::Int # number of samples with adaption for ϵ + δ::Float64 # target accept rate + λ::Float64 # target leapfrog length + ϵ::Float64 # (initial) step size adtype::AD end -function HMCDA(n_adapts::Int, δ::Float64, λ::Float64, ϵ::Float64, ::Type{metricT}, space::Tuple; adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE) where {metricT<:AHMC.AbstractMetric} +function HMCDA( + n_adapts::Int, + δ::Float64, + λ::Float64, + ϵ::Float64, + ::Type{metricT}, + space::Tuple; + adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE, +) where {metricT<:AHMC.AbstractMetric} return HMCDA{typeof(adtype),space,metricT}(n_adapts, δ, λ, ϵ, adtype) end @@ -327,16 +362,10 @@ function HMCDA( metricT=AHMC.UnitEuclideanMetric, adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE, ) - return HMCDA(-1, δ, λ, init_ϵ, metricT, (); adtype = adtype) + return HMCDA(-1, δ, λ, init_ϵ, metricT, (); adtype=adtype) end -function HMCDA( - n_adapts::Int, - δ::Float64, - λ::Float64, - ::Tuple{}; - kwargs... -) +function HMCDA(n_adapts::Int, δ::Float64, λ::Float64, ::Tuple{}; kwargs...) return HMCDA(n_adapts, δ, λ; kwargs...) 
end @@ -349,10 +378,9 @@ function HMCDA( metricT=AHMC.UnitEuclideanMetric, adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE, ) - return HMCDA(n_adapts, δ, λ, init_ϵ, metricT, space; adtype = adtype) + return HMCDA(n_adapts, δ, λ, init_ϵ, metricT, space; adtype=adtype) end - """ NUTS(n_adapts::Int, δ::Float64; max_depth::Int=10, Δ_max::Float64=1000.0, init_ϵ::Float64=0.0; adtype::ADTypes.AbstractADType=AutoForwardDiff() @@ -398,13 +426,8 @@ function NUTS( return NUTS{typeof(adtype),space,metricT}(n_adapts, δ, max_depth, Δ_max, ϵ, adtype) end -function NUTS( - n_adapts::Int, - δ::Float64, - ::Tuple{}; - kwargs... -) - NUTS(n_adapts, δ; kwargs...) +function NUTS(n_adapts::Int, δ::Float64, ::Tuple{}; kwargs...) + return NUTS(n_adapts, δ; kwargs...) end function NUTS( @@ -417,7 +440,7 @@ function NUTS( metricT=AHMC.DiagEuclideanMetric, adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE, ) - NUTS(n_adapts, δ, max_depth, Δ_max, init_ϵ, metricT, space; adtype=adtype) + return NUTS(n_adapts, δ, max_depth, Δ_max, init_ϵ, metricT, space; adtype=adtype) end function NUTS( @@ -428,15 +451,15 @@ function NUTS( metricT=AHMC.DiagEuclideanMetric, adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE, ) - NUTS(-1, δ, max_depth, Δ_max, init_ϵ, metricT, (); adtype=adtype) + return NUTS(-1, δ, max_depth, Δ_max, init_ϵ, metricT, (); adtype=adtype) end function NUTS(; kwargs...) - NUTS(-1, 0.65; kwargs...) + return NUTS(-1, 0.65; kwargs...) end for alg in (:HMC, :HMCDA, :NUTS) - @eval getmetricT(::$alg{<:Any, <:Any, metricT}) where {metricT} = metricT + @eval getmetricT(::$alg{<:Any,<:Any,metricT}) where {metricT} = metricT end ##### @@ -452,23 +475,28 @@ function gen_metric(dim::Int, spl::Sampler{<:AdaptiveHamiltonian}, state) end function make_ahmc_kernel(alg::HMC, ϵ) - return AHMC.HMCKernel(AHMC.Trajectory{AHMC.EndPointTS}(AHMC.Leapfrog(ϵ), AHMC.FixedNSteps(alg.n_leapfrog))) + return AHMC.HMCKernel( + AHMC.Trajectory{AHMC.EndPointTS}(AHMC.Leapfrog(ϵ), AHMC.FixedNSteps(alg.n_leapfrog)) + ) end function make_ahmc_kernel(alg::HMCDA, ϵ) - return AHMC.HMCKernel(AHMC.Trajectory{AHMC.EndPointTS}(AHMC.Leapfrog(ϵ), AHMC.FixedIntegrationTime(alg.λ))) + return AHMC.HMCKernel( + AHMC.Trajectory{AHMC.EndPointTS}(AHMC.Leapfrog(ϵ), AHMC.FixedIntegrationTime(alg.λ)) + ) +end +function make_ahmc_kernel(alg::NUTS, ϵ) + return AHMC.HMCKernel( + AHMC.Trajectory{AHMC.MultinomialTS}( + AHMC.Leapfrog(ϵ), AHMC.GeneralisedNoUTurn(alg.max_depth, alg.Δ_max) + ), + ) end -make_ahmc_kernel(alg::NUTS, ϵ) = - AHMC.HMCKernel(AHMC.Trajectory{AHMC.MultinomialTS}(AHMC.Leapfrog(ϵ), AHMC.GeneralisedNoUTurn(alg.max_depth, alg.Δ_max))) #### #### Compiler interface, i.e. tilde operators. 
#### function DynamicPPL.assume( - rng, - spl::Sampler{<:Hamiltonian}, - dist::Distribution, - vn::VarName, - vi, + rng, spl::Sampler{<:Hamiltonian}, dist::Distribution, vn::VarName, vi ) DynamicPPL.updategid!(vi, vn, spl) return DynamicPPL.assume(dist, vn, vi) @@ -488,7 +516,7 @@ end function DynamicPPL.dot_assume( rng, spl::Sampler{<:Hamiltonian}, - dists::Union{Distribution, AbstractArray{<:Distribution}}, + dists::Union{Distribution,AbstractArray{<:Distribution}}, vns::AbstractArray{<:VarName}, var::AbstractArray, vi, @@ -497,18 +525,13 @@ function DynamicPPL.dot_assume( return DynamicPPL.dot_assume(dists, var, vns, vi) end -function DynamicPPL.observe( - spl::Sampler{<:Hamiltonian}, - d::Distribution, - value, - vi, -) +function DynamicPPL.observe(spl::Sampler{<:Hamiltonian}, d::Distribution, value, vi) return DynamicPPL.observe(d, value, vi) end function DynamicPPL.dot_observe( spl::Sampler{<:Hamiltonian}, - ds::Union{Distribution, AbstractArray{<:Distribution}}, + ds::Union{Distribution,AbstractArray{<:Distribution}}, value::AbstractArray, vi, ) @@ -537,7 +560,9 @@ function AHMCAdaptor(alg::AdaptiveHamiltonian, metric::AHMC.AbstractMetric; ϵ=a return adaptor end -AHMCAdaptor(::Hamiltonian, ::AHMC.AbstractMetric; kwargs...) = AHMC.Adaptation.NoAdaptation() +function AHMCAdaptor(::Hamiltonian, ::AHMC.AbstractMetric; kwargs...) + return AHMC.Adaptation.NoAdaptation() +end ########################## # HMC State Constructors # @@ -548,7 +573,7 @@ function HMCState( model::Model, spl::Sampler{<:Hamiltonian}, vi::AbstractVarInfo; - kwargs... + kwargs..., ) # Link everything if needed. waslinked = islinked(vi, spl) @@ -561,10 +586,9 @@ function HMCState( logπ = Turing.LogDensityFunction( vi, model, - DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)) + DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)), ) - # Get the metric type. metricT = getmetricT(spl.alg) diff --git a/src/mcmc/is.jl b/src/mcmc/is.jl index ac05e9aaf..0fe6e1053 100644 --- a/src/mcmc/is.jl +++ b/src/mcmc/is.jl @@ -31,21 +31,13 @@ IS() = IS{()}() DynamicPPL.initialsampler(sampler::Sampler{<:IS}) = sampler function DynamicPPL.initialstep( - rng::AbstractRNG, - model::Model, - spl::Sampler{<:IS}, - vi::AbstractVarInfo; - kwargs... + rng::AbstractRNG, model::Model, spl::Sampler{<:IS}, vi::AbstractVarInfo; kwargs... ) return Transition(model, vi), nothing end function AbstractMCMC.step( - rng::Random.AbstractRNG, - model::Model, - spl::Sampler{<:IS}, - ::Nothing; - kwargs... + rng::Random.AbstractRNG, model::Model, spl::Sampler{<:IS}, ::Nothing; kwargs... ) vi = VarInfo(rng, model, spl) return Transition(model, vi), nothing diff --git a/src/mcmc/mh.jl b/src/mcmc/mh.jl index 6cc9f9a21..9b7f3ff09 100644 --- a/src/mcmc/mh.jl +++ b/src/mcmc/mh.jl @@ -2,7 +2,7 @@ ### Sampler states ### -struct MH{space, P} <: InferenceAlgorithm +struct MH{space,P} <: InferenceAlgorithm proposals::P end @@ -173,7 +173,7 @@ function MH(space...) prop = proposal(s) # Return early, we got a covariance matrix. - return MH{(), typeof(prop)}(prop) + return MH{(),typeof(prop)}(prop) else # Try to convert it to a proposal anyways, # throw an error if not acceptable. @@ -185,7 +185,7 @@ function MH(space...) proposals = NamedTuple{tuple(prop_syms...)}(tuple(props...)) syms = vcat(syms, prop_syms) - return MH{tuple(syms...), typeof(proposals)}(proposals) + return MH{tuple(syms...),typeof(proposals)}(proposals) end # Some of the proposals require working in unconstrained space. 
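Editor's aside, not part of the patch: the `MH(space...)` constructor reformatted in the hunk above assembles a `NamedTuple` of per-variable proposals, or returns early with a covariance-matrix proposal. A minimal usage sketch, assuming a toy gdemo-style model as in the Turing documentation:

```julia
using Turing

@model function gdemo(x)
    s ~ InverseGamma(2, 3)
    m ~ Normal(0, sqrt(s))
    for i in eachindex(x)
        x[i] ~ Normal(m, sqrt(s))
    end
end

# No arguments: each variable's prior is used as its proposal.
chain_prior = sample(gdemo([1.5, 2.0]), MH(), 1_000)

# Per-variable proposal distributions, keyed by variable symbol.
chain_static = sample(
    gdemo([1.5, 2.0]),
    MH(:m => Normal(0, 1), :s => InverseGamma(2, 3)),
    1_000,
)
```

Passing a covariance matrix instead, e.g. `MH([0.25 0.05; 0.05 0.25])`, hits the early-return branch above; per the rest of mh.jl (not shown in this hunk) it becomes a mean-zero multivariate normal random-walk proposal.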
@@ -225,11 +225,11 @@ function set_namedtuple!(vi::DynamicPPL.VarInfoOrThreadSafeVarInfo, nt::NamedTup # assign the unpacked values if length(vals) == 1 vi[vns[1]] = [vals[1];] - # otherwise just assign the values + # otherwise just assign the values else vi[vns[1]] = [vals;] end - # if there are multiple variables + # if there are multiple variables elseif vals isa AbstractArray nvals = length(vals) # if values are provided as an array with a single element @@ -238,7 +238,7 @@ function set_namedtuple!(vi::DynamicPPL.VarInfoOrThreadSafeVarInfo, nt::NamedTup for (vn, val) in zip(vns, vals[1]) vi[vn] = [val;] end - # otherwise number of variables and number of values have to be equal + # otherwise number of variables and number of values have to be equal elseif nvals == nvns # iterate over variables and values for (vn, val) in zip(vns, vals) @@ -260,7 +260,9 @@ A log density function for the MH sampler. This variant uses the `set_namedtuple!` function to update the `VarInfo`. """ -const MHLogDensityFunction{M<:Model,S<:Sampler{<:MH},V<:AbstractVarInfo} = Turing.LogDensityFunction{V,M,<:DynamicPPL.SamplingContext{<:S}} +const MHLogDensityFunction{M<:Model,S<:Sampler{<:MH},V<:AbstractVarInfo} = Turing.LogDensityFunction{ + V,M,<:DynamicPPL.SamplingContext{<:S} +} function LogDensityProblems.logdensity(f::MHLogDensityFunction, x::NamedTuple) # TODO: Make this work with immutable `f.varinfo` too. @@ -284,16 +286,10 @@ unvectorize(dists::AbstractVector) = length(dists) == 1 ? first(dists) : dists # possibly unpack and reshape samples according to the prior distribution reconstruct(dist::Distribution, val::AbstractVector) = DynamicPPL.reconstruct(dist, val) -function reconstruct( - dist::AbstractVector{<:UnivariateDistribution}, - val::AbstractVector -) +function reconstruct(dist::AbstractVector{<:UnivariateDistribution}, val::AbstractVector) return val end -function reconstruct( - dist::AbstractVector{<:MultivariateDistribution}, - val::AbstractVector -) +function reconstruct(dist::AbstractVector{<:MultivariateDistribution}, val::AbstractVector) offset = 0 return map(dist) do d n = length(d) @@ -319,23 +315,22 @@ function dist_val_tuple(spl::Sampler{<:MH}, vi::DynamicPPL.VarInfoOrThreadSafeVa return dt, vt end -@generated function _val_tuple( - vi::VarInfo, - vns::NamedTuple{names} -) where {names} +@generated function _val_tuple(vi::VarInfo, vns::NamedTuple{names}) where {names} isempty(names) === 0 && return :(NamedTuple()) expr = Expr(:tuple) expr.args = Any[ - :($name = reconstruct(unvectorize(DynamicPPL.getdist.(Ref(vi), vns.$name)), - DynamicPPL.getval(vi, vns.$name))) - for name in names] + :( + $name = reconstruct( + unvectorize(DynamicPPL.getdist.(Ref(vi), vns.$name)), + DynamicPPL.getval(vi, vns.$name), + ) + ) for name in names + ] return expr end @generated function _dist_tuple( - props::NamedTuple{propnames}, - vi::VarInfo, - vns::NamedTuple{names} + props::NamedTuple{propnames}, vi::VarInfo, vns::NamedTuple{names} ) where {names,propnames} isempty(names) === 0 && return :(NamedTuple()) expr = Expr(:tuple) @@ -345,14 +340,19 @@ end :($name = props.$name) else # Otherwise, use the default proposal. 
- :($name = AMH.StaticProposal(unvectorize(DynamicPPL.getdist.(Ref(vi), vns.$name)))) - end for name in names] + :( + $name = AMH.StaticProposal( + unvectorize(DynamicPPL.getdist.(Ref(vi), vns.$name)) + ) + ) + end for name in names + ] return expr end # Utility functions to link should_link(varinfo, sampler, proposal) = false -function should_link(varinfo, sampler, proposal::NamedTuple{(), Tuple{}}) +function should_link(varinfo, sampler, proposal::NamedTuple{(),Tuple{}}) # If it's an empty `NamedTuple`, we're using the priors as proposals # in which case we shouldn't link. return false @@ -362,24 +362,22 @@ function should_link(varinfo, sampler, proposal::AdvancedMH.RandomWalkProposal) end # FIXME: This won't be hit unless `vals` are all the exactly same concrete type of `AdvancedMH.RandomWalkProposal`! function should_link( - varinfo, - sampler, - proposal::NamedTuple{names, vals} -) where {names, vals<:NTuple{<:Any, <:AdvancedMH.RandomWalkProposal}} + varinfo, sampler, proposal::NamedTuple{names,vals} +) where {names,vals<:NTuple{<:Any,<:AdvancedMH.RandomWalkProposal}} return true end function maybe_link!!(varinfo, sampler, proposal, model) - return should_link(varinfo, sampler, proposal) ? link!!(varinfo, sampler, model) : varinfo + return if should_link(varinfo, sampler, proposal) + link!!(varinfo, sampler, model) + else + varinfo + end end # Make a proposal if we don't have a covariance proposal matrix (the default). function propose!!( - rng::AbstractRNG, - vi::AbstractVarInfo, - model::Model, - spl::Sampler{<:MH}, - proposal + rng::AbstractRNG, vi::AbstractVarInfo, model::Model, spl::Sampler{<:MH}, proposal ) # Retrieve distribution and value NamedTuples. dt, vt = dist_val_tuple(spl, vi) @@ -395,9 +393,9 @@ function propose!!( Turing.LogDensityFunction( vi, model, - DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)) - ) - ) + DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)), + ), + ), ) trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans) @@ -413,7 +411,7 @@ function propose!!( vi::AbstractVarInfo, model::Model, spl::Sampler{<:MH}, - proposal::AdvancedMH.RandomWalkProposal + proposal::AdvancedMH.RandomWalkProposal, ) # If this is the case, we can just draw directly from the proposal # matrix. @@ -430,9 +428,9 @@ function propose!!( Turing.LogDensityFunction( vi, model, - DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)) - ) - ) + DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)), + ), + ), ) trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans) @@ -444,7 +442,7 @@ function DynamicPPL.initialstep( model::AbstractModel, spl::Sampler{<:MH}, vi::AbstractVarInfo; - kwargs... + kwargs..., ) # If we're doing random walk with a covariance matrix, # just link everything before sampling. @@ -454,11 +452,7 @@ function DynamicPPL.initialstep( end function AbstractMCMC.step( - rng::AbstractRNG, - model::Model, - spl::Sampler{<:MH}, - vi::AbstractVarInfo; - kwargs... + rng::AbstractRNG, model::Model, spl::Sampler{<:MH}, vi::AbstractVarInfo; kwargs... ) # Cases: # 1. A covariance proposal matrix @@ -471,13 +465,7 @@ end #### #### Compiler interface, i.e. tilde operators. 
#### -function DynamicPPL.assume( - rng, - spl::Sampler{<:MH}, - dist::Distribution, - vn::VarName, - vi, -) +function DynamicPPL.assume(rng, spl::Sampler{<:MH}, dist::Distribution, vn::VarName, vi) DynamicPPL.updategid!(vi, vn, spl) r = vi[vn] return r, logpdf_with_trans(dist, r, istrans(vi, vn)), vi @@ -502,7 +490,7 @@ end function DynamicPPL.dot_assume( rng, spl::Sampler{<:MH}, - dists::Union{Distribution, AbstractArray{<:Distribution}}, + dists::Union{Distribution,AbstractArray{<:Distribution}}, vn::VarName, var::AbstractArray, vi, @@ -515,18 +503,13 @@ function DynamicPPL.dot_assume( return var, sum(logpdf_with_trans.(dists, r, istrans(vi, vns[1]))), vi end -function DynamicPPL.observe( - spl::Sampler{<:MH}, - d::Distribution, - value, - vi, -) +function DynamicPPL.observe(spl::Sampler{<:MH}, d::Distribution, value, vi) return DynamicPPL.observe(SampleFromPrior(), d, value, vi) end function DynamicPPL.dot_observe( spl::Sampler{<:MH}, - ds::Union{Distribution, AbstractArray{<:Distribution}}, + ds::Union{Distribution,AbstractArray{<:Distribution}}, value::AbstractArray, vi, ) diff --git a/src/mcmc/particle_mcmc.jl b/src/mcmc/particle_mcmc.jl index 7d0714c41..8d38660c6 100644 --- a/src/mcmc/particle_mcmc.jl +++ b/src/mcmc/particle_mcmc.jl @@ -15,7 +15,7 @@ Sequential Monte Carlo sampler. $(TYPEDFIELDS) """ -struct SMC{space, R} <: ParticleInference +struct SMC{space,R} <: ParticleInference resampler::R end @@ -29,15 +29,17 @@ Create a sequential Monte Carlo sampler of type [`SMC`](@ref) for the variables If the algorithm for the resampling step is not specified explicitly, systematic resampling is performed if the estimated effective sample size per particle drops below 0.5. """ -function SMC(resampler = AdvancedPS.ResampleWithESSThreshold(), space::Tuple = ()) - return SMC{space, typeof(resampler)}(resampler) +function SMC(resampler=AdvancedPS.ResampleWithESSThreshold(), space::Tuple=()) + return SMC{space,typeof(resampler)}(resampler) end # Convenient constructors with ESS threshold -function SMC(resampler, threshold::Real, space::Tuple = ()) +function SMC(resampler, threshold::Real, space::Tuple=()) return SMC(AdvancedPS.ResampleWithESSThreshold(resampler, threshold), space) end -SMC(threshold::Real, space::Tuple = ()) = SMC(AdvancedPS.resample_systematic, threshold, space) +function SMC(threshold::Real, space::Tuple=()) + return SMC(AdvancedPS.resample_systematic, threshold, space) +end # If only the space is defined SMC(space::Symbol...) = SMC(space) @@ -62,7 +64,7 @@ function SMCTransition(model::DynamicPPL.Model, vi::AbstractVarInfo, weight) return SMCTransition(theta, lp, weight) end -metadata(t::SMCTransition) = (lp = t.lp, weight = t.weight) +metadata(t::SMCTransition) = (lp=t.lp, weight=t.weight) DynamicPPL.getlogp(t::SMCTransition) = t.lp @@ -86,18 +88,30 @@ function AbstractMCMC.sample( resume_from=nothing, initial_state=DynamicPPL.loadstate(resume_from), progress=PROGRESS[], - kwargs... + kwargs..., ) if resume_from === nothing - return AbstractMCMC.mcmcsample(rng, model, sampler, N; - chain_type=chain_type, - progress=progress, - nparticles=N, - kwargs...) + return AbstractMCMC.mcmcsample( + rng, + model, + sampler, + N; + chain_type=chain_type, + progress=progress, + nparticles=N, + kwargs..., + ) else return AbstractMCMC.mcmcsample( - rng, model, sampler, N; chain_type, initial_state, progress=progress, - nparticles=N, kwargs... 
+ rng, + model, + sampler, + N; + chain_type, + initial_state, + progress=progress, + nparticles=N, + kwargs..., ) end end @@ -108,7 +122,7 @@ function DynamicPPL.initialstep( spl::Sampler{<:SMC}, vi::AbstractVarInfo; nparticles::Int, - kwargs... + kwargs..., ) # Reset the VarInfo. reset_num_produce!(vi) @@ -120,7 +134,7 @@ function DynamicPPL.initialstep( particles = AdvancedPS.ParticleContainer( [AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:nparticles], AdvancedPS.TracedRNG(), - rng + rng, ) # Perform particle sweep. @@ -138,11 +152,7 @@ function DynamicPPL.initialstep( end function AbstractMCMC.step( - ::AbstractRNG, - model::AbstractModel, - spl::Sampler{<:SMC}, - state::SMCState; - kwargs... + ::AbstractRNG, model::AbstractModel, spl::Sampler{<:SMC}, state::SMCState; kwargs... ) # Extract the index of the current particle. index = state.particleindex @@ -191,18 +201,16 @@ If the algorithm for the resampling step is not specified explicitly, systematic is performed if the estimated effective sample size per particle drops below 0.5. """ function PG( - nparticles::Int, - resampler = AdvancedPS.ResampleWithESSThreshold(), - space::Tuple = (), + nparticles::Int, resampler=AdvancedPS.ResampleWithESSThreshold(), space::Tuple=() ) - return PG{space, typeof(resampler)}(nparticles, resampler) + return PG{space,typeof(resampler)}(nparticles, resampler) end # Convenient constructors with ESS threshold -function PG(nparticles::Int, resampler, threshold::Real, space::Tuple = ()) +function PG(nparticles::Int, resampler, threshold::Real, space::Tuple=()) return PG(nparticles, AdvancedPS.ResampleWithESSThreshold(resampler, threshold), space) end -function PG(nparticles::Int, threshold::Real, space::Tuple = ()) +function PG(nparticles::Int, threshold::Real, space::Tuple=()) return PG(nparticles, AdvancedPS.resample_systematic, threshold, space) end @@ -238,7 +246,7 @@ function PGTransition(model::DynamicPPL.Model, vi::AbstractVarInfo, logevidence) return PGTransition(theta, lp, logevidence) end -metadata(t::PGTransition) = (lp = t.lp, logevidence = t.logevidence) +metadata(t::PGTransition) = (lp=t.lp, logevidence=t.logevidence) DynamicPPL.getlogp(t::PGTransition) = t.lp @@ -251,7 +259,7 @@ function DynamicPPL.initialstep( model::AbstractModel, spl::Sampler{<:PG}, vi::AbstractVarInfo; - kwargs... + kwargs..., ) # Reset the VarInfo before new sweep reset_num_produce!(vi) @@ -263,7 +271,7 @@ function DynamicPPL.initialstep( particles = AdvancedPS.ParticleContainer( [AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles], AdvancedPS.TracedRNG(), - rng + rng, ) # Perform a particle sweep. @@ -282,11 +290,7 @@ function DynamicPPL.initialstep( end function AbstractMCMC.step( - rng::AbstractRNG, - model::AbstractModel, - spl::Sampler{<:PG}, - state::PGState; - kwargs... + rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs... ) # Reset the VarInfo before new sweep. vi = state.vi @@ -325,14 +329,18 @@ function AbstractMCMC.step( return transition, PGState(_vi, newreference.rng) end -DynamicPPL.use_threadsafe_eval(::SamplingContext{<:Sampler{<:Union{PG,SMC}}}, ::AbstractVarInfo) = false +function DynamicPPL.use_threadsafe_eval( + ::SamplingContext{<:Sampler{<:Union{PG,SMC}}}, ::AbstractVarInfo +) + return false +end function trace_local_varinfo_maybe(varinfo) - try + try trace = AdvancedPS.current_trace() return trace.model.f.varinfo catch e - # NOTE: this heuristic allows Libtask evaluating a model outside a `Trace`. 
+ # NOTE: this heuristic allows Libtask evaluating a model outside a `Trace`. if e == KeyError(:__trace) || current_task().storage isa Nothing return varinfo else @@ -360,7 +368,7 @@ function DynamicPPL.assume( spl::Sampler{<:Union{PG,SMC}}, dist::Distribution, vn::VarName, - _vi::AbstractVarInfo + _vi::AbstractVarInfo, ) vi = trace_local_varinfo_maybe(_vi) trng = trace_local_rng_maybe(rng) @@ -399,18 +407,14 @@ function DynamicPPL.observe(spl::Sampler{<:Union{PG,SMC}}, dist::Distribution, v end function DynamicPPL.acclogp!!( - context::SamplingContext{<:Sampler{<:Union{PG,SMC}}}, - varinfo::AbstractVarInfo, - logp + context::SamplingContext{<:Sampler{<:Union{PG,SMC}}}, varinfo::AbstractVarInfo, logp ) varinfo_trace = trace_local_varinfo_maybe(varinfo) - DynamicPPL.acclogp!!(DynamicPPL.childcontext(context), varinfo_trace, logp) + return DynamicPPL.acclogp!!(DynamicPPL.childcontext(context), varinfo_trace, logp) end function DynamicPPL.acclogp_observe!!( - context::SamplingContext{<:Sampler{<:Union{PG,SMC}}}, - varinfo::AbstractVarInfo, - logp + context::SamplingContext{<:Sampler{<:Union{PG,SMC}}}, varinfo::AbstractVarInfo, logp ) Libtask.produce(logp) return trace_local_varinfo_maybe(varinfo) @@ -421,7 +425,7 @@ function AdvancedPS.Trace( model::Model, sampler::Sampler{<:Union{SMC,PG}}, varinfo::AbstractVarInfo, - rng::AdvancedPS.TracedRNG + rng::AdvancedPS.TracedRNG, ) newvarinfo = deepcopy(varinfo) DynamicPPL.reset_num_produce!(newvarinfo) diff --git a/src/mcmc/sghmc.jl b/src/mcmc/sghmc.jl index 61cffa7b3..84a9c18f3 100644 --- a/src/mcmc/sghmc.jl +++ b/src/mcmc/sghmc.jl @@ -44,10 +44,12 @@ function SGHMC( adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE, ) _learning_rate, _momentum_decay = promote(learning_rate, momentum_decay) - return SGHMC{typeof(adtype),space,typeof(_learning_rate)}(_learning_rate, _momentum_decay, adtype) + return SGHMC{typeof(adtype),space,typeof(_learning_rate)}( + _learning_rate, _momentum_decay, adtype + ) end -struct SGHMCState{L,V<:AbstractVarInfo, T<:AbstractVector{<:Real}} +struct SGHMCState{L,V<:AbstractVarInfo,T<:AbstractVector{<:Real}} logdensity::L vi::V velocity::T @@ -68,7 +70,9 @@ function DynamicPPL.initialstep( # Compute initial sample and state. sample = Transition(model, vi) - ℓ = LogDensityProblemsAD.ADgradient(Turing.LogDensityFunction(vi, model, spl, DynamicPPL.DefaultContext())) + ℓ = LogDensityProblemsAD.ADgradient( + Turing.LogDensityFunction(vi, model, spl, DynamicPPL.DefaultContext()) + ) state = SGHMCState(ℓ, vi, zero(vi[spl])) return sample, state @@ -79,7 +83,7 @@ function AbstractMCMC.step( model::Model, spl::Sampler{<:SGHMC}, state::SGHMCState; - kwargs... + kwargs..., ) # Compute gradient of log density. ℓ = state.logdensity @@ -134,7 +138,7 @@ struct PolynomialStepsize{T<:Real} "Decay rate of step size in (0.5, 1]." γ::T - function PolynomialStepsize{T}(a::T, b::T, γ::T) where T + function PolynomialStepsize{T}(a::T, b::T, γ::T) where {T} 0.5 < γ ≤ 1 || error("the decay rate `γ` has to be in (0.5, 1]") return new{T}(a, b, γ) end @@ -153,7 +157,7 @@ a (b + t)^{-γ}. function PolynomialStepsize(a::T, b::T, γ::T) where {T<:Real} return PolynomialStepsize{T}(a, b, γ) end -function PolynomialStepsize(a::Real, b::Real = 0, γ::Real = 0.55) +function PolynomialStepsize(a::Real, b::Real=0, γ::Real=0.55) return PolynomialStepsize(promote(a, b, γ)...) 
end @@ -183,8 +187,8 @@ See also: [`PolynomialStepsize`](@ref) """ function SGLD( space::Symbol...; - stepsize = PolynomialStepsize(0.01), - adtype::ADTypes.AbstractADType = Turing.DEFAULT_ADTYPE, + stepsize=PolynomialStepsize(0.01), + adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE, ) return SGLD{typeof(adtype),space,typeof(stepsize)}(stepsize, adtype) end @@ -204,7 +208,7 @@ function SGLDTransition(model::DynamicPPL.Model, vi::AbstractVarInfo, stepsize) return SGLDTransition(theta, lp, stepsize) end -metadata(t::SGLDTransition) = (lp = t.lp, SGLD_stepsize = t.stepsize) +metadata(t::SGLDTransition) = (lp=t.lp, SGLD_stepsize=t.stepsize) DynamicPPL.getlogp(t::SGLDTransition) = t.lp @@ -219,7 +223,7 @@ function DynamicPPL.initialstep( model::Model, spl::Sampler{<:SGLD}, vi::AbstractVarInfo; - kwargs... + kwargs..., ) # Transform the samples to unconstrained space and compute the joint log probability. if !DynamicPPL.islinked(vi, spl) @@ -229,18 +233,16 @@ function DynamicPPL.initialstep( # Create first sample and state. sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0))) - ℓ = LogDensityProblemsAD.ADgradient(Turing.LogDensityFunction(vi, model, spl, DynamicPPL.DefaultContext())) + ℓ = LogDensityProblemsAD.ADgradient( + Turing.LogDensityFunction(vi, model, spl, DynamicPPL.DefaultContext()) + ) state = SGLDState(ℓ, vi, 1) return sample, state end function AbstractMCMC.step( - rng::Random.AbstractRNG, - model::Model, - spl::Sampler{<:SGLD}, - state::SGLDState; - kwargs... + rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs... ) # Perform gradient step. ℓ = state.logdensity diff --git a/src/optimisation/Optimisation.jl b/src/optimisation/Optimisation.jl index 639b576c6..682e664a6 100644 --- a/src/optimisation/Optimisation.jl +++ b/src/optimisation/Optimisation.jl @@ -1,34 +1,35 @@ module Optimisation using ..Turing -using Bijectors -using Random -using SciMLBase: OptimizationFunction, OptimizationProblem, AbstractADType, NoAD - +using NamedArrays: NamedArrays +using DynamicPPL: DynamicPPL +using LogDensityProblems: LogDensityProblems +using LogDensityProblemsAD: LogDensityProblemsAD +using Optimization: Optimization +using OptimizationOptimJL: OptimizationOptimJL +using Random: Random +using SciMLBase: SciMLBase +using ADTypes: ADTypes +using StatsBase: StatsBase using Accessors: Accessors -using DynamicPPL -using DynamicPPL: Model, AbstractContext, VarInfo, VarName, - _getindex, getsym, getfield, setorder!, - get_and_set_val!, istrans - -import LogDensityProblems -import LogDensityProblemsAD - -export constrained_space, - MAP, - MLE, - OptimLogDensity, - OptimizationContext, - get_parameter_bounds, - optim_objective, - optim_function, - optim_problem +using Printf: Printf +using ForwardDiff: ForwardDiff +using StatsAPI: StatsAPI +using Statistics: Statistics -struct constrained_space{x} end +export maximum_a_posteriori, maximum_likelihood +# The MAP and MLE exports are only needed for the Optim.jl interface. +export MAP, MLE -struct MLE end -struct MAP end +""" + ModeEstimator +An abstract type to mark whether mode estimation is to be done with maximum a posteriori +(MAP) or maximum likelihood estimation (MLE). +""" +abstract type ModeEstimator end +struct MLE <: ModeEstimator end +struct MAP <: ModeEstimator end """ OptimizationContext{C<:AbstractContext} <: AbstractContext @@ -37,25 +38,35 @@ The `OptimizationContext` transforms variables to their constrained space, but does not use the density with respect to the transformation. 
This context is intended to allow an optimizer to sample in R^n freely. """ -struct OptimizationContext{C<:AbstractContext} <: AbstractContext +struct OptimizationContext{C<:DynamicPPL.AbstractContext} <: DynamicPPL.AbstractContext context::C - function OptimizationContext{C}(context::C) where {C<:AbstractContext} - if !(context isa Union{DefaultContext,LikelihoodContext}) - throw(ArgumentError("`OptimizationContext` supports only leaf contexts of type `DynamicPPL.DefaultContext` and `DynamicPPL.LikelihoodContext` (given: `$(typeof(context)))`")) + function OptimizationContext{C}(context::C) where {C<:DynamicPPL.AbstractContext} + if !( + context isa Union{ + DynamicPPL.DefaultContext, + DynamicPPL.LikelihoodContext, + DynamicPPL.PriorContext, + } + ) + msg = """ + `OptimizationContext` supports only leaf contexts of type + `DynamicPPL.DefaultContext`, `DynamicPPL.LikelihoodContext`, + and `DynamicPPL.PriorContext` (given: `$(typeof(context)))` + """ + throw(ArgumentError(msg)) end return new{C}(context) end end -OptimizationContext(context::AbstractContext) = OptimizationContext{typeof(context)}(context) +OptimizationContext(ctx::DynamicPPL.AbstractContext) = OptimizationContext{typeof(ctx)}(ctx) DynamicPPL.NodeTrait(::OptimizationContext) = DynamicPPL.IsLeaf() -# assume function DynamicPPL.tilde_assume(ctx::OptimizationContext, dist, vn, vi) r = vi[vn, dist] - lp = if ctx.context isa DefaultContext + lp = if ctx.context isa Union{DynamicPPL.DefaultContext,DynamicPPL.PriorContext} # MAP Distributions.logpdf(dist, r) else @@ -65,15 +76,20 @@ function DynamicPPL.tilde_assume(ctx::OptimizationContext, dist, vn, vi) return r, lp, vi end -# dot assume -_loglikelihood(dist::Distribution, x) = loglikelihood(dist, x) -_loglikelihood(dists::AbstractArray{<:Distribution}, x) = loglikelihood(arraydist(dists), x) +_loglikelihood(dist::Distribution, x) = StatsAPI.loglikelihood(dist, x) + +function _loglikelihood(dists::AbstractArray{<:Distribution}, x) + return StatsAPI.loglikelihood(arraydist(dists), x) +end + function DynamicPPL.dot_tilde_assume(ctx::OptimizationContext, right, left, vns, vi) - # Values should be set and we're using `SampleFromPrior`, hence the `rng` argument shouldn't - # affect anything. + # Values should be set and we're using `SampleFromPrior`, hence the `rng` argument + # shouldn't affect anything. # TODO: Stop using `get_and_set_val!`. - r = DynamicPPL.get_and_set_val!(Random.default_rng(), vi, vns, right, SampleFromPrior()) - lp = if ctx.context isa DefaultContext + r = DynamicPPL.get_and_set_val!( + Random.default_rng(), vi, vns, right, DynamicPPL.SampleFromPrior() + ) + lp = if ctx.context isa Union{DynamicPPL.DefaultContext,DynamicPPL.PriorContext} # MAP _loglikelihood(right, r) else @@ -83,41 +99,65 @@ function DynamicPPL.dot_tilde_assume(ctx::OptimizationContext, right, left, vns, return r, lp, vi end +function DynamicPPL.tilde_observe( + ctx::OptimizationContext{<:DynamicPPL.PriorContext}, args... +) + return DynamicPPL.tilde_observe(ctx.context, args...) +end + +function DynamicPPL.dot_tilde_observe( + ctx::OptimizationContext{<:DynamicPPL.PriorContext}, args... +) + return DynamicPPL.dot_tilde_observe(ctx.context, args...) +end + """ - OptimLogDensity{M<:Model,C<:Context,V<:VarInfo} + OptimLogDensity{M<:DynamicPPL.Model,C<:Context,V<:DynamicPPL.VarInfo} A struct that stores the negative log density function of a `DynamicPPL` model. 
""" -const OptimLogDensity{M<:Model,C<:OptimizationContext,V<:VarInfo} = Turing.LogDensityFunction{V,M,C} +const OptimLogDensity{M<:DynamicPPL.Model,C<:OptimizationContext,V<:DynamicPPL.VarInfo} = Turing.LogDensityFunction{ + V,M,C +} """ - OptimLogDensity(model::Model, context::OptimizationContext) + OptimLogDensity(model::DynamicPPL.Model, context::OptimizationContext) Create a callable `OptimLogDensity` struct that evaluates a model using the given `context`. """ -function OptimLogDensity(model::Model, context::OptimizationContext) - init = VarInfo(model) +function OptimLogDensity(model::DynamicPPL.Model, context::OptimizationContext) + init = DynamicPPL.VarInfo(model) return Turing.LogDensityFunction(init, model, context) end """ - LogDensityProblems.logdensity(f::OptimLogDensity, z) + (f::OptimLogDensity)(z) + (f::OptimLogDensity)(z, _) -Evaluate the negative log joint (with `DefaultContext`) or log likelihood (with `LikelihoodContext`) -at the array `z`. +Evaluate the negative log joint or log likelihood at the array `z`. Which one is evaluated +depends on the context of `f`. + +Any second argument is ignored. The two-argument method only exists to match interface the +required by Optimization.jl. """ function (f::OptimLogDensity)(z::AbstractVector) varinfo = DynamicPPL.unflatten(f.varinfo, z) - return -getlogp(last(DynamicPPL.evaluate!!(f.model, varinfo, f.context))) + return -DynamicPPL.getlogp(last(DynamicPPL.evaluate!!(f.model, varinfo, f.context))) end +(f::OptimLogDensity)(z, _) = f(z) + # NOTE: This seems a bit weird IMO since this is the _negative_ log-likelihood. LogDensityProblems.logdensity(f::OptimLogDensity, z::AbstractVector) = f(z) +# NOTE: The format of this function is dictated by Optim. The first argument sets whether to +# compute the function value, the second whether to compute the gradient (and stores the +# gradient). The last one is the actual argument of the objective function. function (f::OptimLogDensity)(F, G, z) if G !== nothing # Calculate negative log joint and its gradient. - # TODO: Make OptimLogDensity already an LogDensityProblems.ADgradient? Allow to specify AD? + # TODO: Make OptimLogDensity already an LogDensityProblems.ADgradient? Allow to + # specify AD? ℓ = LogDensityProblemsAD.ADgradient(f) neglogp, ∇neglogp = LogDensityProblems.logdensity_and_gradient(ℓ, z) @@ -139,186 +179,328 @@ function (f::OptimLogDensity)(F, G, z) return nothing end +""" + ModeResult{ + V<:NamedArrays.NamedArray, + M<:NamedArrays.NamedArray, + O<:Optim.MultivariateOptimizationResults, + S<:NamedArrays.NamedArray + } + +A wrapper struct to store various results from a MAP or MLE estimation. +""" +struct ModeResult{V<:NamedArrays.NamedArray,O<:Any,M<:OptimLogDensity} <: + StatsBase.StatisticalModel + "A vector with the resulting point estimates." + values::V + "The stored optimiser results." + optim_result::O + "The final log likelihood or log joint, depending on whether `MAP` or `MLE` was run." + lp::Float64 + "The evaluation function used to calculate the output." 
+ f::M +end +function Base.show(io::IO, ::MIME"text/plain", m::ModeResult) + print(io, "ModeResult with maximized lp of ") + Printf.@printf(io, "%.2f", m.lp) + println(io) + return show(io, m.values) +end -################################################# -# Generic optimisation objective initialisation # -################################################# - -function transform!!(f::OptimLogDensity) - ## Check link status of vi in OptimLogDensity - linked = DynamicPPL.istrans(f.varinfo) - - ## transform into constrained or unconstrained space depending on current state of vi - f = Accessors.@set f.varinfo = if !linked - DynamicPPL.link!!(f.varinfo, f.model) - else - DynamicPPL.invlink!!(f.varinfo, f.model) - end +function Base.show(io::IO, m::ModeResult) + return show(io, m.values.array) +end - return f +# Various StatsBase methods for ModeResult + +function StatsBase.coeftable(m::ModeResult; level::Real=0.95) + # Get columns for coeftable. + terms = string.(StatsBase.coefnames(m)) + estimates = m.values.array[:, 1] + stderrors = StatsBase.stderror(m) + zscore = estimates ./ stderrors + p = map(z -> StatsAPI.pvalue(Distributions.Normal(), z; tail=:both), zscore) + + # Confidence interval (CI) + q = Statistics.quantile(Distributions.Normal(), (1 + level) / 2) + ci_low = estimates .- q .* stderrors + ci_high = estimates .+ q .* stderrors + + level_ = 100 * level + level_percentage = isinteger(level_) ? Int(level_) : level_ + + cols = [estimates, stderrors, zscore, p, ci_low, ci_high] + colnms = [ + "Coef.", + "Std. Error", + "z", + "Pr(>|z|)", + "Lower $(level_percentage)%", + "Upper $(level_percentage)%", + ] + return StatsBase.CoefTable(cols, colnms, terms) end -function transform!!(p::AbstractArray, vi::DynamicPPL.VarInfo, model::DynamicPPL.Model, ::constrained_space{true}) - linked = DynamicPPL.istrans(vi) - - !linked && return identity(p) # TODO: why do we do `identity` here? - vi = DynamicPPL.unflatten(vi, p) - vi = DynamicPPL.invlink!!(vi, model) - p .= vi[:] +function StatsBase.informationmatrix( + m::ModeResult; hessian_function=ForwardDiff.hessian, kwargs... +) + # Calculate Hessian and information matrix. - # If linking mutated, we need to link once more. - linked && DynamicPPL.link!!(vi, model) + # Convert the values to their unconstrained states to make sure the + # Hessian is computed with respect to the untransformed parameters. + linked = DynamicPPL.istrans(m.f.varinfo) + if linked + m = Accessors.@set m.f.varinfo = DynamicPPL.invlink!!(m.f.varinfo, m.f.model) + end - return p -end + # Calculate the Hessian, which is the information matrix because the negative of the log + # likelihood was optimized + varnames = StatsBase.coefnames(m) + info = hessian_function(m.f, m.values.array[:, 1]) -function transform!!(p::AbstractArray, vi::DynamicPPL.VarInfo, model::DynamicPPL.Model, ::constrained_space{false}) - linked = DynamicPPL.istrans(vi) + # Link it back if we invlinked it. if linked - vi = DynamicPPL.invlink!!(vi, model) + m = Accessors.@set m.f.varinfo = DynamicPPL.link!!(m.f.varinfo, m.f.model) end - vi = DynamicPPL.unflatten(vi, p) - vi = DynamicPPL.link!!(vi, model) - p .= vi[:] - - # If linking mutated, we need to link once more. 
- !linked && DynamicPPL.invlink!!(vi, model) - return p + return NamedArrays.NamedArray(info, (varnames, varnames)) end -function transform(p::AbstractArray, vi::DynamicPPL.VarInfo, model::DynamicPPL.Model, con::constrained_space) - return transform!!(copy(p), vi, model, con) -end +StatsBase.coef(m::ModeResult) = m.values +StatsBase.coefnames(m::ModeResult) = names(m.values)[1] +StatsBase.params(m::ModeResult) = StatsBase.coefnames(m) +StatsBase.vcov(m::ModeResult) = inv(StatsBase.informationmatrix(m)) +StatsBase.loglikelihood(m::ModeResult) = m.lp -abstract type AbstractTransform end +""" + ModeResult(log_density::OptimLogDensity, solution::SciMLBase.OptimizationSolution) -struct ParameterTransform{T<:DynamicPPL.VarInfo,M<:DynamicPPL.Model, S<:constrained_space} <: AbstractTransform - vi::T - model::M - space::S -end +Create a `ModeResult` for a given `log_density` objective and a `solution` given by `solve`. -struct Init{T<:DynamicPPL.VarInfo,M<:DynamicPPL.Model, S<:constrained_space} <: AbstractTransform - vi::T - model::M - space::S +`Optimization.solve` returns its own result type. This function converts that into the +richer format of `ModeResult`. It also takes care of transforming them back to the original +parameter space in case the optimization was done in a transformed space. +""" +function ModeResult(log_density::OptimLogDensity, solution::SciMLBase.OptimizationSolution) + varinfo_new = DynamicPPL.unflatten(log_density.varinfo, solution.u) + # `getparams` performs invlinking if needed + vns_vals_iter = Turing.Inference.getparams(log_density.model, varinfo_new) + syms = map(Symbol ∘ first, vns_vals_iter) + vals = map(last, vns_vals_iter) + return ModeResult( + NamedArrays.NamedArray(vals, syms), solution, -solution.objective, log_density + ) end -function (t::AbstractTransform)(p::AbstractArray) - return transform(p, t.vi, t.model, t.space) -end +""" + ModeEstimationConstraints -function (t::Init)() - return t.vi[DynamicPPL.SampleFromPrior()] -end +A struct that holds constraints for mode estimation problems. -function get_parameter_bounds(model::DynamicPPL.Model) - vi = DynamicPPL.VarInfo(model) +The fields are the same as possible constraints supported by the Optimization.jl: +`ub` and `lb` specify lower and upper bounds of box constraints. `cons` is a function that +takes the parameters of the model and returns a list of derived quantities, which are then +constrained by the lower and upper bounds set by `lcons` and `ucons`. We refer to these +as generic constraints. Please see the documentation of +[Optimization.jl](https://docs.sciml.ai/Optimization/stable/) for more details. - ## Check link status of vi - linked = DynamicPPL.istrans(vi) - - ## transform into unconstrained - if !linked - vi = DynamicPPL.link!!(vi, model) - end - - d = length(vi[:]) - lb = transform(fill(-Inf, d), vi, model, constrained_space{true}()) - ub = transform(fill(Inf, d), vi, model, constrained_space{true}()) +Any of the fields can be `nothing`, disabling the corresponding constraints. 
+""" +struct ModeEstimationConstraints{ + Lb<:Union{Nothing,AbstractVector}, + Ub<:Union{Nothing,AbstractVector}, + Cons, + LCons<:Union{Nothing,AbstractVector}, + UCons<:Union{Nothing,AbstractVector}, +} + lb::Lb + ub::Ub + cons::Cons + lcons::LCons + ucons::UCons +end - return lb, ub +has_box_constraints(c::ModeEstimationConstraints) = c.ub !== nothing || c.lb !== nothing +function has_generic_constraints(c::ModeEstimationConstraints) + return (c.cons !== nothing || c.lcons !== nothing || c.ucons !== nothing) end +has_constraints(c) = has_box_constraints(c) || has_generic_constraints(c) -function _optim_objective(model::DynamicPPL.Model, ::MAP, ::constrained_space{false}) - ctx = OptimizationContext(DynamicPPL.DefaultContext()) - obj = OptimLogDensity(model, ctx) +""" + generate_initial_params(model::DynamicPPL.Model, initial_params, constraints) - obj = transform!!(obj) - init = Init(obj.varinfo, model, constrained_space{false}()) - t = ParameterTransform(obj.varinfo, model, constrained_space{true}()) +Generate an initial value for the optimization problem. - return (obj=obj, init = init, transform=t) -end +If `initial_params` is not `nothing`, a copy of it is returned. Otherwise initial parameter +values are generated either by sampling from the prior (if no constraints are present) or +uniformly from the box constraints. If generic constraints are set, an error is thrown. +""" +function generate_initial_params(model::DynamicPPL.Model, initial_params, constraints) + if initial_params === nothing && has_generic_constraints(constraints) + throw( + ArgumentError( + "You must provide an initial value when using generic constraints." + ), + ) + end -function _optim_objective(model::DynamicPPL.Model, ::MAP, ::constrained_space{true}) - ctx = OptimizationContext(DynamicPPL.DefaultContext()) - obj = OptimLogDensity(model, ctx) - - init = Init(obj.varinfo, model, constrained_space{true}()) - t = ParameterTransform(obj.varinfo, model, constrained_space{true}()) - - return (obj=obj, init = init, transform=t) + return if initial_params !== nothing + copy(initial_params) + elseif has_box_constraints(constraints) + [ + rand(Distributions.Uniform(lower, upper)) for + (lower, upper) in zip(constraints.lb, constraints.ub) + ] + else + rand(Vector, model) + end end -function _optim_objective(model::DynamicPPL.Model, ::MLE, ::constrained_space{false}) - ctx = OptimizationContext(DynamicPPL.LikelihoodContext()) - obj = OptimLogDensity(model, ctx) - - obj = transform!!(obj) - init = Init(obj.varinfo, model, constrained_space{false}()) - t = ParameterTransform(obj.varinfo, model, constrained_space{true}()) - - return (obj=obj, init = init, transform=t) +function default_solver(constraints::ModeEstimationConstraints) + return if has_generic_constraints(constraints) + OptimizationOptimJL.IPNewton() + else + OptimizationOptimJL.LBFGS() + end end -function _optim_objective(model::DynamicPPL.Model, ::MLE, ::constrained_space{true}) - ctx = OptimizationContext(DynamicPPL.LikelihoodContext()) - obj = OptimLogDensity(model, ctx) - - init = Init(obj.varinfo, model, constrained_space{true}()) - t = ParameterTransform(obj.varinfo, model, constrained_space{true}()) - - return (obj=obj, init = init, transform=t) -end +""" + OptimizationProblem(log_density::OptimLogDensity, adtype, constraints) -function optim_objective(model::DynamicPPL.Model, estimator::Union{MLE, MAP}; constrained::Bool=true) - return _optim_objective(model, estimator, constrained_space{constrained}()) +Create an `OptimizationProblem` for the 
objective function defined by `log_density`. +""" +function Optimization.OptimizationProblem(log_density::OptimLogDensity, adtype, constraints) + # Note that OptimLogDensity is a callable that evaluates the model with given + # parameters. Hence we can use it in the objective function as below. + f = Optimization.OptimizationFunction(log_density, adtype; cons=constraints.cons) + initial_params = log_density.varinfo[:] + prob = if !has_constraints(constraints) + Optimization.OptimizationProblem(f, initial_params) + else + Optimization.OptimizationProblem( + f, + initial_params; + lcons=constraints.lcons, + ucons=constraints.ucons, + lb=constraints.lb, + ub=constraints.ub, + ) + end + return prob end - -function optim_function( - model::Model, - estimator::Union{MLE, MAP}; - constrained::Bool=true, - adtype::Union{Nothing, AbstractADType}=NoAD(), +""" + estimate_mode( + model::DynamicPPL.Model, + estimator::ModeEstimator, + [solver]; + kwargs... + ) + +Find the mode of the probability distribution of a model. + +Under the hood this function calls `Optimization.solve`. + +# Arguments +- `model::DynamicPPL.Model`: The model for which to estimate the mode. +- `estimator::ModeEstimator`: Can be either `MLE()` for maximum likelihood estimation or + `MAP()` for maximum a posteriori estimation. +- `solver=nothing`. The optimization algorithm to use. Optional. Can be any solver + recognised by Optimization.jl. If omitted a default solver is used: LBFGS, or IPNewton + if non-box constraints are present. + +# Keyword arguments +- `initial_params::Union{AbstractVector,Nothing}=nothing`: Initial value for the + optimization. Optional, unless non-box constraints are specified. If omitted it is + generated by either sampling from the prior distribution or uniformly from the box + constraints, if any. +- `adtype::AbstractADType=AutoForwardDiff()`: The automatic differentiation type to use. +- Keyword arguments `lb`, `ub`, `cons`, `lcons`, and `ucons` define constraints for the + optimization problem. Please see [`ModeEstimationConstraints`](@ref) for more details. +- Any extra keyword arguments are passed to `Optimization.solve`. +""" +function estimate_mode( + model::DynamicPPL.Model, + estimator::ModeEstimator, + solver=nothing; + initial_params=nothing, + adtype=ADTypes.AutoForwardDiff(), + cons=nothing, + lcons=nothing, + ucons=nothing, + lb=nothing, + ub=nothing, + kwargs..., ) - if adtype === nothing - Base.depwarn("the use of `adtype=nothing` is deprecated, please use `adtype=SciMLBase.NoAD()`", :optim_function) + constraints = ModeEstimationConstraints(lb, ub, cons, lcons, ucons) + initial_params = generate_initial_params(model, initial_params, constraints) + if solver === nothing + solver = default_solver(constraints) end - obj, init, t = optim_objective(model, estimator; constrained=constrained) - - l(x, _) = obj(x) - f = if adtype isa AbstractADType && adtype !== NoAD() - OptimizationFunction(l, adtype) - else - OptimizationFunction( - l; - grad = (G,x,p) -> obj(nothing, G, x), - ) + # Create an OptimLogDensity object that can be used to evaluate the objective function, + # i.e. the negative log density. Set its VarInfo to the initial parameters. 
+ log_density = let + inner_context = if estimator isa MAP + DynamicPPL.DefaultContext() + else + DynamicPPL.LikelihoodContext() + end + ctx = OptimizationContext(inner_context) + ld = OptimLogDensity(model, ctx) + Accessors.@set ld.varinfo = DynamicPPL.unflatten(ld.varinfo, initial_params) + end + + # TODO(mhauru) We currently couple together the questions of whether the user specified + # bounds/constraints and whether we transform the objective function to an + # unconstrained space. These should be separate concerns, but for that we need to + # implement getting the bounds of the prior distributions. + optimise_in_unconstrained_space = !has_constraints(constraints) + if optimise_in_unconstrained_space + transformed_varinfo = DynamicPPL.link(log_density.varinfo, log_density.model) + log_density = Accessors.@set log_density.varinfo = transformed_varinfo end - - return (func=f, init=init, transform = t) + + prob = Optimization.OptimizationProblem(log_density, adtype, constraints) + solution = Optimization.solve(prob, solver; kwargs...) + # TODO(mhauru) We return a ModeResult for compatibility with the older Optim.jl + # interface. Might we want to break that and develop a better return type? + return ModeResult(log_density, solution) end +""" + maximum_a_posteriori( + model::DynamicPPL.Model, + [solver]; + kwargs... + ) -function optim_problem( - model::Model, - estimator::Union{MAP, MLE}; - constrained::Bool=true, - init_theta=nothing, - adtype::Union{Nothing, AbstractADType}=NoAD(), - kwargs..., -) - f, init, transform = optim_function(model, estimator; constrained=constrained, adtype=adtype) +Find the maximum a posteriori estimate of a model. - u0 = init_theta === nothing ? init() : init(init_theta) - prob = OptimizationProblem(f, u0; kwargs...) +This is a convenience function that calls `estimate_mode` with `MAP()` as the estimator. +Please see the documentation of [`Turing.Optimisation.estimate_mode`](@ref) for more +details. +""" +function maximum_a_posteriori(model::DynamicPPL.Model, args...; kwargs...) + return estimate_mode(model, MAP(), args...; kwargs...) +end - return (; prob, init, transform) +""" + maximum_likelihood( + model::DynamicPPL.Model, + [solver]; + kwargs... + ) + +Find the maximum likelihood estimate of a model. + +This is a convenience function that calls `estimate_mode` with `MLE()` as the estimator. +Please see the documentation of [`Turing.Optimisation.estimate_mode`](@ref) for more +details. +""" +function maximum_likelihood(model::DynamicPPL.Model, args...; kwargs...) + return estimate_mode(model, MLE(), args...; kwargs...) end end diff --git a/src/stdlib/RandomMeasures.jl b/src/stdlib/RandomMeasures.jl index 9858279e6..add452786 100644 --- a/src/stdlib/RandomMeasures.jl +++ b/src/stdlib/RandomMeasures.jl @@ -19,7 +19,8 @@ abstract type AbstractRandomProbabilityMeasure end The *Size-Biased Sampling Process* for random probability measures `rpm` with a surplus mass of `surplus`. """ -struct SizeBiasedSamplingProcess{T<:AbstractRandomProbabilityMeasure,V<:AbstractFloat} <: ContinuousUnivariateDistribution +struct SizeBiasedSamplingProcess{T<:AbstractRandomProbabilityMeasure,V<:AbstractFloat} <: + ContinuousUnivariateDistribution rpm::T surplus::V end @@ -34,7 +35,8 @@ maximum(d::SizeBiasedSamplingProcess) = d.surplus The *Stick-Breaking Process* for random probability measures `rpm`. 
""" -struct StickBreakingProcess{T<:AbstractRandomProbabilityMeasure} <: ContinuousUnivariateDistribution +struct StickBreakingProcess{T<:AbstractRandomProbabilityMeasure} <: + ContinuousUnivariateDistribution rpm::T end @@ -48,12 +50,13 @@ maximum(d::StickBreakingProcess) = 1.0 The *Chinese Restaurant Process* for random probability measures `rpm` with counts `m`. """ -struct ChineseRestaurantProcess{T<:AbstractRandomProbabilityMeasure,V<:AbstractVector{Int}} <: DiscreteUnivariateDistribution +struct ChineseRestaurantProcess{ + T<:AbstractRandomProbabilityMeasure,V<:AbstractVector{Int} +} <: DiscreteUnivariateDistribution rpm::T m::V end - """ _logpdf_table(d::AbstractRandomProbabilityMeasure, m::AbstractVector{Int}) @@ -81,7 +84,7 @@ function rand(rng::AbstractRNG, d::ChineseRestaurantProcess) end minimum(d::ChineseRestaurantProcess) = 1 -maximum(d::ChineseRestaurantProcess) = any(iszero, d.m) ? length(d.m) : length(d.m)+1 +maximum(d::ChineseRestaurantProcess) = any(iszero, d.m) ? length(d.m) : length(d.m) + 1 ## ################# ## ## Random partitions ## @@ -131,7 +134,7 @@ function _logpdf_table(d::DirichletProcess{T}, m::AbstractVector{Int}) where {T< # construct the table first_zero = findfirst(iszero, m) - K = first_zero === nothing ? length(m)+1 : length(m) + K = first_zero === nothing ? length(m) + 1 : length(m) table = fill(T(-Inf), K) # exit if m is empty or contains only zeros @@ -141,7 +144,7 @@ function _logpdf_table(d::DirichletProcess{T}, m::AbstractVector{Int}) where {T< end # compute logpdf for each occupied table - @inbounds for i in 1:(K-1) + @inbounds for i in 1:(K - 1) table[i] = T(log(m[i])) end @@ -187,7 +190,7 @@ end function distribution(d::StickBreakingProcess{<:PitmanYorProcess}) d_rpm = d.rpm d_rpm_d = d.rpm.d - return Beta(one(d_rpm_d)-d_rpm_d, d_rpm.θ + d_rpm.t*d_rpm_d) + return Beta(one(d_rpm_d) - d_rpm_d, d_rpm.θ + d_rpm.t * d_rpm_d) end @doc raw""" @@ -216,14 +219,15 @@ function stickbreak(v) K = length(v) + 1 cumprod_one_minus_v = cumprod(1 .- v) - eta = [if k == 1 - v[1] - elseif k == K - cumprod_one_minus_v[K - 1] - else - v[k] * cumprod_one_minus_v[k - 1] - end - for k in 1:K] + eta = [ + if k == 1 + v[1] + elseif k == K + cumprod_one_minus_v[K - 1] + else + v[k] * cumprod_one_minus_v[k - 1] + end for k in 1:K + ] return eta end @@ -231,7 +235,7 @@ end function distribution(d::SizeBiasedSamplingProcess{<:PitmanYorProcess}) d_rpm = d.rpm d_rpm_d = d.rpm.d - dist = Beta(one(d_rpm_d)-d_rpm_d, d_rpm.θ + d_rpm.t*d_rpm_d) + dist = Beta(one(d_rpm_d) - d_rpm_d, d_rpm.θ + d_rpm.t * d_rpm_d) return LocationScale(zero(d_rpm_d), d.surplus, dist) end @@ -241,7 +245,7 @@ function _logpdf_table(d::PitmanYorProcess{T}, m::AbstractVector{Int}) where {T< # construct table first_zero = findfirst(iszero, m) - K = first_zero === nothing ? length(m)+1 : length(m) + K = first_zero === nothing ? 
length(m) + 1 : length(m) table = fill(T(-Inf), K) # exit if m is empty or contains only zeros @@ -251,8 +255,8 @@ function _logpdf_table(d::PitmanYorProcess{T}, m::AbstractVector{Int}) where {T< end # compute logpdf for each occupied table - @inbounds for i in 1:(K-1) - !iszero(m[i]) && ( table[i] = T(log(m[i] - d.d)) ) + @inbounds for i in 1:(K - 1) + !iszero(m[i]) && (table[i] = T(log(m[i] - d.d))) end # logpdf for new table @@ -269,5 +273,4 @@ end export DirichletProcess, PitmanYorProcess export SizeBiasedSamplingProcess, StickBreakingProcess, ChineseRestaurantProcess - end # end module diff --git a/src/stdlib/distributions.jl b/src/stdlib/distributions.jl index f579876e8..c2b92c29d 100644 --- a/src/stdlib/distributions.jl +++ b/src/stdlib/distributions.jl @@ -79,9 +79,9 @@ struct BinomialLogit{T<:Real,S<:Real} <: DiscreteUnivariateDistribution logitp::T logconstant::S - function BinomialLogit{T}(n::Int, logitp::T) where T + function BinomialLogit{T}(n::Int, logitp::T) where {T} n >= 0 || error("parameter `n` has to be non-negative") - logconstant = - (log1p(n) + n * StatsFuns.log1pexp(logitp)) + logconstant = -(log1p(n) + n * StatsFuns.log1pexp(logitp)) return new{T,typeof(logconstant)}(n, logitp, logconstant) end end @@ -134,13 +134,13 @@ P(X = k) = \\begin{cases} ``` where `K = length(c) + 1`. """ -struct OrderedLogistic{T1, T2<:AbstractVector} <: DiscreteUnivariateDistribution +struct OrderedLogistic{T1,T2<:AbstractVector} <: DiscreteUnivariateDistribution η::T1 cutpoints::T2 function OrderedLogistic{T1,T2}(η::T1, cutpoints::T2) where {T1,T2} issorted(cutpoints) || error("cutpoints are not sorted") - return new{typeof(η), typeof(cutpoints)}(η, cutpoints) + return new{typeof(η),typeof(cutpoints)}(η, cutpoints) end end @@ -193,10 +193,10 @@ function unsafe_logpdf_ordered_logistic(η, cutpoints, K, k::Int) logp = if k == 1 -StatsFuns.log1pexp(η - cutpoints[k]) elseif k < K - tmp = StatsFuns.log1pexp(cutpoints[k-1] - η) + tmp = StatsFuns.log1pexp(cutpoints[k - 1] - η) -tmp + StatsFuns.log1mexp(tmp - StatsFuns.log1pexp(cutpoints[k] - η)) else - -StatsFuns.log1pexp(cutpoints[k-1] - η) + -StatsFuns.log1pexp(cutpoints[k - 1] - η) end end return logp @@ -221,7 +221,7 @@ struct LogPoisson{T<:Real,S} <: DiscreteUnivariateDistribution logλ::T λ::S - function LogPoisson{T}(logλ::T) where T + function LogPoisson{T}(logλ::T) where {T} λ = exp(logλ) return new{T,typeof(λ)}(logλ, λ) end diff --git a/src/variational/VariationalInference.jl b/src/variational/VariationalInference.jl index d601ae406..189d3f700 100644 --- a/src/variational/VariationalInference.jl +++ b/src/variational/VariationalInference.jl @@ -12,16 +12,9 @@ using Random: Random import AdvancedVI import Bijectors - # Reexports using AdvancedVI: vi, ADVI, ELBO, elbo, TruncatedADAGrad, DecayedADAGrad -export - vi, - ADVI, - ELBO, - elbo, - TruncatedADAGrad, - DecayedADAGrad +export vi, ADVI, ELBO, elbo, TruncatedADAGrad, DecayedADAGrad """ make_logjoint(model::Model; weight = 1.0) @@ -31,17 +24,10 @@ use `DynamicPPL.MiniBatch` context to run the `Model` with a weight `num_total_o ## Notes - For sake of efficiency, the returned function is closes over an instance of `VarInfo`. This means that you *might* run into some weird behaviour if you call this method sequentially using different types; if that's the case, just generate a new one for each type using `make_logjoint`. 
""" -function make_logjoint(model::DynamicPPL.Model; weight = 1.0) +function make_logjoint(model::DynamicPPL.Model; weight=1.0) # setup - ctx = DynamicPPL.MiniBatchContext( - DynamicPPL.DefaultContext(), - weight - ) - f = DynamicPPL.LogDensityFunction( - model, - DynamicPPL.VarInfo(model), - ctx - ) + ctx = DynamicPPL.MiniBatchContext(DynamicPPL.DefaultContext(), weight) + f = DynamicPPL.LogDensityFunction(model, DynamicPPL.VarInfo(model), ctx) return Base.Fix1(LogDensityProblems.logdensity, f) end @@ -52,10 +38,10 @@ function (elbo::ELBO)( q, model::DynamicPPL.Model, num_samples; - weight = 1.0, - kwargs... + weight=1.0, + kwargs..., ) - return elbo(rng, alg, q, make_logjoint(model; weight = weight), num_samples; kwargs...) + return elbo(rng, alg, q, make_logjoint(model; weight=weight), num_samples; kwargs...) end # VI algorithms diff --git a/src/variational/advi.jl b/src/variational/advi.jl index cf2d4034a..ec3e6552e 100644 --- a/src/variational/advi.jl +++ b/src/variational/advi.jl @@ -14,7 +14,6 @@ function wrap_in_vec_reshape(f, in_size) return reshape_outer ∘ f ∘ reshape_inner end - """ bijector(model::Model[, sym2ranges = Val(false)]) @@ -22,26 +21,26 @@ Returns a `Stacked <: Bijector` which maps from the support of the posterior to denoting the dimensionality of the latent variables. """ function Bijectors.bijector( - model::DynamicPPL.Model, - ::Val{sym2ranges} = Val(false); - varinfo = DynamicPPL.VarInfo(model) + model::DynamicPPL.Model, ::Val{sym2ranges}=Val(false); varinfo=DynamicPPL.VarInfo(model) ) where {sym2ranges} - num_params = sum([size(varinfo.metadata[sym].vals, 1) - for sym ∈ keys(varinfo.metadata)]) + num_params = sum([ + size(varinfo.metadata[sym].vals, 1) for sym in keys(varinfo.metadata) + ]) - dists = vcat([varinfo.metadata[sym].dists for sym ∈ keys(varinfo.metadata)]...) + dists = vcat([varinfo.metadata[sym].dists for sym in keys(varinfo.metadata)]...) 
- num_ranges = sum([length(varinfo.metadata[sym].ranges) - for sym ∈ keys(varinfo.metadata)]) + num_ranges = sum([ + length(varinfo.metadata[sym].ranges) for sym in keys(varinfo.metadata) + ]) ranges = Vector{UnitRange{Int}}(undef, num_ranges) idx = 0 range_idx = 1 # ranges might be discontinuous => values are vectors of ranges rather than just ranges - sym_lookup = Dict{Symbol, Vector{UnitRange{Int}}}() - for sym ∈ keys(varinfo.metadata) + sym_lookup = Dict{Symbol,Vector{UnitRange{Int}}}() + for sym in keys(varinfo.metadata) sym_lookup[sym] = Vector{UnitRange{Int}}() - for r ∈ varinfo.metadata[sym].ranges + for r in varinfo.metadata[sym].ranges ranges[range_idx] = idx .+ r push!(sym_lookup[sym], ranges[range_idx]) range_idx += 1 @@ -117,27 +116,24 @@ function AdvancedVI.update( end function AdvancedVI.vi( - model::DynamicPPL.Model, - alg::AdvancedVI.ADVI; - optimizer = AdvancedVI.TruncatedADAGrad(), + model::DynamicPPL.Model, alg::AdvancedVI.ADVI; optimizer=AdvancedVI.TruncatedADAGrad() ) q = meanfield(model) - return AdvancedVI.vi(model, alg, q; optimizer = optimizer) + return AdvancedVI.vi(model, alg, q; optimizer=optimizer) end - function AdvancedVI.vi( model::DynamicPPL.Model, alg::AdvancedVI.ADVI, q::Bijectors.TransformedDistribution{<:DistributionsAD.TuringDiagMvNormal}; - optimizer = AdvancedVI.TruncatedADAGrad(), + optimizer=AdvancedVI.TruncatedADAGrad(), ) # Initial parameters for mean-field approx μ, σs = StatsBase.params(q) θ = vcat(μ, StatsFuns.invsoftplus.(σs)) # Optimize - AdvancedVI.optimize!(elbo, alg, q, make_logjoint(model), θ; optimizer = optimizer) + AdvancedVI.optimize!(elbo, alg, q, make_logjoint(model), θ; optimizer=optimizer) # Return updated `Distribution` return AdvancedVI.update(q, θ) diff --git a/test/Aqua.jl b/test/Aqua.jl new file mode 100644 index 000000000..0b536770b --- /dev/null +++ b/test/Aqua.jl @@ -0,0 +1,10 @@ +module AquaTests + +using Aqua: Aqua +using Turing + +# TODO(mhauru) We skip testing for method ambiguities because it catches a lot of problems +# in dependencies. Would like to check it for just Turing.jl itself though. 
+Aqua.test_all(Turing; ambiguities=false) + +end diff --git a/test/Project.toml b/test/Project.toml index ccaf715cb..07b7f6ee4 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -3,6 +3,7 @@ AbstractMCMC = "80f14c24-f653-4e6a-9b94-39d6b0f70001" AdvancedMH = "5b7e9947-ddc0-4b3f-9b55-0d8042f74170" AdvancedPS = "576499cb-2369-40b2-a588-c64705576edc" AdvancedVI = "b5ca4192-6429-45e5-a2d9-87aec30a685c" +Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" Clustering = "aaaa29a8-35af-508c-8bc3-b662a17a0fe5" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" DistributionsAD = "ced4e74d-a319-5a8a-b0ac-84af2272839c" @@ -19,6 +20,8 @@ MCMCChains = "c7f686f2-ff18-58e9-bc7b-31028e88f75d" NamedArrays = "86f7a689-2022-50b4-a561-43c23ac3c673" Optim = "429524aa-4258-5aef-a3af-852621145aeb" Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBBO = "3e6eede4-6085-4f62-9a71-46d9bc1eb92b" +OptimizationNLopt = "4e6fcdb7-1186-4e1f-a706-475e75c168bb" OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e" PDMats = "90014a1f-27ba-587c-ab20-58faa44d9150" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" @@ -38,6 +41,7 @@ AbstractMCMC = "5" AdvancedMH = "0.6, 0.7, 0.8" AdvancedPS = "0.6.0" AdvancedVI = "0.2" +Aqua = "0.8" Clustering = "0.14, 0.15" Distributions = "0.25" DistributionsAD = "0.6.3" @@ -47,14 +51,19 @@ DynamicPPL = "0.27" FiniteDifferences = "0.10.8, 0.11, 0.12" ForwardDiff = "0.10.12 - 0.10.32, 0.10" HypothesisTests = "0.11" +LinearAlgebra = "1" LogDensityProblems = "2" LogDensityProblemsAD = "1.4" MCMCChains = "5, 6" NamedArrays = "0.9.4, 0.10" Optim = "1" -Optimization = "3.5" -OptimizationOptimJL = "0.1" +Optimization = "3" +OptimizationBBO = "0.1, 0.2, 0.3" +OptimizationNLopt = "0.1, 0.2" +OptimizationOptimJL = "0.1, 0.2, 0.3" PDMats = "0.10, 0.11" +Pkg = "1" +Random = "1" ReverseDiff = "1.4.2" SpecialFunctions = "0.10.3, 1, 2" StableRNGs = "1" diff --git a/test/essential/ad.jl b/test/essential/ad.jl index 09655ad54..6583ed911 100644 --- a/test/essential/ad.jl +++ b/test/essential/ad.jl @@ -1,5 +1,61 @@ +module AdTests + +using ..Models: gdemo_default +using Distributions: logpdf +using DynamicPPL: getlogp, getval +using ForwardDiff +using LinearAlgebra +using LogDensityProblems: LogDensityProblems +using LogDensityProblemsAD: LogDensityProblemsAD +using ReverseDiff +using Test: @test, @testset +using Turing +using Turing: SampleFromPrior +using Zygote + +function test_model_ad(model, f, syms::Vector{Symbol}) + # Set up VI. + vi = Turing.VarInfo(model) + + # Collect symbols. + vnms = Vector(undef, length(syms)) + vnvals = Vector{Float64}() + for i in 1:length(syms) + s = syms[i] + vnms[i] = getfield(vi.metadata, s).vns[1] + + vals = getval(vi, vnms[i]) + for i in eachindex(vals) + push!(vnvals, vals[i]) + end + end + + # Compute primal. + x = vec(vnvals) + logp = f(x) + + # Call ForwardDiff's AD directly. + grad_FWAD = sort(ForwardDiff.gradient(f, x)) + + # Compare with `logdensity_and_gradient`. 
+ z = vi[SampleFromPrior()] + for chunksize in (0, 1, 10), standardtag in (true, false, 0, 3) + ℓ = LogDensityProblemsAD.ADgradient( + Turing.AutoForwardDiff(; chunksize=chunksize, tag=standardtag), + Turing.LogDensityFunction( + vi, model, SampleFromPrior(), DynamicPPL.DefaultContext() + ), + ) + l, ∇E = LogDensityProblems.logdensity_and_gradient(ℓ, z) + + # Compare result + @test l ≈ logp + @test sort(∇E) ≈ grad_FWAD atol = 1e-9 + end +end + @testset "ad.jl" begin - @turing_testset "adr" begin + @testset "adr" begin ad_test_f = gdemo_default vi = Turing.VarInfo(ad_test_f) ad_test_f(vi, SampleFromPrior()) @@ -18,7 +74,7 @@ lik_dist = Normal(m, sqrt(s)) lp = logpdf(dist_s, s) + logpdf(Normal(0, sqrt(s)), m) lp += logpdf(lik_dist, 1.5) + logpdf(lik_dist, 2.0) - lp + return lp end # Call ForwardDiff's AD @@ -27,14 +83,20 @@ _x = [_m, _s] grad_FWAD = sort(g(_x)) - ℓ = Turing.LogDensityFunction(vi, ad_test_f, SampleFromPrior(), DynamicPPL.DefaultContext()) + ℓ = Turing.LogDensityFunction( + vi, ad_test_f, SampleFromPrior(), DynamicPPL.DefaultContext() + ) x = map(x -> Float64(x), vi[SampleFromPrior()]) trackerℓ = LogDensityProblemsAD.ADgradient(Turing.AutoTracker(), ℓ) if isdefined(Base, :get_extension) - @test trackerℓ isa Base.get_extension(LogDensityProblemsAD, :LogDensityProblemsADTrackerExt).TrackerGradientLogDensity + @test trackerℓ isa + Base.get_extension( + LogDensityProblemsAD, :LogDensityProblemsADTrackerExt + ).TrackerGradientLogDensity else - @test trackerℓ isa LogDensityProblemsAD.LogDensityProblemsADTrackerExt.TrackerGradientLogDensity + @test trackerℓ isa + LogDensityProblemsAD.LogDensityProblemsADTrackerExt.TrackerGradientLogDensity end @test trackerℓ.ℓ === ℓ ∇E1 = LogDensityProblems.logdensity_and_gradient(trackerℓ, x)[2] @@ -42,22 +104,29 @@ zygoteℓ = LogDensityProblemsAD.ADgradient(Turing.AutoZygote(), ℓ) if isdefined(Base, :get_extension) - @test zygoteℓ isa Base.get_extension(LogDensityProblemsAD, :LogDensityProblemsADZygoteExt).ZygoteGradientLogDensity + @test zygoteℓ isa + Base.get_extension( + LogDensityProblemsAD, :LogDensityProblemsADZygoteExt + ).ZygoteGradientLogDensity else - @test zygoteℓ isa LogDensityProblemsAD.LogDensityProblemsADZygoteExt.ZygoteGradientLogDensity + @test zygoteℓ isa + LogDensityProblemsAD.LogDensityProblemsADZygoteExt.ZygoteGradientLogDensity end @test zygoteℓ.ℓ === ℓ ∇E2 = LogDensityProblems.logdensity_and_gradient(zygoteℓ, x)[2] @test sort(∇E2) ≈ grad_FWAD atol = 1e-9 end - @turing_testset "general AD tests" begin + + @testset "general AD tests" begin # Tests gdemo gradient. function logp1(x::Vector) dist_s = InverseGamma(2, 3) s = x[2] m = x[1] lik_dist = Normal(m, sqrt(s)) - lp = Turing.logpdf_with_trans(dist_s, s, false) + Turing.logpdf_with_trans(Normal(0, sqrt(s)), m, false) + lp = + Turing.logpdf_with_trans(dist_s, s, false) + + Turing.logpdf_with_trans(Normal(0, sqrt(s)), m, false) lp += logpdf(lik_dist, 1.5) + logpdf(lik_dist, 2.0) return lp end @@ -67,7 +136,7 @@ # Test Wishart AD. 
@model function wishart_ad() v ~ Wishart(7, [1 0.5; 0.5 1]) - v + return v end # Hand-written logp @@ -82,7 +151,7 @@ end @testset "Simplex Tracker, Zygote and ReverseDiff (with and without caching) AD" begin @model function dir() - theta ~ Dirichlet(1 ./ fill(4, 4)) + return theta ~ Dirichlet(1 ./ fill(4, 4)) end sample(dir(), HMC(0.01, 1; adtype=AutoZygote()), 1000) sample(dir(), HMC(0.01, 1; adtype=AutoReverseDiff(false)), 1000) @@ -90,14 +159,14 @@ end @testset "PDMatDistribution AD" begin @model function wishart() - theta ~ Wishart(4, Matrix{Float64}(I, 4, 4)) + return theta ~ Wishart(4, Matrix{Float64}(I, 4, 4)) end sample(wishart(), HMC(0.01, 1; adtype=AutoReverseDiff(false)), 1000) sample(wishart(), HMC(0.01, 1; adtype=AutoZygote()), 1000) @model function invwishart() - theta ~ InverseWishart(4, Matrix{Float64}(I, 4, 4)) + return theta ~ InverseWishart(4, Matrix{Float64}(I, 4, 4)) end sample(invwishart(), HMC(0.01, 1; adtype=AutoReverseDiff(false)), 1000) @@ -108,7 +177,7 @@ params = TV(undef, 2) @. params ~ Normal(0, 1) - x ~ MvNormal(params, I) + return x ~ MvNormal(params, I) end function make_logjoint(model::DynamicPPL.Model, ctx::DynamicPPL.AbstractContext) @@ -125,7 +194,11 @@ if unlinked varinfo_init = DynamicPPL.invlink!!(varinfo_init, spl, model) end - varinfo = last(DynamicPPL.evaluate!!(model, varinfo, DynamicPPL.SamplingContext(spl, ctx))) + varinfo = last( + DynamicPPL.evaluate!!( + model, varinfo, DynamicPPL.SamplingContext(spl, ctx) + ), + ) if unlinked varinfo_init = DynamicPPL.link!!(varinfo_init, spl, model) end @@ -140,7 +213,7 @@ model = tst(data) likelihood = make_logjoint(model, DynamicPPL.LikelihoodContext()) - target(x) = likelihood(x, unlinked=true) + target(x) = likelihood(x; unlinked=true) H_f = ForwardDiff.hessian(target, zeros(2)) H_r = ReverseDiff.hessian(target, zeros(2)) @@ -149,10 +222,9 @@ end @testset "memoization: issue #1393" begin - @model function demo(data) sigma ~ Uniform(0.0, 20.0) - data ~ Normal(0, sigma) + return data ~ Normal(0, sigma) end N = 1000 @@ -162,7 +234,6 @@ chn = sample(demo(data), NUTS(0.65; adtype=AutoReverseDiff(true)), 1000) @test mean(Array(chn[:sigma])) ≈ std(data) atol = 0.5 end - end @testset "ReverseDiff compiled without linking" begin @@ -170,12 +241,18 @@ θ = DynamicPPL.getparams(f) f_rd = LogDensityProblemsAD.ADgradient(Turing.AutoReverseDiff(; compile=false), f) - f_rd_compiled = LogDensityProblemsAD.ADgradient(Turing.AutoReverseDiff(; compile=true), f) + f_rd_compiled = LogDensityProblemsAD.ADgradient( + Turing.AutoReverseDiff(; compile=true), f + ) ℓ, ℓ_grad = LogDensityProblems.logdensity_and_gradient(f_rd, θ) - ℓ_compiled, ℓ_grad_compiled = LogDensityProblems.logdensity_and_gradient(f_rd_compiled, θ) + ℓ_compiled, ℓ_grad_compiled = LogDensityProblems.logdensity_and_gradient( + f_rd_compiled, θ + ) @test ℓ == ℓ_compiled @test ℓ_grad == ℓ_grad_compiled end end + +end diff --git a/test/essential/container.jl b/test/essential/container.jl index 635d684b4..d1e1b21bd 100644 --- a/test/essential/container.jl +++ b/test/essential/container.jl @@ -1,3 +1,11 @@ +module ContainerTests + +using AdvancedPS: AdvancedPS +using Distributions: Bernoulli, Beta, Gamma, Normal +using DynamicPPL: @model, Sampler +using Test: @test, @testset +using Turing + @testset "container.jl" begin @model function test() a ~ Normal(0, 1) @@ -6,45 +14,47 @@ 1 ~ Bernoulli(x / 2) c ~ Beta() 0 ~ Bernoulli(x / 2) - x + return x end - @turing_testset "constructor" begin - vi = DynamicPPL.VarInfo() - sampler = Sampler(PG(10)) - model = test() - 
trace = AdvancedPS.Trace(model, sampler, vi, AdvancedPS.TracedRNG()) + @testset "constructor" begin + vi = DynamicPPL.VarInfo() + sampler = Sampler(PG(10)) + model = test() + trace = AdvancedPS.Trace(model, sampler, vi, AdvancedPS.TracedRNG()) - # Make sure we link the traces - @test haskey(trace.model.ctask.task.storage, :__trace) + # Make sure we link the traces + @test haskey(trace.model.ctask.task.storage, :__trace) - res = AdvancedPS.advance!(trace, false) - @test DynamicPPL.get_num_produce(trace.model.f.varinfo) == 1 - @test res ≈ -log(2) + res = AdvancedPS.advance!(trace, false) + @test DynamicPPL.get_num_produce(trace.model.f.varinfo) == 1 + @test res ≈ -log(2) - # Catch broken copy, espetially for RNG / VarInfo - newtrace = AdvancedPS.fork(trace) - res2 = AdvancedPS.advance!(trace) - @test DynamicPPL.get_num_produce(trace.model.f.varinfo) == 2 - @test DynamicPPL.get_num_produce(newtrace.model.f.varinfo) == 1 + # Catch broken copy, especially for RNG / VarInfo + newtrace = AdvancedPS.fork(trace) + res2 = AdvancedPS.advance!(trace) + @test DynamicPPL.get_num_produce(trace.model.f.varinfo) == 2 + @test DynamicPPL.get_num_produce(newtrace.model.f.varinfo) == 1 end - @turing_testset "fork" begin - @model function normal() - a ~ Normal(0, 1) - 3 ~ Normal(a, 2) - b ~ Normal(a, 1) - 1.5 ~ Normal(b, 2) - a, b - end - vi = DynamicPPL.VarInfo() - sampler = Sampler(PG(10)) - model = normal() - - trace = AdvancedPS.Trace(model, sampler, vi, AdvancedPS.TracedRNG()) - - newtrace = AdvancedPS.forkr(trace) - # Catch broken replay mechanism - @test AdvancedPS.advance!(trace) ≈ AdvancedPS.advance!(newtrace) + @testset "fork" begin + @model function normal() + a ~ Normal(0, 1) + 3 ~ Normal(a, 2) + b ~ Normal(a, 1) + 1.5 ~ Normal(b, 2) + return a, b + end + vi = DynamicPPL.VarInfo() + sampler = Sampler(PG(10)) + model = normal() + + trace = AdvancedPS.Trace(model, sampler, vi, AdvancedPS.TracedRNG()) + + newtrace = AdvancedPS.forkr(trace) + # Catch broken replay mechanism + @test AdvancedPS.advance!(trace) ≈ AdvancedPS.advance!(newtrace) end end + +end diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index 1fb27e705..c8d42e338 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -1,4 +1,12 @@ -using Test, Random, Turing, DynamicPPL +module ExperimentalGibbsTests + +using ..Models: MoGtest_default, MoGtest_default_z_vector, gdemo +using ..NumericalTests: check_MoGtest_default, check_MoGtest_default_z_vector, check_gdemo, + check_numerical +using DynamicPPL +using Random +using Test +using Turing function check_transition_varnames( transition::Turing.Inference.Transition, @@ -105,7 +113,7 @@ end @testset "gdemo with CSMC & ESS" begin Random.seed!(100) alg = Turing.Experimental.Gibbs(@varname(s) => CSMC(15), @varname(m) => ESS()) - chain = sample(gdemo(1.5, 2.0), alg, 10_000) + chain = sample(gdemo(1.5, 2.0), alg, 10_000; progress=false) check_gdemo(chain) end @@ -161,7 +169,7 @@ end end # Sample! - chain = sample(MoGtest_default, alg, 1000; progress=true) + chain = sample(MoGtest_default, alg, 1000; progress=false) check_MoGtest_default(chain, atol = 0.2) end @@ -183,7 +191,9 @@ end end # Sample!
- chain = sample(model, alg, 1000; progress=true) + chain = sample(model, alg, 1000; progress=false) check_MoGtest_default_z_vector(chain, atol = 0.2) end end + +end diff --git a/test/optimisation/OptimInterface.jl b/test/ext/OptimInterface.jl similarity index 56% rename from test/optimisation/OptimInterface.jl rename to test/ext/OptimInterface.jl index ba9e2f78e..817d7a520 100644 --- a/test/optimisation/OptimInterface.jl +++ b/test/ext/OptimInterface.jl @@ -1,36 +1,24 @@ -# Used for testing how well it works with nested contexts. -struct OverrideContext{C,T1,T2} <: DynamicPPL.AbstractContext - context::C - logprior_weight::T1 - loglikelihood_weight::T2 -end -DynamicPPL.NodeTrait(::OverrideContext) = DynamicPPL.IsParent() -DynamicPPL.childcontext(parent::OverrideContext) = parent.context -DynamicPPL.setchildcontext(parent::OverrideContext, child) = OverrideContext( - child, - parent.logprior_weight, - parent.loglikelihood_weight -) - -# Only implement what we need for the models above. -function DynamicPPL.tilde_assume(context::OverrideContext, right, vn, vi) - value, logp, vi = DynamicPPL.tilde_assume(context.context, right, vn, vi) - return value, context.logprior_weight, vi -end -function DynamicPPL.tilde_observe(context::OverrideContext, right, left, vi) - logp, vi = DynamicPPL.tilde_observe(context.context, right, left, vi) - return context.loglikelihood_weight, vi -end - -@numerical_testset "OptimInterface.jl" begin +module OptimInterfaceTests + +using ..Models: gdemo_default +using Distributions.FillArrays: Zeros +using LinearAlgebra: I +using Optim: Optim +using Random: Random +using StatsBase: StatsBase +using StatsBase: coef, coefnames, coeftable, informationmatrix, stderror, vcov +using Test: @test, @testset +using Turing + +@testset "TuringOptimExt" begin @testset "MLE" begin Random.seed!(222) true_value = [0.0625, 1.75] - m1 = optimize(gdemo_default, MLE()) - m2 = optimize(gdemo_default, MLE(), NelderMead()) - m3 = optimize(gdemo_default, MLE(), true_value, LBFGS()) - m4 = optimize(gdemo_default, MLE(), true_value) + m1 = Optim.optimize(gdemo_default, MLE()) + m2 = Optim.optimize(gdemo_default, MLE(), Optim.NelderMead()) + m3 = Optim.optimize(gdemo_default, MLE(), true_value, Optim.LBFGS()) + m4 = Optim.optimize(gdemo_default, MLE(), true_value) @test all(isapprox.(m1.values.array - true_value, 0.0, atol=0.01)) @test all(isapprox.(m2.values.array - true_value, 0.0, atol=0.01)) @@ -42,10 +30,10 @@ end Random.seed!(222) true_value = [49 / 54, 7 / 6] - m1 = optimize(gdemo_default, MAP()) - m2 = optimize(gdemo_default, MAP(), NelderMead()) - m3 = optimize(gdemo_default, MAP(), true_value, LBFGS()) - m4 = optimize(gdemo_default, MAP(), true_value) + m1 = Optim.optimize(gdemo_default, MAP()) + m2 = Optim.optimize(gdemo_default, MAP(), Optim.NelderMead()) + m3 = Optim.optimize(gdemo_default, MAP(), true_value, Optim.LBFGS()) + m4 = Optim.optimize(gdemo_default, MAP(), true_value) @test all(isapprox.(m1.values.array - true_value, 0.0, atol=0.01)) @test all(isapprox.(m2.values.array - true_value, 0.0, atol=0.01)) @@ -55,7 +43,7 @@ end @testset "StatsBase integration" begin Random.seed!(54321) - mle_est = optimize(gdemo_default, MLE()) + mle_est = Optim.optimize(gdemo_default, MLE()) # Calculated based on the two data points in gdemo_default, [1.5, 2.0] true_values = [0.0625, 1.75] @@ -67,7 +55,7 @@ end infomat = [2/(2 * true_values[1]^2) 0.0; 0.0 2/true_values[1]] @test all(isapprox.(infomat - informationmatrix(mle_est), 0.0, atol=0.01)) - vcovmat = [2*true_values[1]^2 / 2 0.0; 0.0 
true_values[1]/2] + vcovmat = [2 * true_values[1]^2/2 0.0; 0.0 true_values[1]/2] @test all(isapprox.(vcovmat - vcov(mle_est), 0.0, atol=0.01)) ctable = coeftable(mle_est) @@ -85,40 +73,40 @@ end @testset "Linear regression test" begin @model function regtest(x, y) beta ~ MvNormal(Zeros(2), I) - mu = x*beta - y ~ MvNormal(mu, I) + mu = x * beta + return y ~ MvNormal(mu, I) end - + Random.seed!(987) true_beta = [1.0, -2.2] x = rand(40, 2) - y = x*true_beta - + y = x * true_beta + model = regtest(x, y) - mle = optimize(model, MLE()) - + mle = Optim.optimize(model, MLE()) + vcmat = inv(x'x) vcmat_mle = vcov(mle).array - + @test isapprox(mle.values.array, true_beta) @test isapprox(vcmat, vcmat_mle) end @testset "Dot tilde test" begin @model function dot_gdemo(x) - s ~ InverseGamma(2,3) + s ~ InverseGamma(2, 3) m ~ Normal(0, sqrt(s)) - - (.~)(x, Normal(m, sqrt(s))) + + return (.~)(x, Normal(m, sqrt(s))) end - + model_dot = dot_gdemo([1.5, 2.0]) - mle1 = optimize(gdemo_default, MLE()) - mle2 = optimize(model_dot, MLE()) + mle1 = Optim.optimize(gdemo_default, MLE()) + mle2 = Optim.optimize(model_dot, MLE()) - map1 = optimize(gdemo_default, MAP()) - map2 = optimize(model_dot, MAP()) + map1 = Optim.optimize(gdemo_default, MAP()) + map2 = Optim.optimize(model_dot, MAP()) @test isapprox(mle1.values.array, mle2.values.array) @test isapprox(map1.values.array, map2.values.array) @@ -128,19 +116,19 @@ end @testset "MAP for $(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS result_true = DynamicPPL.TestUtils.posterior_optima(model) - @testset "$(nameof(typeof(optimizer)))" for optimizer in [LBFGS(), NelderMead()] - result = optimize(model, MAP(), optimizer) + optimizers = [Optim.LBFGS(), Optim.NelderMead()] + @testset "$(nameof(typeof(optimizer)))" for optimizer in optimizers + result = Optim.optimize(model, MAP(), optimizer) vals = result.values for vn in DynamicPPL.TestUtils.varnames(model) for vn_leaf in DynamicPPL.TestUtils.varname_leaves(vn, get(result_true, vn)) - @test get(result_true, vn_leaf) ≈ vals[Symbol(vn_leaf)] atol=0.05 + @test get(result_true, vn_leaf) ≈ vals[Symbol(vn_leaf)] atol = 0.05 end end end end - # Some of the models have one variance parameter per observation, and so # the MLE should have the variances set to 0. Since we're working in # transformed space, this corresponds to `-Inf`, which is of course not achievable. @@ -164,8 +152,9 @@ end result_true = DynamicPPL.TestUtils.likelihood_optima(model) # `NelderMead` seems to struggle with convergence here, so we exclude it. 
- @testset "$(nameof(typeof(optimizer)))" for optimizer in [LBFGS(),] - result = optimize(model, MLE(), optimizer, Optim.Options(g_tol=1e-3, f_tol=1e-3)) + @testset "$(nameof(typeof(optimizer)))" for optimizer in [Optim.LBFGS()] + options = Optim.Options(; g_tol=1e-3, f_tol=1e-3) + result = Optim.optimize(model, MLE(), optimizer, options) vals = result.values for vn in DynamicPPL.TestUtils.varnames(model) @@ -173,75 +162,31 @@ end if model.f in allowed_incorrect_mle @test isfinite(get(result_true, vn_leaf)) else - @test get(result_true, vn_leaf) ≈ vals[Symbol(vn_leaf)] atol=0.05 + @test get(result_true, vn_leaf) ≈ vals[Symbol(vn_leaf)] atol = 0.05 end end end end end - # Issue: https://discourse.julialang.org/t/two-equivalent-conditioning-syntaxes-giving-different-likelihood-values/100320 - @testset "OptimizationContext" begin - @model function model1(x) - μ ~ Uniform(0, 2) - x ~ LogNormal(μ, 1) - end - - @model function model2() - μ ~ Uniform(0, 2) - x ~ LogNormal(μ, 1) - end - - x = 1.0 - w = [1.0] - - @testset "With ConditionContext" begin - m1 = model1(x) - m2 = model2() | (x = x,) - ctx = Turing.OptimizationContext(DynamicPPL.LikelihoodContext()) - @test Turing.OptimLogDensity(m1, ctx)(w) == Turing.OptimLogDensity(m2, ctx)(w) - end - - @testset "With prefixes" begin - function prefix_μ(model) - return DynamicPPL.contextualize(model, DynamicPPL.PrefixContext{:inner}(model.context)) - end - m1 = prefix_μ(model1(x)) - m2 = prefix_μ(model2() | (var"inner.x" = x,)) - ctx = Turing.OptimizationContext(DynamicPPL.LikelihoodContext()) - @test Turing.OptimLogDensity(m1, ctx)(w) == Turing.OptimLogDensity(m2, ctx)(w) - end - - @testset "Weighted" begin - function override(model) - return DynamicPPL.contextualize( - model, - OverrideContext(model.context, 100, 1) - ) - end - m1 = override(model1(x)) - m2 = override(model2() | (x = x,)) - ctx = Turing.OptimizationContext(DynamicPPL.DefaultContext()) - @test Turing.OptimLogDensity(m1, ctx)(w) == Turing.OptimLogDensity(m2, ctx)(w) - end - - @testset "with :=" begin - @model function demo_track() - x ~ Normal() - y := 100 + x - end - model = demo_track() - result = optimize(model, MAP()) - @test result.values[:x] ≈ 0 atol=1e-1 - @test result.values[:y] ≈ 100 atol=1e-1 - end - end - # Issue: https://discourse.julialang.org/t/turing-mixture-models-with-dirichlet-weightings/112910 @testset "with different linked dimensionality" begin @model demo_dirichlet() = x ~ Dirichlet(2 * ones(3)) model = demo_dirichlet() - result = optimize(model, MAP()) - @test result.values ≈ mode(Dirichlet(2 * ones(3))) atol=0.2 + result = Optim.optimize(model, MAP()) + @test result.values ≈ mode(Dirichlet(2 * ones(3))) atol = 0.2 + end + + @testset "with :=" begin + @model function demo_track() + x ~ Normal() + return y := 100 + x + end + model = demo_track() + result = Optim.optimize(model, MAP()) + @test result.values[:x] ≈ 0 atol = 1e-1 + @test result.values[:y] ≈ 100 atol = 1e-1 end end + +end diff --git a/test/ext/Optimisation.jl b/test/ext/Optimisation.jl deleted file mode 100644 index 324a8a7de..000000000 --- a/test/ext/Optimisation.jl +++ /dev/null @@ -1,123 +0,0 @@ -@testset "ext/Optimisation.jl" begin - @testset "gdemo" begin - @testset "MLE" begin - Random.seed!(222) - true_value = [0.0625, 1.75] - - f1 = optim_function(gdemo_default, MLE();constrained=false) - p1 = OptimizationProblem(f1.func, f1.init(true_value)) - - p2 = optim_objective(gdemo_default, MLE();constrained=false) - - p3 = optim_problem(gdemo_default, MLE();constrained=false, init_theta=true_value) - - 
m1 = solve(p1, NelderMead()) - m2 = solve(p1, LBFGS()) - m3 = solve(p1, BFGS()) - m4 = optimize(p2.obj, (G,z) -> p2.obj(nothing,G,z), p2.init(true_value), NelderMead()) - m5 = optimize(p2.obj, (G,z) -> p2.obj(nothing,G,z), p2.init(true_value), LBFGS()) - m6 = optimize(p2.obj, (G,z) -> p2.obj(nothing,G,z), p2.init(true_value), BFGS()) - m7 = solve(p3.prob, NelderMead()) - m8 = solve(p3.prob, LBFGS()) - m9 = solve(p3.prob, BFGS()) - - @test all(isapprox.(f1.transform(m1.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(f1.transform(m2.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(f1.transform(m3.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p2.transform(m4.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p2.transform(m5.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p2.transform(m6.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p3.transform(m7.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p3.transform(m8.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p3.transform(m9.minimizer) - true_value, 0.0, atol=0.01)) - end - - @testset "MAP" begin - Random.seed!(222) - true_value = [49 / 54, 7 / 6] - - f1 = optim_function(gdemo_default, MAP();constrained=false) - p1 = OptimizationProblem(f1.func, f1.init(true_value)) - - p2 = optim_objective(gdemo_default, MAP();constrained=false) - - p3 = optim_problem(gdemo_default, MAP();constrained=false,init_theta=true_value) - - m1 = solve(p1, NelderMead()) - m2 = solve(p1, LBFGS()) - m3 = solve(p1, BFGS()) - m4 = optimize(p2.obj, (G,z) -> p2.obj(nothing,G,z), p2.init(true_value), NelderMead()) - m5 = optimize(p2.obj, (G,z) -> p2.obj(nothing,G,z), p2.init(true_value), LBFGS()) - m6 = optimize(p2.obj, (G,z) -> p2.obj(nothing,G,z), p2.init(true_value), BFGS()) - m7 = solve(p3.prob, NelderMead()) - m8 = solve(p3.prob, LBFGS()) - m9 = solve(p3.prob, BFGS()) - - @test all(isapprox.(f1.transform(m1.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(f1.transform(m2.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(f1.transform(m3.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p2.transform(m4.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p2.transform(m5.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p2.transform(m6.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p3.transform(m7.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p3.transform(m8.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p3.transform(m9.minimizer) - true_value, 0.0, atol=0.01)) - end - - @testset "MLE constrained" begin - Random.seed!(222) - true_value = [0.0625, 1.75] - lb = [0.0, 0.0] - ub = [2.0, 2.0] - - f1 = optim_function(gdemo_default, MLE();constrained=true) - p1 = OptimizationProblem(f1.func, f1.init(true_value); lb=lb, ub=ub) - - p2 = optim_objective(gdemo_default, MLE();constrained=true) - - p3 = optim_problem(gdemo_default, MLE();constrained=true, init_theta=true_value, lb=lb, ub=ub) - - m1 = solve(p1, Fminbox(LBFGS())) - m2 = solve(p1, Fminbox(BFGS())) - m3 = optimize(p2.obj, (G,z) -> p2.obj(nothing,G,z), lb, ub, p2.init(true_value), Fminbox(LBFGS())) - m4 = optimize(p2.obj, (G,z) -> p2.obj(nothing,G,z), lb, ub, p2.init(true_value), Fminbox(BFGS())) - m5 = solve(p3.prob, Fminbox(LBFGS())) - m6 = solve(p3.prob, Fminbox(BFGS())) - - @test all(isapprox.(f1.transform(m1.minimizer) - true_value, 0.0, atol=0.01)) - @test 
all(isapprox.(f1.transform(m2.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p2.transform(m3.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p2.transform(m4.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p3.transform(m5.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p3.transform(m6.minimizer) - true_value, 0.0, atol=0.01)) - end - - @testset "MAP constrained" begin - Random.seed!(222) - true_value = [49 / 54, 7 / 6] - lb = [0.0, 0.0] - ub = [2.0, 2.0] - - f1 = optim_function(gdemo_default, MAP();constrained=true) - p1 = OptimizationProblem(f1.func, f1.init(true_value); lb=lb, ub=ub) - - p2 = optim_objective(gdemo_default, MAP();constrained=true) - - p3 = optim_problem(gdemo_default, MAP();constrained=true, init_theta=true_value, lb=lb, ub=ub) - - m1 = solve(p1, Fminbox(LBFGS())) - m2 = solve(p1, Fminbox(BFGS())) - m3 = optimize(p2.obj, (G,z) -> p2.obj(nothing,G,z), lb, ub, p2.init(true_value), Fminbox(LBFGS())) - m4 = optimize(p2.obj, (G,z) -> p2.obj(nothing,G,z), lb, ub, p2.init(true_value), Fminbox(BFGS())) - m5 = solve(p3.prob, Fminbox(LBFGS())) - m6 = solve(p3.prob, Fminbox(BFGS())) - - @test all(isapprox.(f1.transform(m1.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(f1.transform(m2.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p2.transform(m3.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p2.transform(m4.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p3.transform(m5.minimizer) - true_value, 0.0, atol=0.01)) - @test all(isapprox.(p3.transform(m6.minimizer) - true_value, 0.0, atol=0.01)) - end - end -end diff --git a/test/ext/dynamichmc.jl b/test/ext/dynamichmc.jl index 61027a196..aa52093bc 100644 --- a/test/ext/dynamichmc.jl +++ b/test/ext/dynamichmc.jl @@ -1,5 +1,16 @@ +module DynamicHMCTests + +using ..Models: gdemo_default +using ..NumericalTests: check_gdemo +using Test: @test, @testset +using Distributions: sample +using DynamicHMC: DynamicHMC +using DynamicPPL: DynamicPPL +using DynamicPPL: Sampler +using Random: Random +using Turing + @testset "TuringDynamicHMCExt" begin - import DynamicHMC Random.seed!(100) @test DynamicPPL.alg_str(Sampler(externalsampler(DynamicHMC.NUTS()))) == "DynamicNUTS" @@ -8,3 +19,5 @@ chn = sample(gdemo_default, spl, 10_000) check_gdemo(chn) end + +end diff --git a/test/mcmc/Inference.jl b/test/mcmc/Inference.jl index a8cfd3e5d..c11fcca08 100644 --- a/test/mcmc/Inference.jl +++ b/test/mcmc/Inference.jl @@ -1,5 +1,28 @@ +module InferenceTests + +using ..Models: gdemo_d, gdemo_default +using ..NumericalTests: check_gdemo, check_numerical +using Distributions: Bernoulli, Beta, InverseGamma, Normal +using Distributions: sample +import DynamicPPL +using DynamicPPL: Sampler, getlogp +import Enzyme +import ForwardDiff +using LinearAlgebra: I +import MCMCChains +import Random +import ReverseDiff +using Test: @test, @test_throws, @testset +using Turing + +# Disable Enzyme warnings +Enzyme.API.typeWarning!(false) + +# Enable runtime activity (workaround) +Enzyme.API.runtimeActivity!(true) + # @testset "Testing inference.jl with $adbackend" for adbackend in (AutoForwardDiff(; chunksize=0), AutoReverseDiff(false)) -@testset "Testing inference.jl with $adbackend" for adbackend in (AutoEnzyme(),) +@testset "Testing inference.jl with $adbackend" for adbackend in (AutoEnzyme(),) # Only test threading if 1.3+. 
if VERSION > v"1.2" @testset "threaded sampling" begin @@ -548,3 +571,5 @@ @test all(xs[:, 2] .=== [missing, 2, 4]) end end + +end diff --git a/test/mcmc/abstractmcmc.jl b/test/mcmc/abstractmcmc.jl index 378b22fba..10d202da3 100644 --- a/test/mcmc/abstractmcmc.jl +++ b/test/mcmc/abstractmcmc.jl @@ -1,3 +1,18 @@ +module AbstractMCMCTests + +using AdvancedMH: AdvancedMH +using Distributions: sample +using Distributions.FillArrays: Zeros +using DynamicPPL: DynamicPPL +using ForwardDiff: ForwardDiff +using LinearAlgebra: I +using LogDensityProblems: LogDensityProblems +using LogDensityProblemsAD: LogDensityProblemsAD +using Random: Random +using ReverseDiff: ReverseDiff +using StableRNGs: StableRNG +using Test: @test, @test_throws, @testset +using Turing using Turing.Inference: AdvancedHMC function initialize_nuts(model::Turing.Model) @@ -6,7 +21,9 @@ function initialize_nuts(model::Turing.Model) f = LogDensityProblemsAD.ADgradient(DynamicPPL.LogDensityFunction(model)) # Link the varinfo. - f = Turing.Inference.setvarinfo(f, DynamicPPL.link!!(Turing.Inference.getvarinfo(f), model)) + f = Turing.Inference.setvarinfo( + f, DynamicPPL.link!!(Turing.Inference.getvarinfo(f), model) + ) # Choose parameter dimensionality and initial parameter value D = LogDensityProblems.dimension(f) @@ -24,16 +41,18 @@ function initialize_nuts(model::Turing.Model) # - multinomial sampling scheme, # - generalised No-U-Turn criteria, and # - windowed adaption for step-size and diagonal mass matrix - proposal = AdvancedHMC.HMCKernel(AdvancedHMC.Trajectory{AdvancedHMC.MultinomialTS}(integrator, AdvancedHMC.GeneralisedNoUTurn())) + proposal = AdvancedHMC.HMCKernel( + AdvancedHMC.Trajectory{AdvancedHMC.MultinomialTS}( + integrator, AdvancedHMC.GeneralisedNoUTurn() + ), + ) adaptor = AdvancedHMC.StanHMCAdaptor( - AdvancedHMC.MassMatrixAdaptor(metric), - AdvancedHMC.StepSizeAdaptor(0.65, integrator) + AdvancedHMC.MassMatrixAdaptor(metric), AdvancedHMC.StepSizeAdaptor(0.65, integrator) ) return AdvancedHMC.HMCSampler(proposal, metric, adaptor) end - function initialize_mh_rw(model) f = DynamicPPL.LogDensityFunction(model) d = LogDensityProblems.dimension(f) @@ -42,17 +61,22 @@ end # TODO: Should this go somewhere else? # Convert a model into a `Distribution` to allow usage as a proposal in AdvancedMH.jl. 
-struct ModelDistribution{M<:DynamicPPL.Model,V<:DynamicPPL.VarInfo} <: ContinuousMultivariateDistribution +struct ModelDistribution{M<:DynamicPPL.Model,V<:DynamicPPL.VarInfo} <: + ContinuousMultivariateDistribution model::M varinfo::V end -ModelDistribution(model::DynamicPPL.Model) = ModelDistribution(model, DynamicPPL.VarInfo(model)) +function ModelDistribution(model::DynamicPPL.Model) + return ModelDistribution(model, DynamicPPL.VarInfo(model)) +end Base.length(d::ModelDistribution) = length(d.varinfo[:]) function Distributions._logpdf(d::ModelDistribution, x::AbstractVector) return logprior(d.model, DynamicPPL.unflatten(d.varinfo, x)) end -function Distributions._rand!(rng::Random.AbstractRNG, d::ModelDistribution, x::AbstractVector{<:Real}) +function Distributions._rand!( + rng::Random.AbstractRNG, d::ModelDistribution, x::AbstractVector{<:Real} +) model = d.model varinfo = deepcopy(d.varinfo) for vn in keys(varinfo) @@ -64,10 +88,14 @@ function Distributions._rand!(rng::Random.AbstractRNG, d::ModelDistribution, x:: end function initialize_mh_with_prior_proposal(model) - return AdvancedMH.MetropolisHastings(AdvancedMH.StaticProposal(ModelDistribution(model))) + return AdvancedMH.MetropolisHastings( + AdvancedMH.StaticProposal(ModelDistribution(model)) + ) end -function test_initial_params(model, sampler, initial_params=DynamicPPL.VarInfo(model)[:]; kwargs...) +function test_initial_params( + model, sampler, initial_params=DynamicPPL.VarInfo(model)[:]; kwargs... +) # Execute the transition with two different RNGs and check that the resulting # parameter values are the same. rng1 = Random.MersenneTwister(42) @@ -83,14 +111,16 @@ function test_initial_params(model, sampler, initial_params=DynamicPPL.VarInfo(m end @testset "External samplers" begin - @turing_testset "AdvancedHMC.jl" begin + @testset "AdvancedHMC.jl" begin # Try a few different AD backends. @testset "adtype=$adtype" for adtype in [AutoForwardDiff(), AutoReverseDiff()] @testset "$(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS # Need some functionality to initialize the sampler. # TODO: Remove this once the constructors in the respective packages become "lazy". - sampler = initialize_nuts(model); - sampler_ext = DynamicPPL.Sampler(externalsampler(sampler; adtype, unconstrained=true), model) + sampler = initialize_nuts(model) + sampler_ext = DynamicPPL.Sampler( + externalsampler(sampler; adtype, unconstrained=true), model + ) # FIXME: Once https://github.com/TuringLang/AdvancedHMC.jl/pull/366 goes through, uncomment. # @testset "initial_params" begin # test_initial_params(model, sampler_ext; n_adapts=0) @@ -104,9 +134,13 @@ end ) @testset "inference" begin - if adtype isa AutoReverseDiff && model.f === DynamicPPL.TestUtils.demo_assume_index_observe && VERSION < v"1.8" + if adtype isa AutoReverseDiff && + model.f === DynamicPPL.TestUtils.demo_assume_index_observe && + VERSION < v"1.8" # Ref: https://github.com/TuringLang/DynamicPPL.jl/issues/612 - @test_throws UndefRefError sample(model, sampler_ext, 5_000; sample_kwargs...) + @test_throws UndefRefError sample( + model, sampler_ext, 5_000; sample_kwargs... + ) else DynamicPPL.TestUtils.test_sampler( [model], @@ -125,23 +159,33 @@ end rng = Random.default_rng() model = DynamicPPL.TestUtils.DEMO_MODELS[1] sampler = initialize_nuts(model) - sampler_ext = externalsampler(sampler; unconstrained=true, adtype=AutoForwardDiff()) + sampler_ext = externalsampler( + sampler; unconstrained=true, adtype=AutoForwardDiff() + ) # Initial step. 
- state = last(AbstractMCMC.step(rng, model, DynamicPPL.Sampler(sampler_ext); n_adapts=0)) + state = last( + AbstractMCMC.step(rng, model, DynamicPPL.Sampler(sampler_ext); n_adapts=0) + ) @test state.logdensity isa LogDensityProblemsAD.ADGradientWrapper # Subsequent step. - state = last(AbstractMCMC.step(rng, model, DynamicPPL.Sampler(sampler_ext), state; n_adapts=0)) + state = last( + AbstractMCMC.step( + rng, model, DynamicPPL.Sampler(sampler_ext), state; n_adapts=0 + ), + ) @test state.logdensity isa LogDensityProblemsAD.ADGradientWrapper end end - @turing_testset "AdvancedMH.jl" begin + @testset "AdvancedMH.jl" begin @testset "RWMH" begin @testset "$(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS # Need some functionality to initialize the sampler. # TODO: Remove this once the constructors in the respective packages become "lazy". - sampler = initialize_mh_rw(model); - sampler_ext = DynamicPPL.Sampler(externalsampler(sampler; unconstrained=true), model) + sampler = initialize_mh_rw(model) + sampler_ext = DynamicPPL.Sampler( + externalsampler(sampler; unconstrained=true), model + ) @testset "initial_params" begin test_initial_params(model, sampler_ext) end @@ -153,7 +197,7 @@ end discard_initial=1_000, thinning=10, rtol=0.2, - sampler_name="AdvancedMH" + sampler_name="AdvancedMH", ) end end @@ -181,3 +225,5 @@ end # end end end + +end diff --git a/test/mcmc/emcee.jl b/test/mcmc/emcee.jl index 929506f95..08ea16d2a 100644 --- a/test/mcmc/emcee.jl +++ b/test/mcmc/emcee.jl @@ -1,10 +1,21 @@ +module EmceeTests + +using ..Models: gdemo_default +using ..NumericalTests: check_gdemo +using Distributions: sample +using DynamicPPL: DynamicPPL +using DynamicPPL: Sampler +using Random: Random +using Test: @test, @test_throws, @testset +using Turing + @testset "emcee.jl" begin @testset "gdemo" begin Random.seed!(9876) n_samples = 1000 n_walkers = 250 - + spl = Emcee(n_walkers, 2.0) @test DynamicPPL.alg_str(Sampler(spl, gdemo_default)) == "Emcee" @@ -41,3 +52,5 @@ @test chain[:m] == fill(1.0, 1, nwalkers) end end + +end diff --git a/test/mcmc/ess.jl b/test/mcmc/ess.jl index a11068656..0a1c23a9e 100644 --- a/test/mcmc/ess.jl +++ b/test/mcmc/ess.jl @@ -1,18 +1,29 @@ +module ESSTests + +using ..Models: MoGtest, MoGtest_default, gdemo, gdemo_default +using ..NumericalTests: check_MoGtest_default, check_numerical +using Distributions: Normal, sample +using DynamicPPL: DynamicPPL +using DynamicPPL: Sampler +using Random: Random +using Test: @test, @testset +using Turing + @testset "ESS" begin @model function demo(x) m ~ Normal() - x ~ Normal(m, 0.5) + return x ~ Normal(m, 0.5) end demo_default = demo(1.0) @model function demodot(x) m = Vector{Float64}(undef, 2) @. 
m ~ Normal() - x ~ Normal(m[2], 0.5) + return x ~ Normal(m[2], 0.5) end demodot_default = demodot(1.0) - @turing_testset "ESS constructor" begin + @testset "ESS constructor" begin Random.seed!(0) N = 500 @@ -31,29 +42,25 @@ c5 = sample(gdemo_default, s3, N) end - @numerical_testset "ESS inference" begin + @testset "ESS inference" begin Random.seed!(1) chain = sample(demo_default, ESS(), 5_000) - check_numerical(chain, [:m], [0.8], atol = 0.1) + check_numerical(chain, [:m], [0.8]; atol=0.1) Random.seed!(1) chain = sample(demodot_default, ESS(), 5_000) - check_numerical(chain, ["m[1]", "m[2]"], [0.0, 0.8], atol = 0.1) + check_numerical(chain, ["m[1]", "m[2]"], [0.0, 0.8]; atol=0.1) Random.seed!(100) - alg = Gibbs( - CSMC(15, :s), - ESS(:m)) + alg = Gibbs(CSMC(15, :s), ESS(:m)) chain = sample(gdemo(1.5, 2.0), alg, 10_000) - check_numerical(chain, [:s, :m], [49 / 24, 7 / 6], atol = 0.1) + check_numerical(chain, [:s, :m], [49 / 24, 7 / 6]; atol=0.1) # MoGtest Random.seed!(125) - alg = Gibbs( - CSMC(15, :z1, :z2, :z3, :z4), - ESS(:mu1), ESS(:mu2)) + alg = Gibbs(CSMC(15, :z1, :z2, :z3, :z4), ESS(:mu1), ESS(:mu2)) chain = sample(MoGtest_default, alg, 6000) - check_MoGtest_default(chain, atol = 0.1) + check_MoGtest_default(chain; atol=0.1) # Different "equivalent" models. # NOTE: Because `ESS` only supports "single" variables with @@ -61,13 +68,17 @@ # on the non-Gaussian variables in `DEMO_MODELS`. models_conditioned = map(DynamicPPL.TestUtils.DEMO_MODELS) do model # Condition on the non-Gaussian random variables. - model | (s = DynamicPPL.TestUtils.posterior_mean(model).s,) + model | (s=DynamicPPL.TestUtils.posterior_mean(model).s,) end DynamicPPL.TestUtils.test_sampler( - models_conditioned, DynamicPPL.Sampler(ESS()), 10_000; + models_conditioned, + DynamicPPL.Sampler(ESS()), + 10_000; # Filter out the varnames we've conditioned on. 
- varnames_filter=vn -> DynamicPPL.getsym(vn) != :s + varnames_filter=vn -> DynamicPPL.getsym(vn) != :s, ) end end + +end diff --git a/test/mcmc/gibbs.jl b/test/mcmc/gibbs.jl index ef2299dca..4d2053c14 100644 --- a/test/mcmc/gibbs.jl +++ b/test/mcmc/gibbs.jl @@ -1,5 +1,21 @@ -@testset "Testing gibbs.jl with $adbackend" for adbackend in (AutoForwardDiff(; chunksize=0), AutoReverseDiff(false)) - @turing_testset "gibbs constructor" begin +module GibbsTests + +using ..Models: MoGtest_default, gdemo, gdemo_default +using ..NumericalTests: check_MoGtest_default, check_gdemo, check_numerical +using Distributions: InverseGamma, Normal +using Distributions: sample +using ForwardDiff: ForwardDiff +using Random: Random +using ReverseDiff: ReverseDiff +using Test: @test, @testset +using Turing +using Turing: Inference +using Turing.RandomMeasures: ChineseRestaurantProcess, DirichletProcess + +@testset "Testing gibbs.jl with $adbackend" for adbackend in ( + AutoForwardDiff(; chunksize=0), AutoReverseDiff(false) +) + @testset "gibbs constructor" begin N = 500 s1 = Gibbs(HMC(0.1, 5, :s, :m; adtype=adbackend)) s2 = Gibbs(PG(10, :s, :m)) @@ -30,30 +46,32 @@ # it should return a Chains object @test sample(gdemo_default, g, N) isa MCMCChains.Chains end - @numerical_testset "gibbs inference" begin + @testset "gibbs inference" begin Random.seed!(100) alg = Gibbs(CSMC(15, :s), HMC(0.2, 4, :m; adtype=adbackend)) chain = sample(gdemo(1.5, 2.0), alg, 10_000) - check_numerical(chain, [:s, :m], [49/24, 7/6], atol=0.15) + check_numerical(chain, [:s, :m], [49 / 24, 7 / 6]; atol=0.15) Random.seed!(100) alg = Gibbs(MH(:s), HMC(0.2, 4, :m; adtype=adbackend)) chain = sample(gdemo(1.5, 2.0), alg, 10_000) - check_numerical(chain, [:s, :m], [49/24, 7/6], atol=0.1) + check_numerical(chain, [:s, :m], [49 / 24, 7 / 6]; atol=0.1) alg = Gibbs(CSMC(15, :s), ESS(:m)) chain = sample(gdemo(1.5, 2.0), alg, 10_000) - check_numerical(chain, [:s, :m], [49/24, 7/6], atol=0.1) + check_numerical(chain, [:s, :m], [49 / 24, 7 / 6]; atol=0.1) alg = CSMC(15) chain = sample(gdemo(1.5, 2.0), alg, 10_000) - check_numerical(chain, [:s, :m], [49/24, 7/6], atol=0.1) + check_numerical(chain, [:s, :m], [49 / 24, 7 / 6]; atol=0.1) Random.seed!(200) - gibbs = Gibbs(PG(15, :z1, :z2, :z3, :z4), HMC(0.15, 3, :mu1, :mu2; adtype=adbackend)) + gibbs = Gibbs( + PG(15, :z1, :z2, :z3, :z4), HMC(0.15, 3, :mu1, :mu2; adtype=adbackend) + ) chain = sample(MoGtest_default, gibbs, 10_000) - check_MoGtest_default(chain, atol=0.15) + check_MoGtest_default(chain; atol=0.15) Random.seed!(200) for alg in [ @@ -65,7 +83,7 @@ end end - @turing_testset "transitions" begin + @testset "transitions" begin @model function gdemo_copy() s ~ InverseGamma(2, 3) m ~ Normal(0, sqrt(s)) @@ -81,44 +99,45 @@ ::Turing.Sampler{<:Gibbs}, state, ::Type{MCMCChains.Chains}; - kwargs... + kwargs..., ) - samples isa Vector{<:Inference.Transition} || - error("incorrect transitions") - return + samples isa Vector{<:Inference.Transition} || error("incorrect transitions") + return nothing end function callback(rng, model, sampler, sample, state, i; kwargs...) 
sample isa Inference.Transition || error("incorrect sample") - return + return nothing end alg = Gibbs(MH(:s), HMC(0.2, 4, :m; adtype=adbackend)) - sample(model, alg, 100; callback = callback) + sample(model, alg, 100; callback=callback) end - @turing_testset "dynamic model" begin + @testset "dynamic model" begin @model function imm(y, alpha, ::Type{M}=Vector{Float64}) where {M} N = length(y) rpm = DirichletProcess(alpha) - + z = zeros(Int, N) cluster_counts = zeros(Int, N) fill!(cluster_counts, 0) - + for i in 1:N z[i] ~ ChineseRestaurantProcess(rpm, cluster_counts) cluster_counts[z[i]] += 1 end - + Kmax = findlast(!iszero, cluster_counts) m = M(undef, Kmax) - for k = 1:Kmax + for k in 1:Kmax m[k] ~ Normal(1.0, 1.0) end end - model = imm(randn(100), 1.0); + model = imm(randn(100), 1.0) # https://github.com/TuringLang/Turing.jl/issues/1725 # sample(model, Gibbs(MH(:z), HMC(0.01, 4, :m)), 100); sample(model, Gibbs(PG(10, :z), HMC(0.01, 4, :m; adtype=adbackend)), 100) end end + +end diff --git a/test/mcmc/gibbs_conditional.jl b/test/mcmc/gibbs_conditional.jl index d7752da64..3ba2fdbed 100644 --- a/test/mcmc/gibbs_conditional.jl +++ b/test/mcmc/gibbs_conditional.jl @@ -1,7 +1,26 @@ -@turing_testset "Testing gibbs conditionals.jl with $adbackend" for adbackend in (AutoForwardDiff(; chunksize=0), AutoReverseDiff(false)) - Random.seed!(1000); rng = StableRNG(123) - - @turing_testset "gdemo" begin +module GibbsConditionalTests + +using ..Models: gdemo, gdemo_default +using ..NumericalTests: check_gdemo, check_numerical +using Clustering: Clustering +using Distributions: Categorical, InverseGamma, Normal, sample +using ForwardDiff: ForwardDiff +using LinearAlgebra: Diagonal, I +using Random: Random +using ReverseDiff: ReverseDiff +using StableRNGs: StableRNG +using StatsBase: counts +using StatsFuns: StatsFuns +using Test: @test, @testset +using Turing + +@testset "Testing gibbs conditionals.jl with $adbackend" for adbackend in ( + AutoForwardDiff(; chunksize=0), AutoReverseDiff(false) +) + Random.seed!(1000) + rng = StableRNG(123) + + @testset "gdemo" begin # We consider the model # ```math # s ~ InverseGamma(2, 3) @@ -20,7 +39,7 @@ # ```math # m | s, x ~ Normal(m_n, sqrt(s / (N + 1))) # ``` - cond_m = let N=N, m_n=m_n + cond_m = let N = N, m_n = m_n c -> Normal(m_n, sqrt(c.s / (N + 1))) end @@ -29,29 +48,31 @@ # s | m, x ~ InverseGamma(2 + (N + 1) / 2, 3 + (m^2 + ∑ (xᵢ - m)^2) / 2) = # InverseGamma(2 + (N + 1) / 2, 3 + m^2 / 2 + N / 2 * (x_var + (x_mean - m)^2)) # ``` - cond_s = let N=N, x_mean=x_mean, x_var=x_var - c -> InverseGamma(2 + (N + 1) / 2, 3 + c.m^2 / 2 + N / 2 * (x_var + (x_mean - c.m)^2)) + cond_s = let N = N, x_mean = x_mean, x_var = x_var + c -> InverseGamma( + 2 + (N + 1) / 2, 3 + c.m^2 / 2 + N / 2 * (x_var + (x_mean - c.m)^2) + ) end # Three Gibbs samplers: # one for each variable fixed to the posterior mean - s_posterior_mean = 49/24 + s_posterior_mean = 49 / 24 sampler1 = Gibbs( GibbsConditional(:m, cond_m), GibbsConditional(:s, _ -> Normal(s_posterior_mean, 0)), ) chain = sample(rng, gdemo_default, sampler1, 10_000) - cond_m_mean = mean(cond_m((s = s_posterior_mean,))) + cond_m_mean = mean(cond_m((s=s_posterior_mean,))) check_numerical(chain, [:m, :s], [cond_m_mean, s_posterior_mean]) @test all(==(s_posterior_mean), chain[:s][2:end]) - m_posterior_mean = 7/6 + m_posterior_mean = 7 / 6 sampler2 = Gibbs( GibbsConditional(:m, _ -> Normal(m_posterior_mean, 0)), GibbsConditional(:s, cond_s), ) chain = sample(rng, gdemo_default, sampler2, 10_000) - cond_s_mean = mean(cond_s((m = 
m_posterior_mean,))) + cond_s_mean = mean(cond_s((m=m_posterior_mean,))) check_numerical(chain, [:m, :s], [m_posterior_mean, cond_s_mean]) @test all(==(m_posterior_mean), chain[:m][2:end]) @@ -61,8 +82,9 @@ check_gdemo(chain) end - @turing_testset "GMM" begin - Random.seed!(1000); rng = StableRNG(123) + @testset "GMM" begin + Random.seed!(1000) + rng = StableRNG(123) # We consider the model # ```math # μₖ ~ Normal(m, σ_μ), k = 1, …, K, @@ -71,7 +93,7 @@ # ``` # with ``K = 2`` clusters, ``N = 20`` observations, and the following parameters: K = 2 # number of clusters - π = fill(1/K, K) # uniform cluster weights + π = fill(1 / K, K) # uniform cluster weights m = 0.5 # prior mean of μₖ σ²_μ = 4.0 # prior variance of μₖ σ²_x = 0.01 # observation variance @@ -92,7 +114,7 @@ # Conditional distribution ``z | μ, x`` # see http://www.cs.columbia.edu/~blei/fogm/2015F/notes/mixtures-and-gibbs.pdf - cond_z = let x=x_data, log_π=log.(π), σ_x=sqrt(σ²_x) + cond_z = let x = x_data, log_π = log.(π), σ_x = sqrt(σ²_x) c -> begin dists = map(x) do xi logp = log_π .+ logpdf.(Normal.(c.μ, σ_x), xi) @@ -104,7 +126,7 @@ # Conditional distribution ``μ | z, x`` # see http://www.cs.columbia.edu/~blei/fogm/2015F/notes/mixtures-and-gibbs.pdf - cond_μ = let K=K, x_data=x_data, inv_σ²_μ=inv(σ²_μ), inv_σ²_x=inv(σ²_x) + cond_μ = let K = K, x_data = x_data, inv_σ²_μ = inv(σ²_μ), inv_σ²_x = inv(σ²_x) c -> begin # Convert cluster assignments to one-hot encodings z_onehot = c.z .== (1:K)' @@ -120,10 +142,10 @@ end end - estimate(chain, var) = dropdims(mean(Array(group(chain, var)), dims=1), dims=1) + estimate(chain, var) = dropdims(mean(Array(group(chain, var)); dims=1); dims=1) function estimatez(chain, var, range) z = Int.(Array(group(chain, var))) - return map(i -> findmax(counts(z[:,i], range))[2], 1:size(z,2)) + return map(i -> findmax(counts(z[:, i], range))[2], 1:size(z, 2)) end lμ_data, uμ_data = extrema(μ_data) @@ -145,3 +167,5 @@ end end end + +end diff --git a/test/mcmc/hmc.jl b/test/mcmc/hmc.jl index a9b85b841..7b4c2551b 100644 --- a/test/mcmc/hmc.jl +++ b/test/mcmc/hmc.jl @@ -1,8 +1,33 @@ +module HMCTests + +using ..Models: gdemo_default +#using ..Models: gdemo +using ..NumericalTests: check_gdemo, check_numerical +using Distributions: Bernoulli, Beta, Categorical, Dirichlet, Normal, Wishart, sample +import DynamicPPL +using DynamicPPL: Sampler +import Enzyme +import ForwardDiff +using HypothesisTests: ApproximateTwoSampleKSTest, pvalue +import ReverseDiff +using LinearAlgebra: I, dot, vec +import Random +using StableRNGs: StableRNG +using StatsFuns: logistic +using Test: @test, @test_logs, @testset +using Turing + +# Disable Enzyme warnings +Enzyme.API.typeWarning!(false) + +# Enable runtime activity (workaround) +Enzyme.API.runtimeActivity!(true) + # @testset "Testing hmc.jl with $adbackend" for adbackend in (AutoForwardDiff(; chunksize=0), AutoReverseDiff(false)) @testset "Testing hmc.jl with $adbackend" for adbackend in (AutoEnzyme(),) # Set a seed rng = StableRNG(123) - @numerical_testset "constrained bounded" begin + @testset "constrained bounded" begin obs = [0,1,0,1,1,1,1,1,1,1] @model function constrained_test(obs) @@ -21,7 +46,7 @@ check_numerical(chain, [:p], [10/14], atol=0.1) end - @numerical_testset "constrained simplex" begin + @testset "constrained simplex" begin obs12 = [1,2,1,2,2,2,2,2,2,2] @model function constrained_simplex_test(obs12) @@ -41,12 +66,12 @@ check_numerical(chain, ["ps[1]", "ps[2]"], [5/16, 11/16], atol=0.015) end - @numerical_testset "hmc reverse diff" begin + @testset "hmc 
reverse diff" begin alg = HMC(0.1, 10; adtype=adbackend) res = sample(rng, gdemo_default, alg, 4000) check_gdemo(res, rtol=0.1) end - @turing_testset "matrix support" begin + @testset "matrix support" begin @model function hmcmatrixsup() v ~ Wishart(7, [1 0.5; 0.5 1]) end @@ -61,7 +86,7 @@ @test maximum(abs, mean(vs) - (7 * [1 0.5; 0.5 1])) <= 0.5 end - @turing_testset "multivariate support" begin + @testset "multivariate support" begin # Define NN flow function nn(x, b1, w11, w12, w13, bo, wo) h = tanh.([w11 w12 w13]' * x .+ b1) @@ -107,7 +132,7 @@ chain = sample(rng, bnn(ts), HMC(0.1, 5; adtype=adbackend), 10) end - @numerical_testset "hmcda inference" begin + @testset "hmcda inference" begin alg1 = HMCDA(500, 0.8, 0.015; adtype=adbackend) # alg2 = Gibbs(HMCDA(200, 0.8, 0.35, :m; adtype=adbackend), HMC(0.25, 3, :s; adtype=adbackend)) @@ -123,7 +148,7 @@ # @test mean(res2[:m]) ≈ 7/6 atol=0.2 end - @numerical_testset "hmcda+gibbs inference" begin + @testset "hmcda+gibbs inference" begin rng = StableRNG(123) Random.seed!(12345) # particle samplers do not support user-provided `rng` yet alg3 = Gibbs(PG(20, :s), HMCDA(500, 0.8, 0.25, :m; init_ϵ=0.05, adtype=adbackend)) @@ -132,7 +157,7 @@ check_gdemo(res3) end - @turing_testset "hmcda constructor" begin + @testset "hmcda constructor" begin alg = HMCDA(0.8, 0.75; adtype=adbackend) println(alg) sampler = Sampler(alg, gdemo_default) @@ -151,12 +176,12 @@ @test isa(alg, HMCDA) @test isa(sampler, Sampler{<:Turing.Hamiltonian}) end - @numerical_testset "nuts inference" begin + @testset "nuts inference" begin alg = NUTS(1000, 0.8; adtype=adbackend) res = sample(rng, gdemo_default, alg, 6000) check_gdemo(res) end - @turing_testset "nuts constructor" begin + @testset "nuts constructor" begin alg = NUTS(200, 0.65; adtype=adbackend) sampler = Sampler(alg, gdemo_default) @test DynamicPPL.alg_str(sampler) == "NUTS" @@ -169,7 +194,7 @@ sampler = Sampler(alg, gdemo_default) @test DynamicPPL.alg_str(sampler) == "NUTS" end - @turing_testset "check discard" begin + @testset "check discard" begin alg = NUTS(100, 0.8; adtype=adbackend) c1 = sample(rng, gdemo_default, alg, 500, discard_adapt=true) @@ -178,7 +203,7 @@ @test size(c1, 1) == 500 @test size(c2, 1) == 500 end - @turing_testset "AHMC resize" begin + @testset "AHMC resize" begin alg1 = Gibbs(PG(10, :m), NUTS(100, 0.65, :s; adtype=adbackend)) alg2 = Gibbs(PG(10, :m), HMC(0.1, 3, :s; adtype=adbackend)) alg3 = Gibbs(PG(10, :m), HMCDA(100, 0.65, 0.3, :s; adtype=adbackend)) @@ -187,7 +212,7 @@ @test sample(rng, gdemo_default, alg3, 300) isa Chains end - @turing_testset "Regression tests" begin + @testset "Regression tests" begin # https://github.com/TuringLang/DynamicPPL.jl/issues/27 @model function mwe1(::Type{T}=Float64) where {T<:Real} m = Matrix{T}(undef, 2, 3) @@ -210,7 +235,7 @@ end # issue #1923 - @turing_testset "reproducibility" begin + @testset "reproducibility" begin alg = NUTS(1000, 0.8; adtype=adbackend) res1 = sample(StableRNG(123), gdemo_default, alg, 1000) res2 = sample(StableRNG(123), gdemo_default, alg, 1000) @@ -218,7 +243,7 @@ @test Array(res1) == Array(res2) == Array(res3) end - @turing_testset "prior" begin + @testset "prior" begin @model function demo_hmc_prior() # NOTE: Used to use `InverseGamma(2, 3)` but this has infinite variance # which means that it's _very_ difficult to find a good tolerance in the test below:) @@ -231,7 +256,7 @@ check_numerical(chain, [:s, :m], [mean(truncated(Normal(3, 1); lower=0)), 0], atol=0.2) end - @turing_testset "warning for difficult init params" begin 
+ @testset "warning for difficult init params" begin attempt = 0 @model function demo_warn_initial_params() x ~ Normal() @@ -251,7 +276,7 @@ # Disable on Julia <1.8 due to https://github.com/TuringLang/Turing.jl/pull/2197. # TODO: Remove this block once https://github.com/JuliaFolds2/BangBang.jl/pull/22 has been released. if VERSION ≥ v"1.8" - @turing_testset "(partially) issue: #2095" begin + @testset "(partially) issue: #2095" begin @model function vector_of_dirichlet(::Type{TV}=Vector{Float64}) where {TV} xs = Vector{TV}(undef, 2) xs[1] ~ Dirichlet(ones(5)) @@ -263,7 +288,7 @@ end end - @turing_testset "issue: #2195" begin + @testset "issue: #2195" begin @model function buggy_model() lb ~ Uniform(0, 1) ub ~ Uniform(1.5, 2) @@ -291,7 +316,7 @@ initial_params=[0.5, 1.75, 1.0] ) chain_prior = sample(model, Prior(), num_samples) - + # Extract the `x` like this because running `generated_quantities` was how # the issue was discovered, hence we also want to make sure that it works. results = generated_quantities(model, chain) @@ -305,3 +330,5 @@ @test pvalue(ApproximateTwoSampleKSTest(vec(results), vec(results_prior))) > 0.01 end end + +end diff --git a/test/mcmc/is.jl b/test/mcmc/is.jl index e0bf80356..bd3186cd9 100644 --- a/test/mcmc/is.jl +++ b/test/mcmc/is.jl @@ -1,4 +1,13 @@ -@turing_testset "is.jl" begin +module ISTests + +using Distributions: Normal, sample +using DynamicPPL: logpdf +using Random: Random +using StatsFuns: logsumexp +using Test: @test, @testset +using Turing + +@testset "is.jl" begin function reference(n) as = Vector{Float64}(undef, n) bs = Vector{Float64}(undef, n) @@ -9,22 +18,22 @@ end logevidence = logsumexp(logps) - log(n) - return (as = as, bs = bs, logps = logps, logevidence = logevidence) + return (as=as, bs=bs, logps=logps, logevidence=logevidence) end function reference() - x = rand(Normal(4,5)) - y = rand(Normal(x,1)) - loglik = logpdf(Normal(x,2), 3) + logpdf(Normal(y,2), 1.5) + x = rand(Normal(4, 5)) + y = rand(Normal(x, 1)) + loglik = logpdf(Normal(x, 2), 3) + logpdf(Normal(y, 2), 1.5) return x, y, loglik end @model function normal() - a ~ Normal(4,5) - 3 ~ Normal(a,2) - b ~ Normal(a,1) - 1.5 ~ Normal(b,2) - a, b + a ~ Normal(4, 5) + 3 ~ Normal(a, 2) + b ~ Normal(a, 1) + 1.5 ~ Normal(b, 2) + return a, b end alg = IS() @@ -46,7 +55,7 @@ @test chain.logevidence == ref.logevidence end - @turing_testset "logevidence" begin + @testset "logevidence" begin Random.seed!(100) @model function test() @@ -56,12 +65,14 @@ 1 ~ Bernoulli(x / 2) c ~ Beta() 0 ~ Bernoulli(x / 2) - x + return x end chains = sample(test(), IS(), 10000) @test all(isone, chains[:x]) - @test chains.logevidence ≈ - 2 * log(2) + @test chains.logevidence ≈ -2 * log(2) end end + +end diff --git a/test/mcmc/mh.jl b/test/mcmc/mh.jl index e902aba47..0e3cc91f6 100644 --- a/test/mcmc/mh.jl +++ b/test/mcmc/mh.jl @@ -1,10 +1,27 @@ +module MHTests + +using AdvancedMH: AdvancedMH +using Distributions: + Bernoulli, Dirichlet, Exponential, InverseGamma, LogNormal, MvNormal, Normal, sample +using DynamicPPL: DynamicPPL +using DynamicPPL: Sampler +using LinearAlgebra: I +using Random: Random +using StableRNGs: StableRNG +using Test: @test, @testset +using Turing +using Turing.Inference: Inference + +using ..Models: gdemo_default, MoGtest_default +using ..NumericalTests: check_MoGtest_default, check_gdemo, check_numerical + +GKernel(var) = (x) -> Normal(x, sqrt.(var)) + @testset "mh.jl" begin - @turing_testset "mh constructor" begin + @testset "mh constructor" begin Random.seed!(10) N = 500 - s1 = MH( - (:s, 
InverseGamma(2,3)), - (:m, GKernel(3.0))) + s1 = MH((:s, InverseGamma(2, 3)), (:m, GKernel(3.0))) s2 = MH(:s, :m) s3 = MH() for s in (s1, s2, s3) @@ -26,38 +43,35 @@ # s6 = externalsampler(MH(gdemo_default, proposal_type=AdvancedMH.StaticProposal)) # c6 = sample(gdemo_default, s6, N) end - @numerical_testset "mh inference" begin + @testset "mh inference" begin Random.seed!(125) alg = MH() chain = sample(gdemo_default, alg, 10_000) - check_gdemo(chain, atol = 0.1) + check_gdemo(chain; atol=0.1) Random.seed!(125) # MH with Gaussian proposal - alg = MH( - (:s, InverseGamma(2,3)), - (:m, GKernel(1.0))) + alg = MH((:s, InverseGamma(2, 3)), (:m, GKernel(1.0))) chain = sample(gdemo_default, alg, 10_000) - check_gdemo(chain, atol = 0.1) + check_gdemo(chain; atol=0.1) Random.seed!(125) # MH within Gibbs alg = Gibbs(MH(:m), MH(:s)) chain = sample(gdemo_default, alg, 10_000) - check_gdemo(chain, atol = 0.1) + check_gdemo(chain; atol=0.1) Random.seed!(125) # MoGtest gibbs = Gibbs( - CSMC(15, :z1, :z2, :z3, :z4), - MH((:mu1,GKernel(1)), (:mu2,GKernel(1))) + CSMC(15, :z1, :z2, :z3, :z4), MH((:mu1, GKernel(1)), (:mu2, GKernel(1))) ) chain = sample(MoGtest_default, gibbs, 500) - check_MoGtest_default(chain, atol = 0.15) + check_MoGtest_default(chain; atol=0.15) end # Test MH shape passing. - @turing_testset "shape" begin + @testset "shape" begin @model function M(mu, sigma, observable) z ~ MvNormal(mu, sigma) @@ -72,7 +86,7 @@ -1.5 ~ Normal(m[1], m[2]) 1.5 ~ Normal(m[1], s) - 2.0 ~ Normal(m[1], s) + return 2.0 ~ Normal(m[1], s) end model = M(zeros(2), I, 1) @@ -81,7 +95,8 @@ dt, vt = Inference.dist_val_tuple(sampler, Turing.VarInfo(model)) @test dt[:z] isa AdvancedMH.StaticProposal{false,<:MvNormal} - @test dt[:m] isa AdvancedMH.StaticProposal{false,Vector{ContinuousUnivariateDistribution}} + @test dt[:m] isa + AdvancedMH.StaticProposal{false,Vector{ContinuousUnivariateDistribution}} @test dt[:m].proposal[1] isa Normal && dt[:m].proposal[2] isa InverseGamma @test dt[:s] isa AdvancedMH.StaticProposal{false,<:InverseGamma} @@ -94,9 +109,9 @@ @test chain isa MCMCChains.Chains end - @turing_testset "proposal matrix" begin + @testset "proposal matrix" begin Random.seed!(100) - + mat = [1.0 -0.05; -0.05 1.0] prop1 = mat # Matrix only constructor @@ -117,48 +132,43 @@ check_gdemo(chain2) end - @turing_testset "gibbs MH proposal matrix" begin + @testset "gibbs MH proposal matrix" begin # https://github.com/TuringLang/Turing.jl/issues/1556 # generate data x = rand(Normal(5, 10), 20) y = rand(LogNormal(-3, 2), 20) - + # Turing model @model function twomeans(x, y) # Set Priors μ ~ MvNormal(zeros(2), 9 * I) σ ~ filldist(Exponential(1), 2) - + # Distributions of supplied data x .~ Normal(μ[1], σ[1]) - y .~ LogNormal(μ[2], σ[2]) - + return y .~ LogNormal(μ[2], σ[2]) end mod = twomeans(x, y) - + # generate covariance matrix for RWMH # with small-valued VC matrix to check if we only see very small steps - vc_μ = convert(Array, 1e-4*I(2)) - vc_σ = convert(Array, 1e-4*I(2)) + vc_μ = convert(Array, 1e-4 * I(2)) + vc_σ = convert(Array, 1e-4 * I(2)) - alg = Gibbs( - MH((:μ, vc_μ)), - MH((:σ, vc_σ)), - ) + alg = Gibbs(MH((:μ, vc_μ)), MH((:σ, vc_σ))) chn = sample( mod, alg, - 3_000 # draws + 3_000, # draws ) - - + chn2 = sample(mod, MH(), 3_000) # Test that the small variance version is actually smaller. - v1 = var(diff(Array(chn["μ[1]"]), dims=1)) - v2 = var(diff(Array(chn2["μ[1]"]), dims=1)) + v1 = var(diff(Array(chn["μ[1]"]); dims=1)) + v2 = var(diff(Array(chn2["μ[1]"]); dims=1)) # FIXME: Do this properly. 
It sometimes fails. # @test v1 < v2 @@ -167,7 +177,7 @@ # Disable on Julia <1.8 due to https://github.com/TuringLang/Turing.jl/pull/2197. # TODO: Remove this block once https://github.com/JuliaFolds2/BangBang.jl/pull/22 has been released. if VERSION ≥ v"1.8" - @turing_testset "vector of multivariate distributions" begin + @testset "vector of multivariate distributions" begin @model function test(k) T = Vector{Vector{Float64}}(undef, k) for i in 1:k @@ -189,7 +199,7 @@ end end - @turing_testset "MH link/invlink" begin + @testset "MH link/invlink" begin vi_base = DynamicPPL.VarInfo(gdemo_default) # Don't link when no proposals are given since we're using priors @@ -222,32 +232,40 @@ vi = deepcopy(vi_base) alg = MH( :m => AdvancedMH.StaticProposal(Normal()), - :s => AdvancedMH.RandomWalkProposal(Normal()) + :s => AdvancedMH.RandomWalkProposal(Normal()), ) spl = DynamicPPL.Sampler(alg) vi = Turing.Inference.maybe_link!!(vi, spl, alg.proposals, gdemo_default) @test !DynamicPPL.islinked(vi, spl) end - @turing_testset "prior" begin + @testset "prior" begin # HACK: MH can be so bad for this prior model for some reason that it's difficult to # find a non-trivial `atol` where the tests will pass for all seeds. Hence we fix it :/ rng = StableRNG(10) alg = MH() - gdemo_default_prior = DynamicPPL.contextualize(gdemo_default, DynamicPPL.PriorContext()) + gdemo_default_prior = DynamicPPL.contextualize( + gdemo_default, DynamicPPL.PriorContext() + ) burnin = 10_000 n = 10_000 - chain = sample(rng, gdemo_default_prior, alg, n; discard_initial = burnin, thinning=10) - check_numerical(chain, [:s, :m], [mean(InverseGamma(2, 3)), 0], atol=0.3) + chain = sample( + rng, gdemo_default_prior, alg, n; discard_initial=burnin, thinning=10 + ) + check_numerical(chain, [:s, :m], [mean(InverseGamma(2, 3)), 0]; atol=0.3) end - @turing_testset "`filldist` proposal (issue #2180)" begin + @testset "`filldist` proposal (issue #2180)" begin @model demo_filldist_issue2180() = x ~ MvNormal(zeros(3), I) chain = sample( - demo_filldist_issue2180(), - MH(AdvancedMH.RandomWalkProposal(filldist(Normal(), 3))), - 10_000 + demo_filldist_issue2180(), + MH(AdvancedMH.RandomWalkProposal(filldist(Normal(), 3))), + 10_000, + ) + check_numerical( + chain, [Symbol("x[1]"), Symbol("x[2]"), Symbol("x[3]")], [0, 0, 0]; atol=0.2 ) - check_numerical(chain, [Symbol("x[1]"), Symbol("x[2]"), Symbol("x[3]")], [0, 0, 0], atol=0.2) end end + +end diff --git a/test/mcmc/particle_mcmc.jl b/test/mcmc/particle_mcmc.jl index de6e65e40..2e3744fef 100644 --- a/test/mcmc/particle_mcmc.jl +++ b/test/mcmc/particle_mcmc.jl @@ -1,5 +1,16 @@ +module ParticleMCMCTests + +using ..Models: gdemo_default +#using ..Models: MoGtest, MoGtest_default +using AdvancedPS: ResampleWithESSThreshold, resample_systematic, resample_multinomial +using Distributions: Bernoulli, Beta, Gamma, Normal, sample +using DynamicPPL: getspace +using Random: Random +using Test: @test, @test_throws, @testset +using Turing + @testset "SMC" begin - @turing_testset "constructor" begin + @testset "constructor" begin s = SMC() @test s.resampler == ResampleWithESSThreshold() @test getspace(s) === () @@ -45,32 +56,32 @@ @test getspace(s) === (:x,) end - @turing_testset "models" begin + @testset "models" begin @model function normal() - a ~ Normal(4,5) - 3 ~ Normal(a,2) - b ~ Normal(a,1) - 1.5 ~ Normal(b,2) - a, b + a ~ Normal(4, 5) + 3 ~ Normal(a, 2) + b ~ Normal(a, 1) + 1.5 ~ Normal(b, 2) + return a, b end - tested = sample(normal(), SMC(), 100); + tested = sample(normal(), SMC(), 100) # failing test 
@model function fail_smc() - a ~ Normal(4,5) - 3 ~ Normal(a,2) - b ~ Normal(a,1) + a ~ Normal(4, 5) + 3 ~ Normal(a, 2) + b ~ Normal(a, 1) if a >= 4.0 - 1.5 ~ Normal(b,2) + 1.5 ~ Normal(b, 2) end - a, b + return a, b end @test_throws ErrorException sample(fail_smc(), SMC(), 100) end - @turing_testset "logevidence" begin + @testset "logevidence" begin Random.seed!(100) @model function test() @@ -80,7 +91,7 @@ 1 ~ Bernoulli(x / 2) c ~ Beta() 0 ~ Bernoulli(x / 2) - x + return x end chains_smc = sample(test(), SMC(), 100) @@ -91,7 +102,7 @@ end @testset "PG" begin - @turing_testset "constructor" begin + @testset "constructor" begin s = PG(10) @test s.nparticles == 10 @test s.resampler == ResampleWithESSThreshold() @@ -148,7 +159,7 @@ end @test getspace(s) === (:x,) end - @turing_testset "logevidence" begin + @testset "logevidence" begin Random.seed!(100) @model function test() @@ -158,7 +169,7 @@ end 1 ~ Bernoulli(x / 2) c ~ Beta() 0 ~ Bernoulli(x / 2) - x + return x end chains_pg = sample(test(), PG(10), 100) @@ -168,21 +179,21 @@ end end # https://github.com/TuringLang/Turing.jl/issues/1598 - @turing_testset "reference particle" begin + @testset "reference particle" begin c = sample(gdemo_default, PG(1), 1_000) @test length(unique(c[:m])) == 1 @test length(unique(c[:s])) == 1 end # https://github.com/TuringLang/Turing.jl/issues/2007 - @turing_testset "keyword arguments not supported" begin - @model kwarg_demo(; x = 2) = return x + @testset "keyword arguments not supported" begin + @model kwarg_demo(; x=2) = return x @test_throws ErrorException sample(kwarg_demo(), PG(1), 10) end end # @testset "pmmh.jl" begin -# @turing_testset "pmmh constructor" begin +# @testset "pmmh constructor" begin # N = 2000 # s1 = PMMH(N, SMC(10, :s), MH(1,(:m, s -> Normal(s, sqrt(1))))) # s2 = PMMH(N, SMC(10, :s), MH(1, :m)) @@ -218,7 +229,7 @@ end # end # @testset "ipmcmc.jl" begin -# @turing_testset "ipmcmc constructor" begin +# @testset "ipmcmc constructor" begin # Random.seed!(125) # # N = 50 @@ -239,3 +250,4 @@ end # end # end +end diff --git a/test/mcmc/sghmc.jl b/test/mcmc/sghmc.jl index 16b6508ee..5f40f67e4 100644 --- a/test/mcmc/sghmc.jl +++ b/test/mcmc/sghmc.jl @@ -1,6 +1,25 @@ +module SGHMCTests + +using ..Models: gdemo_default +using ..NumericalTests: check_gdemo +using Distributions: sample +import Enzyme +import ForwardDiff +using LinearAlgebra: dot +import ReverseDiff +using StableRNGs: StableRNG +using Test: @test, @testset +using Turing + +# Disable Enzyme warnings +Enzyme.API.typeWarning!(false) + +# Enable runtime activity (workaround) +Enzyme.API.runtimeActivity!(true) + # @testset "Testing sghmc.jl with $adbackend" for adbackend in (AutoForwardDiff(; chunksize=0), AutoReverseDiff(false)) @testset "Testing sghmc.jl with $adbackend" for adbackend in (AutoEnzyme(),) - @turing_testset "sghmc constructor" begin + @testset "sghmc constructor" begin alg = SGHMC(; learning_rate=0.01, momentum_decay=0.1, adtype=adbackend) @test alg isa SGHMC sampler = Turing.Sampler(alg) @@ -16,7 +35,7 @@ sampler = Turing.Sampler(alg) @test sampler isa Turing.Sampler{<:SGHMC} end - @numerical_testset "sghmc inference" begin + @testset "sghmc inference" begin rng = StableRNG(123) alg = SGHMC(; learning_rate=0.02, momentum_decay=0.5, adtype=adbackend) @@ -27,7 +46,7 @@ end # @testset "Testing sgld.jl with $adbackend" for adbackend in (AutoForwardDiff(; chunksize=0), AutoReverseDiff(false)) @testset "Testing sgld.jl with $adbackend" for adbackend in (AutoEnzyme(),) - @turing_testset "sgld constructor" begin + @testset "sgld 
constructor" begin alg = SGLD(; stepsize=PolynomialStepsize(0.25), adtype=adbackend) @test alg isa SGLD sampler = Turing.Sampler(alg) @@ -43,7 +62,7 @@ end sampler = Turing.Sampler(alg) @test sampler isa Turing.Sampler{<:SGLD} end - @numerical_testset "sgld inference" begin + @testset "sgld inference" begin rng = StableRNG(1) chain = sample(rng, gdemo_default, SGLD(; stepsize = PolynomialStepsize(0.5)), 20_000) @@ -57,3 +76,5 @@ end @test m_weighted ≈ 7/6 atol=0.2 end end + +end diff --git a/test/mcmc/utilities.jl b/test/mcmc/utilities.jl index 3a3517f60..06e92fbf2 100644 --- a/test/mcmc/utilities.jl +++ b/test/mcmc/utilities.jl @@ -1,34 +1,46 @@ +module MCMCUtilitiesTests + +using ..Models: gdemo_default +using Distributions: Normal, sample, truncated +using LinearAlgebra: I, vec +using Random: Random +using Random: MersenneTwister +using Test: @test, @testset +using Turing + @testset "predict" begin Random.seed!(100) - @model function linear_reg(x, y, σ = 0.1) + @model function linear_reg(x, y, σ=0.1) β ~ Normal(0, 1) - for i ∈ eachindex(y) + for i in eachindex(y) y[i] ~ Normal(β * x[i], σ) end end - @model function linear_reg_vec(x, y, σ = 0.1) + @model function linear_reg_vec(x, y, σ=0.1) β ~ Normal(0, 1) - y ~ MvNormal(β .* x, σ^2 * I) + return y ~ MvNormal(β .* x, σ^2 * I) end f(x) = 2 * x + 0.1 * randn() Δ = 0.1 - xs_train = 0:Δ:10; ys_train = f.(xs_train); - xs_test = [10 + Δ, 10 + 2 * Δ]; ys_test = f.(xs_test); + xs_train = 0:Δ:10 + ys_train = f.(xs_train) + xs_test = [10 + Δ, 10 + 2 * Δ] + ys_test = f.(xs_test) # Infer - m_lin_reg = linear_reg(xs_train, ys_train); - chain_lin_reg = sample(m_lin_reg, NUTS(100, 0.65), 200); + m_lin_reg = linear_reg(xs_train, ys_train) + chain_lin_reg = sample(m_lin_reg, NUTS(100, 0.65), 200) # Predict on two last indices - m_lin_reg_test = linear_reg(xs_test, fill(missing, length(ys_test))); + m_lin_reg_test = linear_reg(xs_test, fill(missing, length(ys_test))) predictions = Turing.Inference.predict(m_lin_reg_test, chain_lin_reg) - ys_pred = vec(mean(Array(group(predictions, :y)); dims = 1)) + ys_pred = vec(mean(Array(group(predictions, :y)); dims=1)) @test sum(abs2, ys_test - ys_pred) ≤ 0.1 @@ -42,91 +54,88 @@ @test all(Array(predictions1) .== Array(predictions2)) # Predict on two last indices for vectorized - m_lin_reg_test = linear_reg_vec(xs_test, missing); + m_lin_reg_test = linear_reg_vec(xs_test, missing) predictions_vec = Turing.Inference.predict(m_lin_reg_test, chain_lin_reg) - ys_pred_vec = vec(mean(Array(group(predictions_vec, :y)); dims = 1)) + ys_pred_vec = vec(mean(Array(group(predictions_vec, :y)); dims=1)) @test sum(abs2, ys_test - ys_pred_vec) ≤ 0.1 # Multiple chains - chain_lin_reg = sample(m_lin_reg, NUTS(100, 0.65), MCMCThreads(), 200, 2); - m_lin_reg_test = linear_reg(xs_test, fill(missing, length(ys_test))); + chain_lin_reg = sample(m_lin_reg, NUTS(100, 0.65), MCMCThreads(), 200, 2) + m_lin_reg_test = linear_reg(xs_test, fill(missing, length(ys_test))) predictions = Turing.Inference.predict(m_lin_reg_test, chain_lin_reg) @test size(chain_lin_reg, 3) == size(predictions, 3) for chain_idx in MCMCChains.chains(chain_lin_reg) - ys_pred = vec(mean(Array(group(predictions[:, :, chain_idx], :y)); dims = 1)) + ys_pred = vec(mean(Array(group(predictions[:, :, chain_idx], :y)); dims=1)) @test sum(abs2, ys_test - ys_pred) ≤ 0.1 end # Predict on two last indices for vectorized - m_lin_reg_test = linear_reg_vec(xs_test, missing); + m_lin_reg_test = linear_reg_vec(xs_test, missing) predictions_vec = Turing.Inference.predict(m_lin_reg_test, 
chain_lin_reg) for chain_idx in MCMCChains.chains(chain_lin_reg) - ys_pred_vec = vec(mean( - Array(group(predictions_vec[:, :, chain_idx], :y)); - dims = 1 - )) + ys_pred_vec = vec(mean(Array(group(predictions_vec[:, :, chain_idx], :y)); dims=1)) @test sum(abs2, ys_test - ys_pred_vec) ≤ 0.1 end # https://github.com/TuringLang/Turing.jl/issues/1352 @model function simple_linear1(x, y) - intercept ~ Normal(0,1) + intercept ~ Normal(0, 1) coef ~ MvNormal(zeros(2), I) - coef = reshape(coef, 1, size(x,1)) + coef = reshape(coef, 1, size(x, 1)) - mu = intercept .+ coef * x |> vec - error ~ truncated(Normal(0,1), 0, Inf) - y ~ MvNormal(mu, error^2 * I) - end; + mu = vec(intercept .+ coef * x) + error ~ truncated(Normal(0, 1), 0, Inf) + return y ~ MvNormal(mu, error^2 * I) + end @model function simple_linear2(x, y) - intercept ~ Normal(0,1) - coef ~ filldist(Normal(0,1), 2) - coef = reshape(coef, 1, size(x,1)) + intercept ~ Normal(0, 1) + coef ~ filldist(Normal(0, 1), 2) + coef = reshape(coef, 1, size(x, 1)) - mu = intercept .+ coef * x |> vec - error ~ truncated(Normal(0,1), 0, Inf) - y ~ MvNormal(mu, error^2 * I) - end; + mu = vec(intercept .+ coef * x) + error ~ truncated(Normal(0, 1), 0, Inf) + return y ~ MvNormal(mu, error^2 * I) + end @model function simple_linear3(x, y) - intercept ~ Normal(0,1) + intercept ~ Normal(0, 1) coef = Vector(undef, 2) for i in axes(coef, 1) - coef[i] ~ Normal(0,1) + coef[i] ~ Normal(0, 1) end - coef = reshape(coef, 1, size(x,1)) + coef = reshape(coef, 1, size(x, 1)) - mu = intercept .+ coef * x |> vec - error ~ truncated(Normal(0,1), 0, Inf) - y ~ MvNormal(mu, error^2 * I) - end; + mu = vec(intercept .+ coef * x) + error ~ truncated(Normal(0, 1), 0, Inf) + return y ~ MvNormal(mu, error^2 * I) + end @model function simple_linear4(x, y) - intercept ~ Normal(0,1) - coef1 ~ Normal(0,1) - coef2 ~ Normal(0,1) + intercept ~ Normal(0, 1) + coef1 ~ Normal(0, 1) + coef2 ~ Normal(0, 1) coef = [coef1, coef2] - coef = reshape(coef, 1, size(x,1)) + coef = reshape(coef, 1, size(x, 1)) - mu = intercept .+ coef * x |> vec - error ~ truncated(Normal(0,1), 0, Inf) - y ~ MvNormal(mu, error^2 * I) - end; + mu = vec(intercept .+ coef * x) + error ~ truncated(Normal(0, 1), 0, Inf) + return y ~ MvNormal(mu, error^2 * I) + end # Some data - x = randn(2, 100); - y = [1 + 2 * a + 3 * b for (a,b) in eachcol(x)]; + x = randn(2, 100) + y = [1 + 2 * a + 3 * b for (a, b) in eachcol(x)] for model in [simple_linear1, simple_linear2, simple_linear3, simple_linear4] - m = model(x, y); - chain = sample(m, NUTS(), 100); - chain_predict = predict(model(x, missing), chain); - mean_prediction = [chain_predict["y[$i]"].data |> mean for i = 1:length(y)] + m = model(x, y) + chain = sample(m, NUTS(), 100) + chain_predict = predict(model(x, missing), chain) + mean_prediction = [mean(chain_predict["y[$i]"].data) for i in 1:length(y)] @test mean(abs2, mean_prediction - y) ≤ 1e-3 end end @@ -138,3 +147,5 @@ end @test chain.info.stop_time isa Float64 @test chain.info.start_time ≤ chain.info.stop_time end + +end diff --git a/test/optimisation/Optimisation.jl b/test/optimisation/Optimisation.jl new file mode 100644 index 000000000..5e6144e57 --- /dev/null +++ b/test/optimisation/Optimisation.jl @@ -0,0 +1,596 @@ +module OptimisationTests + +using ..Models: gdemo, gdemo_default +using Distributions +using Distributions.FillArrays: Zeros +using DynamicPPL: DynamicPPL +using LinearAlgebra: I +using Random: Random +using Optimization +using Optimization: Optimization +using OptimizationBBO: OptimizationBBO +using 
OptimizationNLopt: OptimizationNLopt +using OptimizationOptimJL: OptimizationOptimJL +using StatsBase: StatsBase +using StatsBase: coef, coefnames, coeftable, informationmatrix, stderror, vcov +using Test: @test, @testset, @test_throws +using Turing + +@testset "Optimisation" begin + + # The `stats` field is populated only in newer versions of OptimizationOptimJL and + # similar packages. Hence we end up doing this check a lot + hasstats(result) = result.optim_result.stats !== nothing + + # Issue: https://discourse.julialang.org/t/two-equivalent-conditioning-syntaxes-giving-different-likelihood-values/100320 + @testset "OptimizationContext" begin + # Used for testing how well it works with nested contexts. + struct OverrideContext{C,T1,T2} <: DynamicPPL.AbstractContext + context::C + logprior_weight::T1 + loglikelihood_weight::T2 + end + DynamicPPL.NodeTrait(::OverrideContext) = DynamicPPL.IsParent() + DynamicPPL.childcontext(parent::OverrideContext) = parent.context + DynamicPPL.setchildcontext(parent::OverrideContext, child) = + OverrideContext(child, parent.logprior_weight, parent.loglikelihood_weight) + + # Only implement what we need for the models above. + function DynamicPPL.tilde_assume(context::OverrideContext, right, vn, vi) + value, logp, vi = DynamicPPL.tilde_assume(context.context, right, vn, vi) + return value, context.logprior_weight, vi + end + function DynamicPPL.tilde_observe(context::OverrideContext, right, left, vi) + logp, vi = DynamicPPL.tilde_observe(context.context, right, left, vi) + return context.loglikelihood_weight, vi + end + + @model function model1(x) + μ ~ Uniform(0, 2) + return x ~ LogNormal(μ, 1) + end + + @model function model2() + μ ~ Uniform(0, 2) + return x ~ LogNormal(μ, 1) + end + + x = 1.0 + w = [1.0] + + @testset "With ConditionContext" begin + m1 = model1(x) + m2 = model2() | (x=x,) + ctx = Turing.Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext()) + @test Turing.Optimisation.OptimLogDensity(m1, ctx)(w) == + Turing.Optimisation.OptimLogDensity(m2, ctx)(w) + end + + @testset "With prefixes" begin + function prefix_μ(model) + return DynamicPPL.contextualize( + model, DynamicPPL.PrefixContext{:inner}(model.context) + ) + end + m1 = prefix_μ(model1(x)) + m2 = prefix_μ(model2() | (var"inner.x"=x,)) + ctx = Turing.Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext()) + @test Turing.Optimisation.OptimLogDensity(m1, ctx)(w) == + Turing.Optimisation.OptimLogDensity(m2, ctx)(w) + end + + @testset "Weighted" begin + function override(model) + return DynamicPPL.contextualize( + model, OverrideContext(model.context, 100, 1) + ) + end + m1 = override(model1(x)) + m2 = override(model2() | (x=x,)) + ctx = Turing.Optimisation.OptimizationContext(DynamicPPL.DefaultContext()) + @test Turing.Optimisation.OptimLogDensity(m1, ctx)(w) == + Turing.Optimisation.OptimLogDensity(m2, ctx)(w) + end + + @testset "Default, Likelihood, Prior Contexts" begin + m1 = model1(x) + defctx = Turing.Optimisation.OptimizationContext(DynamicPPL.DefaultContext()) + llhctx = Turing.Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext()) + prictx = Turing.Optimisation.OptimizationContext(DynamicPPL.PriorContext()) + a = [0.3] + + @test Turing.Optimisation.OptimLogDensity(m1, defctx)(a) == + Turing.Optimisation.OptimLogDensity(m1, llhctx)(a) + + Turing.Optimisation.OptimLogDensity(m1, prictx)(a) + + # test that PriorContext is calculating the right thing + @test Turing.Optimisation.OptimLogDensity(m1, prictx)([0.3]) ≈ + -Distributions.logpdf(Uniform(0, 2), 
0.3) + @test Turing.Optimisation.OptimLogDensity(m1, prictx)([-0.3]) ≈ + -Distributions.logpdf(Uniform(0, 2), -0.3) + end + end + + @testset "gdemo" begin + """ + check_optimisation_result(result, true_value, true_logp, check_retcode=true) + + Check that the `result` returned by optimisation is close to the truth. + """ + function check_optimisation_result( + result, true_value, true_logp, check_retcode=true + ) + optimum = result.values.array + @test all(isapprox.(optimum - true_value, 0.0, atol=0.01)) + if check_retcode + @test result.optim_result.retcode == Optimization.ReturnCode.Success + end + @test isapprox(result.lp, true_logp, atol=0.01) + end + + @testset "MLE" begin + Random.seed!(222) + true_value = [0.0625, 1.75] + true_logp = loglikelihood(gdemo_default, (s=true_value[1], m=true_value[2])) + check_success(result) = check_optimisation_result(result, true_value, true_logp) + + m1 = Turing.Optimisation.estimate_mode(gdemo_default, MLE()) + m2 = maximum_likelihood( + gdemo_default, OptimizationOptimJL.LBFGS(); initial_params=true_value + ) + m3 = maximum_likelihood(gdemo_default, OptimizationOptimJL.Newton()) + # TODO(mhauru) How can we check that the adtype is actually AutoReverseDiff? + m4 = maximum_likelihood( + gdemo_default, OptimizationOptimJL.BFGS(); adtype=AutoReverseDiff() + ) + m5 = maximum_likelihood( + gdemo_default, OptimizationOptimJL.NelderMead(); initial_params=true_value + ) + m6 = maximum_likelihood(gdemo_default, OptimizationOptimJL.NelderMead()) + + check_success(m1) + check_success(m2) + check_success(m3) + check_success(m4) + check_success(m5) + check_success(m6) + + @test !hasstats(m2) || m2.optim_result.stats.iterations <= 1 + if hasstats(m6) && hasstats(m5) + @test m5.optim_result.stats.iterations < m6.optim_result.stats.iterations + end + + @test !hasstats(m2) || m2.optim_result.stats.gevals > 0 + @test !hasstats(m3) || m3.optim_result.stats.gevals > 0 + @test !hasstats(m4) || m4.optim_result.stats.gevals > 0 + @test !hasstats(m5) || m5.optim_result.stats.gevals == 0 + @test !hasstats(m6) || m6.optim_result.stats.gevals == 0 + end + + @testset "MAP" begin + Random.seed!(222) + true_value = [49 / 54, 7 / 6] + true_logp = logjoint(gdemo_default, (s=true_value[1], m=true_value[2])) + check_success(result) = check_optimisation_result(result, true_value, true_logp) + + m1 = Turing.Optimisation.estimate_mode(gdemo_default, MAP()) + m2 = maximum_a_posteriori( + gdemo_default, OptimizationOptimJL.LBFGS(); initial_params=true_value + ) + m3 = maximum_a_posteriori(gdemo_default, OptimizationOptimJL.Newton()) + m4 = maximum_a_posteriori( + gdemo_default, OptimizationOptimJL.BFGS(); adtype=AutoReverseDiff() + ) + m5 = maximum_a_posteriori( + gdemo_default, OptimizationOptimJL.NelderMead(); initial_params=true_value + ) + m6 = maximum_a_posteriori(gdemo_default, OptimizationOptimJL.NelderMead()) + + check_success(m1) + check_success(m2) + check_success(m3) + check_success(m4) + check_success(m5) + check_success(m6) + + @test !hasstats(m2) || m2.optim_result.stats.iterations <= 1 + if hasstats(m6) && hasstats(m5) + @test m5.optim_result.stats.iterations < m6.optim_result.stats.iterations + end + + @test !hasstats(m2) || m2.optim_result.stats.gevals > 0 + @test !hasstats(m3) || m3.optim_result.stats.gevals > 0 + @test !hasstats(m4) || m4.optim_result.stats.gevals > 0 + @test !hasstats(m5) || m5.optim_result.stats.gevals == 0 + @test !hasstats(m6) || m6.optim_result.stats.gevals == 0 + end + + @testset "MLE with box constraints" begin + Random.seed!(222) + true_value = [0.0625,
1.75] + true_logp = loglikelihood(gdemo_default, (s=true_value[1], m=true_value[2])) + check_success(result, check_retcode=true) = + check_optimisation_result(result, true_value, true_logp, check_retcode) + + lb = [0.0, 0.0] + ub = [2.0, 2.0] + + m1 = Turing.Optimisation.estimate_mode(gdemo_default, MLE(); lb=lb, ub=ub) + m2 = maximum_likelihood( + gdemo_default, + OptimizationOptimJL.Fminbox(OptimizationOptimJL.LBFGS()); + initial_params=true_value, + lb=lb, + ub=ub, + ) + m3 = maximum_likelihood( + gdemo_default, + OptimizationBBO.BBO_separable_nes(); + maxiters=100_000, + abstol=1e-5, + lb=lb, + ub=ub, + ) + m4 = maximum_likelihood( + gdemo_default, + OptimizationOptimJL.Fminbox(OptimizationOptimJL.BFGS()); + adtype=AutoReverseDiff(), + lb=lb, + ub=ub, + ) + m5 = maximum_likelihood( + gdemo_default, + OptimizationOptimJL.IPNewton(); + initial_params=true_value, + lb=lb, + ub=ub, + ) + m6 = maximum_likelihood(gdemo_default; lb=lb, ub=ub) + + check_success(m1) + check_success(m2) + # BBO retcodes are misconfigured, so skip checking the retcode in this case. + # See https://github.com/SciML/Optimization.jl/issues/745 + check_success(m3, false) + check_success(m4) + check_success(m5) + check_success(m6) + + @test !hasstats(m2) || m2.optim_result.stats.iterations <= 1 + @test !hasstats(m5) || m5.optim_result.stats.iterations <= 1 + + @test !hasstats(m2) || m2.optim_result.stats.gevals > 0 + @test !hasstats(m3) || m3.optim_result.stats.gevals == 0 + @test !hasstats(m4) || m4.optim_result.stats.gevals > 0 + @test !hasstats(m5) || m5.optim_result.stats.gevals > 0 + end + + @testset "MAP with box constraints" begin + Random.seed!(222) + true_value = [49 / 54, 7 / 6] + true_logp = logjoint(gdemo_default, (s=true_value[1], m=true_value[2])) + check_success(result, check_retcode=true) = + check_optimisation_result(result, true_value, true_logp, check_retcode) + + lb = [0.0, 0.0] + ub = [2.0, 2.0] + + m1 = Turing.Optimisation.estimate_mode(gdemo_default, MAP(); lb=lb, ub=ub) + m2 = maximum_a_posteriori( + gdemo_default, + OptimizationOptimJL.Fminbox(OptimizationOptimJL.LBFGS()); + initial_params=true_value, + lb=lb, + ub=ub, + ) + m3 = maximum_a_posteriori( + gdemo_default, + OptimizationBBO.BBO_separable_nes(); + maxiters=100_000, + abstol=1e-5, + lb=lb, + ub=ub, + ) + m4 = maximum_a_posteriori( + gdemo_default, + OptimizationOptimJL.Fminbox(OptimizationOptimJL.BFGS()); + adtype=AutoReverseDiff(), + lb=lb, + ub=ub, + ) + m5 = maximum_a_posteriori( + gdemo_default, + OptimizationOptimJL.IPNewton(); + initial_params=true_value, + lb=lb, + ub=ub, + ) + m6 = maximum_a_posteriori(gdemo_default; lb=lb, ub=ub) + + check_success(m1) + check_success(m2) + # BBO retcodes are misconfigured, so skip checking the retcode in this case. 
+ # See https://github.com/SciML/Optimization.jl/issues/745 + check_success(m3, false) + check_success(m4) + check_success(m5) + check_success(m6) + + @test !hasstats(m2) || m2.optim_result.stats.iterations <= 1 + @test !hasstats(m5) || m5.optim_result.stats.iterations <= 1 + + @show m2.optim_result.stats + @test !hasstats(m2) || m2.optim_result.stats.gevals > 0 + @test !hasstats(m3) || m3.optim_result.stats.gevals == 0 + @test !hasstats(m4) || m4.optim_result.stats.gevals > 0 + @test !hasstats(m5) || m5.optim_result.stats.gevals > 0 + end + + @testset "MLE with generic constraints" begin + Random.seed!(222) + true_value = [0.0625, 1.75] + true_logp = loglikelihood(gdemo_default, (s=true_value[1], m=true_value[2])) + check_success(result, check_retcode=true) = + check_optimisation_result(result, true_value, true_logp, check_retcode) + + # Set two constraints: The first parameter must be non-negative, and the L2 norm + # of the parameters must be between 0.5 and 2. + cons(res, x, _) = (res .= [x[1], sqrt(sum(x .^ 2))]) + lcons = [0, 0.5] + ucons = [Inf, 2.0] + cons_args = (cons=cons, lcons=lcons, ucons=ucons) + initial_params = [0.5, -1.0] + + m1 = Turing.Optimisation.estimate_mode( + gdemo_default, MLE(); initial_params=initial_params, cons_args... + ) + m2 = maximum_likelihood(gdemo_default; initial_params=true_value, cons_args...) + m3 = maximum_likelihood( + gdemo_default, + OptimizationOptimJL.IPNewton(); + initial_params=initial_params, + cons_args..., + ) + m4 = maximum_likelihood( + gdemo_default, + OptimizationOptimJL.IPNewton(); + initial_params=initial_params, + adtype=AutoReverseDiff(), + cons_args..., + ) + m5 = maximum_likelihood( + gdemo_default; initial_params=initial_params, cons_args... + ) + + check_success(m1) + check_success(m2) + check_success(m3) + check_success(m4) + check_success(m5) + + @test !hasstats(m2) || m2.optim_result.stats.iterations <= 1 + + @test !hasstats(m3) || m3.optim_result.stats.gevals > 0 + @test !hasstats(m4) || m4.optim_result.stats.gevals > 0 + + expected_error = ArgumentError( + "You must provide an initial value when using generic constraints." + ) + @test_throws expected_error maximum_likelihood(gdemo_default; cons_args...) + end + + @testset "MAP with generic constraints" begin + Random.seed!(222) + true_value = [49 / 54, 7 / 6] + true_logp = logjoint(gdemo_default, (s=true_value[1], m=true_value[2])) + check_success(result, check_retcode=true) = + check_optimisation_result(result, true_value, true_logp, check_retcode) + + # Set two constraints: The first parameter must be non-negative, and the L2 norm + # of the parameters must be between 0.5 and 2. + cons(res, x, _) = (res .= [x[1], sqrt(sum(x .^ 2))]) + lcons = [0, 0.5] + ucons = [Inf, 2.0] + cons_args = (cons=cons, lcons=lcons, ucons=ucons) + initial_params = [0.5, -1.0] + + m1 = Turing.Optimisation.estimate_mode( + gdemo_default, MAP(); initial_params=initial_params, cons_args... + ) + m2 = maximum_a_posteriori( + gdemo_default; initial_params=true_value, cons_args... + ) + m3 = maximum_a_posteriori( + gdemo_default, + OptimizationOptimJL.IPNewton(); + initial_params=initial_params, + cons_args..., + ) + m4 = maximum_a_posteriori( + gdemo_default, + OptimizationOptimJL.IPNewton(); + initial_params=initial_params, + adtype=AutoReverseDiff(), + cons_args..., + ) + m5 = maximum_a_posteriori( + gdemo_default; initial_params=initial_params, cons_args... 
+ ) + + check_success(m1) + check_success(m2) + check_success(m3) + check_success(m4) + check_success(m5) + + @test !hasstats(m2) || m2.optim_result.stats.iterations <= 1 + + @test !hasstats(m3) || m3.optim_result.stats.gevals > 0 + @test !hasstats(m4) || m4.optim_result.stats.gevals > 0 + + expected_error = ArgumentError( + "You must provide an initial value when using generic constraints." + ) + @test_throws expected_error maximum_a_posteriori(gdemo_default; cons_args...) + end + end + + @testset "StatsBase integration" begin + Random.seed!(54321) + mle_est = maximum_likelihood(gdemo_default) + # Calculated based on the two data points in gdemo_default, [1.5, 2.0] + true_values = [0.0625, 1.75] + + @test coefnames(mle_est) == [:s, :m] + + diffs = coef(mle_est).array - [0.0625031; 1.75001] + @test all(isapprox.(diffs, 0.0, atol=0.1)) + + infomat = [2/(2 * true_values[1]^2) 0.0; 0.0 2/true_values[1]] + @test all(isapprox.(infomat - informationmatrix(mle_est), 0.0, atol=0.01)) + + vcovmat = [2 * true_values[1]^2/2 0.0; 0.0 true_values[1]/2] + @test all(isapprox.(vcovmat - vcov(mle_est), 0.0, atol=0.01)) + + ctable = coeftable(mle_est) + @test ctable isa StatsBase.CoefTable + + s = stderror(mle_est).array + @test all(isapprox.(s - [0.06250415643292194, 0.17677963626053916], 0.0, atol=0.01)) + + @test coefnames(mle_est) == Distributions.params(mle_est) + @test vcov(mle_est) == inv(informationmatrix(mle_est)) + + @test isapprox(loglikelihood(mle_est), -0.0652883561466624, atol=0.01) + end + + @testset "Linear regression test" begin + @model function regtest(x, y) + beta ~ MvNormal(Zeros(2), I) + mu = x * beta + return y ~ MvNormal(mu, I) + end + + Random.seed!(987) + true_beta = [1.0, -2.2] + x = rand(40, 2) + y = x * true_beta + + model = regtest(x, y) + mle = maximum_likelihood(model) + + vcmat = inv(x'x) + vcmat_mle = vcov(mle).array + + @test isapprox(mle.values.array, true_beta) + @test isapprox(vcmat, vcmat_mle) + end + + @testset "Dot tilde test" begin + @model function dot_gdemo(x) + s ~ InverseGamma(2, 3) + m ~ Normal(0, sqrt(s)) + + return (.~)(x, Normal(m, sqrt(s))) + end + + model_dot = dot_gdemo([1.5, 2.0]) + + mle1 = maximum_likelihood(gdemo_default) + mle2 = maximum_likelihood(model_dot) + + map1 = maximum_a_posteriori(gdemo_default) + map2 = maximum_a_posteriori(model_dot) + + @test isapprox(mle1.values.array, mle2.values.array) + @test isapprox(map1.values.array, map2.values.array) + end + + # TODO(mhauru): The corresponding Optim.jl test had a note saying that some models + # don't work for Tracker and ReverseDiff. Is that still the case? + @testset "MAP for $(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS + Random.seed!(23) + result_true = DynamicPPL.TestUtils.posterior_optima(model) + + optimizers = [ + OptimizationOptimJL.LBFGS(), + OptimizationOptimJL.NelderMead(), + OptimizationNLopt.NLopt.LD_TNEWTON_PRECOND_RESTART(), + ] + @testset "$(nameof(typeof(optimizer)))" for optimizer in optimizers + result = maximum_a_posteriori(model, optimizer) + vals = result.values + + for vn in DynamicPPL.TestUtils.varnames(model) + for vn_leaf in DynamicPPL.TestUtils.varname_leaves(vn, get(result_true, vn)) + @test get(result_true, vn_leaf) ≈ vals[Symbol(vn_leaf)] atol = 0.05 + end + end + end + end + + # Some of the models have one variance parameter per observation, and so + # the MLE should have the variances set to 0. Since we're working in + # transformed space, this corresponds to `-Inf`, which is of course not achievable. 
+ # In particular, it can result in "early termination" of the optimization process + # because we hit NaNs, etc. To avoid this, we set the convergence tolerance (`reltol`) to + # something larger than the default. + allowed_incorrect_mle = [ + DynamicPPL.TestUtils.demo_dot_assume_dot_observe, + DynamicPPL.TestUtils.demo_assume_index_observe, + DynamicPPL.TestUtils.demo_assume_multivariate_observe, + DynamicPPL.TestUtils.demo_assume_observe_literal, + DynamicPPL.TestUtils.demo_dot_assume_observe_submodel, + DynamicPPL.TestUtils.demo_dot_assume_dot_observe_matrix, + DynamicPPL.TestUtils.demo_dot_assume_matrix_dot_observe_matrix, + DynamicPPL.TestUtils.demo_assume_submodel_observe_index_literal, + DynamicPPL.TestUtils.demo_dot_assume_observe_index, + DynamicPPL.TestUtils.demo_dot_assume_observe_index_literal, + DynamicPPL.TestUtils.demo_assume_matrix_dot_observe_matrix, + ] + @testset "MLE for $(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS + Random.seed!(23) + result_true = DynamicPPL.TestUtils.likelihood_optima(model) + + optimizers = [ + OptimizationOptimJL.LBFGS(), + OptimizationOptimJL.NelderMead(), + OptimizationNLopt.NLopt.LD_TNEWTON_PRECOND_RESTART(), + ] + @testset "$(nameof(typeof(optimizer)))" for optimizer in optimizers + result = maximum_likelihood(model, optimizer; reltol=1e-3) + vals = result.values + + for vn in DynamicPPL.TestUtils.varnames(model) + for vn_leaf in DynamicPPL.TestUtils.varname_leaves(vn, get(result_true, vn)) + if model.f in allowed_incorrect_mle + @test isfinite(get(result_true, vn_leaf)) + else + @test get(result_true, vn_leaf) ≈ vals[Symbol(vn_leaf)] atol = 0.05 + end + end + end + end + end + + # Issue: https://discourse.julialang.org/t/turing-mixture-models-with-dirichlet-weightings/112910 + @testset "Optimization with different linked dimensionality" begin + @model demo_dirichlet() = x ~ Dirichlet(2 * ones(3)) + model = demo_dirichlet() + result = maximum_a_posteriori(model) + @test result.values ≈ mode(Dirichlet(2 * ones(3))) atol = 0.2 + end + + @testset "with :=" begin + @model function demo_track() + x ~ Normal() + return y := 100 + x + end + model = demo_track() + result = maximum_a_posteriori(model) + @test result.values[:x] ≈ 0 atol = 1e-1 + @test result.values[:y] ≈ 100 atol = 1e-1 + end +end + +end diff --git a/test/runtests.jl b/test/runtests.jl index 3193a30f7..9294b3487 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,72 +1,41 @@ import Pkg Pkg.add(Pkg.PackageSpec(; url="https://github.com/simsurace/Enzyme.jl.git", rev="fix-cholesky")) -using AbstractMCMC -using AdvancedMH -using AdvancedPS -using Clustering -using Distributions -using Distributions.FillArrays -using DistributionsAD -using FiniteDifferences -using ForwardDiff -using MCMCChains -using NamedArrays -using Optim -using Optimization -using OptimizationOptimJL -using PDMats -using ReverseDiff -using SpecialFunctions -using StatsBase -using StatsFuns -using HypothesisTests -using Tracker -using Turing -using Turing.Inference -using Turing.RandomMeasures -using Zygote - -using LinearAlgebra +include("test_utils/SelectiveTests.jl") +using .SelectiveTests: isincluded, parse_args using Pkg -using Random using Test -using StableRNGs - -using AdvancedPS: ResampleWithESSThreshold, resample_systematic, resample_multinomial -using AdvancedVI: TruncatedADAGrad, DecayedADAGrad, apply!
-using Distributions: Binomial, logpdf -using DynamicPPL: getval, getlogp -using ForwardDiff: Dual -using MCMCChains: Chains -using StatsFuns: binomlogpdf, logistic, logsumexp using TimerOutputs: TimerOutputs, @timeit -using Turing: BinomialLogit, Sampler, SampleFromPrior, NUTS, - Variational, getspace -using Turing.Essential: TuringDenseMvNormal, TuringDiagMvNormal -using Turing.Variational: TruncatedADAGrad, DecayedADAGrad, AdvancedVI +import Turing -import Enzyme -import LogDensityProblems -import LogDensityProblemsAD +include(pkgdir(Turing) * "/test/test_utils/models.jl") +include(pkgdir(Turing) * "/test/test_utils/numerical_tests.jl") -setprogress!(false) +Turing.setprogress!(false) -# Disable Enzyme warnings -Enzyme.API.typeWarning!(false) +included_paths, excluded_paths = parse_args(ARGS) -# Enable runtime activity (workaround) -Enzyme.API.runtimeActivity!(true) - -include(pkgdir(Turing)*"/test/test_utils/AllUtils.jl") - -# Collect timing and allocations information to show in a clear way. +# Filter which tests to run and collect timing and allocations information to show in a +# clear way. const TIMEROUTPUT = TimerOutputs.TimerOutput() -macro timeit_include(path::AbstractString) :(@timeit TIMEROUTPUT $path include($path)) end +macro timeit_include(path::AbstractString) + return quote + if isincluded($path, included_paths, excluded_paths) + @timeit TIMEROUTPUT $path include($path) + else + println("Skipping tests in $($path)") + end + end +end @testset "Turing" begin + @testset "Aqua" begin + @timeit_include("Aqua.jl") + end + @testset "essential" begin @timeit_include("essential/ad.jl") + @timeit_include("essential/container.jl") end @testset "samplers (without AD)" begin @@ -75,7 +44,7 @@ macro timeit_include(path::AbstractString) :(@timeit TIMEROUTPUT $path include($ @timeit_include("mcmc/ess.jl") @timeit_include("mcmc/is.jl") end - + @timeit TIMEROUTPUT "inference" begin @testset "inference with samplers" begin @timeit_include("mcmc/gibbs.jl") @@ -93,8 +62,8 @@ macro timeit_include(path::AbstractString) :(@timeit TIMEROUTPUT $path include($ end @testset "mode estimation" begin - @timeit_include("optimisation/OptimInterface.jl") - @timeit_include("ext/Optimisation.jl") + @timeit_include("optimisation/Optimisation.jl") + @timeit_include("ext/OptimInterface.jl") end end diff --git a/test/skipped/advi_demo.jl b/test/skipped/advi_demo.jl index d10e140d0..a9cbabd0f 100644 --- a/test/skipped/advi_demo.jl +++ b/test/skipped/advi_demo.jl @@ -20,7 +20,7 @@ using ConjugatePriors s ~ InverseGamma(2, 3) m ~ Normal(0.0, sqrt(s)) # `Normal(μ, σ)` has mean μ and variance σ², i.e. parametrize with std. 
not variance - for i = 1:length(x) + for i in 1:length(x) x[i] ~ Normal(m, sqrt(s)) end end @@ -28,28 +28,28 @@ end const seeds = [125, 245, 3] const ad_modes = [:forwarddiff, :reversediff, :tracker] -for seed ∈ seeds +for seed in seeds @info seed - - for ad_mode ∈ ad_modes + + for ad_mode in ad_modes @info ad_mode setadbackend(ad_mode) - + # set random seed Random.seed!(seed) # generate data - x = randn(1, 2000); + x = randn(1, 2000) # construct model m = model(x) - + # ADVI opt = Variational.TruncatedADAGrad() # optimizer advi = ADVI(10, 100) # <: VariationalInference q = Variational.meanfield(m) # => <: VariationalPosterior - + elbo = Variational.ELBO() # <: VariationalObjective μ, σs = params(q) @@ -58,16 +58,16 @@ for seed ∈ seeds history = [elbo(advi, q, m, 1000)] # construct animation - anim = @animate for j = 1:25 + anim = @animate for j in 1:25 # global q - Variational.optimize!(elbo, advi, q, m, θ; optimizer = opt) - μ, ω = θ[1:length(q)], θ[length(q) + 1:end] - + Variational.optimize!(elbo, advi, q, m, θ; optimizer=opt) + μ, ω = θ[1:length(q)], θ[(length(q) + 1):end] + q = Variational.update(q, μ, f.(ω)) samples = rand(q, 2000) # quick check - println([mean(samples, dims=2), [var(x), mean(x)]]) + println([mean(samples; dims=2), [var(x), mean(x)]]) # plotting code assumes (samples, dim) shape so we just transpose samples = transpose(samples) @@ -90,7 +90,7 @@ for seed ∈ seeds # Eq. (90) in "Conjugate Bayesian analysis of the Gaussian distribution" by Murphy # `scale(post)` = θ p_τ = Gamma(post.shape, scale(post)) - p_σ²_pdf = z -> pdf(p_τ, 1 / z) # τ => 1 / σ² + p_σ²_pdf = z -> pdf(p_τ, 1 / z) # τ => 1 / σ² # marginal of μ # Eq. (91) in "Conjugate Bayesian analysis of the Gaussian distribution" by Murphy @@ -102,55 +102,60 @@ for seed ∈ seeds βₙ = post.rate # β → rate # numerically more stable but doesn't seem to have effect; issue is probably internal to - # `pdf` which needs to compute ≈ Γ(1000) - p_μ_pdf = z -> exp(logpdf(p_μ, (z - μₙ) * exp(- 0.5 * log(βₙ) + 0.5 * log(αₙ) + 0.5 * log(κₙ)))) + # `pdf` which needs to compute ≈ Γ(1000) + p_μ_pdf = + z -> exp( + logpdf( + p_μ, + (z - μₙ) * exp(-0.5 * log(βₙ) + 0.5 * log(αₙ) + 0.5 * log(κₙ)), + ), + ) # p_μ_pdf1 = z -> pdf(p_μ, (z - μₙ) / √(βₙ / (αₙ * κₙ))) # posterior plots - p1 = plot(); - density!(samples[:, 1], label = "s (ADVI)", color = :blue, linestyle = :dash); - histogram!(samples[:, 1], label = "", normed = true, alpha = 0.3, color = :blue); + p1 = plot() + density!(samples[:, 1]; label="s (ADVI)", color=:blue, linestyle=:dash) + histogram!(samples[:, 1]; label="", normed=true, alpha=0.3, color=:blue) # normalize using Riemann approx. because of (almost certainly) numerical issues Δ = 0.001 r = 0.75:0.001:1.50 norm_const = sum(p_σ²_pdf.(r) .* Δ) - plot!(r, p_σ²_pdf, label = "s (posterior)", color = :red); - vline!([var(x)], label = "s (data)", linewidth = 1.5, color = :black, alpha = 0.7); - xlims!(0.5, 1.5); - title!("$(j * advi.max_iters) steps"); - - p2 = plot(); - density!(samples[:, 2], label = "m (ADVI)", color = :blue, linestyle = :dash); - histogram!(samples[:, 2], label = "", normed = true, alpha = 0.3, color = :blue); + plot!(r, p_σ²_pdf; label="s (posterior)", color=:red) + vline!([var(x)]; label="s (data)", linewidth=1.5, color=:black, alpha=0.7) + xlims!(0.5, 1.5) + title!("$(j * advi.max_iters) steps") + p2 = plot() + density!(samples[:, 2]; label="m (ADVI)", color=:blue, linestyle=:dash) + histogram!(samples[:, 2]; label="", normed=true, alpha=0.3, color=:blue) # normalize using Riemann approx. 
because of (almost certainly) numerical issues Δ = 0.0001 - r = -0.1 + mean(x):Δ:0.1 + mean(x) + r = (-0.1 + mean(x)):Δ:(0.1 + mean(x)) norm_const = sum(p_μ_pdf.(r) .* Δ) - plot!(r, z -> p_μ_pdf(z) / norm_const, label = "m (posterior)", color = :red); - vline!([mean(x)], label = "m (data)", linewidth = 1.5, color = :black, alpha = 0.7); + plot!(r, z -> p_μ_pdf(z) / norm_const; label="m (posterior)", color=:red) + vline!([mean(x)]; label="m (data)", linewidth=1.5, color=:black, alpha=0.7) - xlims!(-0.25, 0.25); + xlims!(-0.25, 0.25) # visualize evolution of objective wrt. optimization iterations obj = elbo(advi, q, m, 1000) @info "ELBO" obj push!(history, obj) - p3 = plot(); - plot!(1:advi.max_iters:length(history) * advi.max_iters, history, label = "") + p3 = plot() + plot!(1:(advi.max_iters):(length(history) * advi.max_iters), history; label="") title!("ELBO = $obj") # plot the latest 25 objective evaluations to visualize trend - p4 = plot(); - plot!(history[max(1, end - 10):end], label = "") + p4 = plot() + plot!(history[max(1, end - 10):end]; label="") + + p = plot(p1, p2, p3, p4; layout=(4, 1)) - p = plot(p1, p2, p3, p4; layout = (4, 1)) - @info "[$j] Done" p end - gif(anim, "advi_w_elbo_fps15_$(seed)_$(ad_mode).gif", fps = 15) + gif(anim, "advi_w_elbo_fps15_$(seed)_$(ad_mode).gif"; fps=15) end end diff --git a/test/skipped/bayes_lr.jl b/test/skipped/bayes_lr.jl index b426ce0cb..6b9b4d31e 100644 --- a/test/skipped/bayes_lr.jl +++ b/test/skipped/bayes_lr.jl @@ -9,13 +9,13 @@ using Turing s = 1 β ~ Normal(0, 1) - for n = 1:N + for n in 1:N ys[n] ~ Normal(xs[n] * β, sqrt(s)) end end N = 100 -xs = collect(range(-10, stop = 10, length = N)) +xs = collect(range(-10; stop=10, length=N)) s = 1 β = rand(Normal(0, 1)) ys = xs * β + rand(Normal(0, sqrt(s)), N) @@ -29,18 +29,18 @@ println("mean of β: ", mean(chn[1000:end, :β])) θ_dim = 1 function lj_func(θ) - N = length(xs) - _lj = zero(Real) + N = length(xs) + _lj = zero(Real) - s = 1 + s = 1 - β = θ[1] - _lj += logpdf(Normal(0, 1), β) - for n = 1:N - _lj += logpdf(Normal(xs[n] * β, sqrt(s)), ys[n]) - end + β = θ[1] + _lj += logpdf(Normal(0, 1), β) + for n in 1:N + _lj += logpdf(Normal(xs[n] * β, sqrt(s)), ys[n]) + end - return _lj + return _lj end neg_lj_func(θ) = -lj_func(θ) @@ -48,18 +48,16 @@ const f_tape = GradientTape(neg_lj_func, randn(θ_dim)) const compiled_f_tape = compile(f_tape) function grad_func(θ) + inputs = θ + results = similar(θ) + all_results = DiffResults.GradientResult(results) - inputs = θ - results = similar(θ) - all_results = DiffResults.GradientResult(results) - - ReverseDiff.gradient!(all_results, compiled_f_tape, inputs) - - neg_lj = all_results.value - grad, = all_results.derivs + ReverseDiff.gradient!(all_results, compiled_f_tape, inputs) - return -neg_lj, grad + neg_lj = all_results.value + grad, = all_results.derivs + return -neg_lj, grad end std = ones(θ_dim) @@ -70,12 +68,12 @@ chn = [] accept_num = 1 total_num = 2000 -for iter = 1:total_num - global θ, chn, lj, lj_func, grad_func, std, accept_num - push!(chn, θ) - θ, lj, is_accept, τ_valid, α = _hmc_step(θ, lj, lj_func, grad_func, 3, 0.005, std) - accept_num += is_accept -# if (iter % 50 == 0) println(θ) end +for iter in 1:total_num + global θ, chn, lj, lj_func, grad_func, std, accept_num + push!(chn, θ) + θ, lj, is_accept, τ_valid, α = _hmc_step(θ, lj, lj_func, grad_func, 3, 0.005, std) + accept_num += is_accept + # if (iter % 50 == 0) println(θ) end end @show mean(chn[1000:end]), lj diff --git a/test/skipped/dual_averaging.jl b/test/skipped/dual_averaging.jl 
index 01c1cb401..897abe8d9 100644 --- a/test/skipped/dual_averaging.jl +++ b/test/skipped/dual_averaging.jl @@ -1,6 +1,4 @@ -function _adapt_ϵ(logϵ, Hbar, logϵbar, da_stat, m, M_adapt, δ, μ; - γ=0.05, t0=10, κ=0.75) - +function _adapt_ϵ(logϵ, Hbar, logϵbar, da_stat, m, M_adapt, δ, μ; γ=0.05, t0=10, κ=0.75) if m <= M_adapt Hbar = (1.0 - 1.0 / (m + t0)) * Hbar + (1 / (m + t0)) * (δ - da_stat) logϵ = μ - sqrt(m) / γ * Hbar diff --git a/test/skipped/explicit_ret.jl b/test/skipped/explicit_ret.jl index f0b53d093..c1340464f 100644 --- a/test/skipped/explicit_ret.jl +++ b/test/skipped/explicit_ret.jl @@ -2,17 +2,18 @@ using Turing using Test @model function test_ex_rt() - x ~ Normal(10, 1) - y ~ Normal(x / 2, 1) - z = y + 1 - x = x - 1 - x, y, z + x ~ Normal(10, 1) + y ~ Normal(x / 2, 1) + z = y + 1 + x = x - 1 + return x, y, z end mf = test_ex_rt() -for alg = [HMC(0.2, 3), PG(20, 2000), SMC(), IS(10000), Gibbs(PG(20, 1, :x), HMC(0.2, 3, :y))] - chn = sample(mf, alg) - @test mean(chn[:x]) ≈ 10.0 atol=0.2 - @test mean(chn[:y]) ≈ 5.0 atol=0.2 +for alg in + [HMC(0.2, 3), PG(20, 2000), SMC(), IS(10000), Gibbs(PG(20, 1, :x), HMC(0.2, 3, :y))] + chn = sample(mf, alg) + @test mean(chn[:x]) ≈ 10.0 atol = 0.2 + @test mean(chn[:y]) ≈ 5.0 atol = 0.2 end diff --git a/test/skipped/gdemo.jl b/test/skipped/gdemo.jl index 610e5b6ee..84242067e 100644 --- a/test/skipped/gdemo.jl +++ b/test/skipped/gdemo.jl @@ -6,11 +6,11 @@ using Turing @model function gdemo() s ~ InverseGamma(2, 3) - m ~ Normal(0,sqrt(s)) + m ~ Normal(0, sqrt(s)) 1.5 ~ Normal(m, sqrt(s)) 2.0 ~ Normal(m, sqrt(s)) return s, m - end +end # Plain Julia @@ -19,18 +19,18 @@ using Turing: invlink, logpdf θ_dim = 2 function lj_func(θ) - _lj = zero(Real) + _lj = zero(Real) - d_s = InverseGamma(2, 3) - s = invlink(d_s, θ[1]) - _lj += logpdf(d_s, s, true) - m = θ[2] - _lj += logpdf(Normal(0, sqrt(s)), m) + d_s = InverseGamma(2, 3) + s = invlink(d_s, θ[1]) + _lj += logpdf(d_s, s, true) + m = θ[2] + _lj += logpdf(Normal(0, sqrt(s)), m) - _lj += logpdf(Normal(m, sqrt(s)), 1.5) - _lj += logpdf(Normal(m, sqrt(s)), 2.0) + _lj += logpdf(Normal(m, sqrt(s)), 1.5) + _lj += logpdf(Normal(m, sqrt(s)), 2.0) - return _lj + return _lj end neg_lj_func(θ) = -lj_func(θ) @@ -38,18 +38,16 @@ const f_tape = GradientTape(neg_lj_func, randn(θ_dim)) const compiled_f_tape = compile(f_tape) function grad_func(θ) + inputs = θ + results = similar(θ) + all_results = DiffResults.GradientResult(results) - inputs = θ - results = similar(θ) - all_results = DiffResults.GradientResult(results) - - gradient!(all_results, compiled_f_tape, inputs) - - neg_lj = all_results.value - grad, = all_results.derivs + gradient!(all_results, compiled_f_tape, inputs) - return -neg_lj, grad + neg_lj = all_results.value + grad, = all_results.derivs + return -neg_lj, grad end # Unit test for gradient diff --git a/test/skipped/gdemo_hmc.jl b/test/skipped/gdemo_hmc.jl index 577a10230..855b06f89 100644 --- a/test/skipped/gdemo_hmc.jl +++ b/test/skipped/gdemo_hmc.jl @@ -17,18 +17,18 @@ std = ones(θ_dim) θ = randn(θ_dim) lj = lj_func(θ) -chn = Dict(:θ=>Vector{Vector{Float64}}(), :logϵ=>Vector{Float64}()) +chn = Dict(:θ => Vector{Vector{Float64}}(), :logϵ => Vector{Float64}()) accept_num = 0 function dummy_print(args...) 
- nothing + return nothing end totla_num = 5000 -for iter = 1:totla_num - push!(chn[:θ], θ) - θ, lj, is_accept, τ_valid, α = _hmc_step(θ, lj, lj_func, grad_func, 5, 0.05, std) - accept_num += is_accept +for iter in 1:totla_num + push!(chn[:θ], θ) + θ, lj, is_accept, τ_valid, α = _hmc_step(θ, lj, lj_func, grad_func, 5, 0.05, std) + accept_num += is_accept end @show lj diff --git a/test/skipped/gdemo_nuts.jl b/test/skipped/gdemo_nuts.jl index 34c4c6f57..ecc4d5c7f 100644 --- a/test/skipped/gdemo_nuts.jl +++ b/test/skipped/gdemo_nuts.jl @@ -24,49 +24,47 @@ Hbar = 0 δ = 0.75 -for test_id = 1:2 - - test_name = "$test_id. NUTS " * (test_id == 1 ? "without DA" : "with DA") - - @testset "$test_name" begin - - std = ones(θ_dim) - θ = randn(θ_dim) - lj = lj_func(θ) - - chn = Dict(:θ=>Vector{Vector{Float64}}(), :logϵ=>Vector{Float64}()) - - function dummy_print(args...) - nothing - end - - println("Start to run NUTS") - - totla_num = 10000 - for iter = 1:totla_num - global logϵ, lj_func, grad_func, M_adapt, δ, μ - θ, da_stat = _nuts_step(θ, exp(logϵ), lj_func, grad_func, std) - if test_id == 1 - logϵ, Hbar, logϵbar = _adapt_ϵ(logϵ, Hbar, logϵbar, da_stat, iter, M_adapt, δ, μ) - end - - push!(chn[:θ], θ) - push!(chn[:logϵ], logϵ) - # if (iter % 50 == 0) println(θ) end +for test_id in 1:2 + test_name = "$test_id. NUTS " * (test_id == 1 ? "without DA" : "with DA") + + @testset "$test_name" begin + std = ones(θ_dim) + θ = randn(θ_dim) + lj = lj_func(θ) + + chn = Dict(:θ => Vector{Vector{Float64}}(), :logϵ => Vector{Float64}()) + + function dummy_print(args...) + return nothing + end + + println("Start to run NUTS") + + totla_num = 10000 + for iter in 1:totla_num + global logϵ, lj_func, grad_func, M_adapt, δ, μ + θ, da_stat = _nuts_step(θ, exp(logϵ), lj_func, grad_func, std) + if test_id == 1 + logϵ, Hbar, logϵbar = _adapt_ϵ( + logϵ, Hbar, logϵbar, da_stat, iter, M_adapt, δ, μ + ) + end + + push!(chn[:θ], θ) + push!(chn[:logϵ], logϵ) + # if (iter % 50 == 0) println(θ) end + end + + samples_s = exp.(map(x -> x[1], chn[:θ])) + samples_m = map(x -> x[2], chn[:θ]) + @show mean(samples_s[1000:end]) + @test mean(samples_s[1000:end]) ≈ 49 / 24 atol = 0.2 + @show mean(samples_m[1000:end]) + @test mean(samples_m[1000:end]) ≈ 7 / 6 atol = 0.2 + + @show std(samples_s[1000:end]) + @show std(samples_m[1000:end]) + + @show mean(exp.(chn[:logϵ])) end - - samples_s = exp.(map(x -> x[1], chn[:θ])) - samples_m = map(x -> x[2], chn[:θ]) - @show mean(samples_s[1000:end]) - @test mean(samples_s[1000:end]) ≈ 49/24 atol=0.2 - @show mean(samples_m[1000:end]) - @test mean(samples_m[1000:end]) ≈ 7/6 atol=0.2 - - @show std(samples_s[1000:end]) - @show std(samples_m[1000:end]) - - @show mean(exp.(chn[:logϵ])) - - end - end diff --git a/test/skipped/hmcda_geweke.jl b/test/skipped/hmcda_geweke.jl index b0e43ecb5..8e2f20986 100644 --- a/test/skipped/hmcda_geweke.jl +++ b/test/skipped/hmcda_geweke.jl @@ -5,29 +5,41 @@ using Gadfly import Gadfly.ElementOrFunction # First add a method to the basic Gadfly.plot function for QQPair types (generated by Distributions.qqbuild()) -Gadfly.plot(qq::QQPair, elements::ElementOrFunction...) = Gadfly.plot(x=qq.qx, y=qq.qy, Geom.point, Theme(highlight_width=0px), elements...) +function Gadfly.plot(qq::QQPair, elements::ElementOrFunction...) + return Gadfly.plot(; + x=qq.qx, y=qq.qy, Geom.point, Theme(; highlight_width=0px), elements... + ) +end # Now some shorthand functions qqplot(x, y, elements::ElementOrFunction...) = Gadfly.plot(qqbuild(x, y), elements...) -qqnorm(x, elements::ElementOrFunction...) 
= qqplot(Normal(), x, Guide.xlabel("Theoretical Normal quantiles"), Guide.ylabel("Observed quantiles"), elements...) +function qqnorm(x, elements::ElementOrFunction...) + return qqplot( + Normal(), + x, + Guide.xlabel("Theoretical Normal quantiles"), + Guide.ylabel("Observed quantiles"), + elements..., + ) +end NSamples = 5000 @model function gdemo_fw() - # s ~ InverseGamma(2,3) - s = 1 - m ~ Normal(0, sqrt(s)) - y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)]) + # s ~ InverseGamma(2,3) + s = 1 + m ~ Normal(0, sqrt(s)) + return y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)]) end @model function gdemo_bk(x) - # Backward Step 1: theta ~ theta | x - # s ~ InverseGamma(2,3) - s = 1 - m ~ Normal(0,sqrt(s)) - x ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)]) - # Backward Step 2: x ~ x | theta - y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)]) + # Backward Step 1: theta ~ theta | x + # s ~ InverseGamma(2,3) + s = 1 + m ~ Normal(0, sqrt(s)) + x ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)]) + # Backward Step 2: x ~ x | theta + return y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)]) end fw = PG(50, NSamples) @@ -44,24 +56,30 @@ s_bk = Array{Turing.Chain}(undef, N) simple_logger = Base.CoreLogging.SimpleLogger(stderr, Base.CoreLogging.Debug) Base.CoreLogging.with_logger(simple_logger) do - global x, bk, s_bk - i = 1 - while i <= N - s_bk[i] = sample(gdemo_bk(x), bk) - x = s_bk[i][end, :y] - i += 1 - end + global x, bk, s_bk + i = 1 + while i <= N + s_bk[i] = sample(gdemo_bk(x), bk) + x = s_bk[i][end, :y] + i += 1 + end end s2 = reduce(vcat, s_bk); # describe(s2) - using UnicodePlots qqm = qqbuild(s[:m], s2[:m]) -show(scatterplot(qqm.qx, qqm.qy, title = "QQ plot for m", canvas = DotCanvas)) -show(scatterplot(qqm.qx[51:end-50], qqm.qy[51:end-50], title = "QQ plot for m (removing first and last 50 quantiles):", canvas = DotCanvas)) +show(scatterplot(qqm.qx, qqm.qy; title="QQ plot for m", canvas=DotCanvas)) +show( + scatterplot( + qqm.qx[51:(end - 50)], + qqm.qy[51:(end - 50)]; + title="QQ plot for m (removing first and last 50 quantiles):", + canvas=DotCanvas, + ), +) qqm = qqbuild(s[:m], s2[:m]) X = qqm.qx @@ -71,10 +89,10 @@ slope = (1 / (transpose(X) * X)[1] * transpose(X) * y)[1] print(" slopeₛ = $slope ≈ 1 (ϵ = 0.1)") ans1 = abs(slope - 1.0) <= 0.1 if ans1 - printstyled(" ✓\n", color=:green) + printstyled(" ✓\n"; color=:green) else - printstyled(" X\n", color=:red) - printstyled(" slope = $slope, diff = $(slope - 1.0)\n", color=:red) + printstyled(" X\n"; color=:red) + printstyled(" slope = $slope, diff = $(slope - 1.0)\n"; color=:red) end # qqs = qqbuild(s[:s], s2[:s]) diff --git a/test/skipped/nuts_geweke.jl b/test/skipped/nuts_geweke.jl index 070b7f552..d312be884 100644 --- a/test/skipped/nuts_geweke.jl +++ b/test/skipped/nuts_geweke.jl @@ -5,29 +5,41 @@ using Gadfly import Gadfly.ElementOrFunction # First add a method to the basic Gadfly.plot function for QQPair types (generated by Distributions.qqbuild()) -Gadfly.plot(qq::QQPair, elements::ElementOrFunction...) = Gadfly.plot(x=qq.qx, y=qq.qy, Geom.point, Theme(highlight_width=0px), elements...) +function Gadfly.plot(qq::QQPair, elements::ElementOrFunction...) + return Gadfly.plot(; + x=qq.qx, y=qq.qy, Geom.point, Theme(; highlight_width=0px), elements... + ) +end # Now some shorthand functions qqplot(x, y, elements::ElementOrFunction...) = Gadfly.plot(qqbuild(x, y), elements...) -qqnorm(x, elements::ElementOrFunction...) 
= qqplot(Normal(), x, Guide.xlabel("Theoretical Normal quantiles"), Guide.ylabel("Observed quantiles"), elements...) +function qqnorm(x, elements::ElementOrFunction...) + return qqplot( + Normal(), + x, + Guide.xlabel("Theoretical Normal quantiles"), + Guide.ylabel("Observed quantiles"), + elements..., + ) +end NSamples = 5000 @model function gdemo_fw() - s ~ InverseGamma(2,3) - # s = 1 - m ~ Normal(0,sqrt(s)) - y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)]) + s ~ InverseGamma(2, 3) + # s = 1 + m ~ Normal(0, sqrt(s)) + return y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)]) end @model function gdemo_bk(x) - # Backward Step 1: theta ~ theta | x - s ~ InverseGamma(2,3) - # s = 1 - m ~ Normal(0,sqrt(s)) - x ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)]) - # Backward Step 2: x ~ x | theta - y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)]) + # Backward Step 1: theta ~ theta | x + s ~ InverseGamma(2, 3) + # s = 1 + m ~ Normal(0, sqrt(s)) + x ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)]) + # Backward Step 2: x ~ x | theta + return y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)]) end fw = IS(NSamples) @@ -45,18 +57,17 @@ s_bk = Array{MCMCChains.Chains}(undef, N) simple_logger = Base.CoreLogging.SimpleLogger(stderr, Base.CoreLogging.Debug) with_logger(simple_logger) do - i = 1 - while i <= N - s_bk[i] = sample(gdemo_bk(x), bk) - x = s_bk[i][end, :y] - i += 1 - end + i = 1 + while i <= N + s_bk[i] = sample(gdemo_bk(x), bk) + x = s_bk[i][end, :y] + i += 1 + end end s2 = chainscat(s_bk...) # describe(s2) - # qqplot(s[:m], s2[:m]) # qqplot(s[:s], s2[:s]) @@ -65,14 +76,33 @@ qqm = qqbuild(s[:m], s2[:m]) using UnicodePlots qqm = qqbuild(s[:m], s2[:m]) -show(scatterplot(qqm.qx, qqm.qy, title = "QQ plot for m", canvas = DotCanvas)) -show(scatterplot(qqm.qx[51:end-50], qqm.qy[51:end-50], title = "QQ plot for m (removing first and last 50 quantiles):", canvas = DotCanvas)) -show(scatterplot(qqm.qx, qqm.qy, title = "QQ plot for m")) -show(scatterplot(qqm.qx[51:end-50], qqm.qy[51:end-50], title = "QQ plot for m (removing first and last 50 quantiles):")) +show(scatterplot(qqm.qx, qqm.qy; title="QQ plot for m", canvas=DotCanvas)) +show( + scatterplot( + qqm.qx[51:(end - 50)], + qqm.qy[51:(end - 50)]; + title="QQ plot for m (removing first and last 50 quantiles):", + canvas=DotCanvas, + ), +) +show(scatterplot(qqm.qx, qqm.qy; title="QQ plot for m")) +show( + scatterplot( + qqm.qx[51:(end - 50)], + qqm.qy[51:(end - 50)]; + title="QQ plot for m (removing first and last 50 quantiles):", + ), +) qqs = qqbuild(s[:s].value, s2[:s].value) -show(scatterplot(qqs.qx, qqs.qy, title = "QQ plot for s")) -show(scatterplot(qqs.qx[51:end-50], qqs.qy[51:end-50], title = "QQ plot for s (removing first and last 50 quantiles):")) +show(scatterplot(qqs.qx, qqs.qy; title="QQ plot for s")) +show( + scatterplot( + qqs.qx[51:(end - 50)], + qqs.qy[51:(end - 50)]; + title="QQ plot for s (removing first and last 50 quantiles):", + ), +) X = qqm.qx y = qqm.qy @@ -81,10 +111,10 @@ slope = (1 / (transpose(X) * X)[1] * transpose(X) * y)[1] print(" slopeₛ = $slope ≈ 1 (ϵ = 0.1)") ans1 = abs(slope - 1.0) <= 0.1 if ans1 - printstyled(" ✓\n", color=:green) + printstyled(" ✓\n"; color=:green) else - printstyled(" X\n", color=:red) - printstyled(" slope = $slope, diff = $(slope - 1.0)\n", color=:red) + printstyled(" X\n"; color=:red) + printstyled(" slope = $slope, diff = $(slope - 1.0)\n"; color=:red) end # qqs = qqbuild(s[:s], s2[:s]) diff --git 
a/test/skipped/opt_param_of_dist.jl b/test/skipped/opt_param_of_dist.jl index e750208d8..3c8e4d2dc 100644 --- a/test/skipped/opt_param_of_dist.jl +++ b/test/skipped/opt_param_of_dist.jl @@ -2,11 +2,11 @@ using Turing using Test @model testassume begin - x ~ Bernoulli(1; :static = true) - y ~ Bernoulli(x / 2; :param = true) - 1 ~ Normal(0, 1; :static = true) - 2 ~ Normal(0, 1; :param = true) - y, x + x ~ Bernoulli(1; :static=true) + y ~ Bernoulli(x / 2; :param=true) + 1 ~ Normal(0, 1; :static=true) + 2 ~ Normal(0, 1; :param=true) + y, x end s = SMC() @@ -17,4 +17,4 @@ res = sample(testassume, s) @test all(isone, res[:x]) # check that the mean of y is between 0.4 and 0.6 -@test mean(res[:y]) ≈ 0.5 atol=0.1 +@test mean(res[:y]) ≈ 0.5 atol = 0.1 diff --git a/test/skipped/simple_gauss.jl b/test/skipped/simple_gauss.jl index 935ced27b..14f36dce1 100644 --- a/test/skipped/simple_gauss.jl +++ b/test/skipped/simple_gauss.jl @@ -6,9 +6,9 @@ using Turing @model function simple_gauss() s = 1 - m ~ Normal(0,sqrt(s)) + m ~ Normal(0, sqrt(s)) 2.0 ~ Normal(m, sqrt(s)) - 2.5 ~ Normal(m, sqrt(s)) + return 2.5 ~ Normal(m, sqrt(s)) end # Plain Julia @@ -17,17 +17,17 @@ using ReverseDiff: GradientTape, GradientConfig, gradient, gradient!, compile θ_dim = 1 function lj_func(θ) - _lj = zero(Real) + _lj = zero(Real) - s = 1 + s = 1 - m = θ[1] - _lj += logpdf(Normal(0, sqrt(s)), m) + m = θ[1] + _lj += logpdf(Normal(0, sqrt(s)), m) - _lj += logpdf(Normal(m, sqrt(s)), 2.0) - _lj += logpdf(Normal(m, sqrt(s)), 2.5) + _lj += logpdf(Normal(m, sqrt(s)), 2.0) + _lj += logpdf(Normal(m, sqrt(s)), 2.5) - return _lj + return _lj end neg_lj_func(θ) = -lj_func(θ) @@ -35,18 +35,16 @@ const f_tape = GradientTape(neg_lj_func, randn(θ_dim)) const compiled_f_tape = compile(f_tape) function grad_func(θ) + inputs = θ + results = similar(θ) + all_results = DiffResults.GradientResult(results) - inputs = θ - results = similar(θ) - all_results = DiffResults.GradientResult(results) - - gradient!(all_results, compiled_f_tape, inputs) - - neg_lj = all_results.value - grad, = all_results.derivs + gradient!(all_results, compiled_f_tape, inputs) - return -neg_lj, grad + neg_lj = all_results.value + grad, = all_results.derivs + return -neg_lj, grad end # Unit test for gradient diff --git a/test/skipped/simple_gauss_hmc.jl b/test/skipped/simple_gauss_hmc.jl index 8b2e41d7f..ba1f9dd23 100644 --- a/test/skipped/simple_gauss_hmc.jl +++ b/test/skipped/simple_gauss_hmc.jl @@ -16,17 +16,14 @@ std = ones(θ_dim) θ = randn(θ_dim) lj = lj_func(θ) -chn = Dict(:θ=>Vector{Vector{Float64}}(), :logϵ=>Vector{Float64}()) +chn = Dict(:θ => Vector{Vector{Float64}}(), :logϵ => Vector{Float64}()) accept_num = 1 - totla_num = 10000 -for iter = 1:totla_num - - push!(chn[:θ], θ) - θ, lj, is_accept, τ_valid, α = _hmc_step(θ, lj, lj_func, grad_func, 10, 0.05, std) - accept_num += is_accept - +for iter in 1:totla_num + push!(chn[:θ], θ) + θ, lj, is_accept, τ_valid, α = _hmc_step(θ, lj, lj_func, grad_func, 10, 0.05, std) + accept_num += is_accept end @show lj diff --git a/test/skipped/sv.jl b/test/skipped/sv.jl index 29b301614..b5fe22e71 100644 --- a/test/skipped/sv.jl +++ b/test/skipped/sv.jl @@ -2,37 +2,27 @@ using ReverseDiff: GradientTape, GradientConfig, gradient, gradient!, compile using Turing: _hmc_step using Turing using HDF5, JLD -sv_data = load(TPATH*"/example-models/nips-2017/sv-data.jld.data")["data"] +sv_data = load(TPATH * "/example-models/nips-2017/sv-data.jld.data")["data"] @model function sv_model(T, y) ϕ ~ Uniform(-1, 1) - σ ~ truncated(Cauchy(0,5), 0, 
Inf) + σ ~ truncated(Cauchy(0, 5), 0, Inf) μ ~ Cauchy(0, 10) h = Vector{Real}(T) h[1] ~ Normal(μ, σ / sqrt(1 - ϕ^2)) y[1] ~ Normal(0, exp.(h[1] / 2)) - for t = 2:T - h[t] ~ Normal(μ + ϕ * (h[t-1] - μ) , σ) - y[t] ~ Normal(0, exp.(h[t] / 2)) + for t in 2:T + h[t] ~ Normal(μ + ϕ * (h[t - 1] - μ), σ) + y[t] ~ Normal(0, exp.(h[t] / 2)) end - end +end - -mf = sv_model(data=sv_data[1]) +mf = sv_model(; data=sv_data[1]) chain_nuts = sample(mf, HMC(0.05, 10), 2000) println("mean of m: $(mean(chn[1000:end, :μ]))") - - - - - - - - - # θ_dim = 1 # function lj_func(θ) # _lj = zero(Real) @@ -74,7 +64,6 @@ println("mean of m: $(mean(chn[1000:end, :μ]))") # chn = [] # accept_num = 1 - # totla_num = 5000 # for iter = 1:totla_num # push!(chn, θ) diff --git a/test/skipped/unit_test_helper.jl b/test/skipped/unit_test_helper.jl index 1517459c5..174234480 100644 --- a/test/skipped/unit_test_helper.jl +++ b/test/skipped/unit_test_helper.jl @@ -10,9 +10,11 @@ function test_grad(turing_model, grad_f; trans=Dict()) @testset "Gradient using random inputs" begin ℓ = LogDensityProblemsAD.ADgradient( Turing.AutoTracker(), - Turing.LogDensityFunction(vi, model_f, SampleFromPrior(), DynamicPPL.DefaultContext()), + Turing.LogDensityFunction( + vi, model_f, SampleFromPrior(), DynamicPPL.DefaultContext() + ), ) - for _ = 1:10000 + for _ in 1:10000 theta = rand(d) @test LogDensityProblems.logdensity_and_gradient(ℓ, theta) == grad_f(theta)[2] end diff --git a/test/skipped/vec_assume_mat.jl b/test/skipped/vec_assume_mat.jl index 28ae99d47..b9efc94d0 100644 --- a/test/skipped/vec_assume_mat.jl +++ b/test/skipped/vec_assume_mat.jl @@ -4,17 +4,17 @@ N = 5 alg = HMC(0.2, 4) @model function vdemo(::Type{T}=Float64) where {T} - v = Vector{Matrix{T}}(undef, N) - @. v ~ Wishart(7, [1 0.5; 0.5 1]) + v = Vector{Matrix{T}}(undef, N) + @. v ~ Wishart(7, [1 0.5; 0.5 1]) end t_vec = @elapsed res_vec = sample(vdemo(), alg, 1000) @model function vdemo() - v = Vector{Matrix{Real}}(undef, N) - for i = 1:N - v[i] ~ Wishart(7, [1 0.5; 0.5 1]) - end + v = Vector{Matrix{Real}}(undef, N) + for i in 1:N + v[i] ~ Wishart(7, [1 0.5; 0.5 1]) + end end t_loop = @elapsed res = sample(vdemo(), alg, 1000) diff --git a/test/skipped/vec_assume_mv.jl b/test/skipped/vec_assume_mv.jl index bafa1f096..ba373b124 100644 --- a/test/skipped/vec_assume_mv.jl +++ b/test/skipped/vec_assume_mv.jl @@ -7,24 +7,26 @@ alg = HMC(0.2, 4) # Test for vectorize UnivariateDistribution @model function vdemo() - phi = Vector{Vector{Real}}(undef, N) - @> phi ~ Dirichlet(beta) + phi = Vector{Vector{Real}}(undef, N) + @> phi ~ Dirichlet(beta) end ch_vec, t_vec, m_vec, gctime, memallocs = @timed res_vec = sample(vdemo(), alg) @model function vdemo() - phi = Matrix(undef, 2, N) - @. phi ~ Dirichlet(beta) + phi = Matrix(undef, 2, N) + @. 
phi ~ Dirichlet(beta) end -ch_vec_mat, t_vec_mat, m_vec_mat, gctime, memallocs = @timed res_vec_mat = sample(vdemo(), alg) +ch_vec_mat, t_vec_mat, m_vec_mat, gctime, memallocs = @timed res_vec_mat = sample( + vdemo(), alg +) @model function vdemo() - phi = Vector{Vector{Real}}(undef, N) - for i = 1:N - phi[i] ~ Dirichlet(beta) - end + phi = Vector{Vector{Real}}(undef, N) + for i in 1:N + phi[i] ~ Dirichlet(beta) + end end ch_loop, t_loop, m_loop, gctime, memallocs = @timed res = sample(vdemo(), alg) diff --git a/test/stdlib/RandomMeasures.jl b/test/stdlib/RandomMeasures.jl index b56504451..cf4ef90ad 100644 --- a/test/stdlib/RandomMeasures.jl +++ b/test/stdlib/RandomMeasures.jl @@ -1,3 +1,11 @@ +module RandomMeasuresTests + +using Distributions: Normal, sample +using Random: Random +using Test: @test, @testset +using Turing +using Turing.RandomMeasures: ChineseRestaurantProcess, DirichletProcess + @testset "RandomMeasures.jl" begin @testset "Infinite Mixture Model" begin @model function infiniteGMM(x) @@ -23,37 +31,37 @@ # Number of clusters. K = maximum(z) nk = Vector{Int}(map(k -> sum(z .== k), 1:K)) - + # Draw the latent assignment. z[i] ~ ChineseRestaurantProcess(rpm, nk) - + # Create a new cluster? if z[i] > K push!(μ, 0.0) - + # Draw location of new cluster. μ[z[i]] ~ H end - + # Draw observation. x[i] ~ Normal(μ[z[i]], 1.0) end end - + # Generate some test data. - Random.seed!(1); - data = vcat(randn(10), randn(10) .- 5, randn(10) .+ 10); - data .-= mean(data); - data /= std(data); - + Random.seed!(1) + data = vcat(randn(10), randn(10) .- 5, randn(10) .+ 10) + data .-= mean(data) + data /= std(data) + # MCMC sampling - Random.seed!(2); - iterations = 500; - model_fun = infiniteGMM(data); - chain = sample(model_fun, SMC(), iterations); + Random.seed!(2) + iterations = 500 + model_fun = infiniteGMM(data) + chain = sample(model_fun, SMC(), iterations) @test chain isa MCMCChains.Chains - @test eltype(chain.value) === Union{Float64, Missing} + @test eltype(chain.value) === Union{Float64,Missing} end # partitions = [ # [[1, 2, 3, 4]], @@ -72,7 +80,7 @@ # [[1], [2], [3, 4]], # [[1], [2], [3], [4]]] - # @turing_testset "chinese restaurant processes" begin + # @testset "chinese restaurant processes" begin # # Data # data = [-2,2,-1.5,1.5] @@ -146,7 +154,7 @@ # @test discr < 0.2 # end # @testset "distributions" begin - # @turing_testset "Representations" begin + # @testset "Representations" begin # d = StickBreakingProcess(DirichletProcess(1.0)) # @test minimum(d) == 0 # @test maximum(d) == 1 @@ -159,7 +167,7 @@ # @test minimum(d) == 1 # @test maximum(d) == 3 # end - # @turing_testset "Dirichlet Process" begin + # @testset "Dirichlet Process" begin # α = 0.1 # N = 10_000 @@ -187,7 +195,7 @@ # @test p[2] ≈ q[2] atol=0.1 # @test p[3] ≈ q[3] atol=0.1 # end - # @turing_testset "Pitman-Yor Process" begin + # @testset "Pitman-Yor Process" begin # a = 0.5 # θ = 0.1 @@ -218,7 +226,7 @@ # @test p[3] ≈ q[3] atol=0.1 # end # end - # @turing_testset "stick breaking" begin + # @testset "stick breaking" begin # # Data # data = [-2,2,-1.5,1.5] @@ -304,7 +312,7 @@ # @test l2 < 0.1 # @test discr < 0.3 # end - # @turing_testset "size-based sampling" begin + # @testset "size-based sampling" begin # # Data # data = [-2,2,-1.5,1.5] @@ -381,3 +389,5 @@ # @test discr < 0.2 # end end + +end diff --git a/test/stdlib/distributions.jl b/test/stdlib/distributions.jl index 9024bba9b..0d42fb76b 100644 --- a/test/stdlib/distributions.jl +++ b/test/stdlib/distributions.jl @@ -1,6 +1,17 @@ +module DistributionsTests + +using 
..NumericalTests: check_dist_numerical +using Distributions +using LinearAlgebra: I +using Random: Random +using StableRNGs: StableRNG +using StatsFuns: logistic +using Test: @testset, @test +using Turing + @testset "distributions.jl" begin rng = StableRNG(12345) - @turing_testset "distributions functions" begin + @testset "distributions functions" begin ns = 10 logitp = randn(rng) d1 = BinomialLogit(ns, logitp) @@ -9,7 +20,7 @@ @test logpdf(d1, k) ≈ logpdf(d2, k) end - @turing_testset "distributions functions" begin + @testset "distributions functions" begin d = OrderedLogistic(-2, [-1, 1]) n = 1_000_000 @@ -21,15 +32,14 @@ @test all(((x, y),) -> abs(x - y) < 0.001, zip(p, pmf)) end - @turing_testset "distributions functions" begin - λ = .01:.01:5 - LLp = @. logpdf(Poisson(λ),1) - LLlp = @. logpdf(LogPoisson(log(λ)),1) - @test LLp ≈ LLlp atol = .0001 - + @testset "distributions functions" begin + λ = 0.01:0.01:5 + LLp = @. logpdf(Poisson(λ), 1) + LLlp = @. logpdf(LogPoisson(log(λ)), 1) + @test LLp ≈ LLlp atol = 0.0001 end - @numerical_testset "single distribution correctness" begin + @testset "single distribution correctness" begin rng = StableRNG(1) n_samples = 10_000 @@ -95,31 +105,32 @@ # 3. MatrixDistribution dist_matrix = [ - Wishart(7, [1.0 0.5; 0.5 1.0]), - InverseWishart(7, [1.0 0.5; 0.5 1.0]), + Wishart(7, [1.0 0.5; 0.5 1.0]), InverseWishart(7, [1.0 0.5; 0.5 1.0]) ] - @numerical_testset "Correctness test for single distributions" begin - for (dist_set, dist_list) ∈ [ - ("UnivariateDistribution", dist_uni), + @testset "Correctness test for single distributions" begin + for (dist_set, dist_list) in [ + ("UnivariateDistribution", dist_uni), ("MultivariateDistribution", dist_multi), - ("MatrixDistribution", dist_matrix) + ("MatrixDistribution", dist_matrix), ] @testset "$(string(dist_set))" begin for dist in dist_list - @testset "$(string(typeof(dist)))" begin - @info "Distribution(params)" dist + @testset "$(string(typeof(dist)))" begin + @info "Distribution(params)" dist - @model m() = x ~ dist + @model m() = x ~ dist - chn = sample(rng, m(), HMC(0.05, 20), n_samples) + chn = sample(rng, m(), HMC(0.05, 20), n_samples) - # Numerical tests. - check_dist_numerical(dist, - chn, - mean_tol=mean_tol, - var_atol=var_atol, - var_tol=var_tol) + # Numerical tests. + check_dist_numerical( + dist, + chn; + mean_tol=mean_tol, + var_atol=var_atol, + var_tol=var_tol, + ) end end end @@ -127,3 +138,5 @@ end end end + +end diff --git a/test/test_utils/AllUtils.jl b/test/test_utils/AllUtils.jl deleted file mode 100644 index 4ca57d838..000000000 --- a/test/test_utils/AllUtils.jl +++ /dev/null @@ -1,7 +0,0 @@ -# Import utility functions and reused models. -include("staging.jl") -include("numerical_tests.jl") -include("ad_utils.jl") -include("models.jl") -include("random_measure_utils.jl") -include("testing_functions.jl") diff --git a/test/test_utils/SelectiveTests.jl b/test/test_utils/SelectiveTests.jl new file mode 100644 index 000000000..3026cd16d --- /dev/null +++ b/test/test_utils/SelectiveTests.jl @@ -0,0 +1,59 @@ +module SelectiveTests + +""" + parse_args(args) + +Parse the command line arguments to get the included and excluded test file paths. 
+ +The arguments are expected to be in the form: +``` +a b c --skip d e f +``` +where a test file is to be included if and only if +1) the argument list is empty, in which case all files are included, +or +2) + a) it has as a substring of its path any of the strings `a`, `b`, or `c`, + and + b) it does not have as a substring of its path any of the strings `d`, `e`, or `f`. + +The substring checks are done case-insensitively. +""" +function parse_args(args) + included_paths = Vector{String}() + excluded_paths = Vector{String}() + for (i, arg) in enumerate(args) + if arg == "--skip" + append!(excluded_paths, args[(i + 1):end]) + break + else + push!(included_paths, arg) + end + end + return included_paths, excluded_paths +end + +""" + isincluded(filepath, included_paths, excluded_paths) + +Check if a file should be included in the tests. + +`included_paths` and `excluded_paths` are the output of [`parse_args`](@ref). + +See [`parse_args`](@ref) for the logic of when a file should be included. +""" +function isincluded( + filepath::AbstractString, + included_paths::Vector{<:AbstractString}, + excluded_paths::Vector{<:AbstractString}, +) + if any(excl -> occursin(lowercase(excl), lowercase(filepath)), excluded_paths) + return false + end + if any(incl -> occursin(lowercase(incl), lowercase(filepath)), included_paths) + return true + end + return isempty(included_paths) +end + +end diff --git a/test/test_utils/ad_utils.jl b/test/test_utils/ad_utils.jl deleted file mode 100644 index e48a46ae2..000000000 --- a/test/test_utils/ad_utils.jl +++ /dev/null @@ -1,106 +0,0 @@ -""" - test_reverse_mode_ad(forward, f, ȳ, x...; rtol=1e-6, atol=1e-6) - -Check that the reverse-mode sensitivities produced by an AD library are correct for `f` -at `x...`, given sensitivity `ȳ` w.r.t. `y = f(x...)` up to `rtol` and `atol`. -""" -function test_reverse_mode_ad(f, ȳ, x...; rtol=1e-6, atol=1e-6) - # Perform a regular forwards-pass. - y = f(x...) - - # Use Tracker to compute reverse-mode sensitivities. - y_tracker, back_tracker = Tracker.forward(f, x...) - x̄s_tracker = back_tracker(ȳ) - - # Use Zygote to compute reverse-mode sensitivities. - y_zygote, back_zygote = Zygote.pullback(f, x...) - x̄s_zygote = back_zygote(ȳ) - - test_rd = length(x) == 1 && y isa Number - if test_rd - # Use ReverseDiff to compute reverse-mode sensitivities. - if x[1] isa Array - x̄s_rd = similar(x[1]) - tp = ReverseDiff.GradientTape(x -> f(x), x[1]) - ReverseDiff.gradient!(x̄s_rd, tp, x[1]) - x̄s_rd .*= ȳ - y_rd = ReverseDiff.value(tp.output) - @assert y_rd isa Number - else - x̄s_rd = [x[1]] - tp = ReverseDiff.GradientTape(x -> f(x[1]), [x[1]]) - ReverseDiff.gradient!(x̄s_rd, tp, [x[1]]) - y_rd = ReverseDiff.value(tp.output)[1] - x̄s_rd = x̄s_rd[1] * ȳ - @assert y_rd isa Number - end - end - - # Use finite differencing to compute reverse-mode sensitivities. - x̄s_fdm = FDM.j′vp(central_fdm(5, 1), f, ȳ, x...) - - # Check that Tracker forwards-pass produces the correct answer. - @test isapprox(y, Tracker.data(y_tracker), atol=atol, rtol=rtol) - - # Check that Zygpte forwards-pass produces the correct answer. - @test isapprox(y, y_zygote, atol=atol, rtol=rtol) - - if test_rd - # Check that ReverseDiff forwards-pass produces the correct answer. - @test isapprox(y, y_rd, atol=atol, rtol=rtol) - end - - # Check that Tracker reverse-mode sensitivities are correct. 
- @test all(zip(x̄s_tracker, x̄s_fdm)) do (x̄_tracker, x̄_fdm) - isapprox(Tracker.data(x̄_tracker), x̄_fdm; atol=atol, rtol=rtol) - end - - # Check that Zygote reverse-mode sensitivities are correct. - @test all(zip(x̄s_zygote, x̄s_fdm)) do (x̄_zygote, x̄_fdm) - isapprox(x̄_zygote, x̄_fdm; atol=atol, rtol=rtol) - end - - if test_rd - # Check that ReverseDiff reverse-mode sensitivities are correct. - @test isapprox(x̄s_rd, x̄s_zygote[1]; atol=atol, rtol=rtol) - end -end - -function test_model_ad(model, f, syms::Vector{Symbol}) - # Set up VI. - vi = Turing.VarInfo(model) - - # Collect symbols. - vnms = Vector(undef, length(syms)) - vnvals = Vector{Float64}() - for i in 1:length(syms) - s = syms[i] - vnms[i] = getfield(vi.metadata, s).vns[1] - - vals = getval(vi, vnms[i]) - for i in eachindex(vals) - push!(vnvals, vals[i]) - end - end - - # Compute primal. - x = vec(vnvals) - logp = f(x) - - # Call ForwardDiff's AD directly. - grad_FWAD = sort(ForwardDiff.gradient(f, x)) - - # Compare with `logdensity_and_gradient`. - z = vi[SampleFromPrior()] - for chunksize in (0, 1, 10), standardtag in (true, false, 0, 3) - ℓ = LogDensityProblemsAD.ADgradient( - Turing.AutoForwardDiff(; chunksize=chunksize, tag=standardtag), - Turing.LogDensityFunction(vi, model, SampleFromPrior(), DynamicPPL.DefaultContext()), - ) - l, ∇E = LogDensityProblems.logdensity_and_gradient(ℓ, z) - - # Compare result - @test l ≈ logp - @test sort(∇E) ≈ grad_FWAD atol = 1e-9 - end -end diff --git a/test/test_utils/models.jl b/test/test_utils/models.jl index fc392b050..344002c05 100644 --- a/test/test_utils/models.jl +++ b/test/test_utils/models.jl @@ -1,18 +1,31 @@ +module Models + +export MoGtest, + MoGtest_default, + MoGtest_default_z_vector, + MoGtest_z_vector, + gdemo, + gdemo_d, + gdemo_default + +using Distributions +using Turing: @model + # The old-gdemo model. @model function gdemo(x, y) - s ~ InverseGamma(2, 3) - m ~ Normal(0, sqrt(s)) - x ~ Normal(m, sqrt(s)) - y ~ Normal(m, sqrt(s)) - return s, m + s ~ InverseGamma(2, 3) + m ~ Normal(0, sqrt(s)) + x ~ Normal(m, sqrt(s)) + y ~ Normal(m, sqrt(s)) + return s, m end @model function gdemo_d() - s ~ InverseGamma(2, 3) - m ~ Normal(0, sqrt(s)) - 1.5 ~ Normal(m, sqrt(s)) - 2.0 ~ Normal(m, sqrt(s)) - return s, m + s ~ InverseGamma(2, 3) + m ~ Normal(0, sqrt(s)) + 1.5 ~ Normal(m, sqrt(s)) + 2.0 ~ Normal(m, sqrt(s)) + return s, m end gdemo_default = gdemo_d() @@ -44,7 +57,7 @@ gdemo_default = gdemo_d() else D[4] ~ Normal(mu2, 1) end - z1, z2, z3, z4, mu1, mu2 + return z1, z2, z3, z4, mu1, mu2 end MoGtest_default = MoGtest([1.0 1.0 4.0 4.0]) @@ -78,10 +91,9 @@ MoGtest_default = MoGtest([1.0 1.0 4.0 4.0]) else D[4] ~ Normal(mu2, 1) end - z[1], z[2], z[3], z[4], mu1, mu2 + return z[1], z[2], z[3], z[4], mu1, mu2 end MoGtest_default_z_vector = MoGtest_z_vector([1.0 1.0 4.0 4.0]) -# Declare empty model to make the Sampler constructor work. -@model empty_model() = x = 1 +end diff --git a/test/test_utils/numerical_tests.jl b/test/test_utils/numerical_tests.jl index 333a3f14a..cb583b517 100644 --- a/test/test_utils/numerical_tests.jl +++ b/test/test_utils/numerical_tests.jl @@ -1,3 +1,12 @@ +module NumericalTests + +using Distributions +using MCMCChains: namesingroup +using Test: @test, @testset + +export check_MoGtest_default, check_MoGtest_default_z_vector, check_dist_numerical, + check_gdemo, check_numerical + function check_dist_numerical(dist, chn; mean_tol = 0.1, var_atol = 1.0, var_tol = 0.5) @testset "numerical" begin # Extract values. 
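A side effect of turning test/test_utils/models.jl and test/test_utils/numerical_tests.jl into the Models and NumericalTests modules is a convention for the test files themselves: each wraps its contents in a module and pulls the shared fixtures in through relative `using` statements, as the RandomMeasuresTests and DistributionsTests headers above already do. A minimal sketch of a file following that convention (not part of the diff; the module name, test-set name, and sampler settings are illustrative, and it assumes the file is `include`d from test/runtests.jl so that Models and NumericalTests are siblings under Main):

module GdemoSmokeTests

using ..Models: gdemo_default
using ..NumericalTests: check_gdemo
using Test: @testset
using Turing

@testset "gdemo smoke test" begin
    # Draw a short HMC chain from the shared gdemo fixture and let the
    # NumericalTests helper check it against gdemo's known posterior.
    chn = sample(gdemo_default, HMC(0.05, 20), 2_000)
    check_gdemo(chn; atol=0.5)
end

end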
@@ -71,3 +80,5 @@ function check_MoGtest_default_z_vector(chain; atol=0.2, rtol=0.0) [1.0, 1.0, 2.0, 2.0, 1.0, 4.0], atol=atol, rtol=rtol) end + +end diff --git a/test/test_utils/random_measure_utils.jl b/test/test_utils/random_measure_utils.jl deleted file mode 100644 index 63c84c11a..000000000 --- a/test/test_utils/random_measure_utils.jl +++ /dev/null @@ -1,34 +0,0 @@ -function compute_log_joint(observations, partition, tau0, tau1, sigma, theta) - n = length(observations) - k = length(partition) - prob = k*log(sigma) + lgamma(theta) + lgamma(theta/sigma + k) - lgamma(theta/sigma) - lgamma(theta + n) - for cluster in partition - prob += lgamma(length(cluster) - sigma) - lgamma(1 - sigma) - prob += compute_log_conditional_observations(observations, cluster, tau0, tau1) - end - prob -end - -function compute_log_conditional_observations(observations, cluster, tau0, tau1) - nl = length(cluster) - prob = (nl/2)*log(tau1) - (nl/2)*log(2*pi) + 0.5*log(tau0) + 0.5*log(tau0+nl) - prob += -tau1/2*(sum(observations)) + 0.5*(tau0*mu_0+tau1*sum(observations[cluster]))^2/(tau0+nl*tau1) - prob -end - -# Test of similarity between distributions -function correct_posterior(empirical_probs, data, partitions, τ0, τ1, σ, θ) - true_log_probs = map(p -> compute_log_joint(data, p, τ0, τ1, σ, θ), partitions) - true_probs = exp.(true_log_probs) - true_probs /= sum(true_probs) - - empirical_probs /= sum(empirical_probs) - - # compare distribitions - # L2 - L2 = sum((empirical_probs - true_probs).^2) - - # Discrepancy - discr = maximum(abs.(empirical_probs - true_probs)) - return L2, discr -end diff --git a/test/test_utils/staging.jl b/test/test_utils/staging.jl deleted file mode 100644 index 15d5853d0..000000000 --- a/test/test_utils/staging.jl +++ /dev/null @@ -1,52 +0,0 @@ -function get_stage() - # Appveyor uses "True" for non-Ubuntu images. - if get(ENV, "APPVEYOR", "") == "True" || get(ENV, "APPVEYOR", "") == "true" - return "nonnumeric" - end - - # Handle Travis and Github Actions specially. - if get(ENV, "TRAVIS", "") == "true" || get(ENV, "GITHUB_ACTIONS", "") == "true" - if "STAGE" in keys(ENV) - return ENV["STAGE"] - else - return "all" - end - end - - return "all" -end - -function do_test(stage_str) - stg = get_stage() - - # If the tests are being run by Appveyor, don't run - # any numerical tests. - if stg == "nonnumeric" - if stage_str == "numerical" - return false - else - return true - end - end - - # Otherwise run the regular testing procedure. - if stg == "all" || stg == stage_str - return true - end - - return false -end - -macro stage_testset(stage_string::String, args...) - if do_test(stage_string) - return esc(:(@testset($(args...)))) - end -end - -macro numerical_testset(args...) - esc(:(@stage_testset "numerical" $(args...))) -end - -macro turing_testset(args...) 
- esc(:(@stage_testset "test" $(args...))) -end diff --git a/test/test_utils/testing_functions.jl b/test/test_utils/testing_functions.jl deleted file mode 100644 index 4f00d7793..000000000 --- a/test/test_utils/testing_functions.jl +++ /dev/null @@ -1,26 +0,0 @@ -GKernel(var) = (x) -> Normal(x, sqrt.(var)) - -function randr(vi::Turing.VarInfo, - vn::Turing.VarName, - dist::Distribution, - spl::Turing.Sampler, - count::Bool = false) - if ~haskey(vi, vn) - r = rand(dist) - Turing.push!(vi, vn, r, dist, spl) - return r - elseif is_flagged(vi, vn, "del") - unset_flag!(vi, vn, "del") - r = rand(dist) - Turing.RandomVariables.setval!(vi, Turing.vectorize(dist, r), vn) - return r - else - if count Turing.checkindex(vn, vi, spl) end - Turing.updategid!(vi, vn, spl) - return vi[vn] - end -end - -function insdelim(c, deli=",") - return reduce((e, res) -> append!(e, [res, deli]), c; init = [])[1:end-1] -end diff --git a/test/variational/advi.jl b/test/variational/advi.jl index 62e5ac400..639df018c 100644 --- a/test/variational/advi.jl +++ b/test/variational/advi.jl @@ -1,5 +1,19 @@ +module AdvancedVITests + +using ..Models: gdemo_default +using ..NumericalTests: check_gdemo +import AdvancedVI +using AdvancedVI: TruncatedADAGrad, DecayedADAGrad +using Distributions: Dirichlet, Normal +using LinearAlgebra: I +using MCMCChains: Chains +import Random +using Test: @test, @testset +using Turing +using Turing.Essential: TuringDiagMvNormal + @testset "advi.jl" begin - @turing_testset "advi constructor" begin + @testset "advi constructor" begin Random.seed!(0) N = 500 @@ -7,22 +21,22 @@ q = vi(gdemo_default, s1) c1 = rand(q, N) end - @numerical_testset "advi inference" begin + @testset "advi inference" begin @testset for opt in [TruncatedADAGrad(), DecayedADAGrad()] Random.seed!(1) N = 500 alg = ADVI(10, 5000) - q = vi(gdemo_default, alg; optimizer = opt) + q = vi(gdemo_default, alg; optimizer=opt) samples = transpose(rand(q, N)) chn = Chains(reshape(samples, size(samples)..., 1), ["s", "m"]) # TODO: uhmm, seems like a large `eps` here... - check_gdemo(chn, atol = 0.5) + check_gdemo(chn; atol=0.5) end end - @turing_testset "advi different interfaces" begin + @testset "advi different interfaces" begin Random.seed!(1234) target = MvNormal(zeros(2), I) @@ -38,7 +52,7 @@ # OR: implement `update` and pass a `Distribution` function AdvancedVI.update(d::TuringDiagMvNormal, θ::AbstractArray{<:Real}) - return TuringDiagMvNormal(θ[1:length(q)], exp.(θ[length(q) + 1:end])) + return TuringDiagMvNormal(θ[1:length(q)], exp.(θ[(length(q) + 1):end])) end q0 = TuringDiagMvNormal(zeros(2), ones(2)) @@ -50,12 +64,12 @@ # regression test for: # https://github.com/TuringLang/Turing.jl/issues/2065 - @turing_testset "simplex bijector" begin + @testset "simplex bijector" begin @model function dirichlet() - x ~ Dirichlet([1.0,1.0]) + x ~ Dirichlet([1.0, 1.0]) return x end - + m = dirichlet() b = bijector(m) x0 = m() @@ -68,17 +82,17 @@ # And regression for https://github.com/TuringLang/Turing.jl/issues/2160. q = vi(m, ADVI(10, 1000)) x = rand(q, 1000) - @test mean(eachcol(x)) ≈ [0.5, 0.5] atol=0.1 + @test mean(eachcol(x)) ≈ [0.5, 0.5] atol = 0.1 end # Ref: https://github.com/TuringLang/Turing.jl/issues/2205 - @turing_testset "with `condition` (issue #2205)" begin + @testset "with `condition` (issue #2205)" begin @model function demo_issue2205() x ~ Normal() - y ~ Normal(x, 1) + return y ~ Normal(x, 1) end - model = demo_issue2205() | (y = 1.0,) + model = demo_issue2205() | (y=1.0,) q = vi(model, ADVI(10, 1000)) # True mean. 
mean_true = 1 / 2 @@ -87,7 +101,9 @@ samples = rand(q, 1000) mean_est = mean(samples) var_est = var(samples) - @test mean_est ≈ mean_true atol=0.2 - @test var_est ≈ var_true atol=0.2 + @test mean_est ≈ mean_true atol = 0.2 + @test var_est ≈ var_true atol = 0.2 end end + +end diff --git a/test/variational/optimisers.jl b/test/variational/optimisers.jl index 46a81aa0d..6f64d5fb1 100644 --- a/test/variational/optimisers.jl +++ b/test/variational/optimisers.jl @@ -1,8 +1,16 @@ +module VariationalOptimisersTests + +using AdvancedVI: DecayedADAGrad, TruncatedADAGrad, apply! +import ForwardDiff +import ReverseDiff +using Test: @test, @testset +using Turing + function test_opt(ADPack, opt) θ = randn(10, 10) θ_fit = randn(10, 10) - loss(x, θ_) = mean(sum(abs2, θ*x - θ_*x; dims = 1)) - for t = 1:10^4 + loss(x, θ_) = mean(sum(abs2, θ * x - θ_ * x; dims=1)) + for t in 1:(10^4) x = rand(10) Δ = ADPack.gradient(θ_ -> loss(x, θ_), θ_fit) Δ = apply!(opt, θ_fit, Δ) @@ -17,3 +25,5 @@ end for opt in [TruncatedADAGrad(), DecayedADAGrad(1e-2)] test_opt(ReverseDiff, opt) end + +end
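Taken together, the new test/test_utils/SelectiveTests.jl and the rewritten top of test/runtests.jl above are what allow a subset of the test files to be selected from the arguments passed to the test suite. A small illustration of the documented semantics (not part of the diff; the file paths are made up, and it assumes the snippet is run from the repository root):

include("test/test_utils/SelectiveTests.jl")
using .SelectiveTests: parse_args, isincluded

# Everything before `--skip` is an inclusion pattern; everything after it is an exclusion pattern.
included, excluded = parse_args(["mcmc/hmc.jl", "--skip", "mcmc/gibbs.jl"])

isincluded("test/mcmc/hmc.jl", included, excluded)   # true: matches an inclusion pattern
isincluded("test/mcmc/gibbs.jl", included, excluded) # false: matches an exclusion pattern
isincluded("test/mcmc/ess.jl", included, excluded)   # false: inclusions were given and none of them match
isincluded("test/mcmc/ess.jl", String[], excluded)   # true: an empty inclusion list means "run everything not skipped"

In test/runtests.jl this same pair of calls is what decides whether `@timeit_include` actually includes a file or prints that it is being skipped.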