using StatisticalRethinking, Distributed, JLD
using Mamba
# Data
# Assemble the model data: read the Howell1 dataset shipped with
# StatisticalRethinking and build the response, predictor and design matrix.
line = Dict{Symbol, Any}()
howell1 = CSV.read(joinpath(dirname(Base.pathof(StatisticalRethinking)), "..", "data", "Howell1.csv"), delim=';')
df = convert(DataFrame, howell1)
# Use only adults (age >= 18).
df2 = filter(row -> row[:age] >= 18, df);
line[:x] = convert(Array{Float64,1}, df2[:weight]);
line[:y] = convert(Array{Float64,1}, df2[:height]);
# Design matrix: a column of ones (intercept) next to the weight predictor.
line[:xmat] = convert(Array{Float64,2}, [ones(length(line[:x])) line[:x]])
# Model Specification
# Bayesian linear regression of height (y) on weight via the design matrix:
#   y ~ MvNormal(xmat * beta, sqrt(s2))
# NOTE(review): Mamba wires node dependencies from the closure argument names
# (xmat, beta, s2), so these names must match the data/parameter node names.
model = Model(
# Likelihood node; the trailing `false` marks y as not monitored in the output.
y = Stochastic(1,
(xmat, beta, s2) -> MvNormal(xmat * beta, sqrt(s2)),
false
),
# Priors on the coefficients: intercept centered at 178, slope at 0, with
# standard deviations sqrt(10000) = 100 and sqrt(100) = 10 — matching the
# rand(Normal(178, 100)) / rand(Normal(0, 10)) initial draws below.
beta = Stochastic(1, () -> MvNormal([178, 0], [sqrt(10000), sqrt(100)])),
# Flat prior on the variance parameter s2 over (0, 50).
s2 = Stochastic(() -> Uniform(0, 50))
)
# Initial Values
# Starting values, one Dict per chain: the observed heights for y, and
# fresh random draws from the priors for beta and s2.
inits = map(1:3) do chain
Dict{Symbol, Any}(
:y => line[:y],
:beta => [rand(Normal(178, 100)), rand(Normal(0, 10))],
:s2 => rand(Uniform(0, 50))
)
end
# Tuning Parameters
# Tuning parameters (defined for parity with other sampler variants of this
# example; the NUTS/Slice scheme below does not reference them).
scale1 = [0.5, 0.25]
summary1 = identity
eps1 = 0.5
scale2 = 0.5
# Summarise a draw by its mean and standard deviation.
summary2 = x -> vcat(mean(x), sqrt(var(x)))
eps2 = 0.1
# Define sampling scheme
# Sampling scheme: NUTS for the regression coefficients, slice sampling
# (window width 10) for the variance parameter; attach it to the model.
scheme = [
Mamba.NUTS([:beta]),
Mamba.Slice([:s2], 10)
]
setsamplers!(model, scheme)
# MCMC Simulation
# Run the simulation: 3 chains of 10,000 iterations each, discarding the
# first 1,000 iterations of every chain as burn-in.
sim = mcmc(model, line, inits, 10000, burnin=1000, chains=3)
MCMC Simulation of 10000 Iterations x 3 Chains...
Chain 1: 0% [0:16:06 of 0:16:07 remaining]
Chain 1: 10% [0:01:10 of 0:01:18 remaining]
Chain 1: 20% [0:01:05 of 0:01:21 remaining]
Chain 1: 30% [0:00:59 of 0:01:24 remaining]
Chain 1: 40% [0:00:51 of 0:01:25 remaining]
Chain 1: 50% [0:00:44 of 0:01:27 remaining]
Chain 1: 60% [0:00:35 of 0:01:27 remaining]
Chain 1: 70% [0:00:26 of 0:01:25 remaining]
Chain 1: 80% [0:00:17 of 0:01:25 remaining]
Chain 1: 90% [0:00:09 of 0:01:25 remaining]
Chain 1: 100% [0:00:00 of 0:01:25 remaining]
Chain 2: 0% [0:00:03 of 0:00:03 remaining]
Chain 2: 10% [0:01:04 of 0:01:11 remaining]
Chain 2: 20% [0:01:01 of 0:01:16 remaining]
Chain 2: 30% [0:00:54 of 0:01:18 remaining]
Chain 2: 40% [0:00:48 of 0:01:20 remaining]
Chain 2: 50% [0:00:40 of 0:01:20 remaining]
Chain 2: 60% [0:00:32 of 0:01:20 remaining]
Chain 2: 70% [0:00:24 of 0:01:21 remaining]
Chain 2: 80% [0:00:16 of 0:01:21 remaining]
Chain 2: 90% [0:00:08 of 0:01:22 remaining]
Chain 2: 100% [0:00:00 of 0:01:22 remaining]
Chain 3: 0% [0:00:03 of 0:00:03 remaining]
Chain 3: 10% [0:00:40 of 0:00:44 remaining]
Chain 3: 20% [0:00:46 of 0:00:58 remaining]
Chain 3: 30% [0:00:46 of 0:01:06 remaining]
Chain 3: 40% [0:00:41 of 0:01:08 remaining]
Chain 3: 50% [0:00:34 of 0:01:08 remaining]
Chain 3: 60% [0:00:27 of 0:01:08 remaining]
Chain 3: 70% [0:00:21 of 0:01:09 remaining]
Chain 3: 80% [0:00:14 of 0:01:11 remaining]
Chain 3: 90% [0:00:07 of 0:01:11 remaining]
Chain 3: 100% [0:00:00 of 0:01:13 remaining]
Object of type "Mamba.ModelChains"
Iterations = 1001:10000
Thinning interval = 1
Chains = 1,2,3
Samples per chain = 9000
[24.034 116.329 0.850134; 22.1181 116.719 0.846494; … ; 24.2888 113.533 0.909573; 24.2213 114.353 0.89875]
[24.2868 114.921 0.867828; 27.31 112.089 0.930551; … ; 24.9188 112.322 0.940324; 24.947 112.322 0.940324]
[26.1285 113.361 0.918129; 25.6931 113.365 0.919105; … ; 25.5777 110.565 0.964541; 23.5937 110.56 0.984838]
Show draws summary
# Print summary statistics (means, quantiles, ESS) for the monitored nodes.
describe(sim)
Iterations = 1001:10000
Thinning interval = 1
Chains = 1,2,3
Samples per chain = 9000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
s2 26.1983342 1.98722345 0.01209385675 0.01875145626 9000.0000
beta[1] 113.9141981 1.90797888 0.01161158970 0.03650408841 2731.8957
beta[2] 0.9042726 0.04197044 0.00025542396 0.00079745956 2769.9358
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
s2 22.5729600 24.8209965 26.11117040 27.48151249 30.354335
beta[1] 110.1752338 112.6446057 113.94099517 115.23261210 117.623426
beta[2] 0.8225753 0.8754846 0.90384201 0.93248828 0.986753
This page was generated using Literate.jl.