Load Julia packages (libraries) needed for the snippets in chapter 10
using DynamicHMCModels
snippet 10.4
# Read the chimpanzees data (semicolon-delimited CSV) and coerce to a DataFrame.
d = CSV.read(rel_path("..", "data", "chimpanzees.csv"), delim=';');
df = convert(DataFrame, d);

# Make sure the outcome and the predictor are plain Int64 vectors.
for col in (:pulled_left, :prosoc_left)
    df[col] = convert(Array{Int64}, df[col])
end

# Peek at the first few rows.
first(df, 5)
"""
Data container for the logistic-regression model `m10.2d`:
a binary outcome `y` regressed on the columns of `X`.
"""
struct m_10_02d_model{Ty <: AbstractVector, Tx <: AbstractMatrix}
    "Observed 0/1 outcomes."
    y::Ty
    "Covariate (design) matrix, one row per observation."
    X::Tx
    "Number of observations (rows of `X`)."
    N::Int
end
Make the type callable with the parameters as a single argument.
"""
Evaluate the log-density of model `m10.2d` at the parameters `θ`.

`θ` is a named tuple with field `β`, the coefficient vector (a & bp).
Returns the sum of the log-prior and the log-likelihood.
"""
function (problem::m_10_02d_model)(θ)
    @unpack y, X, N = problem # extract the data
    @unpack β = θ # works on the named tuple too
    ll = 0.0
    # Prior: independent Normal(0, 10) on each coefficient (a & bp).
    ll += sum(logpdf.(Normal(0, 10), β))
    # Likelihood: Bernoulli (Binomial with one trial) with a logit link.
    # Accumulate in a loop rather than summing a materialized temporary
    # array; this also behaves sensibly when N == 0.
    for i in 1:N
        ll += loglikelihood(Binomial(1, logistic(dot(X[i, :], β))), [y[i]])
    end
    return ll
end
Instantiate the model with data and inits.
# Number of observations.
N = size(df, 1)

# Design matrix: an intercept column of ones alongside the prosoc_left predictor.
X = hcat(fill(1, N), df[:prosoc_left]);
y = df[:pulled_left]

# Bundle the data into the model, then evaluate the log-density at a test point.
p = m_10_02d_model(y, X, N);
θ = (β = [1.0, 2.0],)
p(θ)
-487.6540051035728
Write a function to return properly dimensioned transformation.
"""
Return the transformation from an unconstrained real vector to the model's
parameter named tuple `(β = ...,)`. `β` is unbounded, so an identity array
transform of the right length suffices.
"""
function problem_transformation(p::m_10_02d_model)
    nβ = size(p.X, 2)
    return as((β = as(Array, nβ),))
end
Wrap the problem with a transformation, then use Flux for the gradient.
# Wrap the log-density in the parameter transformation, then attach a
# ForwardDiff-based gradient; invalid log-densities are rejected, not thrown.
t = problem_transformation(p)
P = TransformedLogDensity(t, p)
∇P = LogDensityRejectErrors(ADgradient(:ForwardDiff, P));
LogDensityRejectErrors{InvalidLogDensityException,LogDensityProblems.ForwardDiffLogDensity{TransformedLogDensity{TransformVariables.TransformNamedTuple{(:β,),Tuple{TransformVariables.ArrayTransform{TransformVariables.Identity,1}}},Main.ex-m10.02d.m_10_02d_model{Array{Int64,1},Array{Int64,2}}},ForwardDiff.GradientConfig{ForwardDiff.Tag{getfield(LogDensityProblems, Symbol("##1#2")){TransformedLogDensity{TransformVariables.TransformNamedTuple{(:β,),Tuple{TransformVariables.ArrayTransform{TransformVariables.Identity,1}}},Main.ex-m10.02d.m_10_02d_model{Array{Int64,1},Array{Int64,2}}}},Float64},Float64,2,Array{ForwardDiff.Dual{ForwardDiff.Tag{getfield(LogDensityProblems, Symbol("##1#2")){TransformedLogDensity{TransformVariables.TransformNamedTuple{(:β,),Tuple{TransformVariables.ArrayTransform{TransformVariables.Identity,1}}},Main.ex-m10.02d.m_10_02d_model{Array{Int64,1},Array{Int64,2}}}},Float64},Float64,2},1}}}}(ForwardDiff AD wrapper for TransformedLogDensity of dimension 2, w/ chunk size 2)
Tune and sample.
# Tune the sampler (step size ϵ and kinetic energy) and draw 1000 NUTS samples.
chain, NUTS_tuned = NUTS_init_tune_mcmc(∇P, 1000);
MCMC, adapting ϵ (75 steps)
0.0015 s/step ...done
MCMC, adapting ϵ (25 steps)
0.0013 s/step ...done
MCMC, adapting ϵ (50 steps)
0.0016 s/step ...done
MCMC, adapting ϵ (100 steps)
0.00093 s/step ...done
MCMC, adapting ϵ (200 steps)
0.00085 s/step ...done
MCMC, adapting ϵ (400 steps)
0.00076 s/step ...done
MCMC, adapting ϵ (50 steps)
0.0011 s/step ...done
MCMC (1000 steps)
0.00085 s/step ...done
(NUTS_Transition{Array{Float64,1},Float64}[NUTS_Transition{Array{Float64,1},Float64}([-0.025655584690469635, 0.8129873419264347], -346.9925625686527, 2, DynamicHMC.AdjacentTurn, 0.9409499620744306, 7), NUTS_Transition{Array{Float64,1},Float64}([-0.2301043196622613, 0.7801974735931929], -348.1842296363849, 2, DynamicHMC.DoubledTurn, 0.8305062556516631, 3), NUTS_Transition{Array{Float64,1},Float64}([-0.23896181542828565, 0.7788066279136481], -347.8543501513971, 1, DynamicHMC.DoubledTurn, 0.9587123930592252, 1), NUTS_Transition{Array{Float64,1},Float64}([0.2864454503797942, 0.35922814071399195], -348.6262788046825, 2, DynamicHMC.DoubledTurn, 0.9233960215309498, 3), NUTS_Transition{Array{Float64,1},Float64}([-0.03622757968304968, 0.7657744644741831], -346.95621558557923, 2, DynamicHMC.DoubledTurn, 1.0, 3), NUTS_Transition{Array{Float64,1},Float64}([-0.042727117955691264, 0.7281428613734457], -345.3626095172827, 2, DynamicHMC.DoubledTurn, 1.0, 3), NUTS_Transition{Array{Float64,1},Float64}([0.14798555014084908, 0.5418428627329517], -345.6177021705968, 2, DynamicHMC.AdjacentTurn, 0.9683795432725447, 7), NUTS_Transition{Array{Float64,1},Float64}([0.018203519996538076, 0.525937921873747], -346.94592226007666, 2, DynamicHMC.DoubledTurn, 0.8163617219313871, 3), NUTS_Transition{Array{Float64,1},Float64}([0.020422428042493473, 0.4546789692894079], -345.233445973547, 2, DynamicHMC.DoubledTurn, 0.9684369094837716, 3), NUTS_Transition{Array{Float64,1},Float64}([0.1663675813618751, 0.5408023805244975], -346.9026183671446, 1, DynamicHMC.AdjacentTurn, 0.8112232596536012, 3) … NUTS_Transition{Array{Float64,1},Float64}([0.11481275713114296, 0.6681211881488176], -345.96751317071124, 2, DynamicHMC.DoubledTurn, 0.9886572587462267, 3), NUTS_Transition{Array{Float64,1},Float64}([-0.01774475069224174, 0.453169677610952], -345.72653061937206, 2, DynamicHMC.DoubledTurn, 1.0, 3), NUTS_Transition{Array{Float64,1},Float64}([0.0514816604529117, 0.6797468709477579], -345.6961351573631, 2, 
DynamicHMC.DoubledTurn, 1.0, 3), NUTS_Transition{Array{Float64,1},Float64}([0.13048436655446624, 0.29000621957076594], -348.2681714088514, 1, DynamicHMC.AdjacentTurn, 0.7380858678837003, 3), NUTS_Transition{Array{Float64,1},Float64}([-0.09232635825480652, 0.9273177802263682], -346.90888639976424, 2, DynamicHMC.DoubledTurn, 0.9527692051902648, 3), NUTS_Transition{Array{Float64,1},Float64}([-0.10580380791958818, 0.7295379893699157], -347.24748647160925, 2, DynamicHMC.DoubledTurn, 0.9849341381361075, 3), NUTS_Transition{Array{Float64,1},Float64}([0.3704702635720225, 0.44559847418261], -349.57640180746176, 2, DynamicHMC.AdjacentTurn, 0.6090853361141778, 7), NUTS_Transition{Array{Float64,1},Float64}([-0.015427229333586534, 0.6256775567051476], -348.5248646574353, 2, DynamicHMC.DoubledTurn, 1.0, 3), NUTS_Transition{Array{Float64,1},Float64}([0.011639690109713441, 0.6450476044775378], -346.0093370735131, 2, DynamicHMC.DoubledTurn, 0.8724557323621039, 3), NUTS_Transition{Array{Float64,1},Float64}([0.053755884113811755, 0.622364036828155], -344.9794898036665, 2, DynamicHMC.DoubledTurn, 0.9855900456387642, 3)], NUTS sampler in 2 dimensions
stepsize (ϵ) ≈ 0.833
maximum depth = 10
Gaussian kinetic energy, √diag(M⁻¹): [0.13264850930470776, 0.18129462866806742]
)
We use the transformation to obtain the posterior from the chain.
# Map every chain position back through the transformation, yielding one
# named tuple (β = ...,) per draw; show the first five.
trans = problem_transformation(p)
posterior = TransformVariables.transform.(Ref(trans), get_position.(chain));
posterior[1:5]
5-element Array{NamedTuple{(:β,),Tuple{Array{Float64,1}}},1}:
(β = [-0.025655584690469635, 0.8129873419264347],)
(β = [-0.2301043196622613, 0.7801974735931929],)
(β = [-0.23896181542828565, 0.7788066279136481],)
(β = [0.2864454503797942, 0.35922814071399195],)
(β = [-0.03622757968304968, 0.7657744644741831],)
Extract the parameter posterior means: β.
# Posterior means of β (intercept a and slope bp), averaged over all draws.
posterior_β = mean(first, posterior)
2-element Array{Float64,1}:
0.04839294948762045
0.5630063531066594
Effective sample sizes (of untransformed draws)
# Effective sample size of each parameter, computed column-wise on the raw
# (untransformed) draw matrix.
ess = mapslices(effective_sample_size, get_position_matrix(chain); dims = 1)
ess
1×2 Array{Float64,2}:
977.092 907.012
NUTS-specific statistics
# Summarize NUTS diagnostics: acceptance rates, termination reasons, tree depths.
NUTS_statistics(chain)
Hamiltonian Monte Carlo sample of length 1000
acceptance rate mean: 0.91, min/25%/median/75%/max: 0.28 0.87 0.95 0.99 1.0
termination: AdjacentTurn => 33% DoubledTurn => 67%
depth: 1 => 19% 2 => 73% 3 => 7% 4 => 1%
CmdStan result
# Reference CmdStan estimates for the same model, kept verbatim for comparison
# with the DynamicHMC results above (a ↦ β[1], bp ↦ β[2]).
m_10_2s_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
a 0.05103234 0.12579086 0.0019889282 0.0035186307 1000
bp 0.55711212 0.18074275 0.0028577937 0.0040160451 1000
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
a -0.19755400 -0.029431425 0.05024655 0.12978825 0.30087758
bp 0.20803447 0.433720250 0.55340400 0.67960975 0.91466915
";
"\nIterations = 1:1000\nThinning interval = 1\nChains = 1,2,3,4\nSamples per chain = 1000\n\nEmpirical Posterior Estimates:\n Mean SD Naive SE MCSE ESS\n a 0.05103234 0.12579086 0.0019889282 0.0035186307 1000\nbp 0.55711212 0.18074275 0.0028577937 0.0040160451 1000\n\nQuantiles:\n 2.5% 25.0% 50.0% 75.0% 97.5%\n a -0.19755400 -0.029431425 0.05024655 0.12978825 0.30087758\nbp 0.20803447 0.433720250 0.55340400 0.67960975 0.91466915\n"
Extract the parameter posterior means: β.
# Recompute the posterior means of β for side-by-side comparison with the
# CmdStan estimates quoted above.
posterior_β = mean(first, posterior)
2-element Array{Float64,1}:
0.04839294948762045
0.5630063531066594
End of 10/m10.02d.jl
This page was generated using Literate.jl.