Linear regression
using DynamicHMCModels
# Chapter-05 scripts directory (helper from DynamicHMCModels); cd there so
# the relative data paths below resolve.
ProjDir = rel_path_d("..", "scripts", "05")
cd(ProjDir)
Import the dataset.
snippet 5.1
# Read the semicolon-delimited WaffleDivorce data set and convert it to a
# DataFrame.
wd = CSV.read(rel_path("..", "data", "WaffleDivorce.csv"), delim=';')
df = convert(DataFrame, wd);
# Standardize MedianAgeMarriage: center on the mean, scale by the std.
mean_ma = mean(df[!, :MedianAgeMarriage])
centered_ma = convert(Vector{Float64}, df[!, :MedianAgeMarriage] .- mean_ma)
df[!, :MedianAgeMarriage_s] = centered_ma / std(df[!, :MedianAgeMarriage]);
50-element Array{Float64,1}:
-0.6062895051354262
-0.6866992538271283
-0.20424076167692148
-1.4103869920524357
0.5998567252400879
-0.28465051036862354
1.2431347147736962
0.43903722785668664
2.9317394372994143
0.27821773047328247
⋮
-0.6866992538271283
-0.6866992538271283
-2.214484478969445
0.6802664739317872
0.27821773047328247
-0.12383101298522224
-0.8475187512105296
0.19780798178158324
-1.4907967407441378
Show the first six rows of the dataset.
first(df, 6)
Location | Loc | Population | MedianAgeMarriage | Marriage | Marriage SE | Divorce | Divorce SE | WaffleHouses | South | Slaves1860 | Population1860 | PropSlaves1860 | MedianAgeMarriage_s | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
String | String | Float64 | Float64 | Float64 | Float64 | Float64 | Float64 | Int64 | Int64 | Int64 | Int64 | Float64 | Float64 | |
1 | Alabama | AL | 4.78 | 25.3 | 20.2 | 1.27 | 12.7 | 0.79 | 128 | 1 | 435080 | 964201 | 0.45 | -0.60629 |
2 | Alaska | AK | 0.71 | 25.2 | 26.0 | 2.93 | 12.5 | 2.05 | 0 | 0 | 0 | 0 | 0.0 | -0.686699 |
3 | Arizona | AZ | 6.33 | 25.8 | 20.3 | 0.98 | 10.8 | 0.74 | 18 | 0 | 0 | 0 | 0.0 | -0.204241 |
4 | Arkansas | AR | 2.92 | 24.3 | 26.4 | 1.7 | 13.5 | 1.22 | 41 | 1 | 111115 | 435450 | 0.26 | -1.41039 |
5 | California | CA | 37.25 | 26.8 | 19.1 | 0.39 | 8.0 | 0.24 | 0 | 0 | 0 | 379994 | 0.0 | 0.599857 |
6 | Colorado | CO | 5.03 | 25.7 | 23.5 | 1.24 | 11.6 | 0.94 | 11 | 0 | 0 | 34277 | 0.0 | -0.284651 |
Model: $y - X\beta \sim \mathrm{Normal}(0, \sigma)$, i.e. $y \sim \mathrm{Normal}(X\beta, \sigma)$, with Normal priors for the coefficients $\beta$ and a half-T prior for $\sigma$.
"""
Bayesian linear-regression problem for the WaffleDivorce data.

Bundles the observed responses `y` with the design matrix `X`; an
instance is callable with a parameter NamedTuple `(β = ..., σ = ...)`
and returns the log posterior density.
"""
struct WaffleDivorceProblem{TObs <: AbstractVector, TMat <: AbstractMatrix}
    "Observations."
    y::TObs
    "Covariates"
    X::TMat
end
Make the type callable with the parameters as a single argument.
"""
Evaluate the log posterior density of `problem` at the parameter
NamedTuple `θ = (β = ..., σ = ...)`.

Priors: `β[1]` (intercept `a`) ~ Normal(10, 10), `β[2]` (slope `bA`)
~ Normal(0, 1), and σ ~ T(1) restricted to σ > 0 by the transformation
defined below (a half-T prior).  Likelihood: the residuals `y - Xβ` are
iid Normal(0, σ).
"""
function (problem::WaffleDivorceProblem)(θ)
    @unpack y, X = problem   # extract the data
    @unpack β, σ = θ         # works on the named tuple too
    ll = 0.0
    # BUG FIX: the priors must be scored at the coefficients β, not at
    # X[1]/X[2], which are constant entries of the design matrix and only
    # shifted the log density by a constant.
    ll += logpdf(Normal(10, 10), β[1])  # intercept a
    ll += logpdf(Normal(0, 1), β[2])    # slope bA
    ll += logpdf(TDist(1.0), σ)
    ll += loglikelihood(Normal(0, σ), y .- X * β)
    return ll
end
Instantiate the model with data and inits.
# Design matrix: an intercept column next to the standardized predictor.
N = size(df, 1)
X = [ones(N) df[!, :MedianAgeMarriage_s]];
y = convert(Vector{Float64}, df[!, :Divorce])
p = WaffleDivorceProblem(y, X);
# Sanity check: evaluate the log density at an arbitrary parameter point.
p((β = [1.0, 2.0], σ = 1.0))
-2225.6614871340917
Write a function to return properly dimensioned transformation.
"""
Return the transformation from the unconstrained sampler space to the
model parameters: one unconstrained β component per column of `p.X`,
and σ constrained to be positive.
"""
function problem_transformation(p::WaffleDivorceProblem)
    return as((β = as(Array, size(p.X, 2)), σ = asℝ₊))
end
problem_transformation (generic function with 1 method)
Wrap the problem with a transformation, then use ForwardDiff for the gradient.
# Wrap the problem and its transformation into a log density on ℝ³, then
# take gradients with ForwardDiff automatic differentiation.
P = TransformedLogDensity(problem_transformation(p), p)
#∇P = LogDensityRejectErrors(ADgradient(:ForwardDiff, P));
∇P = ADgradient(:ForwardDiff, P);
ForwardDiff AD wrapper for TransformedLogDensity of dimension 3, w/ chunk size 3
Tune and sample.
chain, NUTS_tuned = NUTS_init_tune_mcmc(∇P, 1000);
(NUTS_Transition{Array{Float64,1},Float64}[NUTS_Transition{Array{Float64,1},Float64}([10.004305697449984, -1.3714612149930516, 0.36929439247582024], -100.7437394671419, 2, DynamicHMC.DoubledTurn, 0.7344086984176769, 3), NUTS_Transition{Array{Float64,1},Float64}([9.9179700986045, -1.2814239983360167, 0.477268112404491], -99.1714291646178, 2, DynamicHMC.DoubledTurn, 0.9987070030512045, 3), NUTS_Transition{Array{Float64,1},Float64}([9.690326929178196, -1.5429847107920485, 0.587121829651363], -100.70187564561982, 2, DynamicHMC.DoubledTurn, 0.8525199570358085, 3), NUTS_Transition{Array{Float64,1},Float64}([9.330470619442291, -0.6995720011607425, 0.24824246759943344], -104.65761939230882, 2, DynamicHMC.DoubledTurn, 0.9231150877034162, 3), NUTS_Transition{Array{Float64,1},Float64}([9.780376191424626, -1.6818960900175397, 0.5563757784023828], -104.13660693103772, 3, DynamicHMC.DoubledTurn, 0.995320151277619, 7), NUTS_Transition{Array{Float64,1},Float64}([9.820499754875058, -0.9548144726696542, 0.3441498566606775], -100.69395597455839, 2, DynamicHMC.DoubledTurn, 1.0, 3), NUTS_Transition{Array{Float64,1},Float64}([9.487900795784073, -1.2053036223812124, 0.5312085347898865], -100.56167337707467, 3, DynamicHMC.AdjacentTurn, 0.7753044880036768, 15), NUTS_Transition{Array{Float64,1},Float64}([9.590676905377945, -0.9574525275085934, 0.3667777487067708], -97.99441579337656, 2, DynamicHMC.DoubledTurn, 1.0, 3), NUTS_Transition{Array{Float64,1},Float64}([9.54582231047986, -1.1971687727349622, 0.34696113557513314], -97.59186961420265, 2, DynamicHMC.DoubledTurn, 0.9298702893735925, 3), NUTS_Transition{Array{Float64,1},Float64}([9.903657385520063, -1.123460660136666, 0.45654780492503466], -97.63059369271961, 2, DynamicHMC.DoubledTurn, 0.92942538664366, 3) … NUTS_Transition{Array{Float64,1},Float64}([9.756338242466777, -1.1974079956120525, 0.2694191516587843], -97.31432735320291, 1, DynamicHMC.DoubledTurn, 1.0, 1), NUTS_Transition{Array{Float64,1},Float64}([9.875304096114887, 
-1.243153473458856, 0.39337134054907535], -97.71863354864169, 2, DynamicHMC.DoubledTurn, 0.9789819791853752, 3), NUTS_Transition{Array{Float64,1},Float64}([9.535165237811736, -1.0907306368882237, 0.2164196244737221], -98.48627760837952, 2, DynamicHMC.DoubledTurn, 0.9417872191893899, 3), NUTS_Transition{Array{Float64,1},Float64}([9.501347964271844, -1.207171918627993, 0.2516895406443095], -102.03585780816867, 2, DynamicHMC.DoubledTurn, 0.6338456641658248, 3), NUTS_Transition{Array{Float64,1},Float64}([10.014279277072257, -1.1548962727618939, 0.32545447023249385], -99.48504544756051, 3, DynamicHMC.AdjacentTurn, 0.9560003335568947, 15), NUTS_Transition{Array{Float64,1},Float64}([9.418205784393528, -1.1174642525897105, 0.3549965410113776], -99.18365437156186, 2, DynamicHMC.DoubledTurn, 0.950652513854969, 3), NUTS_Transition{Array{Float64,1},Float64}([9.893540367404471, -0.894881612747017, 0.4263659109620031], -98.91728941030672, 3, DynamicHMC.DoubledTurn, 0.9138349617138629, 7), NUTS_Transition{Array{Float64,1},Float64}([9.671828879892393, -1.4557343660280537, 0.46441754515542577], -99.05114392216396, 2, DynamicHMC.DoubledTurn, 0.8767058792995378, 3), NUTS_Transition{Array{Float64,1},Float64}([9.711858893368689, -1.1820913977190248, 0.42750338498571916], -98.04630224118706, 2, DynamicHMC.DoubledTurn, 1.0, 3), NUTS_Transition{Array{Float64,1},Float64}([9.590087948249272, -0.8124076613435623, 0.3722100562890867], -97.53603877323344, 2, DynamicHMC.DoubledTurn, 0.9394168234440272, 3)], NUTS sampler in 3 dimensions
stepsize (ϵ) ≈ 0.763
maximum depth = 10
Gaussian kinetic energy, √diag(M⁻¹): [0.20272491354772887, 0.20942274106747671, 0.10691672622531817]
)
We use the transformation to obtain the posterior from the chain.
# Map every untransformed sampler position back to a (β, σ) NamedTuple.
t = problem_transformation(p)
posterior = [TransformVariables.transform(t, get_position(s)) for s in chain];
posterior[1:5]
5-element Array{NamedTuple{(:β, :σ),Tuple{Array{Float64,1},Float64}},1}:
(β = [10.004305697449984, -1.3714612149930516], σ = 1.4467134425419372)
(β = [9.9179700986045, -1.2814239983360167], σ = 1.6116654936434616)
(β = [9.690326929178196, -1.5429847107920485], σ = 1.798803694265815)
(β = [9.330470619442291, -0.6995720011607425], σ = 1.2817706823794537)
(β = [9.780376191424626, -1.6818960900175397], σ = 1.7443391588589434)
Extract the parameter posterior means: first β,
posterior_β = mean(first, posterior)
2-element Array{Float64,1}:
9.688439457067162
-1.0786929944247698
then σ:
posterior_σ = mean(last, posterior)
1.4893838519719738
Effective sample sizes (of untransformed draws)
# Effective sample size for each coordinate of the untransformed chain.
ess = mapslices(effective_sample_size,
get_position_matrix(chain); dims = 1)
1×3 Array{Float64,2}:
813.094 842.733 707.622
NUTS-specific statistics
NUTS_statistics(chain)
Hamiltonian Monte Carlo sample of length 1000
acceptance rate mean: 0.91, min/25%/median/75%/max: 0.37 0.87 0.95 0.99 1.0
termination: AdjacentTurn => 28% DoubledTurn => 72%
depth: 1 => 6% 2 => 56% 3 => 30% 4 => 7% 5 => 1%
cmdstan result
# Reference posterior summary produced by cmdstan (4 chains × 1000 draws)
# for the same model, kept for comparison with the DynamicHMC estimates.
cmdstan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
a 9.6882466 0.22179190 0.0035068378 0.0031243061 1000
bA -1.0361742 0.21650514 0.0034232469 0.0034433245 1000
sigma 1.5180337 0.15992781 0.0025286807 0.0026279593 1000
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
a 9.253141 9.5393175 9.689585 9.84221500 10.11121000
bA -1.454571 -1.1821025 -1.033065 -0.89366925 -0.61711705
sigma 1.241496 1.4079225 1.504790 1.61630750 1.86642750
";
"\nIterations = 1:1000\nThinning interval = 1\nChains = 1,2,3,4\nSamples per chain = 1000\n\nEmpirical Posterior Estimates:\n Mean SD Naive SE MCSE ESS\n a 9.6882466 0.22179190 0.0035068378 0.0031243061 1000\n bA -1.0361742 0.21650514 0.0034232469 0.0034433245 1000\nsigma 1.5180337 0.15992781 0.0025286807 0.0026279593 1000\n\nQuantiles:\n 2.5% 25.0% 50.0% 75.0% 97.5%\n a 9.253141 9.5393175 9.689585 9.84221500 10.11121000\n bA -1.454571 -1.1821025 -1.033065 -0.89366925 -0.61711705\nsigma 1.241496 1.4079225 1.504790 1.61630750 1.86642750\n"
Collect the DynamicHMC posterior means (a, bA, sigma) for comparison with the cmdstan table:
[posterior_β[1], posterior_β[2], posterior_σ]
3-element Array{Float64,1}:
9.688439457067162
-1.0786929944247698
1.4893838519719738
End of m4.5d.jl. This page was generated using Literate.jl.