4 changes: 3 additions & 1 deletion src/mcmc/gibbs.jl
@@ -438,7 +438,9 @@ function setparams_varinfo!!(
state::TuringState,
params::AbstractVarInfo,
)
-logdensity = DynamicPPL.setmodel(state.ldf, model, sampler.alg.adtype)
+logdensity = DynamicPPL.LogDensityFunction(
+    model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
+)
Comment on lines -441 to +443 (Member, Author):

setmodel technically still exists, but it no longer takes the adtype argument, and I think it's clearer to reconstruct the LDF (which is what setmodel does anyway).

new_inner_state = setparams_varinfo!!(
AbstractMCMC.LogDensityModel(logdensity), sampler, state.state, params
)
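A minimal sketch of what the reconstruction amounts to, using only the signature shown in the diff above; `model`, `state`, and `sampler` refer to the arguments of the surrounding `setparams_varinfo!!` method:

# Sketch: rebuild the LogDensityFunction instead of calling setmodel,
# since setmodel no longer accepts an adtype argument.
logdensity = DynamicPPL.LogDensityFunction(
    model,                      # the new model to evaluate against
    state.ldf.varinfo,          # reuse the VarInfo from the previous LDF
    state.ldf.context;          # reuse the evaluation context as well
    adtype=sampler.alg.adtype,  # AD backend is now fixed at construction time
)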
6 changes: 3 additions & 3 deletions src/mcmc/sghmc.jl
@@ -72,7 +72,7 @@ function DynamicPPL.initialstep(
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
-state = SGHMCState(ℓ, vi, zero(vi[spl]))
+state = SGHMCState(ℓ, vi, zero(vi[:]))

return sample, state
end
@@ -87,7 +87,7 @@ function AbstractMCMC.step(
# Compute gradient of log density.
ℓ = state.logdensity
vi = state.vi
-θ = vi[spl]
+θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))

# Update latent variables and velocity according to
@@ -246,7 +246,7 @@ function AbstractMCMC.step(
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
-θ = vi[spl]
+θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
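For readers unfamiliar with the indexing change: `vi[:]` returns the flattened vector of all parameter values in a VarInfo, replacing the old sampler-keyed lookup `vi[spl]`. A hedged sketch, with an illustrative `demo` model that is not part of this PR:

using Turing, DynamicPPL

@model function demo()
    m ~ Normal(0, 1)
    s ~ InverseGamma(2, 3)
end

vi = DynamicPPL.VarInfo(demo())
θ = vi[:]        # flattened vector of all parameter values
v = zero(vi[:])  # zero vector of the same length, as in SGHMCState above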
4 changes: 2 additions & 2 deletions test/mcmc/Inference.jl
@@ -512,7 +512,7 @@ using Turing

@model function vdemo2(x)
μ ~ MvNormal(zeros(size(x, 1)), I)
-return x .~ MvNormal(μ, I)
+return x ~ filldist(MvNormal(μ, I), size(x, 2))
end

D = 2
@@ -560,7 +560,7 @@ using Turing

@model function vdemo7()
x = Array{Real}(undef, N, N)
-return x .~ [InverseGamma(2, 3) for i in 1:N]
+return x ~ filldist(InverseGamma(2, 3), N, N)
Comment on lines 561 to +563 (@penelopeysm, Member, Author, Feb 20, 2025):

I'm unsure whether we really need to keep the vdemoN models around. As far as I can tell, they're just legacy tests that exercise different 'forms' of vectorisation, and presumably the printed timings are meant to be some marker of their performance. Surely this is already covered by the DynamicPPL demo models? I personally wouldn't mind deleting the entire block.

end

sample(StableRNG(seed), vdemo7(), alg, 10)
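The test changes above replace broadcasted tilde statements (`x .~ dist`) with explicit product distributions built via `filldist`. A hedged sketch of the pattern, assuming `filldist` is re-exported by Turing; the model, data, and sampler settings here are illustrative only:

using Turing
using LinearAlgebra: I

@model function vdemo_sketch(x)
    μ ~ MvNormal(zeros(size(x, 1)), I)
    # filldist builds a single product distribution whose draws are d × n
    # matrices with i.i.d. MvNormal(μ, I) columns, matching the joint that
    # the old broadcast `x .~ MvNormal(μ, I)` expressed.
    return x ~ filldist(MvNormal(μ, I), size(x, 2))
end

x = rand(MvNormal(zeros(2), I), 10)  # 2 × 10 data matrix
chn = sample(vdemo_sketch(x), NUTS(), 100)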
2 changes: 1 addition & 1 deletion test/mcmc/hmc.jl
@@ -218,7 +218,7 @@ using Turing
# https://github.com/TuringLang/Turing.jl/issues/1308
@model function mwe3(::Type{T}=Array{Float64}) where {T}
m = T(undef, 2, 3)
-return m .~ MvNormal(zeros(2), I)
+return m ~ filldist(MvNormal(zeros(2), I), 3)
end
@test sample(StableRNG(seed), mwe3(), HMC(0.2, 4; adtype=adbackend), 100) isa Chains
end
2 changes: 1 addition & 1 deletion test/mcmc/mh.jl
@@ -238,7 +238,7 @@ GKernel(var) = (x) -> Normal(x, sqrt.(var))

# Link if proposal is `AdvancedHM.RandomWalkProposal`
vi = deepcopy(vi_base)
-d = length(vi_base[DynamicPPL.SampleFromPrior()])
+d = length(vi_base[:])
alg = MH(AdvancedMH.RandomWalkProposal(MvNormal(zeros(d), I)))
spl = DynamicPPL.Sampler(alg)
vi = Turing.Inference.maybe_link!!(vi, spl, alg.proposals, gdemo_default)