Merged
38 commits
10f960e
Import `varname_leaves` etc from AbstractPPL instead
penelopeysm Sep 24, 2025
3a04643
[no ci] initial updates for InitContext
penelopeysm Sep 24, 2025
7e522a6
[no ci] More fixes
penelopeysm Sep 24, 2025
9bc58c8
[no ci] Fix pMCMC
penelopeysm Sep 24, 2025
02d1d0e
[no ci] Fix Gibbs
penelopeysm Sep 24, 2025
27b0096
[no ci] More fixes, reexport InitFrom
penelopeysm Sep 24, 2025
7f12c3e
Fix a bunch of tests; I'll let CI tell me what's still broken...
penelopeysm Sep 24, 2025
ed197f9
Remove comment
penelopeysm Sep 24, 2025
c09c2a5
Fix more tests
penelopeysm Sep 24, 2025
20f9e97
More test fixes
penelopeysm Sep 24, 2025
ba4da83
Fix more tests
penelopeysm Sep 25, 2025
4b143ad
fix GeneralizedExtremeValue numerical test
penelopeysm Sep 25, 2025
b5d82c9
fix sample method
penelopeysm Sep 25, 2025
c315993
fix ESS reproducibility
penelopeysm Sep 25, 2025
3afd807
Fix externalsampler test correctly
penelopeysm Sep 25, 2025
25c6513
Fix everything (I _think_)
penelopeysm Sep 25, 2025
d4aaa18
Add changelog
penelopeysm Sep 25, 2025
aa3cfcf
Fix remaining tests (for real this time)
penelopeysm Sep 25, 2025
c0ea6e0
Specify default chain type in Turing
penelopeysm Oct 2, 2025
b0badc2
fix DPPL revision
penelopeysm Oct 3, 2025
049e950
Fix changelog to mention unwrapped NT / Dict for initial_params
penelopeysm Oct 16, 2025
14d3c14
Remove references to islinked, set_flag, unset_flag
penelopeysm Oct 16, 2025
ae7e1e2
Merge branch 'breaking' into py/dppl-0.38
penelopeysm Oct 16, 2025
3a13c63
use `setleafcontext(::Model, ::AbstractContext)`
penelopeysm Oct 16, 2025
5ed1230
Fix for upstream removal of default_chain_type
penelopeysm Oct 16, 2025
2a585fc
Add clarifying comment for IS test
penelopeysm Oct 16, 2025
16198fa
Revert ESS test (and add some numerical accuracy checks)
penelopeysm Oct 16, 2025
89a61af
istrans -> is_transformed
penelopeysm Oct 16, 2025
6af6330
Remove `loadstate` and `resume_from`
penelopeysm Oct 16, 2025
85a25b4
Remove a Sampler test
penelopeysm Oct 16, 2025
55e465b
Paper over one crack
penelopeysm Oct 16, 2025
9c34014
fix `resume_from`
penelopeysm Oct 16, 2025
deff3fd
remove a `Sampler` test
penelopeysm Oct 16, 2025
bbbde35
Update HISTORY.md
penelopeysm Oct 18, 2025
f927308
Remove `Sampler`, remove `InferenceAlgorithm`, transfer `initialstep`…
penelopeysm Oct 21, 2025
0566edb
Fix a word in changelog
penelopeysm Oct 21, 2025
43a30a2
Improve changelog
penelopeysm Oct 21, 2025
750418a
Add PNTDist to changelog
penelopeysm Oct 22, 2025
6 changes: 6 additions & 0 deletions HISTORY.md
@@ -1,5 +1,11 @@
# 0.41.0

## DynamicPPL 0.38

Turing.jl now uses DynamicPPL 0.38. Among other changes, parameter initialisation is handled through the strategies `InitFromPrior`, `InitFromUniform`, and `InitFromParams`, which replace `SampleFromPrior` and `SampleFromUniform`.

## Initial step in MCMC sampling

HMC and NUTS samplers no longer take an extra single step before starting the chain.
This means that if you do not discard any samples at the start, the first sample will be the initial parameters (which may be user-provided).
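
A rough sketch of the new behaviour (the model, the `discard_initial=0` keyword, and the `InitFromParams` value below are illustrative assumptions, not taken from this PR):

```julia
using Turing

@model function demo()
    x ~ Normal(0, 1)
end

# Keep every sample, including the very first one. Under the new behaviour the
# first draw in the chain is the user-provided initial value, with no extra
# leading step taken by NUTS.
chn = sample(
    demo(), NUTS(), 10;
    discard_initial=0, initial_params=InitFromParams((; x=1.0)),
)
first(chn[:x])  # expected to be 1.0
```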

7 changes: 5 additions & 2 deletions Project.toml
@@ -45,7 +45,7 @@ Optim = "429524aa-4258-5aef-a3af-852621145aeb"

[extensions]
TuringDynamicHMCExt = "DynamicHMC"
TuringOptimExt = "Optim"
TuringOptimExt = ["Optim", "AbstractPPL"]

[compat]
ADTypes = "1.9"
@@ -64,7 +64,7 @@ Distributions = "0.25.77"
DistributionsAD = "0.6"
DocStringExtensions = "0.8, 0.9"
DynamicHMC = "3.4"
DynamicPPL = "0.37.2"
DynamicPPL = "0.38"
EllipticalSliceSampling = "0.5, 1, 2"
ForwardDiff = "0.10.3, 1"
Libtask = "0.9.3"
@@ -90,3 +90,6 @@ julia = "1.10.8"
[extras]
DynamicHMC = "bbc10e6e-7c05-544b-b16e-64fede858acb"
Optim = "429524aa-4258-5aef-a3af-852621145aeb"

[sources]
DynamicPPL = {url = "https://github.com/TuringLang/DynamicPPL.jl", rev = "py/setleafcontext-model"}
10 changes: 10 additions & 0 deletions docs/src/api.md
@@ -75,6 +75,16 @@ even though [`Prior()`](@ref) is actually defined in the `Turing.Inference` module
| `RepeatSampler` | [`Turing.Inference.RepeatSampler`](@ref) | A sampler that runs multiple times on the same variable |
| `externalsampler` | [`Turing.Inference.externalsampler`](@ref) | Wrap an external sampler for use in Turing |

### Initialisation strategies

Turing.jl provides several strategies for initialising model parameters; a usage sketch follows the table below.

| Exported symbol | Documentation | Description |
|:----------------- |:--------------------------------------- |:--------------------------------------------------------------- |
| `InitFromPrior` | [`DynamicPPL.InitFromPrior`](@extref) | Obtain initial parameters from the prior distribution |
| `InitFromUniform` | [`DynamicPPL.InitFromUniform`](@extref) | Obtain initial parameters by sampling uniformly in linked space |
| `InitFromParams` | [`DynamicPPL.InitFromParams`](@extref) | Manually specify (possibly a subset of) initial parameters |
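
A minimal usage sketch for these strategies, assuming they are passed via the `initial_params` keyword of `sample` (the model and values below are illustrative):

```julia
using Turing

@model function gdemo(y)
    m ~ Normal(0, 1)
    s ~ truncated(Normal(0, 1); lower=0)
    y ~ Normal(m, s)
end

model = gdemo(1.5)

# Draw initial parameters from the prior.
sample(model, NUTS(), 100; initial_params=InitFromPrior())

# Draw initial parameters uniformly in linked (unconstrained) space.
sample(model, NUTS(), 100; initial_params=InitFromUniform())

# Manually specify initial values for (a subset of) the parameters.
sample(model, NUTS(), 100; initial_params=InitFromParams((; m=0.0)))
```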

### Variational inference

See the [docs of AdvancedVI.jl](https://turinglang.org/AdvancedVI.jl/stable/) for detailed usage and the [variational inference tutorial](https://turinglang.org/docs/tutorials/09-variational-inference/) for a basic walkthrough.
4 changes: 0 additions & 4 deletions ext/TuringDynamicHMCExt.jl
@@ -44,10 +44,6 @@ struct DynamicNUTSState{L,V<:DynamicPPL.AbstractVarInfo,C,M,S}
stepsize::S
end

function DynamicPPL.initialsampler(::DynamicPPL.Sampler{<:DynamicNUTS})
return DynamicPPL.SampleFromUniform()
end

function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
3 changes: 2 additions & 1 deletion ext/TuringOptimExt.jl
@@ -1,6 +1,7 @@
module TuringOptimExt

using Turing: Turing
using AbstractPPL: AbstractPPL
import Turing: DynamicPPL, NamedArrays, Accessors, Optimisation
using Optim: Optim

@@ -186,7 +187,7 @@ function _optimize(
f.ldf.model, f.ldf.getlogdensity, vi_optimum; adtype=f.ldf.adtype
)
vals_dict = Turing.Inference.getparams(f.ldf.model, vi_optimum)
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals_dict), values(vals_dict))
iters = map(AbstractPPL.varname_and_value_leaves, keys(vals_dict), values(vals_dict))
vns_vals_iter = mapreduce(collect, vcat, iters)
varnames = map(Symbol ∘ first, vns_vals_iter)
vals = map(last, vns_vals_iter)
9 changes: 8 additions & 1 deletion src/Turing.jl
@@ -73,7 +73,10 @@ using DynamicPPL:
conditioned,
to_submodel,
LogDensityFunction,
@addlogprob!
@addlogprob!,
InitFromPrior,
InitFromUniform,
InitFromParams
using StatsBase: predict
using OrderedCollections: OrderedDict

@@ -148,6 +151,10 @@ export
fix,
unfix,
OrderedDict, # OrderedCollections
# Initialisation strategies for models
InitFromPrior,
InitFromUniform,
InitFromParams,
# Point estimates - Turing.Optimisation
# The MAP and MLE exports are only needed for the Optim.jl interface.
maximum_a_posteriori,
18 changes: 6 additions & 12 deletions src/mcmc/Inference.jl
@@ -24,8 +24,6 @@ using DynamicPPL:
getdist,
Model,
Sampler,
SampleFromPrior,
SampleFromUniform,
DefaultContext,
set_flag!,
unset_flag!
@@ -59,8 +57,6 @@ export InferenceAlgorithm,
Hamiltonian,
StaticHamiltonian,
AdaptiveHamiltonian,
SampleFromUniform,
SampleFromPrior,
MH,
ESS,
Emcee,
@@ -262,13 +258,13 @@ function _params_to_array(model::DynamicPPL.Model, ts::Vector)
dicts = map(ts) do t
# In general getparams returns a dict of VarName => values. We need to also
# split it up into constituent elements using
# `DynamicPPL.varname_and_value_leaves` because otherwise MCMCChains.jl
# `AbstractPPL.varname_and_value_leaves` because otherwise MCMCChains.jl
# won't understand it.
vals = getparams(model, t)
nms_and_vs = if isempty(vals)
Tuple{VarName,Any}[]
else
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
iters = map(AbstractPPL.varname_and_value_leaves, keys(vals), values(vals))
mapreduce(collect, vcat, iters)
end
nms = map(first, nms_and_vs)
@@ -315,11 +311,10 @@ end
getlogevidence(transitions, sampler, state) = missing

# Default MCMCChains.Chains constructor.
# This is type piracy (at least for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{Transition,AbstractVarInfo}},
ts::Vector{<:Transition},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
spl::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
state,
chain_type::Type{MCMCChains.Chains};
save_state=false,
@@ -378,11 +373,10 @@ function AbstractMCMC.bundle_samples(
return sort_chain ? sort(chain) : chain
end

# This is type piracy (for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{Transition,AbstractVarInfo}},
ts::Vector{<:Transition},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
spl::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
state,
chain_type::Type{Vector{NamedTuple}};
kwargs...,
6 changes: 3 additions & 3 deletions src/mcmc/abstractmcmc.jl
@@ -1,9 +1,9 @@
# TODO: Implement additional checks for certain samplers, e.g.
# HMC not supporting discrete parameters.
function _check_model(model::DynamicPPL.Model)
# TODO(DPPL0.38/penelopeysm): use InitContext
spl_model = DynamicPPL.contextualize(model, DynamicPPL.SamplingContext(model.context))
return DynamicPPL.check_model(spl_model, VarInfo(); error_on_failure=true)
new_context = DynamicPPL.setleafcontext(model.context, DynamicPPL.InitContext())
new_model = DynamicPPL.contextualize(model, new_context)
return DynamicPPL.check_model(new_model, VarInfo(); error_on_failure=true)
end
function _check_model(model::DynamicPPL.Model, alg::InferenceAlgorithm)
return _check_model(model)
32 changes: 16 additions & 16 deletions src/mcmc/emcee.jl
@@ -31,12 +31,16 @@ struct EmceeState{V<:AbstractVarInfo,S}
states::S
end

# Utility function to retrieve the number of walkers
_get_n_walkers(e::Emcee) = e.ensemble.n_walkers
_get_n_walkers(spl::Sampler{<:Emcee}) = _get_n_walkers(spl.alg)

function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
initial_params=fill(DynamicPPL.init_strategy(spl), _get_n_walkers(spl)),
kwargs...,
)
if resume_from !== nothing
@@ -45,23 +49,19 @@ function AbstractMCMC.step(
end

# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
n = _get_n_walkers(spl)
vis = [VarInfo(rng, model) for _ in 1:n]

# Update the parameters if provided.
if initial_params !== nothing
length(initial_params) == n ||
throw(ArgumentError("initial parameters have to be specified for each walker"))
vis = map(vis, initial_params) do vi, init
# TODO(DPPL0.38/penelopeysm) This whole thing can be replaced with init!!
vi = DynamicPPL.initialize_parameters!!(vi, init, model)

# Update log joint probability.
spl_model = DynamicPPL.contextualize(
model, DynamicPPL.SamplingContext(rng, SampleFromPrior(), model.context)
)
last(DynamicPPL.evaluate!!(spl_model, vi))
end
if !(
initial_params isa AbstractVector{<:DynamicPPL.AbstractInitStrategy} &&
length(initial_params) == n
)
err_msg = "initial_params for `Emcee` must be a vector of `DynamicPPL.AbstractInitStrategy`, with length equal to the number of walkers ($n)"
throw(ArgumentError(err_msg))
end
vis = map(vis, initial_params) do vi, strategy
last(DynamicPPL.init!!(rng, model, vi, strategy))
end

# Compute initial transition and states.
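
With this change, `initial_params` for `Emcee` must be a vector of initialisation strategies, one per walker. A hedged sketch of the intended usage (the model, the walker count, and the `Emcee(n_walkers, stretch)` constructor arguments are assumptions for illustration):

```julia
using Turing

@model function demo()
    x ~ Normal(0, 1)
end

n_walkers = 10
spl = Emcee(n_walkers, 2.0)

# One initialisation strategy per walker; here every walker starts from a
# draw from the prior.
chn = sample(demo(), spl, 100; initial_params=fill(InitFromPrior(), n_walkers))
```
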
19 changes: 2 additions & 17 deletions src/mcmc/ess.jl
@@ -82,23 +82,8 @@ EllipticalSliceSampling.isgaussian(::Type{<:ESSPrior}) = true

# Only define out-of-place sampling
function Base.rand(rng::Random.AbstractRNG, p::ESSPrior)
varinfo = p.varinfo
# TODO: Surely there's a better way of doing this now that we have `SamplingContext`?
# TODO(DPPL0.38/penelopeysm): This can be replaced with `init!!(p.model,
# p.varinfo, PriorInit())` after TuringLang/DynamicPPL.jl#984. The reason
# why we had to use the 'del' flag before this was because
# SampleFromPrior() wouldn't overwrite existing variables.
# The main problem I'm rather unsure about is ESS-within-Gibbs. The
# current implementation I think makes sure to only resample the variables
# that 'belong' to the current ESS sampler. InitContext on the other hand
# would resample all variables in the model (??) Need to think about this
# carefully.
vns = keys(varinfo)
for vn in vns
set_flag!(varinfo, vn, "del")
end
p.model(rng, varinfo)
return varinfo[:]
_, vi = DynamicPPL.init!!(rng, p.model, p.varinfo, DynamicPPL.InitFromPrior())
return vi[:]
end

# Mean of prior distribution
26 changes: 15 additions & 11 deletions src/mcmc/external_sampler.jl
@@ -117,25 +117,25 @@ function AbstractMCMC.step(
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler};
initial_state=nothing,
initial_params=nothing,
initial_params=DynamicPPL.init_strategy(sampler_wrapper.alg.sampler),
kwargs...,
)
alg = sampler_wrapper.alg
sampler = alg.sampler

# Initialise varinfo with initial params and link the varinfo if needed.
varinfo = DynamicPPL.VarInfo(model)
_, varinfo = DynamicPPL.init!!(rng, model, varinfo, initial_params)

if requires_unconstrained_space(alg)
if initial_params !== nothing
# If we have initial parameters, we need to set the varinfo before linking.
varinfo = DynamicPPL.link(DynamicPPL.unflatten(varinfo, initial_params), model)
# Extract initial parameters in unconstrained space.
initial_params = varinfo[:]
else
varinfo = DynamicPPL.link(varinfo, model)
end
varinfo = DynamicPPL.link(varinfo, model)
end

# We need to extract the vectorised initial_params, because the later call to
# AbstractMCMC.step only sees a `LogDensityModel` which expects `initial_params`
# to be a vector.
initial_params_vector = varinfo[:]

# Construct LogDensityFunction
f = DynamicPPL.LogDensityFunction(
model, DynamicPPL.getlogjoint_internal, varinfo; adtype=alg.adtype
@@ -144,15 +144,19 @@
# Then just call `AbstractMCMC.step` with the right arguments.
if initial_state === nothing
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler; initial_params, kwargs...
rng,
AbstractMCMC.LogDensityModel(f),
sampler;
initial_params=initial_params_vector,
kwargs...,
)
else
transition_inner, state_inner = AbstractMCMC.step(
rng,
AbstractMCMC.LogDensityModel(f),
sampler,
initial_state;
initial_params,
initial_params=initial_params_vector,
kwargs...,
)
end
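
The same pattern applies to external samplers: `initial_params` is now an initialisation strategy, and the wrapped sampler receives the corresponding vectorised values internally. A rough sketch under assumptions (the AdvancedMH proposal and the model are illustrative, not taken from this PR):

```julia
using Turing
using AdvancedMH
using LinearAlgebra: I

@model function demo()
    x ~ Normal(0, 1)
end

# Wrap a random-walk Metropolis-Hastings sampler from AdvancedMH; the model has
# a single parameter, hence the 1-dimensional proposal distribution.
spl = externalsampler(AdvancedMH.RWMH(MvNormal(zeros(1), I)))

# Pass an initialisation strategy rather than a raw parameter vector.
chn = sample(demo(), spl, 100; initial_params=InitFromPrior())
```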