diff --git a/HISTORY.md b/HISTORY.md index 62ca1d350c..f6c162d2d6 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -23,6 +23,22 @@ Turing.jl v0.37 uses DynamicPPL v0.35, which brings with it several breaking cha For more details about all of the above, see the changelog of DynamicPPL [here](https://github.com/TuringLang/DynamicPPL.jl/releases/tag/v0.35.0). +### Export list + +Turing.jl's export list has been cleaned up a fair bit. This affects what is imported into your namespace when you do an unqualified `using Turing`. You may need to import things more explicitly than before. + + - The `DynamicPPL` and `AbstractMCMC` modules are no longer exported. You will need to `import DynamicPPL` or `using DynamicPPL: DynamicPPL` (likewise `AbstractMCMC`) yourself, which in turn means that they have to be made available in your project environment. + + - `@logprob_str` and `@prob_str` have been removed following a long deprecation period. + - We no longer re-export everything from `Bijectors` and `Libtask`. To get around this, add `using Bijectors` or `using Libtask` at the top of your script (but we recommend using more selective imports). + + + In particular, we no longer export `Bijectors.ordered`. If you were using `ordered`, note that even Bijectors itself does not (currently) export it, so you will have to import it manually with `using Bijectors: ordered`. + +On the other hand, we have added a few more exports: + + - `DynamicPPL.returned` and `DynamicPPL.prefix` are exported (for use with submodels). + - `LinearAlgebra.I` is exported for convenience. + # Release 0.36.0 ## Breaking changes diff --git a/Project.toml b/Project.toml index d72f323373..c5e62a675f 100644 --- a/Project.toml +++ b/Project.toml @@ -58,7 +58,7 @@ BangBang = "0.4.2" Bijectors = "0.14, 0.15" Compat = "4.15.0" DataStructures = "0.18" -Distributions = "0.23.3, 0.24, 0.25" +Distributions = "0.25.77" DistributionsAD = "0.6" DocStringExtensions = "0.8, 0.9" DynamicHMC = "3.4" diff --git a/docs/Project.toml b/docs/Project.toml index 025ada5de1..fd26d25df9 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -1,7 +1,4 @@ [deps] -Bijectors = "76274a88-744f-5084-9051-94815aaf08c4" -Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" DocumenterInterLinks = "d12716ef-a0f6-4df4-a9f1-a5a34e75c656" -DynamicPPL = "366bfd00-2699-11ea-058f-f148b4cae6d8" Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" diff --git a/docs/make.jl b/docs/make.jl index b553109ea7..978e5881b3 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,19 +1,17 @@ using Documenter using Turing -# Need to import Distributions and Bijectors to generate docs for functions -# from those packages.
-using Distributions -using Bijectors -using DynamicPPL using DocumenterInterLinks links = InterLinks( "DynamicPPL" => "https://turinglang.org/DynamicPPL.jl/stable/objects.inv", - "AbstractPPL" => "https://turinglang.org/AbstractPPL.jl/dev/objects.inv", + "AbstractPPL" => "https://turinglang.org/AbstractPPL.jl/stable/objects.inv", + "LinearAlgebra" => "https://docs.julialang.org/en/v1/objects.inv", + "AbstractMCMC" => "https://turinglang.org/AbstractMCMC.jl/stable/objects.inv", "ADTypes" => "https://sciml.github.io/ADTypes.jl/stable/objects.inv", "AdvancedVI" => "https://turinglang.org/AdvancedVI.jl/v0.2.8/objects.inv", "DistributionsAD" => "https://turinglang.org/DistributionsAD.jl/stable/objects.inv", + "OrderedCollections" => "https://juliacollections.github.io/OrderedCollections.jl/stable/objects.inv", ) # Doctest setup @@ -21,7 +19,7 @@ DocMeta.setdocmeta!(Turing, :DocTestSetup, :(using Turing); recursive=true) makedocs(; sitename="Turing", - modules=[Turing, Distributions, Bijectors], + modules=[Turing], pages=[ "Home" => "index.md", "API" => "api.md", @@ -29,7 +27,6 @@ makedocs(; ["Inference" => "api/Inference.md", "Optimisation" => "api/Optimisation.md"], ], checkdocs=:exports, - # checkdocs_ignored_modules=[Turing, Distributions, DynamicPPL, AbstractPPL, Bijectors], doctest=false, warnonly=true, plugins=[links], diff --git a/docs/src/api.md b/docs/src/api.md index 3066a7fad9..55b09a9d10 100644 --- a/docs/src/api.md +++ b/docs/src/api.md @@ -6,15 +6,13 @@ Turing.jl directly re-exports the entire public API of the following packages: - [Distributions.jl](https://juliastats.org/Distributions.jl) - [MCMCChains.jl](https://turinglang.org/MCMCChains.jl) - - [AbstractMCMC.jl](https://turinglang.org/AbstractMCMC.jl) - - [Bijectors.jl](https://turinglang.org/Bijectors.jl) - - [Libtask.jl](https://github.com/TuringLang/Libtask.jl) Please see the individual packages for their documentation. ## Individual exports and re-exports -**All** of the following symbols are exported unqualified by Turing, even though the documentation suggests that many of them are qualified. +In this API documentation, for the sake of clarity, we have listed the module that actually defines each of the exported symbols. +Note, however, that **all** of the following symbols are exported unqualified by Turing. 
That means, for example, you can just write ```julia @@ -37,17 +35,22 @@ even though [`Prior()`](@ref) is actually defined in the `Turing.Inference` modu ### Modelling -| Exported symbol | Documentation | Description | -|:--------------- |:----------------------------------- |:-------------------------------------------- | -| `@model` | [`DynamicPPL.@model`](@extref) | Define a probabilistic model | -| `@varname` | [`AbstractPPL.@varname`](@extref) | Generate a `VarName` from a Julia expression | -| `to_submodel` | [`DynamicPPL.to_submodel`](@extref) | Define a submodel | +| Exported symbol | Documentation | Description | +|:-------------------- |:------------------------------------------ |:-------------------------------------------------------------------------------------------- | +| `@model` | [`DynamicPPL.@model`](@extref) | Define a probabilistic model | +| `@varname` | [`AbstractPPL.@varname`](@extref) | Generate a `VarName` from a Julia expression | +| `to_submodel` | [`DynamicPPL.to_submodel`](@extref) | Define a submodel | +| `prefix` | [`DynamicPPL.prefix`](@extref) | Prefix all variable names in a model with a given symbol | +| `LogDensityFunction` | [`DynamicPPL.LogDensityFunction`](@extref) | A struct containing all information about how to evaluate a model. Mostly for advanced users | ### Inference -| Exported symbol | Documentation | Description | -|:--------------- |:------------------------------------------------------------------------------------------------ |:------------------- | -| `sample` | [`StatsBase.sample`](https://turinglang.org/AbstractMCMC.jl/stable/api/#Sampling-a-single-chain) | Sample from a model | +| Exported symbol | Documentation | Description | +|:----------------- |:------------------------------------------------------------------------------------------------ |:---------------------------------- | +| `sample` | [`StatsBase.sample`](https://turinglang.org/AbstractMCMC.jl/stable/api/#Sampling-a-single-chain) | Sample from a model | +| `MCMCThreads` | [`AbstractMCMC.MCMCThreads`](@extref) | Run MCMC using multiple threads | +| `MCMCDistributed` | [`AbstractMCMC.MCMCDistributed`](@extref) | Run MCMC using multiple processes | +| `MCMCSerial` | [`AbstractMCMC.MCMCSerial`](@extref) | Run MCMC without parallelism | ### Samplers @@ -68,6 +71,7 @@ even though [`Prior()`](@ref) is actually defined in the `Turing.Inference` modu | `SMC` | [`Turing.Inference.SMC`](@ref) | Sequential Monte Carlo | | `PG` | [`Turing.Inference.PG`](@ref) | Particle Gibbs | | `CSMC` | [`Turing.Inference.CSMC`](@ref) | The same as PG | +| `RepeatSampler` | [`Turing.Inference.RepeatSampler`](@ref) | A sampler that runs multiple times on the same variable | | `externalsampler` | [`Turing.Inference.externalsampler`](@ref) | Wrap an external sampler for use in Turing | ### Variational inference @@ -108,52 +112,37 @@ OrderedLogistic LogPoisson ``` -`BernoulliLogit` is part of Distributions.jl since version 0.25.77. -If you are using an older version of Distributions where this isn't defined, Turing will export the same distribution.
- -```@docs -Distributions.BernoulliLogit -``` - ### Tools to work with distributions | Exported symbol | Documentation | Description | |:--------------- |:-------------------------------------- |:-------------------------------------------------------------- | +| `I` | [`LinearAlgebra.I`](@extref) | Identity matrix | | `filldist` | [`DistributionsAD.filldist`](@extref) | Create a product distribution from a distribution and integers | | `arraydist` | [`DistributionsAD.arraydist`](@extref) | Create a product distribution from an array of distributions | | `NamedDist` | [`DynamicPPL.NamedDist`](@extref) | A distribution that carries the name of the variable | ### Predictions -```@docs -DynamicPPL.predict -``` +| Exported symbol | Documentation | Description | +|:--------------- |:--------------------------------------------------------------------------------- |:------------------------------------------------------- | +| `predict` | [`StatsAPI.predict`](https://turinglang.org/DynamicPPL.jl/stable/api/#Predicting) | Generate samples from posterior predictive distribution | ### Querying model probabilities and quantities Please see the [generated quantities](https://turinglang.org/docs/tutorials/usage-generated-quantities/) and [probability interface](https://turinglang.org/docs/tutorials/usage-probability-interface/) guides for more information. -| Exported symbol | Documentation | Description | -|:-------------------------- |:--------------------------------------------------------------------------------------------------------------------------------- |:--------------------------------------------------------------- | -| `generated_quantities` | [`DynamicPPL.generated_quantities`](@extref) | Calculate additional quantities defined in a model | -| `pointwise_loglikelihoods` | [`DynamicPPL.pointwise_loglikelihoods`](@extref) | Compute log likelihoods for each sample in a chain | -| `logprior` | [`DynamicPPL.logprior`](@extref) | Compute log prior probability | -| `logjoint` | [`DynamicPPL.logjoint`](@extref) | Compute log joint probability | -| `LogDensityFunction` | [`DynamicPPL.LogDensityFunction`](@extref) | Wrap a Turing model to satisfy LogDensityFunctions.jl interface | -| `condition` | [`AbstractPPL.condition`](@extref) | Condition a model on data | -| `decondition` | [`AbstractPPL.decondition`](@extref) | Remove conditioning on data | -| `conditioned` | [`DynamicPPL.conditioned`](@extref) | Return the conditioned values of a model | -| `fix` | [`DynamicPPL.fix`](@extref) | Fix the value of a variable | -| `unfix` | [`DynamicPPL.unfix`](@extref) | Unfix the value of a variable | -| `OrderedDict` | [`OrderedCollections.OrderedDict`](https://juliacollections.github.io/OrderedCollections.jl/dev/ordered_containers/#OrderedDicts) | An ordered dictionary | - -### Extra re-exports from Bijectors - -Note that Bijectors itself does not export `ordered`. 
- -```@docs -Bijectors.ordered -``` +| Exported symbol | Documentation | Description | +|:-------------------------- |:---------------------------------------------------------------------------------------------------------------------------- |:-------------------------------------------------- | +| `returned` | [`DynamicPPL.returned`](https://turinglang.org/DynamicPPL.jl/stable/api/#DynamicPPL.returned-Tuple%7BModel,%20NamedTuple%7D) | Calculate additional quantities defined in a model | +| `pointwise_loglikelihoods` | [`DynamicPPL.pointwise_loglikelihoods`](@extref) | Compute log likelihoods for each sample in a chain | +| `logprior` | [`DynamicPPL.logprior`](@extref) | Compute log prior probability | +| `logjoint` | [`DynamicPPL.logjoint`](@extref) | Compute log joint probability | +| `condition` | [`AbstractPPL.condition`](@extref) | Condition a model on data | +| `decondition` | [`AbstractPPL.decondition`](@extref) | Remove conditioning on data | +| `conditioned` | [`DynamicPPL.conditioned`](@extref) | Return the conditioned values of a model | +| `fix` | [`DynamicPPL.fix`](@extref) | Fix the value of a variable | +| `unfix` | [`DynamicPPL.unfix`](@extref) | Unfix the value of a variable | +| `OrderedDict` | [`OrderedCollections.OrderedDict`](@extref) | An ordered dictionary | ### Point estimates diff --git a/src/Turing.jl b/src/Turing.jl index d8fc09fb9d..aa5fbe8500 100644 --- a/src/Turing.jl +++ b/src/Turing.jl @@ -4,22 +4,24 @@ using Reexport, ForwardDiff using DistributionsAD, Bijectors, StatsFuns, SpecialFunctions using Statistics, LinearAlgebra using Libtask -@reexport using Distributions, MCMCChains, Libtask, AbstractMCMC, Bijectors +@reexport using Distributions, MCMCChains using Compat: pkgversion using AdvancedVI: AdvancedVI -using DynamicPPL: DynamicPPL, LogDensityFunction +using DynamicPPL: DynamicPPL import DynamicPPL: NoDist, NamedDist using LogDensityProblems: LogDensityProblems using NamedArrays: NamedArrays using Accessors: Accessors using StatsAPI: StatsAPI using StatsBase: StatsBase +using AbstractMCMC using Accessors: Accessors using Printf: Printf using Random: Random +using LinearAlgebra: I using ADTypes: ADTypes @@ -64,6 +66,7 @@ include("deprecated.jl") # to be removed in the next minor version release using DynamicPPL: pointwise_loglikelihoods, generated_quantities, + returned, logprior, logjoint, condition, @@ -71,68 +74,83 @@ using DynamicPPL: fix, unfix, conditioned, - to_submodel + to_submodel, + LogDensityFunction using StatsBase: predict -using Bijectors: ordered using OrderedCollections: OrderedDict # Turing essentials - modelling macros and inference algorithms -export @model, # modelling +export + # DEPRECATED + @submodel, + generated_quantities, + # Modelling - AbstractPPL and DynamicPPL + @model, @varname, - @submodel, # Deprecated to_submodel, - DynamicPPL, - Prior, # Sampling from the prior - MH, # classic sampling + prefix, + LogDensityFunction, + # Sampling - AbstractMCMC + sample, + MCMCThreads, + MCMCDistributed, + MCMCSerial, + # Samplers - Turing.Inference + Prior, + MH, Emcee, ESS, Gibbs, - HMC, # Hamiltonian-like sampling + HMC, SGLD, SGHMC, + PolynomialStepsize, HMCDA, NUTS, - PolynomialStepsize, - IS, # particle-based sampling + IS, SMC, - CSMC, PG, + CSMC, RepeatSampler, - vi, # variational inference - ADVI, - sample, # inference - @logprob_str, # TODO: Remove, see https://github.com/TuringLang/DynamicPPL.jl/issues/356 - @prob_str, # TODO: Remove, see https://github.com/TuringLang/DynamicPPL.jl/issues/356 externalsampler, - 
AutoForwardDiff, # ADTypes + # Variational inference - AdvancedVI + vi, + ADVI, + # ADTypes + AutoForwardDiff, AutoReverseDiff, AutoMooncake, - setprogress!, # debugging + # Debugging - Turing + setprogress!, + # Distributions Flat, FlatPos, BinomialLogit, - BernoulliLogit, # Part of Distributions >= 0.25.77 OrderedLogistic, LogPoisson, - filldist, - arraydist, - NamedDist, # Exports from DynamicPPL + # Tools to work with Distributions + I, # LinearAlgebra + filldist, # DistributionsAD + arraydist, # DistributionsAD + NamedDist, # DynamicPPL + # Predictions - DynamicPPL predict, + # Querying model probabilities - DynamicPPL + returned, pointwise_loglikelihoods, - generated_quantities, logprior, + loglikelihood, logjoint, - LogDensityFunction, condition, decondition, + conditioned, fix, unfix, - conditioned, - OrderedDict, - ordered, # Exports from Bijectors + OrderedDict, # OrderedCollections + # Point estimates - Turing.Optimisation + # The MAP and MLE exports are only needed for the Optim.jl interface. maximum_a_posteriori, maximum_likelihood, - # The MAP and MLE exports are only needed for the Optim.jl interface. MAP, MLE diff --git a/src/essential/Essential.jl b/src/essential/Essential.jl index cfa064c651..b045e4a857 100644 --- a/src/essential/Essential.jl +++ b/src/essential/Essential.jl @@ -17,12 +17,8 @@ using AdvancedPS: AdvancedPS include("container.jl") -export @model, - @varname, - AutoForwardDiff, - AutoReverseDiff, - AutoMooncake, - @logprob_str, - @prob_str +export @model +export @varname +export AutoForwardDiff, AutoReverseDiff, AutoMooncake end # module diff --git a/src/mcmc/Inference.jl b/src/mcmc/Inference.jl index 60aa087cdb..de87a5b391 100644 --- a/src/mcmc/Inference.jl +++ b/src/mcmc/Inference.jl @@ -606,13 +606,15 @@ julia> [first(t.θ.x) for t in transitions] # extract samples for `x` [-1.704630494695469] ``` """ -function transitions_from_chain(model::Turing.Model, chain::MCMCChains.Chains; kwargs...) +function transitions_from_chain( + model::DynamicPPL.Model, chain::MCMCChains.Chains; kwargs... +) return transitions_from_chain(Random.default_rng(), model, chain; kwargs...) end function transitions_from_chain( rng::Random.AbstractRNG, - model::Turing.Model, + model::DynamicPPL.Model, chain::MCMCChains.Chains; sampler=DynamicPPL.SampleFromPrior(), ) diff --git a/src/mcmc/emcee.jl b/src/mcmc/emcee.jl index ab68f795e1..dfd1fc0d30 100644 --- a/src/mcmc/emcee.jl +++ b/src/mcmc/emcee.jl @@ -81,7 +81,7 @@ function AbstractMCMC.step( # Generate a log joint function. vi = state.vi densitymodel = AMH.DensityModel( - Base.Fix1(LogDensityProblems.logdensity, Turing.LogDensityFunction(model, vi)) + Base.Fix1(LogDensityProblems.logdensity, DynamicPPL.LogDensityFunction(model, vi)) ) # Compute the next states. 
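The `Turing.LogDensityFunction` to `DynamicPPL.LogDensityFunction` swap seen in emcee.jl above recurs throughout the sampler, optimisation, and test files below; the type has always lived in DynamicPPL, and Turing now re-exports it directly. A minimal sketch of the pattern these call sites rely on, assuming Turing v0.37 with DynamicPPL v0.35 (the model, observation, and evaluation point are illustrative, not taken from the diff):

```julia
using Turing
using DynamicPPL: DynamicPPL
using LogDensityProblems: LogDensityProblems

@model function demo(x)
    mu ~ Normal(0, 1)
    x ~ Normal(mu, 1)
end

# Wrap a model (optionally with an explicit VarInfo and evaluation context,
# as the sampler code does) so that it satisfies the LogDensityProblems.jl API.
ldf = DynamicPPL.LogDensityFunction(demo(0.5))
LogDensityProblems.logdensity(ldf, [0.1])  # log joint density at mu = 0.1
```

Because this PR also adds `LogDensityFunction` to Turing's own export list, plain `LogDensityFunction(demo(0.5))` works after `using Turing` as well.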
diff --git a/src/mcmc/ess.jl b/src/mcmc/ess.jl index c6e62d8e1c..5448173486 100644 --- a/src/mcmc/ess.jl +++ b/src/mcmc/ess.jl @@ -49,7 +49,7 @@ function AbstractMCMC.step( rng, EllipticalSliceSampling.ESSModel( ESSPrior(model, spl, vi), - Turing.LogDensityFunction( + DynamicPPL.LogDensityFunction( model, vi, DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext()) ), ), @@ -110,7 +110,7 @@ Distributions.mean(p::ESSPrior) = p.μ # Evaluate log-likelihood of proposals const ESSLogLikelihood{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo} = - Turing.LogDensityFunction{M,V,<:DynamicPPL.SamplingContext{<:S},AD} where {AD} + DynamicPPL.LogDensityFunction{M,V,<:DynamicPPL.SamplingContext{<:S},AD} where {AD} (ℓ::ESSLogLikelihood)(f::AbstractVector) = LogDensityProblems.logdensity(ℓ, f) diff --git a/src/mcmc/mh.jl b/src/mcmc/mh.jl index 0af62e2597..6a03f5359f 100644 --- a/src/mcmc/mh.jl +++ b/src/mcmc/mh.jl @@ -189,7 +189,7 @@ A log density function for the MH sampler. This variant uses the `set_namedtuple!` function to update the `VarInfo`. """ const MHLogDensityFunction{M<:Model,S<:Sampler{<:MH},V<:AbstractVarInfo} = - Turing.LogDensityFunction{M,V,<:DynamicPPL.SamplingContext{<:S},AD} where {AD} + DynamicPPL.LogDensityFunction{M,V,<:DynamicPPL.SamplingContext{<:S},AD} where {AD} function LogDensityProblems.logdensity(f::MHLogDensityFunction, x::NamedTuple) vi = deepcopy(f.varinfo) @@ -308,7 +308,7 @@ function propose!!( densitymodel = AMH.DensityModel( Base.Fix1( LogDensityProblems.logdensity, - Turing.LogDensityFunction( + DynamicPPL.LogDensityFunction( model, vi, DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)), @@ -343,7 +343,7 @@ function propose!!( densitymodel = AMH.DensityModel( Base.Fix1( LogDensityProblems.logdensity, - Turing.LogDensityFunction( + DynamicPPL.LogDensityFunction( model, vi, DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)), diff --git a/src/optimisation/Optimisation.jl b/src/optimisation/Optimisation.jl index 9da929504b..0d380033c2 100644 --- a/src/optimisation/Optimisation.jl +++ b/src/optimisation/Optimisation.jl @@ -139,7 +139,7 @@ struct OptimLogDensity{ C<:OptimizationContext, AD<:ADTypes.AbstractADType, } - ldf::Turing.LogDensityFunction{M,V,C,AD} + ldf::DynamicPPL.LogDensityFunction{M,V,C,AD} end function OptimLogDensity( @@ -148,7 +148,7 @@ function OptimLogDensity( ctx::OptimizationContext; adtype::ADTypes.AbstractADType=AutoForwardDiff(), ) - return OptimLogDensity(Turing.LogDensityFunction(model, vi, ctx; adtype=adtype)) + return OptimLogDensity(DynamicPPL.LogDensityFunction(model, vi, ctx; adtype=adtype)) end # No varinfo @@ -158,7 +158,7 @@ function OptimLogDensity( adtype::ADTypes.AbstractADType=AutoForwardDiff(), ) return OptimLogDensity( - Turing.LogDensityFunction(model, DynamicPPL.VarInfo(model), ctx; adtype=adtype) + DynamicPPL.LogDensityFunction(model, DynamicPPL.VarInfo(model), ctx; adtype=adtype) ) end diff --git a/src/stdlib/distributions.jl b/src/stdlib/distributions.jl index 568ab3ae3b..ebcefe72ac 100644 --- a/src/stdlib/distributions.jl +++ b/src/stdlib/distributions.jl @@ -16,9 +16,6 @@ Base.maximum(::Flat) = Inf Base.rand(rng::Random.AbstractRNG, d::Flat) = rand(rng) Distributions.logpdf(::Flat, x::Real) = zero(x) -# TODO: only implement `logpdf(d, ::Real)` if support for Distributions < 0.24 is dropped -Distributions.pdf(d::Flat, x::Real) = exp(logpdf(d, x)) - # For vec support Distributions.logpdf(::Flat, x::AbstractVector{<:Real}) = zero(x) Distributions.loglikelihood(::Flat, 
x::AbstractVector{<:Real}) = zero(eltype(x)) @@ -41,17 +38,13 @@ struct FlatPos{T<:Real} <: ContinuousUnivariateDistribution end Base.minimum(d::FlatPos) = d.l -Base.maximum(d::FlatPos) = Inf +Base.maximum(::FlatPos) = Inf Base.rand(rng::Random.AbstractRNG, d::FlatPos) = rand(rng) + d.l function Distributions.logpdf(d::FlatPos, x::Real) z = float(zero(x)) return x <= d.l ? oftype(z, -Inf) : z end - -# TODO: only implement `logpdf(d, ::Real)` if support for Distributions < 0.24 is dropped -Distributions.pdf(d::FlatPos, x::Real) = exp(logpdf(d, x)) - # For vec support function Distributions.loglikelihood(d::FlatPos, x::AbstractVector{<:Real}) lower = d.l @@ -91,12 +84,7 @@ BinomialLogit(n::Int, logitp::Real) = BinomialLogit{typeof(logitp)}(n, logitp) Base.minimum(::BinomialLogit) = 0 Base.maximum(d::BinomialLogit) = d.n -# TODO: only implement `logpdf(d, k::Real)` if support for Distributions < 0.24 is dropped -Distributions.pdf(d::BinomialLogit, k::Real) = exp(logpdf(d, k)) -Distributions.logpdf(d::BinomialLogit, k::Real) = _logpdf(d, k) -Distributions.logpdf(d::BinomialLogit, k::Integer) = _logpdf(d, k) - -function _logpdf(d::BinomialLogit, k::Real) +function Distributions.logpdf(d::BinomialLogit, k::Real) n, logitp, logconstant = d.n, d.logitp, d.logconstant _insupport = insupport(d, k) _k = _insupport ? round(Int, k) : 0 @@ -109,16 +97,6 @@ function Base.rand(rng::Random.AbstractRNG, d::BinomialLogit) end Distributions.sampler(d::BinomialLogit) = sampler(Binomial(d.n, logistic(d.logitp))) -# Part of Distributions >= 0.25.77 -if !isdefined(Distributions, :BernoulliLogit) - """ - BernoulliLogit(logitp::Real) - - Create a univariate logit-parameterised Bernoulli distribution. - """ - BernoulliLogit(logitp::Real) = BinomialLogit(1, logitp) -end - """ OrderedLogistic(η, c::AbstractVector) @@ -151,12 +129,7 @@ end Base.minimum(d::OrderedLogistic) = 0 Base.maximum(d::OrderedLogistic) = length(d.cutpoints) + 1 -# TODO: only implement `logpdf(d, k::Real)` if support for Distributions < 0.24 is dropped -Distributions.pdf(d::OrderedLogistic, k::Real) = exp(logpdf(d, k)) -Distributions.logpdf(d::OrderedLogistic, k::Real) = _logpdf(d, k) -Distributions.logpdf(d::OrderedLogistic, k::Integer) = _logpdf(d, k) - -function _logpdf(d::OrderedLogistic, k::Real) +function Distributions.logpdf(d::OrderedLogistic, k::Real) η, cutpoints = d.η, d.cutpoints K = length(cutpoints) + 1 @@ -232,7 +205,7 @@ LogPoisson(logλ::Real) = LogPoisson{typeof(logλ)}(logλ) Base.minimum(d::LogPoisson) = 0 Base.maximum(d::LogPoisson) = Inf -function _logpdf(d::LogPoisson, k::Real) +function Distributions.logpdf(d::LogPoisson, k::Real) _insupport = insupport(d, k) _k = _insupport ? round(Int, k) : 0 logp = _k * d.logλ - d.λ - SpecialFunctions.loggamma(_k + 1) @@ -240,10 +213,5 @@ function _logpdf(d::LogPoisson, k::Real) return _insupport ? 
logp : oftype(logp, -Inf) end -# TODO: only implement `logpdf(d, ::Real)` if support for Distributions < 0.24 is dropped -Distributions.pdf(d::LogPoisson, k::Real) = exp(logpdf(d, k)) -Distributions.logpdf(d::LogPoisson, k::Integer) = _logpdf(d, k) -Distributions.logpdf(d::LogPoisson, k::Real) = _logpdf(d, k) - Base.rand(rng::Random.AbstractRNG, d::LogPoisson) = rand(rng, Poisson(d.λ)) Distributions.sampler(d::LogPoisson) = sampler(Poisson(d.λ)) diff --git a/test/Project.toml b/test/Project.toml index 489923767a..36b7ebdec9 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -6,6 +6,7 @@ AdvancedPS = "576499cb-2369-40b2-a588-c64705576edc" AdvancedVI = "b5ca4192-6429-45e5-a2d9-87aec30a685c" Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" BangBang = "198e06fe-97b7-11e9-32a5-e1d131e6ad66" +Bijectors = "76274a88-744f-5084-9051-94815aaf08c4" Clustering = "aaaa29a8-35af-508c-8bc3-b662a17a0fe5" Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" @@ -45,6 +46,7 @@ AdvancedPS = "=0.6.0" AdvancedVI = "0.2" Aqua = "0.8" BangBang = "0.4" +Bijectors = "0.14, 0.15" Clustering = "0.14, 0.15" Combinatorics = "1" Distributions = "0.25" diff --git a/test/essential/container.jl b/test/essential/container.jl index d1e1b21bd6..1cb790d5ae 100644 --- a/test/essential/container.jl +++ b/test/essential/container.jl @@ -2,7 +2,7 @@ module ContainerTests using AdvancedPS: AdvancedPS using Distributions: Bernoulli, Beta, Gamma, Normal -using DynamicPPL: @model, Sampler +using DynamicPPL: DynamicPPL, @model, Sampler using Test: @test, @testset using Turing diff --git a/test/ext/OptimInterface.jl b/test/ext/OptimInterface.jl index 163cf36c53..a93206e7e0 100644 --- a/test/ext/OptimInterface.jl +++ b/test/ext/OptimInterface.jl @@ -2,6 +2,7 @@ module OptimInterfaceTests using ..Models: gdemo_default using Distributions.FillArrays: Zeros +using DynamicPPL: DynamicPPL using LinearAlgebra: I using Optim: Optim using Random: Random diff --git a/test/mcmc/abstractmcmc.jl b/test/mcmc/abstractmcmc.jl index 82baa41197..f909df8ef1 100644 --- a/test/mcmc/abstractmcmc.jl +++ b/test/mcmc/abstractmcmc.jl @@ -1,6 +1,7 @@ module AbstractMCMCTests import ..ADUtils +using AbstractMCMC: AbstractMCMC using AdvancedMH: AdvancedMH using Distributions: sample using Distributions.FillArrays: Zeros diff --git a/test/mcmc/gibbs.jl b/test/mcmc/gibbs.jl index b5993078ed..69b2b10326 100644 --- a/test/mcmc/gibbs.jl +++ b/test/mcmc/gibbs.jl @@ -9,6 +9,7 @@ using ..NumericalTests: two_sample_test import ..ADUtils import Combinatorics +using AbstractMCMC: AbstractMCMC using Distributions: InverseGamma, Normal using Distributions: sample using DynamicPPL: DynamicPPL @@ -179,7 +180,7 @@ end end # The methods that capture testing information for us. - function Turing.AbstractMCMC.step( + function AbstractMCMC.step( rng::Random.AbstractRNG, model::DynamicPPL.Model, sampler::DynamicPPL.Sampler{<:AlgWrapper}, @@ -187,9 +188,7 @@ end kwargs..., ) capture_targets_and_algs(sampler.alg.inner, model.context) - return Turing.AbstractMCMC.step( - rng, model, unwrap_sampler(sampler), args...; kwargs... - ) + return AbstractMCMC.step(rng, model, unwrap_sampler(sampler), args...; kwargs...) 
end function Turing.DynamicPPL.initialstep( diff --git a/test/mcmc/hmc.jl b/test/mcmc/hmc.jl index 0a5910e450..67a46ec00a 100644 --- a/test/mcmc/hmc.jl +++ b/test/mcmc/hmc.jl @@ -4,9 +4,9 @@ using ..Models: gdemo_default using ..ADUtils: ADTypeCheckContext using ..NumericalTests: check_gdemo, check_numerical import ..ADUtils +using Bijectors: Bijectors using Distributions: Bernoulli, Beta, Categorical, Dirichlet, Normal, Wishart, sample -import DynamicPPL -using DynamicPPL: Sampler +using DynamicPPL: DynamicPPL, Sampler import ForwardDiff using HypothesisTests: ApproximateTwoSampleKSTest, pvalue import ReverseDiff @@ -271,7 +271,9 @@ using Turing # HACK: Necessary to avoid NUTS failing during adaptation. try - x ~ transformed(Normal(0, 1), inverse(Bijectors.Logit(lb, ub))) + x ~ Bijectors.transformed( + Normal(0, 1), Bijectors.inverse(Bijectors.Logit(lb, ub)) + ) catch e if e isa DomainError Turing.@addlogprob! -Inf diff --git a/test/skipped/unit_test_helper.jl b/test/skipped/unit_test_helper.jl index aa513e428e..7dec7757bb 100644 --- a/test/skipped/unit_test_helper.jl +++ b/test/skipped/unit_test_helper.jl @@ -10,7 +10,7 @@ function test_grad(turing_model, grad_f; trans=Dict()) @testset "Gradient using random inputs" begin ℓ = LogDensityProblemsAD.ADgradient( Turing.AutoTracker(), - Turing.LogDensityFunction( + DynamicPPL.LogDensityFunction( model_f, vi, DynamicPPL.SamplingContext(SampleFromPrior(), DynamicPPL.DefaultContext()), diff --git a/test/variational/advi.jl b/test/variational/advi.jl index 639df018cc..c2abacb675 100644 --- a/test/variational/advi.jl +++ b/test/variational/advi.jl @@ -4,6 +4,7 @@ using ..Models: gdemo_default using ..NumericalTests: check_gdemo import AdvancedVI using AdvancedVI: TruncatedADAGrad, DecayedADAGrad +using Bijectors: Bijectors using Distributions: Dirichlet, Normal using LinearAlgebra: I using MCMCChains: Chains @@ -71,11 +72,11 @@ using Turing.Essential: TuringDiagMvNormal end m = dirichlet() - b = bijector(m) + b = Bijectors.bijector(m) x0 = m() z0 = b(x0) @test size(z0) == (1,) - x0_inv = inverse(b)(z0) + x0_inv = Bijectors.inverse(b)(z0) @test size(x0_inv) == size(x0) @test all(x0 .≈ x0_inv)
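To make the export-list changes described in HISTORY.md above concrete, here is a hypothetical user script updated for this release. Only the import lines are the point; the model itself is illustrative:

```julia
using Turing                  # no longer re-exports DynamicPPL, AbstractMCMC, Bijectors, or Libtask
using Bijectors: ordered      # `ordered` must now be imported explicitly
using DynamicPPL: DynamicPPL  # needed for qualified access such as DynamicPPL.VarInfo

@model function ordered_means(x)
    # `ordered` keeps the two means sorted; `I` is still exported by Turing.
    mu ~ ordered(MvNormal(zeros(2), I))
    for i in eachindex(x)
        x[i] ~ Normal(mu[1], 1)
    end
end

# `sample`, `NUTS`, and `MCMCThreads` remain available unqualified.
chain = sample(ordered_means(randn(10)), NUTS(), MCMCThreads(), 100, 2)
```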