From 1980f93ccc18d2a3d935104510169d8727eb6ee7 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Fri, 7 Nov 2025 02:44:48 -0500 Subject: [PATCH 01/16] move gaussian expectation of grad and hess to its own file --- src/AdvancedVI.jl | 3 +- src/algorithms/gauss_expected_grad_hess.jl | 55 ++++++++++++++++++++++ src/algorithms/klminwassfwdbwd.jl | 54 --------------------- 3 files changed, 57 insertions(+), 55 deletions(-) create mode 100644 src/algorithms/gauss_expected_grad_hess.jl diff --git a/src/AdvancedVI.jl b/src/AdvancedVI.jl index 2b82b7b7..f62dbb20 100644 --- a/src/AdvancedVI.jl +++ b/src/AdvancedVI.jl @@ -352,8 +352,9 @@ include("algorithms/common.jl") export KLMinRepGradDescent, KLMinRepGradProxDescent, KLMinScoreGradDescent, ADVI, BBVI -# Other Algorithms +# Natural and Wasserstein gradient descent algorithms +include("algorithms/gauss_expected_grad_hess.jl") include("algorithms/klminwassfwdbwd.jl") export KLMinWassFwdBwd diff --git a/src/algorithms/gauss_expected_grad_hess.jl b/src/algorithms/gauss_expected_grad_hess.jl new file mode 100644 index 00000000..bcd124c8 --- /dev/null +++ b/src/algorithms/gauss_expected_grad_hess.jl @@ -0,0 +1,55 @@ + +""" + gaussian_expectation_gradient_and_hessian!(rng, q, n_samples, grad_buf, hess_buf, prob) + +Estimate the expectations of the gradient and Hessians of the log-density of `prob` taken over the Gaussian `q`. For estimating the expectation of the Hessian, if `prob` has second-order differentiation capability, this function uses the sample average of the Hessian. Otherwise, it uses Stein's identity. + +# Arguments +- `rng::Random.AbstractRNG`: Random number generator. +- `q::MvLocationScale{<:LowerTriangular,<:Normal,L}`: Gaussian to take expectation over. +- `n_samples::Int`: Number of samples used for estimation. +- `grad_buf::AbstractVector`: Buffer for the gradient estimate. +- `hess_buf::AbstractMatrix`: Buffer for the Hessian estimate. +- `prob`: `LogDensityProblem` associated with the log-density gradient and Hessian subject to expectation. +""" +function gaussian_expectation_gradient_and_hessian!( + rng::Random.AbstractRNG, + q::MvLocationScale{<:LowerTriangular,<:Normal,L}, + n_samples::Int, + grad_buf::AbstractVector{T}, + hess_buf::AbstractMatrix{T}, + prob, +) where {T<:Real,L} + logπ_avg = zero(T) + fill!(grad_buf, zero(T)) + fill!(hess_buf, zero(T)) + + if LogDensityProblems.capabilities(typeof(prob)) ≤ + LogDensityProblems.LogDensityOrder{1}() + # Use Stein's identity + d = LogDensityProblems.dimension(prob) + u = randn(rng, T, d, n_samples) + z = q.scale*u .+ q.location + for b in 1:n_samples + zb, ub = view(z, :, b), view(u, :, b) + logπ, ∇logπ = LogDensityProblems.logdensity_and_gradient(prob, zb) + logπ_avg += logπ/n_samples + grad_buf += ∇logπ/n_samples + hess_buf += ub*(∇logπ/n_samples)' + end + return logπ_avg, grad_buf, hess_buf + else + # Use sample average of the Hessian. 
+ z = rand(rng, q, n_samples) + for b in 1:n_samples + zb = view(z, :, b) + logπ, ∇logπ, ∇2logπ = LogDensityProblems.logdensity_gradient_and_hessian( + prob, zb + ) + logπ_avg += logπ/n_samples + grad_buf += ∇logπ/n_samples + hess_buf += ∇2logπ/n_samples + end + return logπ_avg, grad_buf, hess_buf + end +end diff --git a/src/algorithms/klminwassfwdbwd.jl b/src/algorithms/klminwassfwdbwd.jl index f834b539..7f6c27f2 100644 --- a/src/algorithms/klminwassfwdbwd.jl +++ b/src/algorithms/klminwassfwdbwd.jl @@ -41,60 +41,6 @@ The keyword arguments are as follows: subsampling::Sub = nothing end -""" - gaussian_expectation_gradient_and_hessian!(rng, q, n_samples, grad_buf, hess_buf, prob) - -Estimate the expectations of the gradient and Hessians of the log-density of `prob` taken over the Gaussian `q`. For estimating the expectation of the Hessian, if `prob` has second-order differentiation capability, this function uses the sample average of the Hessian. Otherwise, it uses Stein's identity. - -# Arguments -- `rng::Random.AbstractRNG`: Random number generator. -- `q::MvLocationScale{<:LowerTriangular,<:Normal,L}`: Gaussian to take expectation over. -- `n_samples::Int`: Number of samples used for estimation. -- `grad_buf::AbstractVector`: Buffer for the gradient estimate. -- `hess_buf::AbstractMatrix`: Buffer for the Hessian estimate. -- `prob`: `LogDensityProblem` associated with the log-density gradient and Hessian subject to expectation. -""" -function gaussian_expectation_gradient_and_hessian!( - rng::Random.AbstractRNG, - q::MvLocationScale{<:LowerTriangular,<:Normal,L}, - n_samples::Int, - grad_buf::AbstractVector{T}, - hess_buf::AbstractMatrix{T}, - prob, -) where {T<:Real,L} - logπ_avg = zero(T) - fill!(grad_buf, zero(T)) - fill!(hess_buf, zero(T)) - - if LogDensityProblems.capabilities(typeof(prob)) ≤ - LogDensityProblems.LogDensityOrder{1}() - # Use Stein's identity - d = LogDensityProblems.dimension(prob) - u = randn(rng, T, d, n_samples) - z = q.scale*u .+ q.location - for b in 1:n_samples - zb, ub = view(z, :, b), view(u, :, b) - logπ, ∇logπ = LogDensityProblems.logdensity_and_gradient(prob, zb) - logπ_avg += logπ/n_samples - grad_buf += ∇logπ/n_samples - hess_buf += ub*(∇logπ/n_samples)' - end - return logπ_avg, grad_buf, hess_buf - else - # Use sample average of the Hessian. 
- z = rand(rng, q, n_samples) - for b in 1:n_samples - zb = view(z, :, b) - logπ, ∇logπ, ∇2logπ = LogDensityProblems.logdensity_gradient_and_hessian( - prob, zb - ) - logπ_avg += logπ/n_samples - grad_buf += ∇logπ/n_samples - hess_buf += ∇2logπ/n_samples - end - return logπ_avg, grad_buf, hess_buf - end -end struct KLMinWassFwdBwdState{Q,P,S,Sigma,GradBuf,HessBuf} q::Q From c84b453b0379c92109070ab0c4565c43d0510117 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Fri, 7 Nov 2025 02:45:44 -0500 Subject: [PATCH 02/16] add square-root variational newton algorithm --- src/AdvancedVI.jl | 3 +- src/algorithms/klminsqrtnaturalgraddescent.jl | 157 +++++++++++++++++ .../algorithms/klminsqrtnaturalgraddescent.jl | 158 ++++++++++++++++++ test/runtests.jl | 1 + 4 files changed, 318 insertions(+), 1 deletion(-) create mode 100644 src/algorithms/klminsqrtnaturalgraddescent.jl create mode 100644 test/algorithms/klminsqrtnaturalgraddescent.jl diff --git a/src/AdvancedVI.jl b/src/AdvancedVI.jl index f62dbb20..6ab91eb1 100644 --- a/src/AdvancedVI.jl +++ b/src/AdvancedVI.jl @@ -356,7 +356,8 @@ export KLMinRepGradDescent, KLMinRepGradProxDescent, KLMinScoreGradDescent, ADVI include("algorithms/gauss_expected_grad_hess.jl") include("algorithms/klminwassfwdbwd.jl") +include("algorithms/klminsqrtnaturalgraddescent.jl") -export KLMinWassFwdBwd +export KLMinWassFwdBwd, KLMinSqrtNaturalGradDescent end diff --git a/src/algorithms/klminsqrtnaturalgraddescent.jl b/src/algorithms/klminsqrtnaturalgraddescent.jl new file mode 100644 index 00000000..5693b296 --- /dev/null +++ b/src/algorithms/klminsqrtnaturalgraddescent.jl @@ -0,0 +1,157 @@ + +""" + KLMinSqrtNaturalGradDescent(n_samples, stepsize, subsampling) + KLMinSqrtNaturalGradDescent(; n_samples, stepsize, subsampling) + +KL divergence minimization algorithm obtained by discretizing the natural gradient flow under the square-root parameterization[^KMKL2025][^LDENKTM2024][^LDLNKS2023]. + +Denoting the target log-density as \$\$ \\log \\pi \$\$ and the current variational approximation as \$\$q\$\$, the original algorithm requires estimating the quantity \$\$ \\mathbb{E}_q \\nabla^2 \\log \\pi \$\$. If the target `LogDensityProblem` associated with \$\$ \\log \\pi \$\$ has second-order differentiation [capability](https://www.tamaspapp.eu/LogDensityProblems.jl/dev/#LogDensityProblems.capabilities), we use the sample average of the Hessian. If the target has only first-order capability, we use Stein's identity. + +# (Keyword) Arguments +- `n_samples::Int`: Number of samples used to estimate the Wasserstein gradient. (default: `1`) +- `stepsize::Float64`: Step size of stochastic proximal gradient descent. +- `subsampling::Union{Nothing,<:AbstractSubsampling}`: Optional subsampling strategy. + +!!! note + The `subsampling` strategy is only applied to the target `LogDensityProblem` but not to the variational approximation `q`. That is, `KLMinSqrtVarNewton` does not support amortization or structured variational families. + +# Output +- `q`: The last iterate of the algorithm. + +# Callback Signature +The `callback` function supplied to `optimize` needs to have the following signature: + + callback(; rng, iteration, q, info) + +The keyword arguments are as follows: +- `rng`: Random number generator internally used by the algorithm. +- `iteration`: The index of the current iteration. +- `q`: Current variational approximation. +- `info`: `NamedTuple` containing the information generated during the current iteration. 
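For example, a minimal callback sketch that records the ELBO reported in `info` at every iteration (the names `elbo_trace`, `prob`, and `q0` are illustrative placeholders; `prob` is assumed to be a `LogDensityProblem` with at least first-order capability and `q0` a `FullRankGaussian` initialization):

    elbo_trace = Float64[]
    callback(; info, kwargs...) = (push!(elbo_trace, info.elbo); nothing)
    alg = KLMinSqrtNaturalGradDescent(; stepsize=1e-3, n_samples=4)
    q, _, _ = optimize(alg, 1_000, prob, q0; callback)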
+ +# Requirements +- The variational family is [`FullRankGaussian`](@ref FullRankGaussian). +- The target distribution has unconstrained support (\$\$\\mathbb{R}^d\$\$). +- The target `LogDensityProblems.logdensity(prob, x)` has at least first-order differentiation capability. +""" +@kwdef struct KLMinSqrtNaturalGradDescent{Sub<:Union{Nothing,<:AbstractSubsampling}} <: + AbstractVariationalAlgorithm + n_samples::Int = 1 + stepsize::Float64 + subsampling::Sub = nothing +end + +struct KLMinSqrtNaturalGradDescentState{Q,P,S,GradBuf,HessBuf} + q::Q + prob::P + iteration::Int + sub_st::S + grad_buf::GradBuf + hess_buf::HessBuf +end + +function init( + rng::Random.AbstractRNG, + alg::KLMinSqrtNaturalGradDescent, + q_init::MvLocationScale{<:LowerTriangular,<:Normal,L}, + prob, +) where {L} + sub = alg.subsampling + n_dims = LogDensityProblems.dimension(prob) + capability = LogDensityProblems.capabilities(typeof(prob)) + if capability < LogDensityProblems.LogDensityOrder{1}() + throw( + ArgumentError( + "`KLMinSqrtNaturalGradDescent` requires at least first-order differentiation capability. The capability of the supplied `LogDensityProblem` is $(capability).", + ), + ) + end + sub_st = isnothing(sub) ? nothing : init(rng, sub) + grad_buf = Vector{eltype(q_init.location)}(undef, n_dims) + hess_buf = Matrix{eltype(q_init.location)}(undef, n_dims, n_dims) + return KLMinSqrtNaturalGradDescentState(q_init, prob, 0, sub_st, grad_buf, hess_buf) +end + +output(::KLMinSqrtNaturalGradDescent, state) = state.q + +function step( + rng::Random.AbstractRNG, alg::KLMinSqrtNaturalGradDescent, state, callback, objargs...; kwargs... +) + (; n_samples, stepsize, subsampling) = alg + (; q, prob, iteration, sub_st, grad_buf, hess_buf) = state + + m = q.location + C = q.scale + η = convert(eltype(m), stepsize) + iteration += 1 + + # Maybe apply subsampling + prob_sub, sub_st′, sub_inf = if isnothing(subsampling) + prob, sub_st, NamedTuple() + else + batch, sub_st′, sub_inf = step(rng, subsampling, sub_st) + prob_sub = subsample(prob, batch) + prob_sub, sub_st′, sub_inf + end + + # Estimate the Wasserstein gradient + logπ_avg, grad_buf, hess_buf = gaussian_expectation_gradient_and_hessian!( + rng, q, n_samples, grad_buf, hess_buf, prob_sub + ) + + CtHCmI = C'*-hess_buf*C - I + CtHCmI_tril = LowerTriangular(tril(CtHCmI) - Diagonal(diag(CtHCmI))/2) + + m′ = m - η * C * (C' * -grad_buf) + C′ = C - η * C * CtHCmI_tril + + q′ = MvLocationScale(m′, C′, q.dist) + + state = KLMinSqrtNaturalGradDescentState(q′, prob, iteration, sub_st′, grad_buf, hess_buf) + elbo = logπ_avg + entropy(q′) + info = merge((elbo=elbo,), sub_inf) + + if !isnothing(callback) + info′ = callback(; rng, iteration, q, info) + info = !isnothing(info′) ? merge(info′, info) : info + end + state, false, info +end + +""" + estimate_objective([rng,] alg, q, prob; n_samples) + +Estimate the ELBO of the variational approximation `q` against the target log-density `prob`. + +# Arguments +- `rng::Random.AbstractRNG`: Random number generator. +- `alg::KLMinSqrtNaturalGradDescent`: Variational inference algorithm. +- `q::MvLocationScale{<:Any,<:Normal,<:Any}`: Gaussian variational approximation. +- `prob`: The target log-joint likelihood implementing the `LogDensityProblem` interface. + +# Keyword Arguments +- `n_samples::Int`: Number of Monte Carlo samples for estimating the objective. (default: Same as the the number of samples used for estimating the gradient during optimization.) + +# Returns +- `obj_est`: Estimate of the objective value. 
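A usage sketch (here `q` and `prob` are placeholders for a `FullRankGaussian` approximation and a target `LogDensityProblem`; the step size and sample counts are arbitrary):

    alg = KLMinSqrtNaturalGradDescent(; stepsize=1e-3, n_samples=4)
    elbo_est = estimate_objective(alg, q, prob; n_samples=10_000)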
+""" +function estimate_objective( + rng::Random.AbstractRNG, + alg::KLMinSqrtNaturalGradDescent, + q::MvLocationScale{S,<:Normal,L}, + prob; + n_samples::Int=alg.n_samples, +) where {S,L} + obj = RepGradELBO(n_samples; entropy=MonteCarloEntropy()) + if isnothing(alg.subsampling) + return estimate_objective(rng, obj, q, prob) + else + sub = alg.subsampling + sub_st = init(rng, sub) + return mapreduce(+, 1:length(sub)) do _ + batch, sub_st, _ = step(rng, sub, sub_st) + prob_sub = subsample(prob, batch) + estimate_objective(rng, obj, q, prob_sub) / length(sub) + end + end +end diff --git a/test/algorithms/klminsqrtnaturalgraddescent.jl b/test/algorithms/klminsqrtnaturalgraddescent.jl new file mode 100644 index 00000000..fc70dc81 --- /dev/null +++ b/test/algorithms/klminsqrtnaturalgraddescent.jl @@ -0,0 +1,158 @@ + +@testset "KLMinSqrtNaturalGradDescent" begin + begin + modelstats = normal_meanfield(Random.default_rng(), Float64; capability=2) + (; model, n_dims, μ_true, L_true) = modelstats + + alg = KLMinSqrtNaturalGradDescent(; n_samples=10, stepsize=1e-3) + L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims)) + q0 = FullRankGaussian(zeros(Float64, n_dims), L0) + + @testset "callback" begin + T = 10 + callback(; iteration, kwargs...) = (iteration_check=iteration,) + _, info, _ = optimize(alg, T, model, q0; callback, show_progress=PROGRESS) + @test [i.iteration_check for i in info] == 1:T + end + + @testset "estimate_objective" begin + q_true = FullRankGaussian(μ_true, LowerTriangular(Matrix(L_true))) + + obj_est = estimate_objective(alg, q_true, model) + @test isfinite(obj_est) + + obj_est = estimate_objective(alg, q_true, model; n_samples=10^5) + @test obj_est ≈ 0 atol=1e-2 + end + + @testset "determinism" begin + seed = (0x38bef07cf9cc549d) + rng = StableRNG(seed) + T = 10 + + q_avg, _, _ = optimize(rng, alg, T, model, q0; show_progress=PROGRESS) + μ = q_avg.location + L = q_avg.scale + + rng_repl = StableRNG(seed) + q_avg, _, _ = optimize(rng_repl, alg, T, model, q0; show_progress=PROGRESS) + μ_repl = q_avg.location + L_repl = q_avg.scale + @test μ == μ_repl + @test L == L_repl + end + end + + @testset "error low capability" begin + modelstats = normal_meanfield(Random.default_rng(), Float64; capability=0) + (; model, n_dims) = modelstats + + alg = KLMinSqrtNaturalGradDescent(; n_samples=10, stepsize=1.0) + + L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims)) + q0 = FullRankGaussian(zeros(Float64, n_dims), L0) + @test_throws "first-order" optimize(alg, 1, model, q0) + end + + @testset "type stability type=$(realtype), capability=$(capability)" for realtype in [ + Float64, Float32 + ], + capability in [1, 2] + + modelstats = normal_meanfield(Random.default_rng(), realtype; capability) + (; model, μ_true, L_true, n_dims, strong_convexity, is_meanfield) = modelstats + + alg = KLMinSqrtNaturalGradDescent(; n_samples=10, stepsize=1e-3) + T = 10 + + L0 = LowerTriangular(Matrix{realtype}(I, n_dims, n_dims)) + q0 = FullRankGaussian(zeros(realtype, n_dims), L0) + + q, _, _ = optimize(alg, T, model, q0; show_progress=PROGRESS) + + @test eltype(q.location) == eltype(μ_true) + @test eltype(q.scale) == eltype(L_true) + end + + @testset "convergence capability=$(capability)" for capability in [1, 2] + modelstats = normal_meanfield(Random.default_rng(), Float64; capability) + (; model, μ_true, L_true, n_dims, strong_convexity, is_meanfield) = modelstats + + T = 1000 + alg = KLMinSqrtNaturalGradDescent(; n_samples=10, stepsize=1e-3) + + q_avg, _, _ = optimize(alg, T, model, q0; 
show_progress=PROGRESS) + + Δλ0 = sum(abs2, q0.location - μ_true) + sum(abs2, q0.scale - L_true) + Δλ = sum(abs2, q_avg.location - μ_true) + sum(abs2, q_avg.scale - L_true) + + @test Δλ ≤ 0.1*Δλ0 + end + + @testset "subsampling" begin + n_data = 8 + + @testset "estimate_objective batchsize=$(batchsize)" for batchsize in [1, 3, 4] + modelstats = subsamplednormal(Random.default_rng(), n_data) + (; model, n_dims, μ_true, L_true) = modelstats + + L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims)) + q0 = FullRankGaussian(zeros(Float64, n_dims), L0) + + subsampling = ReshufflingBatchSubsampling(1:n_data, batchsize) + alg = KLMinSqrtNaturalGradDescent(; n_samples=10, stepsize=1e-3) + alg_sub = KLMinSqrtNaturalGradDescent(; n_samples=10, stepsize=1e-3, subsampling) + + obj_full = estimate_objective(alg, q0, model; n_samples=10^5) + obj_sub = estimate_objective(alg_sub, q0, model; n_samples=10^5) + @test obj_full ≈ obj_sub rtol=0.1 + end + + @testset "determinism" begin + seed = (0x38bef07cf9cc549d) + rng = StableRNG(seed) + + modelstats = subsamplednormal(Random.default_rng(), n_data) + (; model, n_dims, μ_true, L_true) = modelstats + + L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims)) + q0 = FullRankGaussian(zeros(Float64, n_dims), L0) + + T = 10 + batchsize = 3 + subsampling = ReshufflingBatchSubsampling(1:n_data, batchsize) + alg_sub = KLMinSqrtNaturalGradDescent(; n_samples=10, stepsize=1e-3, subsampling) + + q, _, _ = optimize(rng, alg_sub, T, model, q0; show_progress=PROGRESS) + μ = q.location + L = q.scale + + rng_repl = StableRNG(seed) + q, _, _ = optimize(rng_repl, alg_sub, T, model, q0; show_progress=PROGRESS) + μ_repl = q.location + L_repl = q.scale + @test μ == μ_repl + @test L == L_repl + end + + @testset "convergence capability=$(capability)" for capability in [1, 2] + modelstats = subsamplednormal(Random.default_rng(), n_data; capability) + (; model, n_dims, μ_true, L_true) = modelstats + + L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims)) + q0 = FullRankGaussian(zeros(Float64, n_dims), L0) + + T = 1000 + batchsize = 1 + subsampling = ReshufflingBatchSubsampling(1:n_data, batchsize) + alg_sub = KLMinSqrtNaturalGradDescent(; n_samples=10, stepsize=1e-2, subsampling) + + q, stats, _ = optimize(alg_sub, T, model, q0; show_progress=PROGRESS) + + Δλ0 = sum(abs2, q0.location - μ_true) + sum(abs2, q0.scale - L_true) + Δλ = sum(abs2, q.location - μ_true) + sum(abs2, q.scale - L_true) + + @test Δλ ≤ 0.1*Δλ0 + end + end +end diff --git a/test/runtests.jl b/test/runtests.jl index ab67247b..db66cb77 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -66,6 +66,7 @@ if GROUP == "All" || GROUP == "GENERAL" include("families/location_scale_low_rank.jl") include("algorithms/klminwassfwdbwd.jl") + include("algorithms/klminsqrtnaturalgraddescent.jl") end if GROUP == "All" || GROUP == "AD" From 48daaa08f8fe27f6f82760c11e35e97d98cd2f65 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Fri, 7 Nov 2025 02:48:10 -0500 Subject: [PATCH 03/16] apply formatter --- src/algorithms/klminsqrtnaturalgraddescent.jl | 11 +++++++++-- test/algorithms/klminsqrtnaturalgraddescent.jl | 12 +++++++++--- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/src/algorithms/klminsqrtnaturalgraddescent.jl b/src/algorithms/klminsqrtnaturalgraddescent.jl index 5693b296..0a047e94 100644 --- a/src/algorithms/klminsqrtnaturalgraddescent.jl +++ b/src/algorithms/klminsqrtnaturalgraddescent.jl @@ -75,7 +75,12 @@ end output(::KLMinSqrtNaturalGradDescent, state) = state.q function step( - 
rng::Random.AbstractRNG, alg::KLMinSqrtNaturalGradDescent, state, callback, objargs...; kwargs... + rng::Random.AbstractRNG, + alg::KLMinSqrtNaturalGradDescent, + state, + callback, + objargs...; + kwargs..., ) (; n_samples, stepsize, subsampling) = alg (; q, prob, iteration, sub_st, grad_buf, hess_buf) = state @@ -107,7 +112,9 @@ function step( q′ = MvLocationScale(m′, C′, q.dist) - state = KLMinSqrtNaturalGradDescentState(q′, prob, iteration, sub_st′, grad_buf, hess_buf) + state = KLMinSqrtNaturalGradDescentState( + q′, prob, iteration, sub_st′, grad_buf, hess_buf + ) elbo = logπ_avg + entropy(q′) info = merge((elbo=elbo,), sub_inf) diff --git a/test/algorithms/klminsqrtnaturalgraddescent.jl b/test/algorithms/klminsqrtnaturalgraddescent.jl index fc70dc81..7841c6d2 100644 --- a/test/algorithms/klminsqrtnaturalgraddescent.jl +++ b/test/algorithms/klminsqrtnaturalgraddescent.jl @@ -101,7 +101,9 @@ subsampling = ReshufflingBatchSubsampling(1:n_data, batchsize) alg = KLMinSqrtNaturalGradDescent(; n_samples=10, stepsize=1e-3) - alg_sub = KLMinSqrtNaturalGradDescent(; n_samples=10, stepsize=1e-3, subsampling) + alg_sub = KLMinSqrtNaturalGradDescent(; + n_samples=10, stepsize=1e-3, subsampling + ) obj_full = estimate_objective(alg, q0, model; n_samples=10^5) obj_sub = estimate_objective(alg_sub, q0, model; n_samples=10^5) @@ -121,7 +123,9 @@ T = 10 batchsize = 3 subsampling = ReshufflingBatchSubsampling(1:n_data, batchsize) - alg_sub = KLMinSqrtNaturalGradDescent(; n_samples=10, stepsize=1e-3, subsampling) + alg_sub = KLMinSqrtNaturalGradDescent(; + n_samples=10, stepsize=1e-3, subsampling + ) q, _, _ = optimize(rng, alg_sub, T, model, q0; show_progress=PROGRESS) μ = q.location @@ -145,7 +149,9 @@ T = 1000 batchsize = 1 subsampling = ReshufflingBatchSubsampling(1:n_data, batchsize) - alg_sub = KLMinSqrtNaturalGradDescent(; n_samples=10, stepsize=1e-2, subsampling) + alg_sub = KLMinSqrtNaturalGradDescent(; + n_samples=10, stepsize=1e-2, subsampling + ) q, stats, _ = optimize(alg_sub, T, model, q0; show_progress=PROGRESS) From 3483e8dbf9b110588b5e48869ccefb1a300933d3 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Fri, 7 Nov 2025 03:56:49 -0500 Subject: [PATCH 04/16] add natural gradient descent (variational online Newton) --- src/AdvancedVI.jl | 3 +- src/algorithms/klminnaturalgraddescent.jl | 175 +++++++++++++++++++++ test/algorithms/klminnaturalgraddescent.jl | 158 +++++++++++++++++++ test/runtests.jl | 1 + 4 files changed, 336 insertions(+), 1 deletion(-) create mode 100644 src/algorithms/klminnaturalgraddescent.jl create mode 100644 test/algorithms/klminnaturalgraddescent.jl diff --git a/src/AdvancedVI.jl b/src/AdvancedVI.jl index 6ab91eb1..7d57e32a 100644 --- a/src/AdvancedVI.jl +++ b/src/AdvancedVI.jl @@ -357,7 +357,8 @@ export KLMinRepGradDescent, KLMinRepGradProxDescent, KLMinScoreGradDescent, ADVI include("algorithms/gauss_expected_grad_hess.jl") include("algorithms/klminwassfwdbwd.jl") include("algorithms/klminsqrtnaturalgraddescent.jl") +include("algorithms/klminnaturalgraddescent.jl") -export KLMinWassFwdBwd, KLMinSqrtNaturalGradDescent +export KLMinWassFwdBwd, KLMinSqrtNaturalGradDescent, KLMinNaturalGradDescent end diff --git a/src/algorithms/klminnaturalgraddescent.jl b/src/algorithms/klminnaturalgraddescent.jl new file mode 100644 index 00000000..5a296fac --- /dev/null +++ b/src/algorithms/klminnaturalgraddescent.jl @@ -0,0 +1,175 @@ + +""" + KLMinNaturalGradDescent(stepsize, ensure_posdef, n_samples, subsampling) + KLMinNaturalGradDescent(; stepsize, ensure_posdef, n_samples, 
subsampling) + +KL divergence minimization by running natural gradient descent[^KL2017][^KR2023], also called variational online Newton. +This algorithm can be viewed as an instantiation of mirror descent, where the Bregman divergence is chosen to be the KL divergence. + +If the `ensure_posdef` argument is true, the algorithm applies the technique by Lin *et al.*[^LSK2020], where the precision matrix update includes an additional term that guarantees positive definiteness. +This, however, involves an additional set of matrix-matrix system solves that could be costly. + +Denoting the target log-density as \$\$ \\log \\pi \$\$ and the current variational approximation as \$\$q\$\$, the original algorithm requires estimating the quantity \$\$ \\mathbb{E}_q \\nabla^2 \\log \\pi \$\$. If the target `LogDensityProblem` associated with \$\$ \\log \\pi \$\$ has second-order differentiation [capability](https://www.tamaspapp.eu/LogDensityProblems.jl/dev/#LogDensityProblems.capabilities), we use the sample average of the Hessian. If the target has only first-order capability, we use Stein's identity. + +# (Keyword) Arguments +- `stepsize::Float64`: Step size of stochastic proximal gradient descent. +- `ensure_posdef::Bool`: Ensure that the updated precision preserves positive definiteness. (default: `true`) +- `n_samples::Int`: Number of samples used to estimate the Wasserstein gradient. (default: `1`) +- `subsampling::Union{Nothing,<:AbstractSubsampling}`: Optional subsampling strategy. + +!!! note + The `subsampling` strategy is only applied to the target `LogDensityProblem` but not to the variational approximation `q`. That is, `KLMinNaturalGradDescent` does not support amortization or structured variational families. + +# Output +- `q`: The last iterate of the algorithm. + +# Callback Signature +The `callback` function supplied to `optimize` needs to have the following signature: + + callback(; rng, iteration, q, info) + +The keyword arguments are as follows: +- `rng`: Random number generator internally used by the algorithm. +- `iteration`: The index of the current iteration. +- `q`: Current variational approximation. +- `info`: `NamedTuple` containing the information generated during the current iteration. + +# Requirements +- The variational family is [`FullRankGaussian`](@ref FullRankGaussian). +- The target distribution has unconstrained support (\$\$\\mathbb{R}^d\$\$). +- The target `LogDensityProblems.logdensity(prob, x)` has at least first-order differentiation capability. +""" +@kwdef struct KLMinNaturalGradDescent{Sub<:Union{Nothing,<:AbstractSubsampling}} <: + AbstractVariationalAlgorithm + stepsize::Float64 + ensure_posdef::Bool = true + n_samples::Int = 1 + subsampling::Sub = nothing +end + +struct KLMinNaturalGradDescentState{Q,P,S,Prec,GradBuf,HessBuf} + q::Q + prob::P + prec::Prec + iteration::Int + sub_st::S + grad_buf::GradBuf + hess_buf::HessBuf +end + +function init( + rng::Random.AbstractRNG, + alg::KLMinNaturalGradDescent, + q_init::MvLocationScale{<:LowerTriangular,<:Normal,L}, + prob, +) where {L} + sub = alg.subsampling + n_dims = LogDensityProblems.dimension(prob) + capability = LogDensityProblems.capabilities(typeof(prob)) + if capability < LogDensityProblems.LogDensityOrder{1}() + throw( + ArgumentError( + "`KLMinNaturalGradDescent` requires at least first-order differentiation capability. The capability of the supplied `LogDensityProblem` is $(capability).", + ), + ) + end + sub_st = isnothing(sub) ? 
nothing : init(rng, sub) + grad_buf = Vector{eltype(q_init.location)}(undef, n_dims) + hess_buf = Matrix{eltype(q_init.location)}(undef, n_dims, n_dims) + return KLMinNaturalGradDescentState( + q_init, prob, cov(q_init), 0, sub_st, grad_buf, hess_buf + ) +end + +output(::KLMinNaturalGradDescent, state) = state.q + +function step( + rng::Random.AbstractRNG, + alg::KLMinNaturalGradDescent, + state, + callback, + objargs...; + kwargs..., +) + (; ensure_posdef, n_samples, stepsize, subsampling) = alg + (; q, prob, prec, iteration, sub_st, grad_buf, hess_buf) = state + + m = mean(q) + S = prec + η = convert(eltype(m), stepsize) + iteration += 1 + + # Maybe apply subsampling + prob_sub, sub_st′, sub_inf = if isnothing(subsampling) + prob, sub_st, NamedTuple() + else + batch, sub_st′, sub_inf = step(rng, subsampling, sub_st) + prob_sub = subsample(prob, batch) + prob_sub, sub_st′, sub_inf + end + + # Estimate the Wasserstein gradient + logπ_avg, grad_buf, hess_buf = gaussian_expectation_gradient_and_hessian!( + rng, q, n_samples, grad_buf, hess_buf, prob_sub + ) + + # Compute natural gradient descent update + S′ = Hermitian(((1 - η) * S + η * (-hess_buf))) + if ensure_posdef + G_hat = S - (-hess_buf) + S′ += η^2 / 2 * Hermitian(G_hat * (S′ \ G_hat)) + end + m′ = m - η * (S′ \ (-grad_buf)) + + q′ = MvLocationScale(m′, inv(cholesky(S′).L), q.dist) + + state = KLMinNaturalGradDescentState( + q′, prob, S′, iteration, sub_st′, grad_buf, hess_buf + ) + elbo = logπ_avg + entropy(q′) + info = merge((elbo=elbo,), sub_inf) + + if !isnothing(callback) + info′ = callback(; rng, iteration, q, info) + info = !isnothing(info′) ? merge(info′, info) : info + end + state, false, info +end + +""" + estimate_objective([rng,] alg, q, prob; n_samples) + +Estimate the ELBO of the variational approximation `q` against the target log-density `prob`. + +# Arguments +- `rng::Random.AbstractRNG`: Random number generator. +- `alg::KLMinNaturalGradDescent`: Variational inference algorithm. +- `q::MvLocationScale{<:Any,<:Normal,<:Any}`: Gaussian variational approximation. +- `prob`: The target log-joint likelihood implementing the `LogDensityProblem` interface. + +# Keyword Arguments +- `n_samples::Int`: Number of Monte Carlo samples for estimating the objective. (default: Same as the the number of samples used for estimating the gradient during optimization.) + +# Returns +- `obj_est`: Estimate of the objective value. 
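A usage sketch with subsampling (here `q`, `prob`, and `n_data` are placeholders; `prob` is assumed to support `AdvancedVI.subsample` over the data indices):

    sub = ReshufflingBatchSubsampling(1:n_data, 2)
    alg = KLMinNaturalGradDescent(; stepsize=1e-3, n_samples=4, subsampling=sub)
    elbo_est = estimate_objective(alg, q, prob; n_samples=10_000)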
+""" +function estimate_objective( + rng::Random.AbstractRNG, + alg::KLMinNaturalGradDescent, + q::MvLocationScale{S,<:Normal,L}, + prob; + n_samples::Int=alg.n_samples, +) where {S,L} + obj = RepGradELBO(n_samples; entropy=MonteCarloEntropy()) + if isnothing(alg.subsampling) + return estimate_objective(rng, obj, q, prob) + else + sub = alg.subsampling + sub_st = init(rng, sub) + return mapreduce(+, 1:length(sub)) do _ + batch, sub_st, _ = step(rng, sub, sub_st) + prob_sub = subsample(prob, batch) + estimate_objective(rng, obj, q, prob_sub) / length(sub) + end + end +end diff --git a/test/algorithms/klminnaturalgraddescent.jl b/test/algorithms/klminnaturalgraddescent.jl new file mode 100644 index 00000000..e5cd5b55 --- /dev/null +++ b/test/algorithms/klminnaturalgraddescent.jl @@ -0,0 +1,158 @@ + +@testset "KLMinNaturalGradDescent" begin + begin + modelstats = normal_meanfield(Random.default_rng(), Float64; capability=2) + (; model, n_dims, μ_true, L_true) = modelstats + + alg = KLMinNaturalGradDescent(; n_samples=10, stepsize=1e-3) + L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims)) + q0 = FullRankGaussian(zeros(Float64, n_dims), L0) + + @testset "callback" begin + T = 10 + callback(; iteration, kwargs...) = (iteration_check=iteration,) + _, info, _ = optimize(alg, T, model, q0; callback, show_progress=PROGRESS) + @test [i.iteration_check for i in info] == 1:T + end + + @testset "estimate_objective" begin + q_true = FullRankGaussian(μ_true, LowerTriangular(Matrix(L_true))) + + obj_est = estimate_objective(alg, q_true, model) + @test isfinite(obj_est) + + obj_est = estimate_objective(alg, q_true, model; n_samples=10^5) + @test obj_est ≈ 0 atol=1e-2 + end + + @testset "determinism" begin + seed = (0x38bef07cf9cc549d) + rng = StableRNG(seed) + T = 10 + + q_avg, _, _ = optimize(rng, alg, T, model, q0; show_progress=PROGRESS) + μ = q_avg.location + L = q_avg.scale + + rng_repl = StableRNG(seed) + q_avg, _, _ = optimize(rng_repl, alg, T, model, q0; show_progress=PROGRESS) + μ_repl = q_avg.location + L_repl = q_avg.scale + @test μ == μ_repl + @test L == L_repl + end + end + + @testset "error low capability" begin + modelstats = normal_meanfield(Random.default_rng(), Float64; capability=0) + (; model, n_dims) = modelstats + + alg = KLMinNaturalGradDescent(; n_samples=10, stepsize=1.0) + + L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims)) + q0 = FullRankGaussian(zeros(Float64, n_dims), L0) + @test_throws "first-order" optimize(alg, 1, model, q0) + end + + @testset "type stability type=$(realtype), capability=$(capability)" for realtype in [ + Float64, Float32 + ], + capability in [1, 2] + + modelstats = normal_meanfield(Random.default_rng(), realtype; capability) + (; model, μ_true, L_true, n_dims, strong_convexity, is_meanfield) = modelstats + + alg = KLMinNaturalGradDescent(; n_samples=10, stepsize=1e-3) + T = 10 + + L0 = LowerTriangular(Matrix{realtype}(I, n_dims, n_dims)) + q0 = FullRankGaussian(zeros(realtype, n_dims), L0) + + q, _, _ = optimize(alg, T, model, q0; show_progress=PROGRESS) + + @test eltype(q.location) == eltype(μ_true) + @test eltype(q.scale) == eltype(L_true) + end + + @testset "convergence capability=$(capability)" for capability in [1, 2] + modelstats = normal_meanfield(Random.default_rng(), Float64; capability) + (; model, μ_true, L_true, n_dims, strong_convexity, is_meanfield) = modelstats + + T = 1000 + alg = KLMinNaturalGradDescent(; n_samples=10, stepsize=1e-3) + + q_avg, _, _ = optimize(alg, T, model, q0; show_progress=PROGRESS) + + Δλ0 = sum(abs2, 
q0.location - μ_true) + sum(abs2, q0.scale - L_true) + Δλ = sum(abs2, q_avg.location - μ_true) + sum(abs2, q_avg.scale - L_true) + + @test Δλ ≤ 0.1*Δλ0 + end + + @testset "subsampling" begin + n_data = 8 + + @testset "estimate_objective batchsize=$(batchsize)" for batchsize in [1, 3, 4] + modelstats = subsamplednormal(Random.default_rng(), n_data) + (; model, n_dims, μ_true, L_true) = modelstats + + L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims)) + q0 = FullRankGaussian(zeros(Float64, n_dims), L0) + + subsampling = ReshufflingBatchSubsampling(1:n_data, batchsize) + alg = KLMinNaturalGradDescent(; n_samples=10, stepsize=1e-3) + alg_sub = KLMinNaturalGradDescent(; n_samples=10, stepsize=1e-3, subsampling) + + obj_full = estimate_objective(alg, q0, model; n_samples=10^5) + obj_sub = estimate_objective(alg_sub, q0, model; n_samples=10^5) + @test obj_full ≈ obj_sub rtol=0.1 + end + + @testset "determinism" begin + seed = (0x38bef07cf9cc549d) + rng = StableRNG(seed) + + modelstats = subsamplednormal(Random.default_rng(), n_data) + (; model, n_dims, μ_true, L_true) = modelstats + + L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims)) + q0 = FullRankGaussian(zeros(Float64, n_dims), L0) + + T = 10 + batchsize = 3 + subsampling = ReshufflingBatchSubsampling(1:n_data, batchsize) + alg_sub = KLMinNaturalGradDescent(; n_samples=10, stepsize=1e-3, subsampling) + + q, _, _ = optimize(rng, alg_sub, T, model, q0; show_progress=PROGRESS) + μ = q.location + L = q.scale + + rng_repl = StableRNG(seed) + q, _, _ = optimize(rng_repl, alg_sub, T, model, q0; show_progress=PROGRESS) + μ_repl = q.location + L_repl = q.scale + @test μ == μ_repl + @test L == L_repl + end + + @testset "convergence capability=$(capability)" for capability in [1, 2] + modelstats = subsamplednormal(Random.default_rng(), n_data; capability) + (; model, n_dims, μ_true, L_true) = modelstats + + L0 = LowerTriangular(Matrix{Float64}(I, n_dims, n_dims)) + q0 = FullRankGaussian(zeros(Float64, n_dims), L0) + + T = 1000 + batchsize = 1 + subsampling = ReshufflingBatchSubsampling(1:n_data, batchsize) + alg_sub = KLMinNaturalGradDescent(; n_samples=10, stepsize=1e-2, subsampling) + + q, stats, _ = optimize(alg_sub, T, model, q0; show_progress=PROGRESS) + + Δλ0 = sum(abs2, q0.location - μ_true) + sum(abs2, q0.scale - L_true) + Δλ = sum(abs2, q.location - μ_true) + sum(abs2, q.scale - L_true) + + @test Δλ ≤ 0.1*Δλ0 + end + end +end diff --git a/test/runtests.jl b/test/runtests.jl index db66cb77..5840eecb 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -67,6 +67,7 @@ if GROUP == "All" || GROUP == "GENERAL" include("algorithms/klminwassfwdbwd.jl") include("algorithms/klminsqrtnaturalgraddescent.jl") + include("algorithms/klminnaturalgraddescent.jl") end if GROUP == "All" || GROUP == "AD" From 8267a98d3430e64befa6b0065bab721f897c8bd9 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Fri, 7 Nov 2025 03:58:33 -0500 Subject: [PATCH 05/16] update docstrings remove redundant comments --- src/algorithms/klminnaturalgraddescent.jl | 6 ++---- src/algorithms/klminsqrtnaturalgraddescent.jl | 13 ++++++------- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/src/algorithms/klminnaturalgraddescent.jl b/src/algorithms/klminnaturalgraddescent.jl index 5a296fac..d639579a 100644 --- a/src/algorithms/klminnaturalgraddescent.jl +++ b/src/algorithms/klminnaturalgraddescent.jl @@ -12,9 +12,9 @@ This, however, involves an additional set of matrix-matrix system solves that co Denoting the target log-density as \$\$ \\log \\pi \$\$ and the 
current variational approximation as \$\$q\$\$, the original algorithm requires estimating the quantity \$\$ \\mathbb{E}_q \\nabla^2 \\log \\pi \$\$. If the target `LogDensityProblem` associated with \$\$ \\log \\pi \$\$ has second-order differentiation [capability](https://www.tamaspapp.eu/LogDensityProblems.jl/dev/#LogDensityProblems.capabilities), we use the sample average of the Hessian. If the target has only first-order capability, we use Stein's identity. # (Keyword) Arguments -- `stepsize::Float64`: Step size of stochastic proximal gradient descent. +- `stepsize::Float64`: Step size. - `ensure_posdef::Bool`: Ensure that the updated precision preserves positive definiteness. (default: `true`) -- `n_samples::Int`: Number of samples used to estimate the Wasserstein gradient. (default: `1`) +- `n_samples::Int`: Number of samples used to estimate the natural gradient. (default: `1`) - `subsampling::Union{Nothing,<:AbstractSubsampling}`: Optional subsampling strategy. !!! note @@ -108,12 +108,10 @@ function step( prob_sub, sub_st′, sub_inf end - # Estimate the Wasserstein gradient logπ_avg, grad_buf, hess_buf = gaussian_expectation_gradient_and_hessian!( rng, q, n_samples, grad_buf, hess_buf, prob_sub ) - # Compute natural gradient descent update S′ = Hermitian(((1 - η) * S + η * (-hess_buf))) if ensure_posdef G_hat = S - (-hess_buf) diff --git a/src/algorithms/klminsqrtnaturalgraddescent.jl b/src/algorithms/klminsqrtnaturalgraddescent.jl index 0a047e94..68a673e4 100644 --- a/src/algorithms/klminsqrtnaturalgraddescent.jl +++ b/src/algorithms/klminsqrtnaturalgraddescent.jl @@ -1,15 +1,15 @@ """ - KLMinSqrtNaturalGradDescent(n_samples, stepsize, subsampling) - KLMinSqrtNaturalGradDescent(; n_samples, stepsize, subsampling) + KLMinSqrtNaturalGradDescent(stepsize, n_samples, subsampling) + KLMinSqrtNaturalGradDescent(; stepsize, n_samples, subsampling) -KL divergence minimization algorithm obtained by discretizing the natural gradient flow under the square-root parameterization[^KMKL2025][^LDENKTM2024][^LDLNKS2023]. +KL divergence minimization algorithm obtained by discretizing the natural gradient flow (the Riemmanian gradient flow with the Fisher information matrix as the metric tensor) under the square-root parameterization[^KMKL2025][^LDENKTM2024][^LDLNKS2023][^T2025]. Denoting the target log-density as \$\$ \\log \\pi \$\$ and the current variational approximation as \$\$q\$\$, the original algorithm requires estimating the quantity \$\$ \\mathbb{E}_q \\nabla^2 \\log \\pi \$\$. If the target `LogDensityProblem` associated with \$\$ \\log \\pi \$\$ has second-order differentiation [capability](https://www.tamaspapp.eu/LogDensityProblems.jl/dev/#LogDensityProblems.capabilities), we use the sample average of the Hessian. If the target has only first-order capability, we use Stein's identity. # (Keyword) Arguments -- `n_samples::Int`: Number of samples used to estimate the Wasserstein gradient. (default: `1`) -- `stepsize::Float64`: Step size of stochastic proximal gradient descent. +- `stepsize::Float64`: Step size. +- `n_samples::Int`: Number of samples used to estimate the natural gradient. (default: `1`) - `subsampling::Union{Nothing,<:AbstractSubsampling}`: Optional subsampling strategy. !!! 
note @@ -36,8 +36,8 @@ The keyword arguments are as follows: """ @kwdef struct KLMinSqrtNaturalGradDescent{Sub<:Union{Nothing,<:AbstractSubsampling}} <: AbstractVariationalAlgorithm - n_samples::Int = 1 stepsize::Float64 + n_samples::Int = 1 subsampling::Sub = nothing end @@ -99,7 +99,6 @@ function step( prob_sub, sub_st′, sub_inf end - # Estimate the Wasserstein gradient logπ_avg, grad_buf, hess_buf = gaussian_expectation_gradient_and_hessian!( rng, q, n_samples, grad_buf, hess_buf, prob_sub ) From f3790c3bf0fa29aed88186c4ccbb508d4f842fc0 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Fri, 7 Nov 2025 04:12:21 -0500 Subject: [PATCH 06/16] run formatter Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- src/algorithms/klminwassfwdbwd.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/algorithms/klminwassfwdbwd.jl b/src/algorithms/klminwassfwdbwd.jl index 7f6c27f2..8d52bcdd 100644 --- a/src/algorithms/klminwassfwdbwd.jl +++ b/src/algorithms/klminwassfwdbwd.jl @@ -41,7 +41,6 @@ The keyword arguments are as follows: subsampling::Sub = nothing end - struct KLMinWassFwdBwdState{Q,P,S,Sigma,GradBuf,HessBuf} q::Q prob::P From 3ba84018e0f2daa4d4daac646fe725a07a93c3d7 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Fri, 7 Nov 2025 04:38:12 -0500 Subject: [PATCH 07/16] update history --- HISTORY.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/HISTORY.md b/HISTORY.md index 1c4e1925..18ef01ab 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -4,6 +4,8 @@ This update adds new variational inference algorithms in light of the flexibilit Specifically, the following measure-space optimization algorithms have been added: - `KLMinWassFwdBwd` + - `KLMinNaturalGradDescent` + - `KLMinSqrtNaturalGradDescent` # Release 0.5 From bca8f55b1b98861e7e156bd7644b458a52284c14 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Tue, 11 Nov 2025 11:21:15 -0500 Subject: [PATCH 08/16] fix gauss expected grad hess, use in-place operations, add tests --- src/algorithms/gauss_expected_grad_hess.jl | 37 +++++++++++--- test/general/gauss_expected_grad_hess.jl | 56 ++++++++++++++++++++++ test/runtests.jl | 2 + 3 files changed, 88 insertions(+), 7 deletions(-) create mode 100644 test/general/gauss_expected_grad_hess.jl diff --git a/src/algorithms/gauss_expected_grad_hess.jl b/src/algorithms/gauss_expected_grad_hess.jl index bcd124c8..f6a194bd 100644 --- a/src/algorithms/gauss_expected_grad_hess.jl +++ b/src/algorithms/gauss_expected_grad_hess.jl @@ -4,6 +4,9 @@ Estimate the expectations of the gradient and Hessians of the log-density of `prob` taken over the Gaussian `q`. For estimating the expectation of the Hessian, if `prob` has second-order differentiation capability, this function uses the sample average of the Hessian. Otherwise, it uses Stein's identity. +!!! warning + The resulting `hess_buf` may not be perfectly symmetric due to numerical issues. It is therefore useful to wrap it in a `Symmetric` before usage. + # Arguments - `rng::Random.AbstractRNG`: Random number generator. - `q::MvLocationScale{<:LowerTriangular,<:Normal,L}`: Gaussian to take expectation over. @@ -26,29 +29,49 @@ function gaussian_expectation_gradient_and_hessian!( if LogDensityProblems.capabilities(typeof(prob)) ≤ LogDensityProblems.LogDensityOrder{1}() - # Use Stein's identity + # First-order-only: use Stein/Price identity for the Hessian + # + # E_{z ~ N(m, CC')} ∇2 log π(z) + # = E_{z ~ N(m, CC')} (CC')^{-1} (z - m) ∇ log π(z)T + # = E_{u ~ N(0, I)} C \ (u ∇ log π(z)T) . 
+ # + # Algorithmically, draw u ~ N(0, I), z = C u + m, where C = q.scale. + # Accumulate A = E[ u ∇ log π(z)T ], then map back: H = C \ A. d = LogDensityProblems.dimension(prob) u = randn(rng, T, d, n_samples) - z = q.scale*u .+ q.location + m, C = q.location, q.scale + z = C*u .+ m for b in 1:n_samples zb, ub = view(z, :, b), view(u, :, b) logπ, ∇logπ = LogDensityProblems.logdensity_and_gradient(prob, zb) logπ_avg += logπ/n_samples - grad_buf += ∇logπ/n_samples - hess_buf += ub*(∇logπ/n_samples)' + + rdiv!(∇logπ, n_samples) + ∇logπ_div_nsamples = ∇logπ + + grad_buf[:] .+= ∇logπ_div_nsamples + hess_buf[:, :] .+= ub*∇logπ_div_nsamples' end + hess_buf[:, :] .= C \ hess_buf return logπ_avg, grad_buf, hess_buf else - # Use sample average of the Hessian. + # Second-order: use naive sample average z = rand(rng, q, n_samples) for b in 1:n_samples zb = view(z, :, b) logπ, ∇logπ, ∇2logπ = LogDensityProblems.logdensity_gradient_and_hessian( prob, zb ) + + rdiv!(∇logπ, n_samples) + ∇logπ_div_nsamples = ∇logπ + + rdiv!(∇2logπ, n_samples) + ∇2logπ_div_nsamples = ∇2logπ + logπ_avg += logπ/n_samples - grad_buf += ∇logπ/n_samples - hess_buf += ∇2logπ/n_samples + grad_buf[:] .+= ∇logπ_div_nsamples + hess_buf[:, :] .+= ∇2logπ_div_nsamples end return logπ_avg, grad_buf, hess_buf end diff --git a/test/general/gauss_expected_grad_hess.jl b/test/general/gauss_expected_grad_hess.jl new file mode 100644 index 00000000..8cb8e990 --- /dev/null +++ b/test/general/gauss_expected_grad_hess.jl @@ -0,0 +1,56 @@ + +using BenchmarkTools + +struct TestQuad{S,C} + Σ::S + cap::C +end + +function LogDensityProblems.logdensity(model::TestQuad, x) + Σ = model.Σ + return -x'*Σ*x/2 +end + +function LogDensityProblems.logdensity_and_gradient(model::TestQuad, x) + Σ = model.Σ + return (LogDensityProblems.logdensity(model, x), -Σ*x) +end + +function LogDensityProblems.logdensity_gradient_and_hessian(model::TestQuad, x) + Σ = model.Σ + ℓp, ∇ℓp = LogDensityProblems.logdensity_and_gradient(model, x) + return (ℓp, ∇ℓp, -Σ) +end + +function LogDensityProblems.dimension(model::TestQuad) + return size(model.Σ, 1) +end + +function LogDensityProblems.capabilities(::Type{TestQuad{S,C}}) where {S,C} + return C() +end + +@testset "gauss_expected_grad_hess" begin + n_samples = 10^6 + d = 2 + Σ = [2.0 -0.1; -0.1 2.0] + q = FullRankGaussian(ones(d), LowerTriangular(diagm(fill(0.1, d)))) + + # True expected gradient is E_{x ~ N(μ, 1)} -Σ x = -Σ μ + # True expected Hessian is E_{x ~ N(μ, 1)} -Σ = -Σ + E_∇ℓπ = -Σ*q.location + E_∇2ℓπ = -Σ + + @testset "$(cap)-order capability" for cap in [ + LogDensityProblems.LogDensityOrder{1}(), LogDensityProblems.LogDensityOrder{2}() + ] + grad_buf = zeros(d) + hess_buf = zeros(d, d) + prob = TestQuad(Σ, cap) + display(@benchmark AdvancedVI.gaussian_expectation_gradient_and_hessian!( + Random.default_rng(), $q, $n_samples, $grad_buf, $hess_buf, $prob + )) + @test grad_buf ≈ E_∇ℓπ atol=1e-1 + @test hess_buf ≈ E_∇2ℓπ atol=1e-1 + end +end diff --git a/test/runtests.jl b/test/runtests.jl index 5840eecb..105b5bec 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -65,6 +65,7 @@ if GROUP == "All" || GROUP == "GENERAL" include("families/location_scale.jl") include("families/location_scale_low_rank.jl") + include("general/gauss_expected_grad_hess.jl") include("algorithms/klminwassfwdbwd.jl") include("algorithms/klminsqrtnaturalgraddescent.jl") include("algorithms/klminnaturalgraddescent.jl") @@ -85,3 +86,4 @@ if GROUP == "All" || GROUP == "AD" include("algorithms/scoregradelbo_locationscale.jl") 
include("algorithms/scoregradelbo_locationscale_bijectors.jl") end + From 82e9f156cc593890ba38a33c2f5fa859a27a3f48 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Tue, 11 Nov 2025 11:22:33 -0500 Subject: [PATCH 09/16] fix always wrap `hess_buf` with a `Symmetric` (not `Hermitian`) --- src/algorithms/klminnaturalgraddescent.jl | 12 ++++++------ src/algorithms/klminsqrtnaturalgraddescent.jl | 2 +- src/algorithms/klminwassfwdbwd.jl | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/algorithms/klminnaturalgraddescent.jl b/src/algorithms/klminnaturalgraddescent.jl index d639579a..6a53459d 100644 --- a/src/algorithms/klminnaturalgraddescent.jl +++ b/src/algorithms/klminnaturalgraddescent.jl @@ -1,7 +1,7 @@ """ - KLMinNaturalGradDescent(stepsize, ensure_posdef, n_samples, subsampling) - KLMinNaturalGradDescent(; stepsize, ensure_posdef, n_samples, subsampling) + KLMinNaturalGradDescent(stepsize, n_samples, ensure_posdef, subsampling) + KLMinNaturalGradDescent(; stepsize, n_samples, ensure_posdef, subsampling) KL divergence minimization by running natural gradient descent[^KL2017][^KR2023], also called variational online Newton. This algorithm can be viewed as an instantiation of mirror descent, where the Bregman divergence is chosen to be the KL divergence. @@ -13,8 +13,8 @@ Denoting the target log-density as \$\$ \\log \\pi \$\$ and the current variatio # (Keyword) Arguments - `stepsize::Float64`: Step size. -- `ensure_posdef::Bool`: Ensure that the updated precision preserves positive definiteness. (default: `true`) - `n_samples::Int`: Number of samples used to estimate the natural gradient. (default: `1`) +- `ensure_posdef::Bool`: Ensure that the updated precision preserves positive definiteness. (default: `true`) - `subsampling::Union{Nothing,<:AbstractSubsampling}`: Optional subsampling strategy. !!! 
note @@ -42,8 +42,8 @@ The keyword arguments are as follows: @kwdef struct KLMinNaturalGradDescent{Sub<:Union{Nothing,<:AbstractSubsampling}} <: AbstractVariationalAlgorithm stepsize::Float64 - ensure_posdef::Bool = true n_samples::Int = 1 + ensure_posdef::Bool = true subsampling::Sub = nothing end @@ -112,9 +112,9 @@ function step( rng, q, n_samples, grad_buf, hess_buf, prob_sub ) - S′ = Hermitian(((1 - η) * S + η * (-hess_buf))) + S′ = Hermitian(((1 - η) * S + η * Symmetric(-hess_buf))) if ensure_posdef - G_hat = S - (-hess_buf) + G_hat = S - Symmetric(-hess_buf) S′ += η^2 / 2 * Hermitian(G_hat * (S′ \ G_hat)) end m′ = m - η * (S′ \ (-grad_buf)) diff --git a/src/algorithms/klminsqrtnaturalgraddescent.jl b/src/algorithms/klminsqrtnaturalgraddescent.jl index 68a673e4..fd53cd94 100644 --- a/src/algorithms/klminsqrtnaturalgraddescent.jl +++ b/src/algorithms/klminsqrtnaturalgraddescent.jl @@ -103,7 +103,7 @@ function step( rng, q, n_samples, grad_buf, hess_buf, prob_sub ) - CtHCmI = C'*-hess_buf*C - I + CtHCmI = C'*Symmetric(-hess_buf)*C - I CtHCmI_tril = LowerTriangular(tril(CtHCmI) - Diagonal(diag(CtHCmI))/2) m′ = m - η * C * (C' * -grad_buf) diff --git a/src/algorithms/klminwassfwdbwd.jl b/src/algorithms/klminwassfwdbwd.jl index 8d52bcdd..daefadd0 100644 --- a/src/algorithms/klminwassfwdbwd.jl +++ b/src/algorithms/klminwassfwdbwd.jl @@ -101,7 +101,7 @@ function step( ) m′ = m - η * (-grad_buf) - M = I - η*Hermitian(-hess_buf) + M = I - η*Symmetric(-hess_buf) Σ_half = Hermitian(M*Σ*M) # Compute the JKO proximal operator From 8fdecb16a10ba2d5c98123d2d3758d28089529b1 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Tue, 11 Nov 2025 11:26:36 -0500 Subject: [PATCH 10/16] Apply suggestion from @sunxd3 Co-authored-by: Xianda Sun <5433119+sunxd3@users.noreply.github.com> --- src/algorithms/klminsqrtnaturalgraddescent.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/algorithms/klminsqrtnaturalgraddescent.jl b/src/algorithms/klminsqrtnaturalgraddescent.jl index fd53cd94..28c5c1c3 100644 --- a/src/algorithms/klminsqrtnaturalgraddescent.jl +++ b/src/algorithms/klminsqrtnaturalgraddescent.jl @@ -13,7 +13,7 @@ Denoting the target log-density as \$\$ \\log \\pi \$\$ and the current variatio - `subsampling::Union{Nothing,<:AbstractSubsampling}`: Optional subsampling strategy. !!! note - The `subsampling` strategy is only applied to the target `LogDensityProblem` but not to the variational approximation `q`. That is, `KLMinSqrtVarNewton` does not support amortization or structured variational families. + The `subsampling` strategy is only applied to the target `LogDensityProblem` but not to the variational approximation `q`. That is, `KLMinSqrtNaturalGradDescent` does not support amortization or structured variational families. # Output - `q`: The last iterate of the algorithm. 
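For readers tracing the `KLMinNaturalGradDescent` code touched by the surrounding commits, the `step` body amounts to the following update (a reading of the implementation, with \hat{g}_t and \hat{H}_t denoting the Monte Carlo estimates of \mathbb{E}_q \nabla \log \pi and \mathbb{E}_q \nabla^2 \log \pi returned by `gaussian_expectation_gradient_and_hessian!`, S_t the precision-matrix state `state.prec`, and m_t the location):

    \tilde{S}_{t+1} = (1 - \eta)\, S_t + \eta\, (-\hat{H}_t)
    S_{t+1} = \tilde{S}_{t+1} + \tfrac{\eta^2}{2}\, \hat{G}_t\, \tilde{S}_{t+1}^{-1}\, \hat{G}_t, \qquad \hat{G}_t = S_t - (-\hat{H}_t)
    m_{t+1} = m_t + \eta\, S_{t+1}^{-1}\, \hat{g}_t

The middle correction line is applied only when `ensure_posdef=true`; otherwise S_{t+1} = \tilde{S}_{t+1}. The new lower-triangular scale is then taken as `inv(cholesky(S′).L)`.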
From 3ff2c0f42fdcb1f25677477e2c83ab4a2f818ce7 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Tue, 11 Nov 2025 11:26:48 -0500 Subject: [PATCH 11/16] Apply suggestion from @sunxd3 Co-authored-by: Xianda Sun <5433119+sunxd3@users.noreply.github.com> --- src/algorithms/klminsqrtnaturalgraddescent.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/algorithms/klminsqrtnaturalgraddescent.jl b/src/algorithms/klminsqrtnaturalgraddescent.jl index 28c5c1c3..22531bae 100644 --- a/src/algorithms/klminsqrtnaturalgraddescent.jl +++ b/src/algorithms/klminsqrtnaturalgraddescent.jl @@ -3,7 +3,7 @@ KLMinSqrtNaturalGradDescent(stepsize, n_samples, subsampling) KLMinSqrtNaturalGradDescent(; stepsize, n_samples, subsampling) -KL divergence minimization algorithm obtained by discretizing the natural gradient flow (the Riemmanian gradient flow with the Fisher information matrix as the metric tensor) under the square-root parameterization[^KMKL2025][^LDENKTM2024][^LDLNKS2023][^T2025]. +KL divergence minimization algorithm obtained by discretizing the natural gradient flow (the Riemannian gradient flow with the Fisher information matrix as the metric tensor) under the square-root parameterization[^KMKL2025][^LDENKTM2024][^LDLNKS2023][^T2025]. Denoting the target log-density as \$\$ \\log \\pi \$\$ and the current variational approximation as \$\$q\$\$, the original algorithm requires estimating the quantity \$\$ \\mathbb{E}_q \\nabla^2 \\log \\pi \$\$. If the target `LogDensityProblem` associated with \$\$ \\log \\pi \$\$ has second-order differentiation [capability](https://www.tamaspapp.eu/LogDensityProblems.jl/dev/#LogDensityProblems.capabilities), we use the sample average of the Hessian. If the target has only first-order capability, we use Stein's identity. 
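Similarly, a sketch of the square-root update implemented in `klminsqrtnaturalgraddescent.jl` (again a reading of the code; C_t is the lower-triangular scale of the current `FullRankGaussian`, and \hat{g}_t, \hat{H}_t are the Monte Carlo gradient and Hessian estimates):

    m_{t+1} = m_t + \eta\, C_t C_t^\top \hat{g}_t
    C_{t+1} = C_t - \eta\, C_t\, \mathrm{tril}_{1/2}\!\left( -C_t^\top \hat{H}_t C_t - I \right)

where \mathrm{tril}_{1/2}(A) keeps the strictly lower-triangular part of A plus half of its diagonal, so that C_{t+1} remains lower triangular.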
From 45b998958dd68939301b957b8e495f9302c4a582 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Tue, 11 Nov 2025 11:27:32 -0500 Subject: [PATCH 12/16] Apply suggestion from @github-actions[bot] Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- test/runtests.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/test/runtests.jl b/test/runtests.jl index 105b5bec..0d02d016 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -86,4 +86,3 @@ if GROUP == "All" || GROUP == "AD" include("algorithms/scoregradelbo_locationscale.jl") include("algorithms/scoregradelbo_locationscale_bijectors.jl") end - From 75e489dff789fed6ab60e34d251220e8199578c8 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Tue, 11 Nov 2025 11:27:58 -0500 Subject: [PATCH 13/16] fix bug in init of klminnaturalgraddescent --- src/algorithms/klminnaturalgraddescent.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/algorithms/klminnaturalgraddescent.jl b/src/algorithms/klminnaturalgraddescent.jl index 6a53459d..4756d1a9 100644 --- a/src/algorithms/klminnaturalgraddescent.jl +++ b/src/algorithms/klminnaturalgraddescent.jl @@ -77,7 +77,7 @@ function init( grad_buf = Vector{eltype(q_init.location)}(undef, n_dims) hess_buf = Matrix{eltype(q_init.location)}(undef, n_dims, n_dims) return KLMinNaturalGradDescentState( - q_init, prob, cov(q_init), 0, sub_st, grad_buf, hess_buf + q_init, prob, inv(cov(q_init)), 0, sub_st, grad_buf, hess_buf ) end From 6f55a5c281b7b0d193795261db2f64969739dc3e Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Tue, 11 Nov 2025 11:28:08 -0500 Subject: [PATCH 14/16] remove unintended benchmark code --- test/general/gauss_expected_grad_hess.jl | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/test/general/gauss_expected_grad_hess.jl b/test/general/gauss_expected_grad_hess.jl index 8cb8e990..ee254a6a 100644 --- a/test/general/gauss_expected_grad_hess.jl +++ b/test/general/gauss_expected_grad_hess.jl @@ -1,6 +1,4 @@ -using BenchmarkTools - struct TestQuad{S,C} Σ::S cap::C @@ -47,9 +45,9 @@ end grad_buf = zeros(d) hess_buf = zeros(d, d) prob = TestQuad(Σ, cap) - display(@benchmark AdvancedVI.gaussian_expectation_gradient_and_hessian!( - Random.default_rng(), $q, $n_samples, $grad_buf, $hess_buf, $prob - )) + AdvancedVI.gaussian_expectation_gradient_and_hessian!( + Random.default_rng(), q, n_samples, grad_buf, hess_buf, prob + ) @test grad_buf ≈ E_∇ℓπ atol=1e-1 @test hess_buf ≈ E_∇2ℓπ atol=1e-1 end From 78d3559edf48bae7eb2ff4e90cd7a48850c3c56b Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Tue, 11 Nov 2025 11:29:53 -0500 Subject: [PATCH 15/16] update docs --- src/algorithms/gauss_expected_grad_hess.jl | 4 +++- src/algorithms/klminnaturalgraddescent.jl | 4 +++- src/algorithms/klminsqrtnaturalgraddescent.jl | 4 +++- src/algorithms/klminwassfwdbwd.jl | 4 +++- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/algorithms/gauss_expected_grad_hess.jl b/src/algorithms/gauss_expected_grad_hess.jl index f6a194bd..457439d1 100644 --- a/src/algorithms/gauss_expected_grad_hess.jl +++ b/src/algorithms/gauss_expected_grad_hess.jl @@ -2,7 +2,9 @@ """ gaussian_expectation_gradient_and_hessian!(rng, q, n_samples, grad_buf, hess_buf, prob) -Estimate the expectations of the gradient and Hessians of the log-density of `prob` taken over the Gaussian `q`. For estimating the expectation of the Hessian, if `prob` has second-order differentiation capability, this function uses the sample average of the Hessian. 
Otherwise, it uses Stein's identity. +Estimate the expectations of the gradient and Hessians of the log-density of `prob` taken over the Gaussian `q`. +For estimating the expectation of the Hessian, if `prob` has second-order differentiation capability, this function uses the sample average of the Hessian. +Otherwise, it uses Stein's identity. !!! warning The resulting `hess_buf` may not be perfectly symmetric due to numerical issues. It is therefore useful to wrap it in a `Symmetric` before usage. diff --git a/src/algorithms/klminnaturalgraddescent.jl b/src/algorithms/klminnaturalgraddescent.jl index 4756d1a9..5412a532 100644 --- a/src/algorithms/klminnaturalgraddescent.jl +++ b/src/algorithms/klminnaturalgraddescent.jl @@ -9,7 +9,9 @@ This algorithm can be viewed as an instantiation of mirror descent, where the Br If the `ensure_posdef` argument is true, the algorithm applies the technique by Lin *et al.*[^LSK2020], where the precision matrix update includes an additional term that guarantees positive definiteness. This, however, involves an additional set of matrix-matrix system solves that could be costly. -Denoting the target log-density as \$\$ \\log \\pi \$\$ and the current variational approximation as \$\$q\$\$, the original algorithm requires estimating the quantity \$\$ \\mathbb{E}_q \\nabla^2 \\log \\pi \$\$. If the target `LogDensityProblem` associated with \$\$ \\log \\pi \$\$ has second-order differentiation [capability](https://www.tamaspapp.eu/LogDensityProblems.jl/dev/#LogDensityProblems.capabilities), we use the sample average of the Hessian. If the target has only first-order capability, we use Stein's identity. +The original algorithm requires estimating the quantity \$\$ \\mathbb{E}_q \\nabla^2 \\log \\pi \$\$, where \$\$ \\log \\pi \$\$ is the target log-density and \$\$q\$\$ is the current variational approximation. +If the target `LogDensityProblem` associated with \$\$ \\log \\pi \$\$ has second-order differentiation [capability](https://www.tamaspapp.eu/LogDensityProblems.jl/dev/#LogDensityProblems.capabilities), we use the sample average of the Hessian. +If the target has only first-order capability, we use Stein's identity. # (Keyword) Arguments - `stepsize::Float64`: Step size. diff --git a/src/algorithms/klminsqrtnaturalgraddescent.jl b/src/algorithms/klminsqrtnaturalgraddescent.jl index 22531bae..052e4124 100644 --- a/src/algorithms/klminsqrtnaturalgraddescent.jl +++ b/src/algorithms/klminsqrtnaturalgraddescent.jl @@ -5,7 +5,9 @@ KL divergence minimization algorithm obtained by discretizing the natural gradient flow (the Riemannian gradient flow with the Fisher information matrix as the metric tensor) under the square-root parameterization[^KMKL2025][^LDENKTM2024][^LDLNKS2023][^T2025]. -Denoting the target log-density as \$\$ \\log \\pi \$\$ and the current variational approximation as \$\$q\$\$, the original algorithm requires estimating the quantity \$\$ \\mathbb{E}_q \\nabla^2 \\log \\pi \$\$. If the target `LogDensityProblem` associated with \$\$ \\log \\pi \$\$ has second-order differentiation [capability](https://www.tamaspapp.eu/LogDensityProblems.jl/dev/#LogDensityProblems.capabilities), we use the sample average of the Hessian. If the target has only first-order capability, we use Stein's identity. +The original algorithm requires estimating the quantity \$\$ \\mathbb{E}_q \\nabla^2 \\log \\pi \$\$, where \$\$ \\log \\pi \$\$ is the target log-density and \$\$q\$\$ is the current variational approximation. 
+If the target `LogDensityProblem` associated with \$\$ \\log \\pi \$\$ has second-order differentiation [capability](https://www.tamaspapp.eu/LogDensityProblems.jl/dev/#LogDensityProblems.capabilities), we use the sample average of the Hessian. +If the target has only first-order capability, we use Stein's identity. # (Keyword) Arguments - `stepsize::Float64`: Step size. diff --git a/src/algorithms/klminwassfwdbwd.jl b/src/algorithms/klminwassfwdbwd.jl index daefadd0..14bd52db 100644 --- a/src/algorithms/klminwassfwdbwd.jl +++ b/src/algorithms/klminwassfwdbwd.jl @@ -5,7 +5,9 @@ KL divergence minimization by running stochastic proximal gradient descent (forward-backward splitting) in Wasserstein space[^DBCS2023]. -Denoting the target log-density as \$\$ \\log \\pi \$\$ and the current variational approximation as \$\$q\$\$, the original algorithm requires estimating the quantity \$\$ \\mathbb{E}_q \\nabla^2 \\log \\pi \$\$. If the target `LogDensityProblem` associated with \$\$ \\log \\pi \$\$ has second-order differentiation [capability](https://www.tamaspapp.eu/LogDensityProblems.jl/dev/#LogDensityProblems.capabilities), we use the sample average of the Hessian. If the target has only first-order capability, we use Stein's identity. +The original algorithm requires estimating the quantity \$\$ \\mathbb{E}_q \\nabla^2 \\log \\pi \$\$, where \$\$ \\log \\pi \$\$ is the target log-density and \$\$q\$\$ is the current variational approximation. +If the target `LogDensityProblem` associated with \$\$ \\log \\pi \$\$ has second-order differentiation [capability](https://www.tamaspapp.eu/LogDensityProblems.jl/dev/#LogDensityProblems.capabilities), we use the sample average of the Hessian. +If the target has only first-order capability, we use Stein's identity. # (Keyword) Arguments - `n_samples::Int`: Number of samples used to estimate the Wasserstein gradient. (default: `1`) From 020634db43b61670c68db1beb177b6cef39b9727 Mon Sep 17 00:00:00 2001 From: Kyurae Kim Date: Tue, 11 Nov 2025 11:36:35 -0500 Subject: [PATCH 16/16] fix relax Hermitian to Symmetric in NGVI ensure posdef --- src/algorithms/klminnaturalgraddescent.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/algorithms/klminnaturalgraddescent.jl b/src/algorithms/klminnaturalgraddescent.jl index 5412a532..88dafc5f 100644 --- a/src/algorithms/klminnaturalgraddescent.jl +++ b/src/algorithms/klminnaturalgraddescent.jl @@ -117,7 +117,7 @@ function step( S′ = Hermitian(((1 - η) * S + η * Symmetric(-hess_buf))) if ensure_posdef G_hat = S - Symmetric(-hess_buf) - S′ += η^2 / 2 * Hermitian(G_hat * (S′ \ G_hat)) + S′ += η^2 / 2 * Symmetric(G_hat * (S′ \ G_hat)) end m′ = m - η * (S′ \ (-grad_buf))
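To close, a self-contained usage sketch that mirrors the toy targets used in the tests above: a zero-mean Gaussian with second-order capability fitted by the new `KLMinNaturalGradDescent` algorithm. The type `ToyGaussian`, its field name, and every constant below are illustrative choices rather than part of the package.

    using AdvancedVI, LogDensityProblems, LinearAlgebra

    # Toy target: zero-mean Gaussian with precision matrix `prec`, exposing
    # log-density, gradient, and Hessian through the LogDensityProblems interface.
    struct ToyGaussian{M<:AbstractMatrix}
        prec::M
    end

    LogDensityProblems.logdensity(p::ToyGaussian, x) = -dot(x, p.prec, x) / 2

    function LogDensityProblems.logdensity_and_gradient(p::ToyGaussian, x)
        return (LogDensityProblems.logdensity(p, x), -(p.prec * x))
    end

    function LogDensityProblems.logdensity_gradient_and_hessian(p::ToyGaussian, x)
        ℓ, g = LogDensityProblems.logdensity_and_gradient(p, x)
        return (ℓ, g, -Matrix(p.prec))
    end

    LogDensityProblems.dimension(p::ToyGaussian) = size(p.prec, 1)

    function LogDensityProblems.capabilities(::Type{<:ToyGaussian})
        return LogDensityProblems.LogDensityOrder{2}()
    end

    n_dims = 3
    prob = ToyGaussian(Matrix(2.0I, n_dims, n_dims))
    q0 = FullRankGaussian(
        zeros(n_dims), LowerTriangular(Matrix{Float64}(I, n_dims, n_dims))
    )
    alg = KLMinNaturalGradDescent(; stepsize=1e-2, n_samples=4)
    q, info, _ = optimize(alg, 1_000, prob, q0; show_progress=false)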