From 57fbfae4dcdd8bb6f80ed5c3d1a006a278dd1357 Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Fri, 31 May 2024 11:06:57 -0400 Subject: [PATCH 01/14] Fresh attempt at DI integration --- Project.toml | 23 +- ext/OptimizationDIExt.jl | 372 +++++++++++++ ext/OptimizationFiniteDiffExt.jl | 470 ----------------- ext/OptimizationForwardDiffExt.jl | 341 ------------ ext/OptimizationMTKExt.jl | 10 +- ext/OptimizationReverseDiffExt.jl | 581 --------------------- ext/OptimizationSparseDiffExt.jl | 31 -- ext/OptimizationSparseFiniteDiff.jl | 541 ------------------- ext/OptimizationSparseForwardDiff.jl | 459 ---------------- ext/OptimizationSparseReverseDiff.jl | 751 --------------------------- ext/OptimizationTrackerExt.jl | 72 --- ext/OptimizationZygoteExt.jl | 2 +- 12 files changed, 382 insertions(+), 3271 deletions(-) create mode 100644 ext/OptimizationDIExt.jl delete mode 100644 ext/OptimizationFiniteDiffExt.jl delete mode 100644 ext/OptimizationForwardDiffExt.jl delete mode 100644 ext/OptimizationReverseDiffExt.jl delete mode 100644 ext/OptimizationSparseDiffExt.jl delete mode 100644 ext/OptimizationSparseFiniteDiff.jl delete mode 100644 ext/OptimizationSparseForwardDiff.jl delete mode 100644 ext/OptimizationSparseReverseDiff.jl delete mode 100644 ext/OptimizationTrackerExt.jl diff --git a/Project.toml b/Project.toml index 57851d6..0dbcfb2 100644 --- a/Project.toml +++ b/Project.toml @@ -17,41 +17,26 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SymbolicAnalysis = "4297ee4d-0239-47d8-ba5d-195ecdf594fe" SymbolicIndexingInterface = "2efcf032-c050-4f8e-a9bb-153293bab1f5" Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" +SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" [weakdeps] -Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" -FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" -ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" +DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" -ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" -SparseDiffTools = "47a9eef4-7e08-11e9-0b38-333d64bd3804" -Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [extensions] -OptimizationEnzymeExt = "Enzyme" -OptimizationFiniteDiffExt = "FiniteDiff" -OptimizationForwardDiffExt = "ForwardDiff" +OptimizationDIExt = "DifferentiationInterface" OptimizationMTKExt = "ModelingToolkit" -OptimizationReverseDiffExt = "ReverseDiff" -OptimizationSparseDiffExt = ["SparseDiffTools", "ReverseDiff"] -OptimizationTrackerExt = "Tracker" -OptimizationZygoteExt = "Zygote" +OptimizationZygoteExt = ["Zygote", "DifferentiationInterface"] [compat] ADTypes = "1.3" ArrayInterface = "7.6" DocStringExtensions = "0.9" -Enzyme = "0.12.12" -FiniteDiff = "2.12" -ForwardDiff = "0.10.26" LinearAlgebra = "1.9, 1.10" -Manifolds = "0.9" ModelingToolkit = "9" -PDMats = "0.11" Reexport = "1.2" Requires = "1" -ReverseDiff = "1.14" SciMLBase = "2" SparseDiffTools = "2.14" SymbolicAnalysis = "0.1, 0.2" diff --git a/ext/OptimizationDIExt.jl b/ext/OptimizationDIExt.jl new file mode 100644 index 0000000..186327d --- /dev/null +++ b/ext/OptimizationDIExt.jl @@ -0,0 +1,372 @@ +module OptimizationDIExt + +import OptimizationBase, OptimizationBase.ArrayInterface +import OptimizationBase.SciMLBase: OptimizationFunction +import OptimizationBase.LinearAlgebra: I +import DifferentiationInterface +import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp, prepare_jacobian, + gradient!, hessian!, hvp!, jacobian!, gradient, hessian, hvp, jacobian +using ADTypes + +function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, adtype::ADTypes.AbstractADType, p = SciMLBase.NullParameters(), num_cons = 0) + _f = (θ, args...) -> first(f.f(θ, p, args...)) + soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + if f.grad === nothing + extras_grad = prepare_gradient(_f, adtype, x) + function grad(res, θ) + gradient!(_f, res, adtype, θ, extras_grad) + end + else + grad = (G, θ, args...) -> f.grad(G, θ, p, args...) + end + + hess_sparsity = f.hess_prototype + hess_colors = f.hess_colorvec + if f.hess === nothing + extras_hess = prepare_hessian(_f, soadtype, x) #placeholder logic, can be made much better + function hess(res, θ, args...) + hessian!(_f, res, adtype, θ, extras_hess) + end + else + hess = (H, θ, args...) -> f.hess(H, θ, p, args...) + end + + if f.hv === nothing + extras_hvp = nothing + hv = function (H, θ, v, args...) + if extras_hvp === nothing + global extras_hvp = prepare_hvp(_f, soadtype, x, v) + end + hvp!(_f, H, soadtype, θ, v, extras_hvp) + end + else + hv = f.hv + end + + if f.cons === nothing + cons = nothing + else + cons = (res, θ) -> f.cons(res, θ, p) + cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res) + end + + cons_jac_prototype = f.cons_jac_prototype + cons_jac_colorvec = f.cons_jac_colorvec + if cons !== nothing && f.cons_j === nothing + extras_jac = prepare_jacobian(cons_oop, adtype, x) + cons_j = function (J, θ) + jacobian!(cons_oop, J, adtype, θ, extras_jac) + if size(J, 1) == 1 + J = vec(J) + end + end + else + cons_j = (J, θ) -> f.cons_j(J, θ, p) + end + + conshess_sparsity = f.cons_hess_prototype + conshess_colors = f.cons_hess_colorvec + if cons !== nothing && f.cons_h === nothing + fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] + extras_cons_hess = prepare_hessian.(fncs, Ref(soadtype), Ref(x)) + + function cons_h(H, θ) + for i in 1:num_cons + hessian!(fncs[i], H[i], adtype, θ, extras_cons_hess[i]) + end + end + else + cons_h = (res, θ) -> f.cons_h(res, θ, p) + end + + if f.lag_h === nothing + lag_h = nothing # Consider implementing this + else + lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, p) + end + return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, + cons = cons, cons_j = cons_j, cons_h = cons_h, + hess_prototype = hess_sparsity, + hess_colorvec = hess_colors, + cons_jac_prototype = cons_jac_prototype, + cons_jac_colorvec = cons_jac_colorvec, + cons_hess_prototype = conshess_sparsity, + cons_hess_colorvec = conshess_colors, + lag_h, f.lag_hess_prototype) +end + +function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AbstractADType, num_cons = 0) + x = cache.u0 + p = cache.p + _f = (θ, args...) -> first(f.f(θ, p, args...)) + soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + if f.grad === nothing + extras_grad = prepare_gradient(_f, adtype, x) + function grad(res, θ) + gradient!(_f, res, adtype, θ, extras_grad) + end + else + grad = (G, θ, args...) -> f.grad(G, θ, p, args...) + end + + hess_sparsity = f.hess_prototype + hess_colors = f.hess_colorvec + if f.hess === nothing + extras_hess = prepare_hessian(_f, soadtype, x) + function hess(res, θ, args...) + hessian!(_f, res, soadtype, θ, extras_hess) + end + else + hess = (H, θ, args...) -> f.hess(H, θ, p, args...) + end + + if f.hv === nothing + extras_hvp = nothing + hv = function (H, θ, v, args...) + if extras_hvp === nothing + global extras_hvp = prepare_hvp(_f, soadtype, x, v) + end + hvp!(_f, H, soadtype, θ, v, extras_hvp) + end + else + hv = f.hv + end + + if f.cons === nothing + cons = nothing + else + cons = (res, θ) -> f.cons(res, θ, p) + cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res) + end + + cons_jac_prototype = f.cons_jac_prototype + cons_jac_colorvec = f.cons_jac_colorvec + if cons !== nothing && f.cons_j === nothing + extras_jac = prepare_jacobian(cons_oop, adtype, x) + cons_j = function (J, θ) + jacobian!(cons_oop, J, adtype, θ, extras_jac) + if size(J, 1) == 1 + J = vec(J) + end + end + else + cons_j = (J, θ) -> f.cons_j(J, θ, p) + end + + conshess_sparsity = f.cons_hess_prototype + conshess_colors = f.cons_hess_colorvec + if cons !== nothing && f.cons_h === nothing + fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] + extras_cons_hess = prepare_hessian.(fncs, Ref(soadtype), Ref(x)) + + function cons_h(H, θ) + for i in 1:num_cons + hessian!(fncs[i], H[i], adtype, θ, extras_cons_hess[i]) + end + end + else + cons_h = (res, θ) -> f.cons_h(res, θ, p) + end + + if f.lag_h === nothing + lag_h = nothing # Consider implementing this + else + lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, p) + end + return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, + cons = cons, cons_j = cons_j, cons_h = cons_h, + hess_prototype = hess_sparsity, + hess_colorvec = hess_colors, + cons_jac_prototype = cons_jac_prototype, + cons_jac_colorvec = cons_jac_colorvec, + cons_hess_prototype = conshess_sparsity, + cons_hess_colorvec = conshess_colors, + lag_h, f.lag_hess_prototype) +end + + +function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, adtype::ADTypes.AbstractADType, p = SciMLBase.NullParameters(), num_cons = 0) + _f = (θ, args...) -> first(f.f(θ, p, args...)) + soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + if f.grad === nothing + extras_grad = prepare_gradient(_f, adtype, x) + function grad(θ) + gradient(_f, adtype, θ, extras_grad) + end + else + grad = (θ, args...) -> f.grad(θ, p, args...) + end + + hess_sparsity = f.hess_prototype + hess_colors = f.hess_colorvec + if f.hess === nothing + extras_hess = prepare_hessian(_f, soadtype, x) #placeholder logic, can be made much better + function hess(θ, args...) + hessian(_f, adtype, θ, extras_hess) + end + else + hess = (θ, args...) -> f.hess(θ, p, args...) + end + + if f.hv === nothing + extras_hvp = nothing + hv = function (θ, v, args...) + if extras_hvp === nothing + global extras_hvp = prepare_hvp(_f, soadtype, x, v) + end + hvp(_f, soadtype, θ, v, extras_hvp) + end + else + hv = f.hv + end + + if f.cons === nothing + cons = nothing + else + cons = (θ) -> f.cons(θ, p) + cons_oop = cons + end + + cons_jac_prototype = f.cons_jac_prototype + cons_jac_colorvec = f.cons_jac_colorvec + if cons !== nothing && f.cons_j === nothing + extras_jac = prepare_jacobian(cons_oop, adtype, x) + cons_j = function (θ) + J = jacobian(cons_oop, adtype, θ, extras_jac) + if size(J, 1) == 1 + J = vec(J) + end + return J + end + else + cons_j = (θ) -> f.cons_j(θ, p) + end + + conshess_sparsity = f.cons_hess_prototype + conshess_colors = f.cons_hess_colorvec + if cons !== nothing && f.cons_h === nothing + fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] + extras_cons_hess = prepare_hessian.(fncs, Ref(soadtype), Ref(x)) + + function cons_h(θ) + H = map(1:num_cons) do i + hessian(fncs[i], adtype, θ, extras_cons_hess[i]) + end + return H + end + else + cons_h = (res, θ) -> f.cons_h(res, θ, p) + end + + if f.lag_h === nothing + lag_h = nothing # Consider implementing this + else + lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) + end + return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, + cons = cons, cons_j = cons_j, cons_h = cons_h, + hess_prototype = hess_sparsity, + hess_colorvec = hess_colors, + cons_jac_prototype = cons_jac_prototype, + cons_jac_colorvec = cons_jac_colorvec, + cons_hess_prototype = conshess_sparsity, + cons_hess_colorvec = conshess_colors, + lag_h, f.lag_hess_prototype) +end + +function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AbstractADType, num_cons = 0) + x = cache.u0 + p = cache.p + _f = (θ, args...) -> first(f.f(θ, p, args...)) + soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + if f.grad === nothing + extras_grad = prepare_gradient(_f, adtype, x) + function grad(θ) + gradient(_f, adtype, θ, extras_grad) + end + else + grad = (θ, args...) -> f.grad(θ, p, args...) + end + + hess_sparsity = f.hess_prototype + hess_colors = f.hess_colorvec + if f.hess === nothing + extras_hess = prepare_hessian(_f, soadtype, x) #placeholder logic, can be made much better + function hess(θ, args...) + hessian(_f, soadtype, θ, extras_hess) + end + else + hess = (θ, args...) -> f.hess(θ, p, args...) + end + + if f.hv === nothing + extras_hvp = nothing + hv = function (θ, v, args...) + if extras_hvp === nothing + global extras_hvp = prepare_hvp(_f, soadtype, x, v) + end + hvp(_f, soadtype, θ, v, extras_hvp) + end + else + hv = f.hv + end + + if f.cons === nothing + cons = nothing + else + cons = (θ) -> f.cons(θ, p) + cons_oop = cons + end + + cons_jac_prototype = f.cons_jac_prototype + cons_jac_colorvec = f.cons_jac_colorvec + if cons !== nothing && f.cons_j === nothing + extras_jac = prepare_jacobian(cons_oop, adtype, x) + cons_j = function (θ) + J = jacobian(cons_oop, adtype, θ, extras_jac) + if size(J, 1) == 1 + J = vec(J) + end + return J + end + else + cons_j = (θ) -> f.cons_j(θ, p) + end + + conshess_sparsity = f.cons_hess_prototype + conshess_colors = f.cons_hess_colorvec + if cons !== nothing && f.cons_h === nothing + fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] + extras_cons_hess = prepare_hessian.(fncs, Ref(soadtype), Ref(x)) + + function cons_h(θ) + H = map(1:num_cons) do i + hessian(fncs[i], adtype, θ, extras_cons_hess[i]) + end + return H + end + else + cons_h = (θ) -> f.cons_h(θ, p) + end + + if f.lag_h === nothing + lag_h = nothing # Consider implementing this + else + lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) + end + return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, + cons = cons, cons_j = cons_j, cons_h = cons_h, + hess_prototype = hess_sparsity, + hess_colorvec = hess_colors, + cons_jac_prototype = cons_jac_prototype, + cons_jac_colorvec = cons_jac_colorvec, + cons_hess_prototype = conshess_sparsity, + cons_hess_colorvec = conshess_colors, + lag_h, f.lag_hess_prototype) +end + +end \ No newline at end of file diff --git a/ext/OptimizationFiniteDiffExt.jl b/ext/OptimizationFiniteDiffExt.jl deleted file mode 100644 index 641f99c..0000000 --- a/ext/OptimizationFiniteDiffExt.jl +++ /dev/null @@ -1,470 +0,0 @@ -module OptimizationFiniteDiffExt - -import OptimizationBase, OptimizationBase.ArrayInterface -import OptimizationBase.SciMLBase: OptimizationFunction -import OptimizationBase.ADTypes: AutoFiniteDiff -using OptimizationBase.LinearAlgebra -isdefined(Base, :get_extension) ? (using FiniteDiff) : (using ..FiniteDiff) - -const FD = FiniteDiff - -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, - adtype::AutoFiniteDiff, p, - num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, p, args...)) - updatecache = (cache, x) -> (cache.xmm .= x; cache.xmp .= x; cache.xpm .= x; cache.xpp .= x; return cache) - - if f.grad === nothing - gradcache = FD.GradientCache(x, x, adtype.fdtype) - grad = (res, θ, args...) -> FD.finite_difference_gradient!( - res, x -> _f(x, args...), - θ, gradcache) - else - grad = (G, θ, args...) -> f.grad(G, θ, p, args...) - end - - if f.hess === nothing - hesscache = FD.HessianCache(x, adtype.fdhtype) - hess = (res, θ, args...) -> FD.finite_difference_hessian!(res, - x -> _f(x, args...), θ, - updatecache(hesscache, θ)) - else - hess = (H, θ, args...) -> f.hess(H, θ, p, args...) - end - - if f.hv === nothing - hv = function (H, θ, v, args...) - T = eltype(θ) - ϵ = sqrt(eps(real(T))) * max(one(real(T)), abs(norm(θ))) - @. θ += ϵ * v - cache2 = similar(θ) - grad(cache2, θ, args...) - @. θ -= 2ϵ * v - cache3 = similar(θ) - grad(cache3, θ, args...) - @. θ += ϵ * v - @. H = (cache2 - cache3) / (2ϵ) - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (res, θ) -> f.cons(res, θ, p) - end - - cons_jac_colorvec = f.cons_jac_colorvec === nothing ? (1:length(x)) : - f.cons_jac_colorvec - - if cons !== nothing && f.cons_j === nothing - cons_j = function (J, θ) - y0 = zeros(num_cons) - jaccache = FD.JacobianCache(copy(x), copy(y0), copy(y0), adtype.fdjtype; - colorvec = cons_jac_colorvec, - sparsity = f.cons_jac_prototype) - FD.finite_difference_jacobian!(J, cons, θ, jaccache) - end - else - cons_j = (J, θ) -> f.cons_j(J, θ, p) - end - - if cons !== nothing && f.cons_h === nothing - hess_cons_cache = [FD.HessianCache(copy(x), adtype.fdhtype) - for i in 1:num_cons] - cons_h = function (res, θ) - for i in 1:num_cons#note: colorvecs not yet supported by FiniteDiff for Hessians - FD.finite_difference_hessian!(res[i], - (x) -> (_res = zeros(eltype(θ), num_cons); - cons(_res, x); - _res[i]), θ, - updatecache(hess_cons_cache[i], θ)) - end - end - else - cons_h = (res, θ) -> f.cons_h(res, θ, p) - end - - if f.lag_h === nothing - lag_hess_cache = FD.HessianCache(copy(x), adtype.fdhtype) - c = zeros(num_cons) - h = zeros(length(x), length(x)) - lag_h = let c = c, h = h - lag = function (θ, σ, μ) - f.cons(c, θ, p) - l = μ'c - if !iszero(σ) - l += σ * f.f(θ, p) - end - l - end - function (res, θ, σ, μ) - FD.finite_difference_hessian!(res, - (x) -> lag(x, σ, μ), - θ, - updatecache(lag_hess_cache, θ)) - end - end - else - lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, p) - end - return OptimizationFunction{true}(f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - cons_jac_colorvec = cons_jac_colorvec, - hess_prototype = f.hess_prototype, - cons_jac_prototype = f.cons_jac_prototype, - cons_hess_prototype = f.cons_hess_prototype, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, - cache::OptimizationBase.ReInitCache, - adtype::AutoFiniteDiff, num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, cache.p, args...)) - updatecache = (cache, x) -> (cache.xmm .= x; cache.xmp .= x; cache.xpm .= x; cache.xpp .= x; return cache) - - if f.grad === nothing - gradcache = FD.GradientCache(cache.u0, cache.u0, adtype.fdtype) - grad = (res, θ, args...) -> FD.finite_difference_gradient!( - res, x -> _f(x, args...), - θ, gradcache) - else - grad = (G, θ, args...) -> f.grad(G, θ, cache.p, args...) - end - - if f.hess === nothing - hesscache = FD.HessianCache(cache.u0, adtype.fdhtype) - hess = (res, θ, args...) -> FD.finite_difference_hessian!(res, x -> _f(x, args...), - θ, - updatecache(hesscache, θ)) - else - hess = (H, θ, args...) -> f.hess(H, θ, cache.p, args...) - end - - if f.hv === nothing - hv = function (H, θ, v, args...) - T = eltype(θ) - ϵ = sqrt(eps(real(T))) * max(one(real(T)), abs(norm(θ))) - @. θ += ϵ * v - cache2 = similar(θ) - grad(cache2, θ, args...) - @. θ -= 2ϵ * v - cache3 = similar(θ) - grad(cache3, θ, args...) - @. θ += ϵ * v - @. H = (cache2 - cache3) / (2ϵ) - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (res, θ) -> f.cons(res, θ, cache.p) - end - - cons_jac_colorvec = f.cons_jac_colorvec === nothing ? (1:length(cache.u0)) : - f.cons_jac_colorvec - - if cons !== nothing && f.cons_j === nothing - cons_j = function (J, θ) - y0 = zeros(num_cons) - jaccache = FD.JacobianCache(copy(cache.u0), copy(y0), copy(y0), - adtype.fdjtype; - colorvec = cons_jac_colorvec, - sparsity = f.cons_jac_prototype) - FD.finite_difference_jacobian!(J, cons, θ, jaccache) - end - else - cons_j = (J, θ) -> f.cons_j(J, θ, cache.p) - end - - if cons !== nothing && f.cons_h === nothing - hess_cons_cache = [FD.HessianCache(copy(cache.u0), adtype.fdhtype) - for i in 1:num_cons] - cons_h = function (res, θ) - for i in 1:num_cons#note: colorvecs not yet supported by FiniteDiff for Hessians - FD.finite_difference_hessian!(res[i], - (x) -> (_res = zeros(eltype(θ), num_cons); - cons(_res, - x); - _res[i]), - θ, updatecache(hess_cons_cache[i], θ)) - end - end - else - cons_h = (res, θ) -> f.cons_h(res, θ, cache.p) - end - if f.lag_h === nothing - lag_hess_cache = FD.HessianCache(copy(cache.u0), adtype.fdhtype) - c = zeros(num_cons) - h = zeros(length(cache.u0), length(cache.u0)) - lag_h = let c = c, h = h - lag = function (θ, σ, μ) - f.cons(c, θ, cache.p) - l = μ'c - if !iszero(σ) - l += σ * f.f(θ, cache.p) - end - l - end - function (res, θ, σ, μ) - FD.finite_difference_hessian!(h, - (x) -> lag(x, σ, μ), - θ, - updatecache(lag_hess_cache, θ)) - k = 1 - for i in 1:length(cache.u0), j in i:length(cache.u0) - res[k] = h[i, j] - k += 1 - end - end - end - else - lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, cache.p) - end - return OptimizationFunction{true}(f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - cons_jac_colorvec = cons_jac_colorvec, - hess_prototype = f.hess_prototype, - cons_jac_prototype = f.cons_jac_prototype, - cons_hess_prototype = f.cons_hess_prototype, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, - adtype::AutoFiniteDiff, p, - num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, p, args...)) - updatecache = (cache, x) -> (cache.xmm .= x; cache.xmp .= x; cache.xpm .= x; cache.xpp .= x; return cache) - - if f.grad === nothing - gradcache = FD.GradientCache(x, x, adtype.fdtype) - grad = (θ, args...) -> FD.finite_difference_gradient(x -> _f(x, args...), - θ, gradcache) - else - grad = (θ, args...) -> f.grad(G, θ, p, args...) - end - - if f.hess === nothing - hesscache = FD.HessianCache(x, adtype.fdhtype) - hess = (θ, args...) -> FD.finite_difference_hessian(x -> _f(x, args...), θ, - updatecache(hesscache, θ)) - else - hess = (θ, args...) -> f.hess(θ, p, args...) - end - - if f.hv === nothing - hv = function (θ, v, args...) - T = eltype(θ) - ϵ = sqrt(eps(real(T))) * max(one(real(T)), abs(norm(θ))) - @. θ += ϵ * v - cache2 = similar(θ) - grad(cache2, θ, args...) - @. θ -= 2ϵ * v - cache3 = similar(θ) - grad(cache3, θ, args...) - @. θ += ϵ * v - return @. (cache2 - cache3) / (2ϵ) - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (θ) -> f.cons(θ, p) - end - - cons_jac_colorvec = f.cons_jac_colorvec === nothing ? (1:length(x)) : - f.cons_jac_colorvec - - if cons !== nothing && f.cons_j === nothing - cons_j = function (θ) - y0 = zeros(eltype(θ), num_cons) - jaccache = FD.JacobianCache(copy(x), copy(y0), copy(y0), adtype.fdjtype; - colorvec = cons_jac_colorvec, - sparsity = f.cons_jac_prototype) - if num_cons > 1 - return FD.finite_difference_jacobian(cons, θ, jaccache) - else - return FD.finite_difference_jacobian(cons, θ, jaccache)[1, :] - end - end - else - cons_j = (θ) -> f.cons_j(θ, p) - end - - if cons !== nothing && f.cons_h === nothing - hess_cons_cache = [FD.HessianCache(copy(x), adtype.fdhtype) - for i in 1:num_cons] - cons_h = function (θ) - return map(1:num_cons) do i - FD.finite_difference_hessian(x -> cons(x)[i], θ, - updatecache(hess_cons_cache[i], θ)) - end - end - else - cons_h = (θ) -> f.cons_h(θ, p) - end - - if f.lag_h === nothing - lag_hess_cache = FD.HessianCache(copy(x), adtype.fdhtype) - c = zeros(num_cons) - h = zeros(length(x), length(x)) - lag_h = let c = c, h = h - lag = function (θ, σ, μ) - f.cons(c, θ, p) - l = μ'c - if !iszero(σ) - l += σ * f.f(θ, p) - end - l - end - function (θ, σ, μ) - FD.finite_difference_hessian((x) -> lag(x, σ, μ), - θ, - updatecache(lag_hess_cache, θ)) - end - end - else - lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) - end - return OptimizationFunction{false}(f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - cons_jac_colorvec = cons_jac_colorvec, - hess_prototype = f.hess_prototype, - cons_jac_prototype = f.cons_jac_prototype, - cons_hess_prototype = f.cons_hess_prototype, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, - cache::OptimizationBase.ReInitCache, - adtype::AutoFiniteDiff, num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, cache.p, args...)) - updatecache = (cache, x) -> (cache.xmm .= x; cache.xmp .= x; cache.xpm .= x; cache.xpp .= x; return cache) - p = cache.p - - if f.grad === nothing - gradcache = FD.GradientCache(x, x, adtype.fdtype) - grad = (θ, args...) -> FD.finite_difference_gradient(x -> _f(x, args...), - θ, gradcache) - else - grad = (θ, args...) -> f.grad(G, θ, p, args...) - end - - if f.hess === nothing - hesscache = FD.HessianCache(x, adtype.fdhtype) - hess = (θ, args...) -> FD.finite_difference_hessian!(x -> _f(x, args...), θ, - updatecache(hesscache, θ)) - else - hess = (θ, args...) -> f.hess(θ, p, args...) - end - - if f.hv === nothing - hv = function (θ, v, args...) - T = eltype(θ) - ϵ = sqrt(eps(real(T))) * max(one(real(T)), abs(norm(θ))) - @. θ += ϵ * v - cache2 = similar(θ) - grad(cache2, θ, args...) - @. θ -= 2ϵ * v - cache3 = similar(θ) - grad(cache3, θ, args...) - @. θ += ϵ * v - return @. (cache2 - cache3) / (2ϵ) - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (θ) -> f.cons(θ, p) - end - - cons_jac_colorvec = f.cons_jac_colorvec === nothing ? (1:length(x)) : - f.cons_jac_colorvec - - if cons !== nothing && f.cons_j === nothing - cons_j = function (θ) - y0 = zeros(num_cons) - jaccache = FD.JacobianCache(copy(x), copy(y0), copy(y0), adtype.fdjtype; - colorvec = cons_jac_colorvec, - sparsity = f.cons_jac_prototype) - if num_cons > 1 - return FD.finite_difference_jacobian(cons, θ, jaccache) - else - return FD.finite_difference_jacobian(cons, θ, jaccache)[1, :] - end - end - else - cons_j = (θ) -> f.cons_j(θ, p) - end - - if cons !== nothing && f.cons_h === nothing - hess_cons_cache = [FD.HessianCache(copy(x), adtype.fdhtype) - for i in 1:num_cons] - cons_h = function (θ) - return map(1:num_cons) do i - FD.finite_difference_hessian(x -> cons(x)[i], θ, - updatecache(hess_cons_cache[i], θ)) - end - end - else - cons_h = (θ) -> f.cons_h(θ, p) - end - - if f.lag_h === nothing - lag_hess_cache = FD.HessianCache(copy(x), adtype.fdhtype) - c = zeros(num_cons) - h = zeros(length(x), length(x)) - lag_h = let c = c, h = h - lag = function (θ, σ, μ) - f.cons(c, θ, p) - l = μ'c - if !iszero(σ) - l += σ * f.f(θ, p) - end - l - end - function (θ, σ, μ) - FD.finite_difference_hessian((x) -> lag(x, σ, μ), - θ, - updatecache(lag_hess_cache, θ)) - end - end - else - lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) - end - return OptimizationFunction{false}(f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - cons_jac_colorvec = cons_jac_colorvec, - hess_prototype = f.hess_prototype, - cons_jac_prototype = f.cons_jac_prototype, - cons_hess_prototype = f.cons_hess_prototype, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -end diff --git a/ext/OptimizationForwardDiffExt.jl b/ext/OptimizationForwardDiffExt.jl deleted file mode 100644 index f2732c4..0000000 --- a/ext/OptimizationForwardDiffExt.jl +++ /dev/null @@ -1,341 +0,0 @@ -module OptimizationForwardDiffExt - -import OptimizationBase, OptimizationBase.ArrayInterface -import OptimizationBase.SciMLBase: OptimizationFunction -import OptimizationBase.ADTypes: AutoForwardDiff -isdefined(Base, :get_extension) ? (using ForwardDiff) : (using ..ForwardDiff) - -function default_chunk_size(len) - if len < ForwardDiff.DEFAULT_CHUNK_THRESHOLD - len - else - ForwardDiff.DEFAULT_CHUNK_THRESHOLD - end -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, - adtype::AutoForwardDiff{_chunksize}, p, - num_cons = 0) where {_chunksize} - chunksize = _chunksize === nothing ? default_chunk_size(length(x)) : _chunksize - - _f = (θ, args...) -> first(f.f(θ, p, args...)) - - if f.grad === nothing - gradcfg = ForwardDiff.GradientConfig(_f, x, ForwardDiff.Chunk{chunksize}()) - grad = (res, θ, args...) -> ForwardDiff.gradient!(res, x -> _f(x, args...), θ, - gradcfg, Val{false}()) - else - grad = (G, θ, args...) -> f.grad(G, θ, p, args...) - end - - if f.hess === nothing - hesscfg = ForwardDiff.HessianConfig(_f, x, ForwardDiff.Chunk{chunksize}()) - hess = (res, θ, args...) -> ForwardDiff.hessian!(res, x -> _f(x, args...), θ, - hesscfg, Val{false}()) - else - hess = (H, θ, args...) -> f.hess(H, θ, p, args...) - end - - if f.hv === nothing - hv = function (H, θ, v, args...) - res = ArrayInterface.zeromatrix(θ) - hess(res, θ, args...) - H .= res * v - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (res, θ) -> f.cons(res, θ, p) - cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res) - end - - if cons !== nothing && f.cons_j === nothing - cjconfig = ForwardDiff.JacobianConfig(cons_oop, x, ForwardDiff.Chunk{chunksize}()) - cons_j = function (J, θ) - ForwardDiff.jacobian!(J, cons_oop, θ, cjconfig) - end - else - cons_j = (J, θ) -> f.cons_j(J, θ, p) - end - - if cons !== nothing && f.cons_h === nothing - fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] - hess_config_cache = [ForwardDiff.HessianConfig(fncs[i], x, - ForwardDiff.Chunk{chunksize}()) - for i in 1:num_cons] - cons_h = function (res, θ) - for i in 1:num_cons - ForwardDiff.hessian!(res[i], fncs[i], θ, hess_config_cache[i], Val{true}()) - end - end - else - cons_h = (res, θ) -> f.cons_h(res, θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, p) - end - return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = f.hess_prototype, - cons_jac_prototype = f.cons_jac_prototype, - cons_hess_prototype = f.cons_hess_prototype, - lag_h, f.lag_hess_prototype) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, - cache::OptimizationBase.ReInitCache, - adtype::AutoForwardDiff{_chunksize}, - num_cons = 0) where {_chunksize} - chunksize = _chunksize === nothing ? default_chunk_size(length(cache.u0)) : _chunksize - - _f = (θ, args...) -> first(f.f(θ, cache.p, args...)) - - if f.grad === nothing - gradcfg = ForwardDiff.GradientConfig(_f, cache.u0, ForwardDiff.Chunk{chunksize}()) - grad = (res, θ, args...) -> ForwardDiff.gradient!(res, x -> _f(x, args...), θ, - gradcfg, Val{false}()) - else - grad = (G, θ, args...) -> f.grad(G, θ, cache.p, args...) - end - - if f.hess === nothing - hesscfg = ForwardDiff.HessianConfig(_f, cache.u0, ForwardDiff.Chunk{chunksize}()) - hess = (res, θ, args...) -> (ForwardDiff.hessian!(res, x -> _f(x, args...), θ, - hesscfg, Val{false}())) - else - hess = (H, θ, args...) -> f.hess(H, θ, cache.p, args...) - end - - if f.hv === nothing - hv = function (H, θ, v, args...) - res = ArrayInterface.zeromatrix(θ) - hess(res, θ, args...) - H .= res * v - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (res, θ) -> f.cons(res, θ, cache.p) - cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res) - end - - if cons !== nothing && f.cons_j === nothing - cjconfig = ForwardDiff.JacobianConfig(cons_oop, cache.u0, - ForwardDiff.Chunk{chunksize}()) - cons_j = function (J, θ) - ForwardDiff.jacobian!(J, cons_oop, θ, cjconfig) - end - else - cons_j = (J, θ) -> f.cons_j(J, θ, cache.p) - end - - if cons !== nothing && f.cons_h === nothing - fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] - hess_config_cache = [ForwardDiff.HessianConfig(fncs[i], cache.u0, - ForwardDiff.Chunk{chunksize}()) - for i in 1:num_cons] - cons_h = function (res, θ) - for i in 1:num_cons - ForwardDiff.hessian!(res[i], fncs[i], θ, hess_config_cache[i], Val{true}()) - end - end - else - cons_h = (res, θ) -> f.cons_h(res, θ, cache.p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, cache.p) - end - - return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = f.hess_prototype, - cons_jac_prototype = f.cons_jac_prototype, - cons_hess_prototype = f.cons_hess_prototype, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, - adtype::AutoForwardDiff{_chunksize}, p, - num_cons = 0) where {_chunksize} - chunksize = _chunksize === nothing ? default_chunk_size(length(x)) : _chunksize - - _f = (θ, args...) -> first(f.f(θ, p, args...)) - - if f.grad === nothing - gradcfg = ForwardDiff.GradientConfig(_f, x, ForwardDiff.Chunk{chunksize}()) - grad = (θ, args...) -> ForwardDiff.gradient(x -> _f(x, args...), θ, - gradcfg, Val{false}()) - else - grad = (θ, args...) -> f.grad(θ, p, args...) - end - - if f.hess === nothing - hesscfg = ForwardDiff.HessianConfig(_f, x, ForwardDiff.Chunk{chunksize}()) - hess = (θ, args...) -> ForwardDiff.hessian(x -> _f(x, args...), θ, - hesscfg, Val{false}()) - else - hess = (θ, args...) -> f.hess(θ, p, args...) - end - - if f.hv === nothing - hv = function (θ, v, args...) - res = ArrayInterface.zeromatrix(θ) - hess(res, θ, args...) - return res * v - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (θ) -> f.cons(θ, p) - cons_oop = cons - end - - if cons !== nothing && f.cons_j === nothing - cjconfig = ForwardDiff.JacobianConfig(cons_oop, x, ForwardDiff.Chunk{chunksize}()) - cons_j = function (θ) - if num_cons > 1 - return ForwardDiff.jacobian(cons_oop, θ, cjconfig) - else - return ForwardDiff.jacobian(cons_oop, θ, cjconfig)[1, :] - end - end - else - cons_j = (θ) -> f.cons_j(θ, p) - end - - if cons !== nothing && f.cons_h === nothing - fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] - hess_config_cache = [ForwardDiff.HessianConfig(fncs[i], x, - ForwardDiff.Chunk{chunksize}()) - for i in 1:num_cons] - cons_h = function (θ) - map(1:num_cons) do i - ForwardDiff.hessian(fncs[i], θ, hess_config_cache[i], Val{true}()) - end - end - else - cons_h = (θ) -> f.cons_h(θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) - end - return OptimizationFunction{false}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = f.hess_prototype, - cons_jac_prototype = f.cons_jac_prototype, - cons_hess_prototype = f.cons_hess_prototype, - lag_h, f.lag_hess_prototype) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, - cache::OptimizationBase.ReInitCache, - adtype::AutoForwardDiff{_chunksize}, - num_cons = 0) where {_chunksize} - chunksize = _chunksize === nothing ? default_chunk_size(length(cache.u0)) : _chunksize - - _f = (θ, args...) -> first(f.f(θ, cache.p, args...)) - p = cache.p - x = cache.u0 - if f.grad === nothing - gradcfg = ForwardDiff.GradientConfig(_f, x, ForwardDiff.Chunk{chunksize}()) - grad = (θ, args...) -> ForwardDiff.gradient(x -> _f(x, args...), θ, - gradcfg, Val{false}()) - else - grad = (θ, args...) -> f.grad(θ, p, args...) - end - - if f.hess === nothing - hesscfg = ForwardDiff.HessianConfig(_f, x, ForwardDiff.Chunk{chunksize}()) - hess = (θ, args...) -> ForwardDiff.hessian(x -> _f(x, args...), θ, - hesscfg, Val{false}()) - else - hess = (θ, args...) -> f.hess(θ, p, args...) - end - - if f.hv === nothing - hv = function (θ, v, args...) - res = ArrayInterface.zeromatrix(θ) - hess(res, θ, args...) - return res * v - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (θ) -> f.cons(θ, p) - cons_oop = cons - end - - if cons !== nothing && f.cons_j === nothing - cjconfig = ForwardDiff.JacobianConfig(cons_oop, x, ForwardDiff.Chunk{chunksize}()) - cons_j = function (θ) - if num_cons > 1 - return ForwardDiff.jacobian(cons_oop, θ, cjconfig) - else - return ForwardDiff.jacobian(cons_oop, θ, cjconfig)[1, :] - end - end - else - cons_j = (θ) -> f.cons_j(θ, p) - end - - if cons !== nothing && f.cons_h === nothing - fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] - hess_config_cache = [ForwardDiff.HessianConfig(fncs[i], x, - ForwardDiff.Chunk{chunksize}()) - for i in 1:num_cons] - cons_h = function (θ) - map(1:num_cons) do i - ForwardDiff.hessian(fncs[i], θ, hess_config_cache[i], Val{true}()) - end - end - else - cons_h = (θ) -> f.cons_h(θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) - end - return OptimizationFunction{false}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = f.hess_prototype, - cons_jac_prototype = f.cons_jac_prototype, - cons_hess_prototype = f.cons_hess_prototype, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -end diff --git a/ext/OptimizationMTKExt.jl b/ext/OptimizationMTKExt.jl index 07ead62..1b84f72 100644 --- a/ext/OptimizationMTKExt.jl +++ b/ext/OptimizationMTKExt.jl @@ -7,7 +7,7 @@ import OptimizationBase.ADTypes: AutoModelingToolkit, AutoSymbolics, AutoSparse isdefined(Base, :get_extension) ? (using ModelingToolkit) : (using ..ModelingToolkit) function OptimizationBase.instantiate_function( - f, x, adtype::AutoSparse{<:AutoSymbolics, S, C}, p, + f::OptimizationFunction{true}, x, adtype::AutoSparse{<:AutoSymbolics, S, C}, p, num_cons = 0) where {S, C} p = isnothing(p) ? SciMLBase.NullParameters() : p @@ -52,7 +52,7 @@ function OptimizationBase.instantiate_function( observed = f.observed) end -function OptimizationBase.instantiate_function(f, cache::OptimizationBase.ReInitCache, +function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, adtype::AutoSparse{<:AutoSymbolics, S, C}, num_cons = 0) where {S, C} p = isnothing(cache.p) ? SciMLBase.NullParameters() : cache.p @@ -98,7 +98,7 @@ function OptimizationBase.instantiate_function(f, cache::OptimizationBase.ReInit observed = f.observed) end -function OptimizationBase.instantiate_function(f, x, adtype::AutoSymbolics, p, +function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, adtype::AutoSymbolics, p, num_cons = 0) p = isnothing(p) ? SciMLBase.NullParameters() : p @@ -143,7 +143,7 @@ function OptimizationBase.instantiate_function(f, x, adtype::AutoSymbolics, p, observed = f.observed) end -function OptimizationBase.instantiate_function(f, cache::OptimizationBase.ReInitCache, +function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, adtype::AutoSymbolics, num_cons = 0) p = isnothing(cache.p) ? SciMLBase.NullParameters() : cache.p @@ -189,4 +189,4 @@ function OptimizationBase.instantiate_function(f, cache::OptimizationBase.ReInit observed = f.observed) end -end +end \ No newline at end of file diff --git a/ext/OptimizationReverseDiffExt.jl b/ext/OptimizationReverseDiffExt.jl deleted file mode 100644 index 58f1bf3..0000000 --- a/ext/OptimizationReverseDiffExt.jl +++ /dev/null @@ -1,581 +0,0 @@ -module OptimizationReverseDiffExt - -import OptimizationBase -import OptimizationBase.SciMLBase: OptimizationFunction -import OptimizationBase.ADTypes: AutoReverseDiff -# using SparseDiffTools, Symbolics -isdefined(Base, :get_extension) ? (using ReverseDiff, ReverseDiff.ForwardDiff) : -(using ..ReverseDiff, ..ReverseDiff.ForwardDiff) - -struct OptimizationReverseDiffTag end - -function default_chunk_size(len) - if len < ForwardDiff.DEFAULT_CHUNK_THRESHOLD - len - else - ForwardDiff.DEFAULT_CHUNK_THRESHOLD - end -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, - adtype::AutoReverseDiff, - p = SciMLBase.NullParameters(), - num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, p, args...)) - - chunksize = default_chunk_size(length(x)) - - if f.grad === nothing - if adtype.compile - _tape = ReverseDiff.GradientTape(_f, x) - tape = ReverseDiff.compile(_tape) - grad = function (res, θ, args...) - ReverseDiff.gradient!(res, tape, θ) - end - else - cfg = ReverseDiff.GradientConfig(x) - grad = (res, θ, args...) -> ReverseDiff.gradient!(res, - x -> _f(x, args...), - θ, - cfg) - end - else - grad = (G, θ, args...) -> f.grad(G, θ, p, args...) - end - - if f.hess === nothing - if adtype.compile - T = ForwardDiff.Tag(OptimizationReverseDiffTag(), eltype(x)) - xdual = ForwardDiff.Dual{ - typeof(T), - eltype(x), - chunksize - }.(x, Ref(ForwardDiff.Partials((ones(eltype(x), chunksize)...,)))) - h_tape = ReverseDiff.GradientTape(_f, xdual) - htape = ReverseDiff.compile(h_tape) - function g(θ) - res1 = zeros(eltype(θ), length(θ)) - ReverseDiff.gradient!(res1, htape, θ) - end - jaccfg = ForwardDiff.JacobianConfig(g, x, ForwardDiff.Chunk{chunksize}(), T) - hess = function (res, θ, args...) - ForwardDiff.jacobian!(res, g, θ, jaccfg, Val{false}()) - end - else - hess = function (res, θ, args...) - ReverseDiff.hessian!(res, x -> _f(x, args...), θ) - end - end - else - hess = (H, θ, args...) -> f.hess(H, θ, p, args...) - end - - if f.hv === nothing - hv = function (H, θ, v, args...) - # _θ = ForwardDiff.Dual.(θ, v) - # res = similar(_θ) - # grad(res, _θ, args...) - # H .= getindex.(ForwardDiff.partials.(res), 1) - res = zeros(length(θ), length(θ)) - hess(res, θ, args...) - H .= res * v - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (res, θ) -> f.cons(res, θ, p) - cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res) - end - - if cons !== nothing && f.cons_j === nothing - if adtype.compile - _jac_tape = ReverseDiff.JacobianTape(cons_oop, x) - jac_tape = ReverseDiff.compile(_jac_tape) - cons_j = function (J, θ) - ReverseDiff.jacobian!(J, jac_tape, θ) - end - else - cjconfig = ReverseDiff.JacobianConfig(x) - cons_j = function (J, θ) - ReverseDiff.jacobian!(J, cons_oop, θ, cjconfig) - end - end - else - cons_j = (J, θ) -> f.cons_j(J, θ, p) - end - - if cons !== nothing && f.cons_h === nothing - fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] - if adtype.compile - consh_tapes = ReverseDiff.GradientTape.(fncs, Ref(xdual)) - conshtapes = ReverseDiff.compile.(consh_tapes) - function grad_cons(θ, htape) - res1 = zeros(eltype(θ), length(θ)) - ReverseDiff.gradient!(res1, htape, θ) - end - gs = [x -> grad_cons(x, conshtapes[i]) for i in 1:num_cons] - jaccfgs = [ForwardDiff.JacobianConfig(gs[i], - x, - ForwardDiff.Chunk{chunksize}(), - T) for i in 1:num_cons] - cons_h = function (res, θ) - for i in 1:num_cons - ForwardDiff.jacobian!(res[i], gs[i], θ, jaccfgs[i], Val{false}()) - end - end - else - cons_h = function (res, θ) - for i in 1:num_cons - ReverseDiff.hessian!(res[i], fncs[i], θ) - end - end - end - else - cons_h = (res, θ) -> f.cons_h(res, θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, p) - end - return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = f.hess_prototype, - cons_jac_prototype = f.cons_jac_prototype, - cons_hess_prototype = f.cons_hess_prototype, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, - cache::OptimizationBase.ReInitCache, - adtype::AutoReverseDiff, num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, cache.p, args...)) - - chunksize = default_chunk_size(length(cache.u0)) - - if f.grad === nothing - if adtype.compile - _tape = ReverseDiff.GradientTape(_f, cache.u0) - tape = ReverseDiff.compile(_tape) - grad = function (res, θ, args...) - ReverseDiff.gradient!(res, tape, θ) - end - else - cfg = ReverseDiff.GradientConfig(cache.u0) - grad = (res, θ, args...) -> ReverseDiff.gradient!(res, - x -> _f(x, args...), - θ, - cfg) - end - else - grad = (G, θ, args...) -> f.grad(G, θ, cache.p, args...) - end - - if f.hess === nothing - if adtype.compile - T = ForwardDiff.Tag(OptimizationReverseDiffTag(), eltype(cache.u0)) - xdual = ForwardDiff.Dual{ - typeof(T), - eltype(cache.u0), - chunksize - }.(cache.u0, Ref(ForwardDiff.Partials((ones(eltype(cache.u0), chunksize)...,)))) - h_tape = ReverseDiff.GradientTape(_f, xdual) - htape = ReverseDiff.compile(h_tape) - function g(θ) - res1 = zeros(eltype(θ), length(θ)) - ReverseDiff.gradient!(res1, htape, θ) - end - jaccfg = ForwardDiff.JacobianConfig(g, - cache.u0, - ForwardDiff.Chunk{chunksize}(), - T) - hess = function (res, θ, args...) - ForwardDiff.jacobian!(res, g, θ, jaccfg, Val{false}()) - end - else - hess = function (res, θ, args...) - ReverseDiff.hessian!(res, x -> _f(x, args...), θ) - end - end - else - hess = (H, θ, args...) -> f.hess(H, θ, cache.p, args...) - end - - if f.hv === nothing - hv = function (H, θ, v, args...) - # _θ = ForwardDiff.Dual.(θ, v) - # res = similar(_θ) - # grad(res, θ, args...) - # H .= getindex.(ForwardDiff.partials.(res), 1) - res = zeros(length(θ), length(θ)) - hess(res, θ, args...) - H .= res * v - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (res, θ) -> f.cons(res, θ, cache.p) - cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res) - end - - if cons !== nothing && f.cons_j === nothing - if adtype.compile - _jac_tape = ReverseDiff.JacobianTape(cons_oop, cache.u0) - jac_tape = ReverseDiff.compile(_jac_tape) - cons_j = function (J, θ) - ReverseDiff.jacobian!(J, jac_tape, θ) - end - else - cjconfig = ReverseDiff.JacobianConfig(cache.u0) - cons_j = function (J, θ) - ReverseDiff.jacobian!(J, cons_oop, θ, cjconfig) - end - end - else - cons_j = (J, θ) -> f.cons_j(J, θ, cache.p) - end - - if cons !== nothing && f.cons_h === nothing - fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] - if adtype.compile - consh_tapes = ReverseDiff.GradientTape.(fncs, Ref(xdual)) - conshtapes = ReverseDiff.compile.(consh_tapes) - function grad_cons(θ, htape) - res1 = zeros(eltype(θ), length(θ)) - ReverseDiff.gradient!(res1, htape, θ) - end - gs = [x -> grad_cons(x, conshtapes[i]) for i in 1:num_cons] - jaccfgs = [ForwardDiff.JacobianConfig(gs[i], - cache.u0, - ForwardDiff.Chunk{chunksize}(), - T) for i in 1:num_cons] - cons_h = function (res, θ) - for i in 1:num_cons - ForwardDiff.jacobian!(res[i], gs[i], θ, jaccfgs[i], Val{false}()) - end - end - else - cons_h = function (res, θ) - for i in 1:num_cons - ReverseDiff.hessian!(res[i], fncs[i], θ) - end - end - end - else - cons_h = (res, θ) -> f.cons_h(res, θ, cache.p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, cache.p) - end - - return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = f.hess_prototype, - cons_jac_prototype = f.cons_jac_prototype, - cons_hess_prototype = f.cons_hess_prototype, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, - adtype::AutoReverseDiff, - p = SciMLBase.NullParameters(), - num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, p, args...)) - - chunksize = default_chunk_size(length(x)) - - if f.grad === nothing - if adtype.compile - _tape = ReverseDiff.GradientTape(_f, x) - tape = ReverseDiff.compile(_tape) - grad = function (θ, args...) - ReverseDiff.gradient!(tape, θ) - end - else - cfg = ReverseDiff.GradientConfig(x) - grad = (θ, args...) -> ReverseDiff.gradient(x -> _f(x, args...), - θ, - cfg) - end - else - grad = (θ, args...) -> f.grad(θ, p, args...) - end - - if f.hess === nothing - if adtype.compile - T = ForwardDiff.Tag(OptimizationReverseDiffTag(), eltype(x)) - xdual = ForwardDiff.Dual{ - typeof(T), - eltype(x), - chunksize - }.(x, Ref(ForwardDiff.Partials((ones(eltype(x), chunksize)...,)))) - h_tape = ReverseDiff.GradientTape(_f, xdual) - htape = ReverseDiff.compile(h_tape) - function g(θ) - ReverseDiff.gradient!(htape, θ) - end - jaccfg = ForwardDiff.JacobianConfig(g, x, ForwardDiff.Chunk{chunksize}(), T) - hess = function (θ, args...) - ForwardDiff.jacobian(g, θ, jaccfg, Val{false}()) - end - else - hess = function (θ, args...) - ReverseDiff.hessian(x -> _f(x, args...), θ) - end - end - else - hess = (θ, args...) -> f.hess(θ, p, args...) - end - - if f.hv === nothing - hv = function (θ, v, args...) - # _θ = ForwardDiff.Dual.(θ, v) - # res = similar(_θ) - # grad(res, _θ, args...) - # H .= getindex.(ForwardDiff.partials.(res), 1) - return hess(θ, args...) * v - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (θ) -> f.cons(θ, p) - cons_oop = cons - end - - if cons !== nothing && f.cons_j === nothing - if adtype.compile - _jac_tape = ReverseDiff.JacobianTape(cons_oop, x) - jac_tape = ReverseDiff.compile(_jac_tape) - cons_j = function (θ) - if num_cons > 1 - ReverseDiff.jacobian!(jac_tape, θ) - else - ReverseDiff.jacobian!(jac_tape, θ)[1, :] - end - end - else - cjconfig = ReverseDiff.JacobianConfig(x) - cons_j = function (θ) - if num_cons > 1 - return ReverseDiff.jacobian(cons_oop, θ, cjconfig) - else - return ReverseDiff.jacobian(cons_oop, θ, cjconfig)[1, :] - end - end - end - else - cons_j = (θ) -> f.cons_j(θ, p) - end - - if cons !== nothing && f.cons_h === nothing - fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] - if adtype.compile - consh_tapes = ReverseDiff.GradientTape.(fncs, Ref(xdual)) - conshtapes = ReverseDiff.compile.(consh_tapes) - function grad_cons(θ, htape) - ReverseDiff.gradient!(htape, θ) - end - gs = [x -> grad_cons(x, conshtapes[i]) for i in 1:num_cons] - jaccfgs = [ForwardDiff.JacobianConfig(gs[i], - x, - ForwardDiff.Chunk{chunksize}(), - T) for i in 1:num_cons] - cons_h = function (θ) - map(1:num_cons) do i - ForwardDiff.jacobian(gs[i], θ, jaccfgs[i], Val{false}()) - end - end - else - cons_h = function (θ) - map(1:num_cons) do i - ReverseDiff.hessian(fncs[i], θ) - end - end - end - else - cons_h = (θ) -> f.cons_h(θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) - end - return OptimizationFunction{false}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = f.hess_prototype, - cons_jac_prototype = f.cons_jac_prototype, - cons_hess_prototype = f.cons_hess_prototype, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, - cache::OptimizationBase.ReInitCache, - adtype::AutoReverseDiff, num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, cache.p, args...)) - - chunksize = default_chunk_size(length(cache.u0)) - p = cache.p - - if f.grad === nothing - if adtype.compile - _tape = ReverseDiff.GradientTape(_f, x) - tape = ReverseDiff.compile(_tape) - grad = function (θ, args...) - ReverseDiff.gradient!(tape, θ) - end - else - cfg = ReverseDiff.GradientConfig(x) - grad = (θ, args...) -> ReverseDiff.gradient(x -> _f(x, args...), - θ, - cfg) - end - else - grad = (θ, args...) -> f.grad(θ, p, args...) - end - - if f.hess === nothing - if adtype.compile - T = ForwardDiff.Tag(OptimizationReverseDiffTag(), eltype(x)) - xdual = ForwardDiff.Dual{ - typeof(T), - eltype(x), - chunksize - }.(x, Ref(ForwardDiff.Partials((ones(eltype(x), chunksize)...,)))) - h_tape = ReverseDiff.GradientTape(_f, xdual) - htape = ReverseDiff.compile(h_tape) - function g(θ) - ReverseDiff.gradient!(htape, θ) - end - jaccfg = ForwardDiff.JacobianConfig(g, x, ForwardDiff.Chunk{chunksize}(), T) - hess = function (θ, args...) - ForwardDiff.jacobian(g, θ, jaccfg, Val{false}()) - end - else - hess = function (θ, args...) - ReverseDiff.hessian(x -> _f(x, args...), θ) - end - end - else - hess = (θ, args...) -> f.hess(θ, p, args...) - end - - if f.hv === nothing - hv = function (θ, v, args...) - # _θ = ForwardDiff.Dual.(θ, v) - # res = similar(_θ) - # grad(res, _θ, args...) - # H .= getindex.(ForwardDiff.partials.(res), 1) - return hess(θ, args...) * v - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (θ) -> f.cons(θ, p) - cons_oop = cons - end - - if cons !== nothing && f.cons_j === nothing - if adtype.compile - _jac_tape = ReverseDiff.JacobianTape(cons_oop, x) - jac_tape = ReverseDiff.compile(_jac_tape) - cons_j = function (θ) - if num_cons > 1 - ReverseDiff.jacobian!(jac_tape, θ) - else - ReverseDiff.jacobian!(jac_tape, θ)[1, :] - end - end - else - cjconfig = ReverseDiff.JacobianConfig(x) - cons_j = function (θ) - if num_cons > 1 - return ReverseDiff.jacobian(cons_oop, θ, cjconfig) - else - return ReverseDiff.jacobian(cons_oop, θ, cjconfig)[1, :] - end - end - end - else - cons_j = (θ) -> f.cons_j(θ, p) - end - - if cons !== nothing && f.cons_h === nothing - fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] - if adtype.compile - consh_tapes = ReverseDiff.GradientTape.(fncs, Ref(xdual)) - conshtapes = ReverseDiff.compile.(consh_tapes) - function grad_cons(θ, htape) - ReverseDiff.gradient!(htape, θ) - end - gs = [x -> grad_cons(x, conshtapes[i]) for i in 1:num_cons] - jaccfgs = [ForwardDiff.JacobianConfig(gs[i], - x, - ForwardDiff.Chunk{chunksize}(), - T) for i in 1:num_cons] - cons_h = function (θ) - map(1:num_cons) do i - ForwardDiff.jacobian(gs[i], θ, jaccfgs[i], Val{false}()) - end - end - else - cons_h = function (θ) - map(1:num_cons) do i - ReverseDiff.hessian(fncs[i], θ) - end - end - end - else - cons_h = (θ) -> f.cons_h(θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) - end - return OptimizationFunction{false}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = f.hess_prototype, - cons_jac_prototype = f.cons_jac_prototype, - cons_hess_prototype = f.cons_hess_prototype, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -end diff --git a/ext/OptimizationSparseDiffExt.jl b/ext/OptimizationSparseDiffExt.jl deleted file mode 100644 index cc0ffd6..0000000 --- a/ext/OptimizationSparseDiffExt.jl +++ /dev/null @@ -1,31 +0,0 @@ -module OptimizationSparseDiffExt - -import OptimizationBase, OptimizationBase.ArrayInterface -import OptimizationBase.SciMLBase: OptimizationFunction -import OptimizationBase.ADTypes: AutoSparse, AutoFiniteDiff, AutoForwardDiff, - AutoReverseDiff -using OptimizationBase.LinearAlgebra, ReverseDiff -isdefined(Base, :get_extension) ? -(using SparseDiffTools, - SparseDiffTools.ForwardDiff, SparseDiffTools.FiniteDiff, Symbolics) : -(using ..SparseDiffTools, - ..SparseDiffTools.ForwardDiff, ..SparseDiffTools.FiniteDiff, ..Symbolics) - -function default_chunk_size(len) - if len < ForwardDiff.DEFAULT_CHUNK_THRESHOLD - len - else - ForwardDiff.DEFAULT_CHUNK_THRESHOLD - end -end - -include("OptimizationSparseForwardDiff.jl") - -const FD = FiniteDiff - -include("OptimizationSparseFiniteDiff.jl") - -struct OptimizationSparseReverseTag end - -include("OptimizationSparseReverseDiff.jl") -end diff --git a/ext/OptimizationSparseFiniteDiff.jl b/ext/OptimizationSparseFiniteDiff.jl deleted file mode 100644 index 686d614..0000000 --- a/ext/OptimizationSparseFiniteDiff.jl +++ /dev/null @@ -1,541 +0,0 @@ - -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, - adtype::AutoSparse{<:AutoFiniteDiff, S, C}, p, - num_cons = 0) where {S, C} - if maximum(getfield.(methods(f.f), :nargs)) > 3 - error("$(string(adtype)) with SparseDiffTools does not support functions with more than 2 arguments") - end - - _f = (θ, args...) -> first(f.f(θ, p, args...)) - - if f.grad === nothing - gradcache = FD.GradientCache(x, x) - grad = (res, θ, args...) -> FD.finite_difference_gradient!( - res, x -> _f(x, args...), - θ, gradcache) - else - grad = (G, θ, args...) -> f.grad(G, θ, p, args...) - end - - hess_sparsity = f.hess_prototype - hess_colors = f.hess_colorvec - if f.hess === nothing - hess_sparsity = isnothing(f.hess_prototype) ? Symbolics.hessian_sparsity(_f, x) : - f.hess_prototype - hess_colors = matrix_colors(hess_sparsity) - hess = (res, θ, args...) -> numauto_color_hessian!(res, x -> _f(x, args...), θ, - ForwardColorHesCache(_f, x, - hess_colors, - hess_sparsity, - (res, θ) -> grad(res, - θ, - args...))) - else - hess = (H, θ, args...) -> f.hess(H, θ, p, args...) - end - - if f.hv === nothing - hv = function (H, θ, v, args...) - num_hesvec!(H, x -> _f(x, args...), θ, v) - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (res, θ) -> f.cons(res, θ, p) - end - - cons_jac_prototype = f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec - if cons !== nothing && f.cons_j === nothing - cons_jac_prototype = f.cons_jac_prototype === nothing ? - Symbolics.jacobian_sparsity(cons, - zeros(eltype(x), num_cons), - x) : - f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec === nothing ? - matrix_colors(cons_jac_prototype) : - f.cons_jac_colorvec - cons_j = function (J, θ) - y0 = zeros(num_cons) - jaccache = FD.JacobianCache(copy(x), copy(y0), copy(y0); - colorvec = cons_jac_colorvec, - sparsity = cons_jac_prototype) - FD.finite_difference_jacobian!(J, cons, θ, jaccache) - end - else - cons_j = (J, θ) -> f.cons_j(J, θ, p) - end - - conshess_caches = [(; sparsity = f.cons_hess_prototype, colors = f.cons_hess_colorvec)] - if cons !== nothing && f.cons_h === nothing - function gen_conshess_cache(_f, x, i) - conshess_sparsity = isnothing(f.cons_hess_prototype) ? - copy(Symbolics.hessian_sparsity(_f, x)) : - f.cons_hess_prototype[i] - conshess_colors = matrix_colors(conshess_sparsity) - hesscache = ForwardColorHesCache(_f, x, conshess_colors, conshess_sparsity) - return hesscache - end - - fcons = [(x) -> (_res = zeros(eltype(x), num_cons); - cons(_res, x); - _res[i]) for i in 1:num_cons] - conshess_caches = [gen_conshess_cache(fcons[i], x, i) for i in 1:num_cons] - cons_h = function (res, θ) - for i in 1:num_cons - numauto_color_hessian!(res[i], fcons[i], θ, conshess_caches[i]) - end - end - else - cons_h = (res, θ) -> f.cons_h(res, θ, p) - end - - if f.lag_h === nothing - # lag_hess_cache = FD.HessianCache(copy(x)) - # c = zeros(num_cons) - # h = zeros(length(x), length(x)) - # lag_h = let c = c, h = h - # lag = function (θ, σ, μ) - # f.cons(c, θ, p) - # l = μ'c - # if !iszero(σ) - # l += σ * f.f(θ, p) - # end - # l - # end - # function (res, θ, σ, μ) - # FD.finite_difference_hessian!(res, - # (x) -> lag(x, σ, μ), - # θ, - # updatecache(lag_hess_cache, θ)) - # end - # end - lag_h = nothing - else - lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, p) - end - return OptimizationFunction{true}(f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_sparsity, - hess_colorvec = hess_colors, - cons_jac_prototype = cons_jac_prototype, - cons_jac_colorvec = cons_jac_colorvec, - cons_hess_prototype = getfield.(conshess_caches, :sparsity), - cons_hess_colorvec = getfield.(conshess_caches, :colors), - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, - cache::OptimizationBase.ReInitCache, - adtype::AutoSparse{<:AutoFiniteDiff, S, C}, num_cons = 0) where {S, C} - if maximum(getfield.(methods(f.f), :nargs)) > 3 - error("$(string(adtype)) with SparseDiffTools does not support functions with more than 2 arguments") - end - _f = (θ, args...) -> first(f.f(θ, cache.p, args...)) - x = cache.u0 - p = cache.p - - if f.grad === nothing - gradcache = FD.GradientCache(cache.u0, cache.u0) - grad = (res, θ, args...) -> FD.finite_difference_gradient!( - res, x -> _f(x, args...), - θ, gradcache) - else - grad = (G, θ, args...) -> f.grad(G, θ, cache.p, args...) - end - - hess_sparsity = f.hess_prototype - hess_colors = f.hess_colorvec - if f.hess === nothing - hess_sparsity = isnothing(f.hess_prototype) ? Symbolics.hessian_sparsity(_f, x) : - f.hess_prototype - hess_colors = matrix_colors(hess_sparsity) - hess = (res, θ, args...) -> numauto_color_hessian!(res, x -> _f(x, args...), θ, - ForwardColorHesCache(_f, x, - hess_colors, - hess_sparsity, - (res, θ) -> grad(res, - θ, - args...))) - else - hess = (H, θ, args...) -> f.hess(H, θ, p, args...) - end - - if f.hv === nothing - hv = function (H, θ, v, args...) - num_hesvec!(H, x -> _f(x, args...), θ, v) - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (res, θ) -> f.cons(res, θ, p) - end - - cons_jac_prototype = f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec - if cons !== nothing && f.cons_j === nothing - cons_jac_prototype = f.cons_jac_prototype === nothing ? - Symbolics.jacobian_sparsity(cons, - zeros(eltype(x), num_cons), - x) : - f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec === nothing ? - matrix_colors(cons_jac_prototype) : - f.cons_jac_colorvec - cons_j = function (J, θ) - y0 = zeros(num_cons) - jaccache = FD.JacobianCache(copy(x), copy(y0), copy(y0); - colorvec = cons_jac_colorvec, - sparsity = cons_jac_prototype) - FD.finite_difference_jacobian!(J, cons, θ, jaccache) - end - else - cons_j = (J, θ) -> f.cons_j(J, θ, p) - end - - conshess_caches = [(; sparsity = f.cons_hess_prototype, colors = f.cons_hess_colorvec)] - if cons !== nothing && f.cons_h === nothing - function gen_conshess_cache(_f, x, i) - conshess_sparsity = isnothing(f.cons_hess_prototype) ? - copy(Symbolics.hessian_sparsity(_f, x)) : - f.cons_hess_prototype[i] - conshess_colors = matrix_colors(conshess_sparsity) - hesscache = ForwardColorHesCache(_f, x, conshess_colors, conshess_sparsity) - return hesscache - end - - fcons = [(x) -> (_res = zeros(eltype(x), num_cons); - cons(_res, x); - _res[i]) for i in 1:num_cons] - conshess_caches = [gen_conshess_cache(fcons[i], x, i) for i in 1:num_cons] - cons_h = function (res, θ) - for i in 1:num_cons - numauto_color_hessian!(res[i], fcons[i], θ, conshess_caches[i]) - end - end - else - cons_h = (res, θ) -> f.cons_h(res, θ, p) - end - - if f.lag_h === nothing - # lag_hess_cache = FD.HessianCache(copy(cache.u0)) - # c = zeros(num_cons) - # h = zeros(length(cache.u0), length(cache.u0)) - # lag_h = let c = c, h = h - # lag = function (θ, σ, μ) - # f.cons(c, θ, cache.p) - # l = μ'c - # if !iszero(σ) - # l += σ * f.f(θ, cache.p) - # end - # l - # end - # function (res, θ, σ, μ) - # FD.finite_difference_hessian!(h, - # (x) -> lag(x, σ, μ), - # θ, - # updatecache(lag_hess_cache, θ)) - # k = 1 - # for i in 1:length(cache.u0), j in i:length(cache.u0) - # res[k] = h[i, j] - # k += 1 - # end - # end - # end - lag_h = nothing - else - lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, cache.p) - end - return OptimizationFunction{true}(f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_sparsity, - hess_colorvec = hess_colors, - cons_jac_prototype = cons_jac_prototype, - cons_jac_colorvec = cons_jac_colorvec, - cons_hess_prototype = getfield.(conshess_caches, :sparsity), - cons_hess_colorvec = getfield.(conshess_caches, :colors), - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, - adtype::AutoSparse{<:AutoFiniteDiff, S, C}, p, - num_cons = 0) where {S, C} - if maximum(getfield.(methods(f.f), :nargs)) > 3 - error("$(string(adtype)) with SparseDiffTools does not support functions with more than 2 arguments") - end - - _f = (θ, args...) -> first(f.f(θ, p, args...)) - - if f.grad === nothing - gradcache = FD.GradientCache(x, x) - grad = (θ, args...) -> FD.finite_difference_gradient(x -> _f(x, args...), - θ, gradcache) - else - grad = (θ, args...) -> f.grad(θ, cache.p, args...) - end - - hess_sparsity = f.hess_prototype - hess_colors = f.hess_colorvec - if f.hess === nothing - hess_sparsity = Symbolics.hessian_sparsity(_f, x) - hess_colors = matrix_colors(tril(hess_sparsity)) - hess = (θ, args...) -> numauto_color_hessian(x -> _f(x, args...), θ, - ForwardColorHesCache(_f, θ, - hess_colors, - hess_sparsity, - (res, θ) -> (res .= grad(θ, - args...)))) - else - hess = (θ, args...) -> f.hess(θ, cache.p, args...) - end - - if f.hv === nothing - hv = function (θ, v, args...) - return num_hesvec(x -> _f(x, args...), θ, v) - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (θ) -> f.cons(θ, p) - end - - cons_jac_prototype = f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec - if cons !== nothing && f.cons_j === nothing - cons_jac_prototype = f.cons_jac_prototype === nothing ? - Symbolics.jacobian_sparsity((res, x) -> (res .= cons(x)), - zeros(eltype(x), num_cons), - x) : - f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec === nothing ? - matrix_colors(cons_jac_prototype) : - f.cons_jac_colorvec - cons_j = function (θ) - y0 = zeros(eltype(θ), num_cons) - jaccache = FD.JacobianCache(copy(θ), copy(y0), copy(y0); - colorvec = cons_jac_colorvec, - sparsity = cons_jac_prototype) - if num_cons > 1 - return FD.finite_difference_jacobian(cons, θ, jaccache) - else - return FD.finite_difference_jacobian(cons, θ, jaccache)[1, :] - end - end - else - cons_j = (θ) -> f.cons_j(θ, p) - end - - conshess_caches = [(; sparsity = f.cons_hess_prototype, colors = f.cons_hess_colorvec)] - if cons !== nothing && f.cons_h === nothing - function gen_conshess_cache(_f, x) - conshess_sparsity = copy(Symbolics.hessian_sparsity(_f, x)) - conshess_colors = matrix_colors(conshess_sparsity) - hesscache = ForwardColorHesCache(_f, x, conshess_colors, - conshess_sparsity) - return hesscache - end - - fcons = [(x) -> cons(x)[i] for i in 1:num_cons] - conshess_caches = [gen_conshess_cache(fcons[i], x) for i in 1:num_cons] - cons_h = function (θ) - map(1:num_cons) do i - numauto_color_hessian(fcons[i], θ, conshess_caches[i]) - end - end - else - cons_h = (θ) -> f.cons_h(θ, p) - end - if f.lag_h === nothing - # lag_hess_cache = FD.HessianCache(copy(cache.u0)) - # c = zeros(num_cons) - # h = zeros(length(cache.u0), length(cache.u0)) - # lag_h = let c = c, h = h - # lag = function (θ, σ, μ) - # f.cons(c, θ, cache.p) - # l = μ'c - # if !iszero(σ) - # l += σ * f.f(θ, cache.p) - # end - # l - # end - # function (res, θ, σ, μ) - # FD.finite_difference_hessian!(h, - # (x) -> lag(x, σ, μ), - # θ, - # updatecache(lag_hess_cache, θ)) - # k = 1 - # for i in 1:length(cache.u0), j in i:length(cache.u0) - # res[k] = h[i, j] - # k += 1 - # end - # end - # end - lag_h = nothing - else - lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) - end - return OptimizationFunction{false}(f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_sparsity, - hess_colorvec = hess_colors, - cons_jac_prototype = cons_jac_prototype, - cons_jac_colorvec = cons_jac_colorvec, - cons_hess_prototype = getfield.(conshess_caches, :sparsity), - cons_hess_colorvec = getfield.(conshess_caches, :colors), - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, - cache::OptimizationBase.ReInitCache, - adtype::AutoSparse{<:AutoFiniteDiff, S, C}, num_cons = 0) where {S, C} - if maximum(getfield.(methods(f.f), :nargs)) > 3 - error("$(string(adtype)) with SparseDiffTools does not support functions with more than 2 arguments") - end - _f = (θ, args...) -> first(f.f(θ, p, args...)) - - if f.grad === nothing - gradcache = FD.GradientCache(x, x) - grad = (θ, args...) -> FD.finite_difference_gradient(x -> _f(x, args...), - θ, gradcache) - else - grad = (θ, args...) -> f.grad(θ, p, args...) - end - - hess_sparsity = f.hess_prototype - hess_colors = f.hess_colorvec - if f.hess === nothing - hess_sparsity = Symbolics.hessian_sparsity(_f, x) - hess_colors = matrix_colors(tril(hess_sparsity)) - hess = (θ, args...) -> numauto_color_hessian(x -> _f(x, args...), θ, - ForwardColorHesCache(_f, θ, - hess_colors, - hess_sparsity, - (res, θ) -> (res .= grad(θ, - args...)))) - else - hess = (θ, args...) -> f.hess(θ, p, args...) - end - - if f.hv === nothing - hv = function (θ, v, args...) - return num_hesvec(x -> _f(x, args...), θ, v) - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (θ) -> f.cons(θ, p) - end - - cons_jac_prototype = f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec - if cons !== nothing && f.cons_j === nothing - cons_jac_prototype = f.cons_jac_prototype === nothing ? - Symbolics.jacobian_sparsity((res, x) -> (res .= cons(x)), - zeros(eltype(x), num_cons), - x) : - f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec === nothing ? - matrix_colors(cons_jac_prototype) : - f.cons_jac_colorvec - cons_j = function (θ) - y0 = zeros(eltype(θ), num_cons) - jaccache = FD.JacobianCache(copy(θ), copy(y0), copy(y0); - colorvec = cons_jac_colorvec, - sparsity = cons_jac_prototype) - return FD.finite_difference_jacobian(cons, θ, jaccache) - end - else - cons_j = (θ) -> f.cons_j(θ, p) - end - - conshess_caches = [(; sparsity = f.cons_hess_prototype, colors = f.cons_hess_colorvec)] - if cons !== nothing && f.cons_h === nothing - function gen_conshess_cache(_f, x) - conshess_sparsity = copy(Symbolics.hessian_sparsity(_f, x)) - conshess_colors = matrix_colors(conshess_sparsity) - hesscache = ForwardColorHesCache(_f, x, conshess_colors, - conshess_sparsity) - return hesscache - end - - fcons = [(x) -> cons(x)[i] for i in 1:num_cons] - conshess_caches = [gen_conshess_cache(fcons[i], x) for i in 1:num_cons] - cons_h = function (θ) - map(1:num_cons) do i - numauto_color_hessian(fcons[i], θ, conshess_caches[i]) - end - end - else - cons_h = (θ) -> f.cons_h(θ, p) - end - if f.lag_h === nothing - # lag_hess_cache = FD.HessianCache(copy(x)) - # c = zeros(num_cons) - # h = zeros(length(x), length(x)) - # lag_h = let c = c, h = h - # lag = function (θ, σ, μ) - # f.cons(c, θ, p) - # l = μ'c - # if !iszero(σ) - # l += σ * f.f(θ, p) - # end - # l - # end - # function (res, θ, σ, μ) - # FD.finite_difference_hessian!(h, - # (x) -> lag(x, σ, μ), - # θ, - # updatecache(lag_hess_cache, θ)) - # k = 1 - # for i in 1:length(x), j in i:length(x) - # res[k] = h[i, j] - # k += 1 - # end - # end - # end - lag_h = nothing - else - lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) - end - return OptimizationFunction{false}(f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_sparsity, - hess_colorvec = hess_colors, - cons_jac_prototype = cons_jac_prototype, - cons_jac_colorvec = cons_jac_colorvec, - cons_hess_prototype = getfield.(conshess_caches, :sparsity), - cons_hess_colorvec = getfield.(conshess_caches, :colors), - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end diff --git a/ext/OptimizationSparseForwardDiff.jl b/ext/OptimizationSparseForwardDiff.jl deleted file mode 100644 index bb8de67..0000000 --- a/ext/OptimizationSparseForwardDiff.jl +++ /dev/null @@ -1,459 +0,0 @@ -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, - adtype::AutoSparse{<:AutoForwardDiff{_chunksize}}, p, - num_cons = 0) where {_chunksize} - if maximum(getfield.(methods(f.f), :nargs)) > 3 - error("$(string(adtype)) with SparseDiffTools does not support functions with more than 2 arguments") - end - chunksize = _chunksize === nothing ? default_chunk_size(length(x)) : _chunksize - - _f = (θ, args...) -> first(f.f(θ, p, args...)) - - if f.grad === nothing - gradcfg = ForwardDiff.GradientConfig(_f, x, ForwardDiff.Chunk{chunksize}()) - grad = (res, θ, args...) -> ForwardDiff.gradient!(res, x -> _f(x, args...), θ, - gradcfg, Val{false}()) - else - grad = (G, θ, args...) -> f.grad(G, θ, p, args...) - end - - hess_sparsity = f.hess_prototype - hess_colors = f.hess_colorvec - if f.hess === nothing - hess_sparsity = isnothing(f.hess_prototype) ? Symbolics.hessian_sparsity(_f, x) : - f.hess_prototype - hess_colors = matrix_colors(hess_sparsity) - hess = (res, θ, args...) -> numauto_color_hessian!(res, x -> _f(x, args...), θ, - ForwardColorHesCache(_f, x, - hess_colors, - hess_sparsity, - (res, θ) -> grad(res, - θ, - args...))) - else - hess = (H, θ, args...) -> f.hess(H, θ, p, args...) - end - - if f.hv === nothing - hv = function (H, θ, v, args...) - num_hesvecgrad!(H, (res, x) -> grad(res, x, args...), θ, v) - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (res, θ) -> f.cons(res, θ, p) - cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res) - end - - cons_jac_prototype = f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec - if cons !== nothing && f.cons_j === nothing - cons_jac_prototype = isnothing(f.cons_jac_prototype) ? - Symbolics.jacobian_sparsity(cons, zeros(eltype(x), num_cons), - x) : f.cons_jac_prototype - cons_jac_colorvec = matrix_colors(cons_jac_prototype) - jaccache = ForwardColorJacCache(cons, - x, - chunksize; - colorvec = cons_jac_colorvec, - sparsity = cons_jac_prototype, - dx = zeros(eltype(x), num_cons)) - cons_j = function (J, θ) - forwarddiff_color_jacobian!(J, cons, θ, jaccache) - end - else - cons_j = (J, θ) -> f.cons_j(J, θ, p) - end - - cons_hess_caches = [(; sparsity = f.cons_hess_prototype, colors = f.cons_hess_colorvec)] - if cons !== nothing && f.cons_h === nothing - function gen_conshess_cache(_f, x, i) - conshess_sparsity = isnothing(f.cons_hess_prototype) ? - copy(Symbolics.hessian_sparsity(_f, x)) : - f.cons_hess_prototype[i] - conshess_colors = matrix_colors(conshess_sparsity) - hesscache = ForwardColorHesCache(_f, x, conshess_colors, - conshess_sparsity) - return hesscache - end - - fcons = [(x) -> (_res = zeros(eltype(x), num_cons); - cons(_res, x); - _res[i]) for i in 1:num_cons] - cons_hess_caches = [gen_conshess_cache(fcons[i], x, i) for i in 1:num_cons] - cons_h = function (res, θ) - fetch.([Threads.@spawn numauto_color_hessian!( - res[i], fcons[i], θ, cons_hess_caches[i]) for i in 1:num_cons]) - end - else - cons_h = (res, θ) -> f.cons_h(res, θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, p) - end - return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_sparsity, - hess_colorvec = hess_colors, - cons_jac_colorvec = cons_jac_colorvec, - cons_jac_prototype = cons_jac_prototype, - cons_hess_prototype = getfield.(cons_hess_caches, :sparsity), - cons_hess_colorvec = getfield.(cons_hess_caches, :colors), - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, - cache::OptimizationBase.ReInitCache, - adtype::AutoSparse{<:AutoForwardDiff{_chunksize}}, - num_cons = 0) where {_chunksize} - if maximum(getfield.(methods(f.f), :nargs)) > 3 - error("$(string(adtype)) with SparseDiffTools does not support functions with more than 2 arguments") - end - chunksize = _chunksize === nothing ? default_chunk_size(length(cache.u0)) : _chunksize - - x = cache.u0 - p = cache.p - - _f = (θ, args...) -> first(f.f(θ, cache.p, args...)) - - if f.grad === nothing - gradcfg = ForwardDiff.GradientConfig(_f, cache.u0, ForwardDiff.Chunk{chunksize}()) - grad = (res, θ, args...) -> ForwardDiff.gradient!(res, x -> _f(x, args...), θ, - gradcfg, Val{false}()) - else - grad = (G, θ, args...) -> f.grad(G, θ, cache.p, args...) - end - - hess_sparsity = f.hess_prototype - hess_colors = f.hess_colorvec - if f.hess === nothing - hess_sparsity = isnothing(f.hess_prototype) ? Symbolics.hessian_sparsity(_f, x) : - f.hess_prototype - hess_colors = matrix_colors(hess_sparsity) - hess = (res, θ, args...) -> numauto_color_hessian!(res, x -> _f(x, args...), θ, - ForwardColorHesCache(_f, x, - hess_colors, - hess_sparsity, - (res, θ) -> grad(res, - θ, - args...))) - else - hess = (H, θ, args...) -> f.hess(H, θ, p, args...) - end - - if f.hv === nothing - hv = function (H, θ, v, args...) - num_hesvecgrad!(H, (res, x) -> grad(res, x, args...), θ, v) - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (res, θ) -> f.cons(res, θ, p) - cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res) - end - - cons_jac_prototype = f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec - if cons !== nothing && f.cons_j === nothing - cons_jac_prototype = isnothing(f.cons_jac_prototype) ? - Symbolics.jacobian_sparsity(cons, zeros(eltype(x), num_cons), - x) : f.cons_jac_prototype - cons_jac_colorvec = matrix_colors(cons_jac_prototype) - jaccache = ForwardColorJacCache(cons, - x, - chunksize; - colorvec = cons_jac_colorvec, - sparsity = cons_jac_prototype, - dx = zeros(eltype(x), num_cons)) - cons_j = function (J, θ) - forwarddiff_color_jacobian!(J, cons, θ, jaccache) - end - else - cons_j = (J, θ) -> f.cons_j(J, θ, p) - end - - cons_hess_caches = [(; sparsity = f.cons_hess_prototype, colors = f.cons_hess_colorvec)] - if cons !== nothing && f.cons_h === nothing - function gen_conshess_cache(_f, x, i) - conshess_sparsity = isnothing(f.cons_hess_prototype) ? - copy(Symbolics.hessian_sparsity(_f, x)) : - f.cons_hess_prototype[i] - conshess_colors = matrix_colors(conshess_sparsity) - hesscache = ForwardColorHesCache(_f, x, conshess_colors, - conshess_sparsity) - return hesscache - end - - fcons = [(x) -> (_res = zeros(eltype(x), num_cons); - cons(_res, x); - _res[i]) for i in 1:num_cons] - cons_hess_caches = [gen_conshess_cache(fcons[i], x, i) for i in 1:num_cons] - cons_h = function (res, θ) - fetch.([Threads.@spawn numauto_color_hessian!( - res[i], fcons[i], θ, cons_hess_caches[i]) for i in 1:num_cons]) - end - else - cons_h = (res, θ) -> f.cons_h(res, θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, cache.p) - end - - return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_sparsity, - hess_colorvec = hess_colors, - cons_jac_prototype = cons_jac_prototype, - cons_jac_colorvec = cons_jac_colorvec, - cons_hess_prototype = getfield.(cons_hess_caches, :sparsity), - cons_hess_colorvec = getfield.(cons_hess_caches, :colors), - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, - adtype::AutoSparse{<:AutoForwardDiff{_chunksize}}, p, - num_cons = 0) where {_chunksize} - if maximum(getfield.(methods(f.f), :nargs)) > 3 - error("$(string(adtype)) with SparseDiffTools does not support functions with more than 2 arguments") - end - chunksize = _chunksize === nothing ? default_chunk_size(length(x)) : _chunksize - - _f = (θ, args...) -> first(f.f(θ, p, args...)) - - if f.grad === nothing - gradcfg = ForwardDiff.GradientConfig(_f, x, ForwardDiff.Chunk{chunksize}()) - grad = (θ, args...) -> ForwardDiff.gradient(x -> _f(x, args...), θ, - gradcfg, Val{false}()) - else - grad = (θ, args...) -> f.grad(θ, p, args...) - end - - hess_sparsity = f.hess_prototype - hess_colors = f.hess_colorvec - if f.hess === nothing - hess_sparsity = Symbolics.hessian_sparsity(_f, x) - hess_colors = matrix_colors(tril(hess_sparsity)) - hess = (θ, args...) -> numauto_color_hessian(x -> _f(x, args...), θ, - ForwardColorHesCache(_f, x, - hess_colors, - hess_sparsity, - (G, θ) -> (G .= grad(θ, - args...)))) - else - hess = (θ, args...) -> f.hess(θ, p, args...) - end - - if f.hv === nothing - hv = function (θ, v, args...) - num_hesvecgrad((x) -> grad(x, args...), θ, v) - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (θ) -> f.cons(θ, p) - end - - cons_jac_prototype = f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec - if cons !== nothing && f.cons_j === nothing - res = zeros(eltype(x), num_cons) - cons_jac_prototype = Symbolics.jacobian_sparsity((res, x) -> (res .= cons(x)), - res, - x) - cons_jac_colorvec = matrix_colors(cons_jac_prototype) - jaccache = ForwardColorJacCache(cons, - x, - chunksize; - colorvec = cons_jac_colorvec, - sparsity = cons_jac_prototype) - cons_j = function (θ) - if num_cons > 1 - return forwarddiff_color_jacobian(cons, θ, jaccache) - else - return forwarddiff_color_jacobian(cons, θ, jaccache)[1, :] - end - end - else - cons_j = (θ) -> f.cons_j(θ, p) - end - - cons_hess_caches = [(; sparsity = f.cons_hess_prototype, colors = f.cons_hess_colorvec)] - if cons !== nothing && f.cons_h === nothing - function gen_conshess_cache(_f, x) - conshess_sparsity = copy(Symbolics.hessian_sparsity(_f, x)) - conshess_colors = matrix_colors(conshess_sparsity) - hesscache = ForwardColorHesCache(_f, x, conshess_colors, - conshess_sparsity) - return hesscache - end - - fcons = [(x) -> cons(x)[i] for i in 1:num_cons] - cons_hess_caches = gen_conshess_cache.(fcons, Ref(x)) - cons_h = function (θ) - map(1:num_cons) do i - numauto_color_hessian(fcons[i], θ, cons_hess_caches[i]) - end - end - else - cons_h = (θ) -> f.cons_h(θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) - end - return OptimizationFunction{false}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_sparsity, - hess_colorvec = hess_colors, - cons_jac_colorvec = cons_jac_colorvec, - cons_jac_prototype = cons_jac_prototype, - cons_hess_prototype = getfield.(cons_hess_caches, :sparsity), - cons_hess_colorvec = getfield.(cons_hess_caches, :colors), - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, - cache::OptimizationBase.ReInitCache, - adtype::AutoSparse{<:AutoForwardDiff{_chunksize}}, - num_cons = 0) where {_chunksize} - if maximum(getfield.(methods(f.f), :nargs)) > 3 - error("$(string(adtype)) with SparseDiffTools does not support functions with more than 2 arguments") - end - chunksize = _chunksize === nothing ? default_chunk_size(length(cache.u0)) : _chunksize - - _f = (θ, args...) -> first(f.f(θ, cache.p, args...)) - - p = cache.p - - if f.grad === nothing - gradcfg = ForwardDiff.GradientConfig(_f, x, ForwardDiff.Chunk{chunksize}()) - grad = (θ, args...) -> ForwardDiff.gradient(x -> _f(x, args...), θ, - gradcfg, Val{false}()) - else - grad = (θ, args...) -> f.grad(θ, p, args...) - end - - hess_sparsity = f.hess_prototype - hess_colors = f.hess_colorvec - if f.hess === nothing - hess_sparsity = Symbolics.hessian_sparsity(_f, x) - hess_colors = matrix_colors(tril(hess_sparsity)) - hess = (θ, args...) -> numauto_color_hessian(x -> _f(x, args...), θ, - ForwardColorHesCache(_f, x, - hess_colors, - hess_sparsity, - (G, θ) -> (G .= grad(θ, - args...)))) - else - hess = (θ, args...) -> f.hess(θ, p, args...) - end - - if f.hv === nothing - hv = function (θ, v, args...) - num_hesvecgrad((x) -> grad(res, x, args...), θ, v) - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (θ) -> f.cons(θ, p) - end - - cons_jac_prototype = f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec - if cons !== nothing && f.cons_j === nothing - res = zeros(eltype(x), num_cons) - cons_jac_prototype = Symbolics.jacobian_sparsity((res, x) -> (res .= cons(x)), - res, - x) - cons_jac_colorvec = matrix_colors(cons_jac_prototype) - jaccache = ForwardColorJacCache(cons, - x, - chunksize; - colorvec = cons_jac_colorvec, - sparsity = cons_jac_prototype) - cons_j = function (θ) - if num_cons > 1 - return forwarddiff_color_jacobian(cons, θ, jaccache) - else - return forwarddiff_color_jacobian(cons, θ, jaccache)[1, :] - end - end - else - cons_j = (θ) -> f.cons_j(θ, p) - end - - cons_hess_caches = [(; sparsity = f.cons_hess_prototype, colors = f.cons_hess_colorvec)] - if cons !== nothing && f.cons_h === nothing - function gen_conshess_cache(_f, x) - conshess_sparsity = copy(Symbolics.hessian_sparsity(_f, x)) - conshess_colors = matrix_colors(conshess_sparsity) - hesscache = ForwardColorHesCache(_f, x, conshess_colors, - conshess_sparsity) - return hesscache - end - - fcons = [(x) -> cons(x)[i] for i in 1:num_cons] - cons_hess_caches = gen_conshess_cache.(fcons, Ref(x)) - cons_h = function (θ) - map(1:num_cons) do i - numauto_color_hessian(fcons[i], θ, cons_hess_caches[i]) - end - end - else - cons_h = (θ) -> f.cons_h(θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) - end - return OptimizationFunction{false}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_sparsity, - hess_colorvec = hess_colors, - cons_jac_colorvec = cons_jac_colorvec, - cons_jac_prototype = cons_jac_prototype, - cons_hess_prototype = getfield.(cons_hess_caches, :sparsity), - cons_hess_colorvec = getfield.(cons_hess_caches, :colors), - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end diff --git a/ext/OptimizationSparseReverseDiff.jl b/ext/OptimizationSparseReverseDiff.jl deleted file mode 100644 index ac15215..0000000 --- a/ext/OptimizationSparseReverseDiff.jl +++ /dev/null @@ -1,751 +0,0 @@ -function OptimizationBase.ADTypes.AutoSparseReverseDiff(compile::Bool) - return AutoSparse(AutoReverseDiff(; compile)) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, - adtype::AutoSparse{<:AutoReverseDiff}, - p = SciMLBase.NullParameters(), - num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, p, args...)) - - chunksize = default_chunk_size(length(x)) - - if f.grad === nothing - if adtype.dense_ad.compile - _tape = ReverseDiff.GradientTape(_f, x) - tape = ReverseDiff.compile(_tape) - grad = function (res, θ, args...) - ReverseDiff.gradient!(res, tape, θ) - end - else - cfg = ReverseDiff.GradientConfig(x) - grad = (res, θ, args...) -> ReverseDiff.gradient!(res, - x -> _f(x, args...), - θ, - cfg) - end - else - grad = (G, θ, args...) -> f.grad(G, θ, p, args...) - end - - hess_sparsity = f.hess_prototype - hess_colors = f.hess_colorvec - if f.hess === nothing - hess_sparsity = Symbolics.hessian_sparsity(_f, x) - hess_colors = SparseDiffTools.matrix_colors(tril(hess_sparsity)) - if adtype.dense_ad.compile - T = ForwardDiff.Tag(OptimizationSparseReverseTag(), eltype(x)) - xdual = ForwardDiff.Dual{ - typeof(T), - eltype(x), - min(chunksize, maximum(hess_colors)) - }.(x, - Ref(ForwardDiff.Partials((ones(eltype(x), - min(chunksize, maximum(hess_colors)))...,)))) - h_tape = ReverseDiff.GradientTape(_f, xdual) - htape = ReverseDiff.compile(h_tape) - function g(res1, θ) - ReverseDiff.gradient!(res1, htape, θ) - end - jaccfg = ForwardColorJacCache(g, - x; - tag = typeof(T), - colorvec = hess_colors, - sparsity = hess_sparsity) - hess = function (res, θ, args...) - SparseDiffTools.forwarddiff_color_jacobian!(res, g, θ, jaccfg) - end - else - hess = function (res, θ, args...) - res .= SparseDiffTools.forwarddiff_color_jacobian(θ, - colorvec = hess_colors, - sparsity = hess_sparsity) do θ - ReverseDiff.gradient(x -> _f(x, args...), θ) - end - end - end - else - hess = (H, θ, args...) -> f.hess(H, θ, p, args...) - end - - if f.hv === nothing - hv = function (H, θ, v, args...) - # _θ = ForwardDiff.Dual.(θ, v) - # res = similar(_θ) - # grad(res, _θ, args...) - # H .= getindex.(ForwardDiff.partials.(res), 1) - res = zeros(length(θ), length(θ)) - hess(res, θ, args...) - H .= res * v - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (res, θ) -> f.cons(res, θ, p) - cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res) - end - - cons_jac_prototype = f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec - if cons !== nothing && f.cons_j === nothing - jaccache = SparseDiffTools.sparse_jacobian_cache(AutoSparseForwardDiff(), - SparseDiffTools.SymbolicsSparsityDetection(), - cons_oop, - x, - fx = zeros(eltype(x), num_cons)) - # let cons = cons, θ = cache.u0, cons_jac_colorvec = cons_jac_colorvec, cons_jac_prototype = cons_jac_prototype, num_cons = num_cons - # ForwardColorJacCache(cons, θ; - # colorvec = cons_jac_colorvec, - # sparsity = cons_jac_prototype, - # dx = zeros(eltype(θ), num_cons)) - # end - cons_jac_prototype = jaccache.jac_prototype - cons_jac_colorvec = jaccache.coloring - cons_j = function (J, θ, args...; cons = cons, cache = jaccache.cache) - forwarddiff_color_jacobian!(J, cons, θ, cache) - return - end - else - cons_j = (J, θ) -> f.cons_j(J, θ, p) - end - - conshess_sparsity = f.cons_hess_prototype - conshess_colors = f.cons_hess_colorvec - if cons !== nothing && f.cons_h === nothing - fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] - conshess_sparsity = Symbolics.hessian_sparsity.(fncs, Ref(x)) - conshess_colors = SparseDiffTools.matrix_colors.(conshess_sparsity) - if adtype.dense_ad.compile - T = ForwardDiff.Tag(OptimizationSparseReverseTag(), eltype(x)) - xduals = [ForwardDiff.Dual{ - typeof(T), - eltype(x), - min(chunksize, maximum(conshess_colors[i])) - }.(x, - Ref(ForwardDiff.Partials((ones(eltype(x), - min(chunksize, maximum(conshess_colors[i])))...,)))) - for i in 1:num_cons] - consh_tapes = [ReverseDiff.GradientTape(fncs[i], xduals[i]) for i in 1:num_cons] - conshtapes = ReverseDiff.compile.(consh_tapes) - function grad_cons(res1, θ, htape) - ReverseDiff.gradient!(res1, htape, θ) - end - gs = [(res1, x) -> grad_cons(res1, x, conshtapes[i]) for i in 1:num_cons] - jaccfgs = [ForwardColorJacCache(gs[i], - x; - tag = typeof(T), - colorvec = conshess_colors[i], - sparsity = conshess_sparsity[i]) for i in 1:num_cons] - cons_h = function (res, θ, args...) - for i in 1:num_cons - SparseDiffTools.forwarddiff_color_jacobian!(res[i], - gs[i], - θ, - jaccfgs[i]) - end - end - else - cons_h = function (res, θ) - for i in 1:num_cons - res[i] .= SparseDiffTools.forwarddiff_color_jacobian(θ, - colorvec = conshess_colors[i], - sparsity = conshess_sparsity[i]) do θ - ReverseDiff.gradient(fncs[i], θ) - end - end - end - end - else - cons_h = (res, θ) -> f.cons_h(res, θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, p) - end - return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_sparsity, - hess_colorvec = hess_colors, - cons_jac_prototype = cons_jac_prototype, - cons_jac_colorvec = cons_jac_colorvec, - cons_hess_prototype = conshess_sparsity, - cons_hess_colorvec = conshess_colors, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, - cache::OptimizationBase.ReInitCache, - adtype::AutoSparse{<:AutoReverseDiff}, num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, cache.p, args...)) - - chunksize = default_chunk_size(length(cache.u0)) - - if f.grad === nothing - if adtype.dense_ad.compile - _tape = ReverseDiff.GradientTape(_f, cache.u0) - tape = ReverseDiff.compile(_tape) - grad = function (res, θ, args...) - ReverseDiff.gradient!(res, tape, θ) - end - else - cfg = ReverseDiff.GradientConfig(cache.u0) - grad = (res, θ, args...) -> ReverseDiff.gradient!(res, - x -> _f(x, args...), - θ, - cfg) - end - else - grad = (G, θ, args...) -> f.grad(G, θ, cache.p, args...) - end - - hess_sparsity = f.hess_prototype - hess_colors = f.hess_colorvec - if f.hess === nothing - hess_sparsity = Symbolics.hessian_sparsity(_f, cache.u0) - hess_colors = SparseDiffTools.matrix_colors(tril(hess_sparsity)) - if adtype.dense_ad.compile - T = ForwardDiff.Tag(OptimizationSparseReverseTag(), eltype(cache.u0)) - xdual = ForwardDiff.Dual{ - typeof(T), - eltype(cache.u0), - min(chunksize, maximum(hess_colors)) - }.(cache.u0, - Ref(ForwardDiff.Partials((ones(eltype(cache.u0), - min(chunksize, maximum(hess_colors)))...,)))) - h_tape = ReverseDiff.GradientTape(_f, xdual) - htape = ReverseDiff.compile(h_tape) - function g(res1, θ) - ReverseDiff.gradient!(res1, htape, θ) - end - jaccfg = ForwardColorJacCache(g, - cache.u0; - tag = typeof(T), - colorvec = hess_colors, - sparsity = hess_sparsity) - hess = function (res, θ, args...) - SparseDiffTools.forwarddiff_color_jacobian!(res, g, θ, jaccfg) - end - else - hess = function (res, θ, args...) - res .= SparseDiffTools.forwarddiff_color_jacobian(θ, - colorvec = hess_colors, - sparsity = hess_sparsity) do θ - ReverseDiff.gradient(x -> _f(x, args...), θ) - end - end - end - else - hess = (H, θ, args...) -> f.hess(H, θ, cache.p, args...) - end - - if f.hv === nothing - hv = function (H, θ, v, args...) - # _θ = ForwardDiff.Dual.(θ, v) - # res = similar(_θ) - # grad(res, _θ, args...) - # H .= getindex.(ForwardDiff.partials.(res), 1) - res = zeros(length(θ), length(θ)) - hess(res, θ, args...) - H .= res * v - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = function (res, θ) - f.cons(res, θ, cache.p) - return - end - cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res) - end - - cons_jac_prototype = f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec - if cons !== nothing && f.cons_j === nothing - # cons_jac_prototype = Symbolics.jacobian_sparsity(cons, - # zeros(eltype(cache.u0), num_cons), - # cache.u0) - # cons_jac_colorvec = matrix_colors(cons_jac_prototype) - jaccache = SparseDiffTools.sparse_jacobian_cache(AutoSparseForwardDiff(), - SparseDiffTools.SymbolicsSparsityDetection(), - cons_oop, - cache.u0, - fx = zeros(eltype(cache.u0), num_cons)) - # let cons = cons, θ = cache.u0, cons_jac_colorvec = cons_jac_colorvec, cons_jac_prototype = cons_jac_prototype, num_cons = num_cons - # ForwardColorJacCache(cons, θ; - # colorvec = cons_jac_colorvec, - # sparsity = cons_jac_prototype, - # dx = zeros(eltype(θ), num_cons)) - # end - cons_jac_prototype = jaccache.jac_prototype - cons_jac_colorvec = jaccache.coloring - cons_j = function (J, θ) - forwarddiff_color_jacobian!(J, cons, θ, jaccache.cache) - return - end - else - cons_j = (J, θ) -> f.cons_j(J, θ, cache.p) - end - - conshess_sparsity = f.cons_hess_prototype - conshess_colors = f.cons_hess_colorvec - if cons !== nothing && f.cons_h === nothing - fncs = map(1:num_cons) do i - function (x) - res = zeros(eltype(x), num_cons) - f.cons(res, x, cache.p) - return res[i] - end - end - conshess_sparsity = map(1:num_cons) do i - let fnc = fncs[i], θ = cache.u0 - Symbolics.hessian_sparsity(fnc, θ) - end - end - conshess_colors = SparseDiffTools.matrix_colors.(conshess_sparsity) - if adtype.dense_ad.compile - T = ForwardDiff.Tag(OptimizationSparseReverseTag(), eltype(cache.u0)) - xduals = [ForwardDiff.Dual{ - typeof(T), - eltype(cache.u0), - min(chunksize, maximum(conshess_colors[i])) - }.(cache.u0, - Ref(ForwardDiff.Partials((ones(eltype(cache.u0), - min(chunksize, maximum(conshess_colors[i])))...,)))) - for i in 1:num_cons] - consh_tapes = [ReverseDiff.GradientTape(fncs[i], xduals[i]) for i in 1:num_cons] - conshtapes = ReverseDiff.compile.(consh_tapes) - function grad_cons(res1, θ, htape) - ReverseDiff.gradient!(res1, htape, θ) - end - gs = let conshtapes = conshtapes - map(1:num_cons) do i - function (res1, x) - grad_cons(res1, x, conshtapes[i]) - end - end - end - jaccfgs = [ForwardColorJacCache(gs[i], - cache.u0; - tag = typeof(T), - colorvec = conshess_colors[i], - sparsity = conshess_sparsity[i]) for i in 1:num_cons] - cons_h = function (res, θ) - for i in 1:num_cons - SparseDiffTools.forwarddiff_color_jacobian!(res[i], - gs[i], - θ, - jaccfgs[i]) - end - end - else - cons_h = function (res, θ) - for i in 1:num_cons - res[i] .= SparseDiffTools.forwarddiff_color_jacobian(θ, - colorvec = conshess_colors[i], - sparsity = conshess_sparsity[i]) do θ - ReverseDiff.gradient(fncs[i], θ) - end - end - end - end - else - cons_h = (res, θ) -> f.cons_h(res, θ, cache.p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, cache.p) - end - - return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_sparsity, - hess_colorvec = hess_colors, - cons_jac_prototype = cons_jac_prototype, - cons_jac_colorvec = cons_jac_colorvec, - cons_hess_prototype = conshess_sparsity, - cons_hess_colorvec = conshess_colors, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, - adtype::AutoSparse{<:AutoReverseDiff}, - p = SciMLBase.NullParameters(), - num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, p, args...)) - - chunksize = default_chunk_size(length(x)) - - if f.grad === nothing - if adtype.dense_ad.compile - _tape = ReverseDiff.GradientTape(_f, x) - tape = ReverseDiff.compile(_tape) - grad = function (θ, args...) - ReverseDiff.gradient!(tape, θ) - end - else - cfg = ReverseDiff.GradientConfig(x) - grad = (θ, args...) -> ReverseDiff.gradient(x -> _f(x, args...), - θ, - cfg) - end - else - grad = (θ, args...) -> f.grad(θ, p, args...) - end - - hess_sparsity = f.hess_prototype - hess_colors = f.hess_colorvec - if f.hess === nothing - hess_sparsity = Symbolics.hessian_sparsity(_f, x) - hess_colors = SparseDiffTools.matrix_colors(tril(hess_sparsity)) - if adtype.dense_ad.compile - T = ForwardDiff.Tag(OptimizationSparseReverseTag(), eltype(x)) - xdual = ForwardDiff.Dual{ - typeof(T), - eltype(x), - min(chunksize, maximum(hess_colors)) - }.(x, - Ref(ForwardDiff.Partials((ones(eltype(x), - min(chunksize, maximum(hess_colors)))...,)))) - h_tape = ReverseDiff.GradientTape(_f, xdual) - htape = ReverseDiff.compile(h_tape) - function g(θ) - ReverseDiff.gradient!(htape, θ) - end - jaccfg = ForwardColorJacCache(g, - x; - tag = typeof(T), - colorvec = hess_colors, - sparsity = hess_sparsity) - hess = function (θ, args...) - return SparseDiffTools.forwarddiff_color_jacobian(g, θ, jaccfg) - end - else - hess = function (θ, args...) - return SparseDiffTools.forwarddiff_color_jacobian(θ, - colorvec = hess_colors, - sparsity = hess_sparsity) do θ - ReverseDiff.gradient(x -> _f(x, args...), θ) - end - end - end - else - hess = (θ, args...) -> f.hess(θ, p, args...) - end - - if f.hv === nothing - hv = function (θ, v, args...) - # _θ = ForwardDiff.Dual.(θ, v) - # res = similar(_θ) - # grad(res, _θ, args...) - # H .= getindex.(ForwardDiff.partials.(res), 1) - res = zeros(length(θ), length(θ)) - hess(θ, args...) * v - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (θ) -> f.cons(θ, p) - cons_oop = cons - end - - cons_jac_prototype = f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec - if cons !== nothing && f.cons_j === nothing - jaccache = SparseDiffTools.sparse_jacobian_cache(AutoSparseForwardDiff(), - SparseDiffTools.SymbolicsSparsityDetection(), - cons_oop, - x, - fx = zeros(eltype(x), num_cons)) - # let cons = cons, θ = cache.u0, cons_jac_colorvec = cons_jac_colorvec, cons_jac_prototype = cons_jac_prototype, num_cons = num_cons - # ForwardColorJacCache(cons, θ; - # colorvec = cons_jac_colorvec, - # sparsity = cons_jac_prototype, - # dx = zeros(eltype(θ), num_cons)) - # end - cons_jac_prototype = jaccache.jac_prototype - cons_jac_colorvec = jaccache.coloring - cons_j = function (θ, args...; cons = cons, cache = jaccache.cache) - if num_cons > 1 - return forwarddiff_color_jacobian(cons, θ, cache) - else - return forwarddiff_color_jacobian(cons, θ, cache)[1, :] - end - end - else - cons_j = (θ) -> f.cons_j(θ, p) - end - - conshess_sparsity = f.cons_hess_prototype - conshess_colors = f.cons_hess_colorvec - if cons !== nothing && f.cons_h === nothing - fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] - conshess_sparsity = Symbolics.hessian_sparsity.(fncs, Ref(x)) - conshess_colors = SparseDiffTools.matrix_colors.(conshess_sparsity) - if adtype.dense_ad.compile - T = ForwardDiff.Tag(OptimizationSparseReverseTag(), eltype(x)) - xduals = [ForwardDiff.Dual{ - typeof(T), - eltype(x), - min(chunksize, maximum(conshess_colors[i])) - }.(x, - Ref(ForwardDiff.Partials((ones(eltype(x), - min(chunksize, maximum(conshess_colors[i])))...,)))) - for i in 1:num_cons] - consh_tapes = [ReverseDiff.GradientTape(fncs[i], xduals[i]) for i in 1:num_cons] - conshtapes = ReverseDiff.compile.(consh_tapes) - function grad_cons(θ, htape) - ReverseDiff.gradient!(htape, θ) - end - gs = [(x) -> grad_cons(x, conshtapes[i]) for i in 1:num_cons] - jaccfgs = [ForwardColorJacCache(gs[i], - x; - tag = typeof(T), - colorvec = conshess_colors[i], - sparsity = conshess_sparsity[i]) for i in 1:num_cons] - cons_h = function (θ, args...) - map(1:num_cons) do i - SparseDiffTools.forwarddiff_color_jacobian(gs[i], - θ, - jaccfgs[i]) - end - end - else - cons_h = function (θ) - map(1:num_cons) do i - SparseDiffTools.forwarddiff_color_jacobian(θ, - colorvec = conshess_colors[i], - sparsity = conshess_sparsity[i]) do θ - ReverseDiff.gradient(fncs[i], θ) - end - end - end - end - else - cons_h = (θ) -> f.cons_h(θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) - end - return OptimizationFunction{false}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_sparsity, - hess_colorvec = hess_colors, - cons_jac_prototype = cons_jac_prototype, - cons_jac_colorvec = cons_jac_colorvec, - cons_hess_prototype = conshess_sparsity, - cons_hess_colorvec = conshess_colors, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, - cache::OptimizationBase.ReInitCache, - adtype::AutoSparse{<:AutoReverseDiff}, num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, cache.p, args...)) - - chunksize = default_chunk_size(length(cache.u0)) - p = cache.p - x = cache.u0 - - if f.grad === nothing - if adtype.dense_ad.compile - _tape = ReverseDiff.GradientTape(_f, x) - tape = ReverseDiff.compile(_tape) - grad = function (θ, args...) - ReverseDiff.gradient!(tape, θ) - end - else - cfg = ReverseDiff.GradientConfig(x) - grad = (θ, args...) -> ReverseDiff.gradient(x -> _f(x, args...), - θ, - cfg) - end - else - grad = (θ, args...) -> f.grad(θ, p, args...) - end - - hess_sparsity = f.hess_prototype - hess_colors = f.hess_colorvec - if f.hess === nothing - hess_sparsity = Symbolics.hessian_sparsity(_f, x) - hess_colors = SparseDiffTools.matrix_colors(tril(hess_sparsity)) - if adtype.dense_ad.compile - T = ForwardDiff.Tag(OptimizationSparseReverseTag(), eltype(x)) - xdual = ForwardDiff.Dual{ - typeof(T), - eltype(x), - min(chunksize, maximum(hess_colors)) - }.(x, - Ref(ForwardDiff.Partials((ones(eltype(x), - min(chunksize, maximum(hess_colors)))...,)))) - h_tape = ReverseDiff.GradientTape(_f, xdual) - htape = ReverseDiff.compile(h_tape) - function g(θ) - ReverseDiff.gradient!(htape, θ) - end - jaccfg = ForwardColorJacCache(g, - x; - tag = typeof(T), - colorvec = hess_colors, - sparsity = hess_sparsity) - hess = function (θ, args...) - return SparseDiffTools.forwarddiff_color_jacobian(g, θ, jaccfg) - end - else - hess = function (θ, args...) - return SparseDiffTools.forwarddiff_color_jacobian(θ, - colorvec = hess_colors, - sparsity = hess_sparsity) do θ - ReverseDiff.gradient(x -> _f(x, args...), θ) - end - end - end - else - hess = (θ, args...) -> f.hess(θ, p, args...) - end - - if f.hv === nothing - hv = function (θ, v, args...) - # _θ = ForwardDiff.Dual.(θ, v) - # res = similar(_θ) - # grad(res, _θ, args...) - # H .= getindex.(ForwardDiff.partials.(res), 1) - res = zeros(length(θ), length(θ)) - hess(θ, args...) * v - end - else - hv = f.hv - end - - if f.cons === nothing - cons = nothing - else - cons = (θ) -> f.cons(θ, p) - cons_oop = cons - end - - cons_jac_prototype = f.cons_jac_prototype - cons_jac_colorvec = f.cons_jac_colorvec - if cons !== nothing && f.cons_j === nothing - jaccache = SparseDiffTools.sparse_jacobian_cache(AutoSparseForwardDiff(), - SparseDiffTools.SymbolicsSparsityDetection(), - cons_oop, - x, - fx = zeros(eltype(x), num_cons)) - # let cons = cons, θ = cache.u0, cons_jac_colorvec = cons_jac_colorvec, cons_jac_prototype = cons_jac_prototype, num_cons = num_cons - # ForwardColorJacCache(cons, θ; - # colorvec = cons_jac_colorvec, - # sparsity = cons_jac_prototype, - # dx = zeros(eltype(θ), num_cons)) - # end - cons_jac_prototype = jaccache.jac_prototype - cons_jac_colorvec = jaccache.coloring - cons_j = function (θ, args...; cons = cons, cache = jaccache.cache) - if num_cons > 1 - return forwarddiff_color_jacobian(cons, θ, cache) - else - return forwarddiff_color_jacobian(cons, θ, cache)[1, :] - end - end - else - cons_j = (θ) -> f.cons_j(θ, p) - end - - conshess_sparsity = f.cons_hess_prototype - conshess_colors = f.cons_hess_colorvec - if cons !== nothing && f.cons_h === nothing - fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] - conshess_sparsity = Symbolics.hessian_sparsity.(fncs, Ref(x)) - conshess_colors = SparseDiffTools.matrix_colors.(conshess_sparsity) - if adtype.dense_ad.compile - T = ForwardDiff.Tag(OptimizationSparseReverseTag(), eltype(x)) - xduals = [ForwardDiff.Dual{ - typeof(T), - eltype(x), - min(chunksize, maximum(conshess_colors[i])) - }.(x, - Ref(ForwardDiff.Partials((ones(eltype(x), - min(chunksize, maximum(conshess_colors[i])))...,)))) - for i in 1:num_cons] - consh_tapes = [ReverseDiff.GradientTape(fncs[i], xduals[i]) for i in 1:num_cons] - conshtapes = ReverseDiff.compile.(consh_tapes) - function grad_cons(θ, htape) - ReverseDiff.gradient!(htape, θ) - end - gs = [(x) -> grad_cons(x, conshtapes[i]) for i in 1:num_cons] - jaccfgs = [ForwardColorJacCache(gs[i], - x; - tag = typeof(T), - colorvec = conshess_colors[i], - sparsity = conshess_sparsity[i]) for i in 1:num_cons] - cons_h = function (θ, args...) - map(1:num_cons) do i - SparseDiffTools.forwarddiff_color_jacobian(gs[i], - θ, - jaccfgs[i]) - end - end - else - cons_h = function (θ) - map(1:num_cons) do i - SparseDiffTools.forwarddiff_color_jacobian(θ, - colorvec = conshess_colors[i], - sparsity = conshess_sparsity[i]) do θ - ReverseDiff.gradient(fncs[i], θ) - end - end - end - end - else - cons_h = (θ) -> f.cons_h(θ, p) - end - - if f.lag_h === nothing - lag_h = nothing # Consider implementing this - else - lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) - end - return OptimizationFunction{false}(f.f, adtype; grad = grad, hess = hess, hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_sparsity, - hess_colorvec = hess_colors, - cons_jac_prototype = cons_jac_prototype, - cons_jac_colorvec = cons_jac_colorvec, - cons_hess_prototype = conshess_sparsity, - cons_hess_colorvec = conshess_colors, - lag_h = lag_h, - lag_hess_prototype = f.lag_hess_prototype, - sys = f.sys, - expr = f.expr, - cons_expr = f.cons_expr) -end diff --git a/ext/OptimizationTrackerExt.jl b/ext/OptimizationTrackerExt.jl deleted file mode 100644 index f315d54..0000000 --- a/ext/OptimizationTrackerExt.jl +++ /dev/null @@ -1,72 +0,0 @@ -module OptimizationTrackerExt - -import OptimizationBase -import OptimizationBase.SciMLBase: OptimizationFunction -import OptimizationBase.ADTypes: AutoTracker -isdefined(Base, :get_extension) ? (using Tracker) : (using ..Tracker) - -function OptimizationBase.instantiate_function(f, x, adtype::AutoTracker, p, - num_cons = 0) - num_cons != 0 && error("AutoTracker does not currently support constraints") - _f = (θ, args...) -> first(f.f(θ, p, args...)) - - if f.grad === nothing - grad = (res, θ, args...) -> res .= Tracker.data(Tracker.gradient( - x -> _f(x, args...), - θ)[1]) - else - grad = (G, θ, args...) -> f.grad(G, θ, p, args...) - end - - if f.hess === nothing - hess = (res, θ, args...) -> error("Hessian based methods not supported with Tracker backend, pass in the `hess` kwarg") - else - hess = (H, θ, args...) -> f.hess(H, θ, p, args...) - end - - if f.hv === nothing - hv = (res, θ, args...) -> error("Hessian based methods not supported with Tracker backend, pass in the `hess` and `hv` kwargs") - else - hv = f.hv - end - - return OptimizationFunction{false}(f, adtype; grad = grad, hess = hess, hv = hv, - cons = nothing, cons_j = nothing, cons_h = nothing, - hess_prototype = f.hess_prototype, - cons_jac_prototype = nothing, - cons_hess_prototype = nothing) -end - -function OptimizationBase.instantiate_function(f, cache::OptimizationBase.ReInitCache, - adtype::AutoTracker, num_cons = 0) - num_cons != 0 && error("AutoTracker does not currently support constraints") - _f = (θ, args...) -> first(f.f(θ, cache.p, args...)) - - if f.grad === nothing - grad = (res, θ, args...) -> res .= Tracker.data(Tracker.gradient( - x -> _f(x, args...), - θ)[1]) - else - grad = (G, θ, args...) -> f.grad(G, θ, cache.p, args...) - end - - if f.hess === nothing - hess = (res, θ, args...) -> error("Hessian based methods not supported with Tracker backend, pass in the `hess` kwarg") - else - hess = (H, θ, args...) -> f.hess(H, θ, cache.p, args...) - end - - if f.hv === nothing - hv = (res, θ, args...) -> error("Hessian based methods not supported with Tracker backend, pass in the `hess` and `hv` kwargs") - else - hv = f.hv - end - - return OptimizationFunction{false}(f, adtype; grad = grad, hess = hess, hv = hv, - cons = nothing, cons_j = nothing, cons_h = nothing, - hess_prototype = f.hess_prototype, - cons_jac_prototype = nothing, - cons_hess_prototype = nothing) -end - -end diff --git a/ext/OptimizationZygoteExt.jl b/ext/OptimizationZygoteExt.jl index 867472a..fe348ca 100644 --- a/ext/OptimizationZygoteExt.jl +++ b/ext/OptimizationZygoteExt.jl @@ -342,4 +342,4 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, cons_expr = f.cons_expr) end -end +end \ No newline at end of file From e771c6c27808eb9f02aa8cb0520058a57de503bf Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Fri, 31 May 2024 16:30:15 -0400 Subject: [PATCH 02/14] DI to latest --- Project.toml | 1 + test/Project.toml | 2 ++ test/adtests.jl | 2 +- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 0dbcfb2..5aec7a9 100644 --- a/Project.toml +++ b/Project.toml @@ -32,6 +32,7 @@ OptimizationZygoteExt = ["Zygote", "DifferentiationInterface"] [compat] ADTypes = "1.3" ArrayInterface = "7.6" +DifferentiationInterface = "0.5.2" DocStringExtensions = "0.9" LinearAlgebra = "1.9, 1.10" ModelingToolkit = "9" diff --git a/test/Project.toml b/test/Project.toml index 8cc5a79..6ad1f79 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -3,6 +3,7 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66" +DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" DiffEqFlux = "aae7a2af-3d4f-5e19-a356-7da93b79d9d0" Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" @@ -30,6 +31,7 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [compat] Aqua = "0.8" ComponentArrays = ">= 0.13.9" +DifferentiationInterface = "0.5.2" DiffEqFlux = ">= 2" Flux = "0.13, 0.14" IterTools = ">= 1.3.0" diff --git a/test/adtests.jl b/test/adtests.jl index 2558eb6..4dc2e00 100644 --- a/test/adtests.jl +++ b/test/adtests.jl @@ -1,4 +1,4 @@ -using OptimizationBase, Test, SparseArrays, Symbolics +using OptimizationBase, Test, DifferentiationInterface,SparseArrays, Symbolics using ForwardDiff, Zygote, ReverseDiff, FiniteDiff, Tracker using ModelingToolkit, Enzyme, Random From 3b41754d04f85fc266c2c59539afc995c357a7fe Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Thu, 6 Jun 2024 19:55:29 -0400 Subject: [PATCH 03/14] Flesh out latest ideas --- Project.toml | 6 +- ext/OptimizationDIExt.jl | 8 +- ext/OptimizationDISparseExt.jl | 380 +++++++++++++++++++++++++++++++++ ext/OptimizationMTKExt.jl | 2 +- ext/OptimizationZygoteExt.jl | 3 +- 5 files changed, 394 insertions(+), 5 deletions(-) create mode 100644 ext/OptimizationDISparseExt.jl diff --git a/Project.toml b/Project.toml index 5aec7a9..fc524c2 100644 --- a/Project.toml +++ b/Project.toml @@ -18,14 +18,18 @@ SymbolicAnalysis = "4297ee4d-0239-47d8-ba5d-195ecdf594fe" SymbolicIndexingInterface = "2efcf032-c050-4f8e-a9bb-153293bab1f5" Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" +SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35" [weakdeps] DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" +Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" +ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" +ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [extensions] -OptimizationDIExt = "DifferentiationInterface" +OptimizationDIExt = ["DifferentiationInterface", "ForwardDiff", "ReverseDiff"] OptimizationMTKExt = "ModelingToolkit" OptimizationZygoteExt = ["Zygote", "DifferentiationInterface"] diff --git a/ext/OptimizationDIExt.jl b/ext/OptimizationDIExt.jl index 186327d..a8c1c42 100644 --- a/ext/OptimizationDIExt.jl +++ b/ext/OptimizationDIExt.jl @@ -7,10 +7,16 @@ import DifferentiationInterface import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp, prepare_jacobian, gradient!, hessian!, hvp!, jacobian!, gradient, hessian, hvp, jacobian using ADTypes +import ForwardDiff, ReverseDiff function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, adtype::ADTypes.AbstractADType, p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) - soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + if adtype isa ADTypes.ForwardMode + soadtype = DifferentiationInterface.SecondOrder(adtype, AutoReverseDiff()) + elseif adtype isa ADTypes.ReverseMode + soadtype = DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype) + end if f.grad === nothing extras_grad = prepare_gradient(_f, adtype, x) diff --git a/ext/OptimizationDISparseExt.jl b/ext/OptimizationDISparseExt.jl new file mode 100644 index 0000000..6cff16b --- /dev/null +++ b/ext/OptimizationDISparseExt.jl @@ -0,0 +1,380 @@ +module OptimizationDIExt + +import OptimizationBase, OptimizationBase.ArrayInterface +import OptimizationBase.SciMLBase: OptimizationFunction +import OptimizationBase.LinearAlgebra: I +import DifferentiationInterface +import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp, prepare_jacobian, + gradient!, hessian!, hvp!, jacobian!, gradient, hessian, hvp, jacobian +using ADTypes +using SparseConnectivityTracer, SparseMatrixColorings + +function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, adtype::ADTypes.AutoSparse, p = SciMLBase.NullParameters(), num_cons = 0) + _f = (θ, args...) -> first(f.f(θ, p, args...)) + + if adtype.sparsity_detector isa ADTypes.NoSparsityDetector && adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm + adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) + elseif adtype.sparsity_detector isa ADTypes.NoSparsityDetector && !(adtype.coloring_algorithm isa AbstractADTypes.NoColoringAlgorithm) + adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) + elseif !(adtype.sparsity_detector isa ADTypes.NoSparsityDetector) && adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm + adtype = AutoSparse(adtype.dense_ad; sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) + end + + if f.grad === nothing + extras_grad = prepare_gradient(_f, adtype, x) + function grad(res, θ) + gradient!(_f, res, adtype, θ, extras_grad) + end + else + grad = (G, θ, args...) -> f.grad(G, θ, p, args...) + end + + hess_sparsity = f.hess_prototype + hess_colors = f.hess_colorvec + if f.hess === nothing + extras_hess = prepare_hessian(_f, soadtype, x) #placeholder logic, can be made much better + function hess(res, θ, args...) + hessian!(_f, res, adtype, θ, extras_hess) + end + else + hess = (H, θ, args...) -> f.hess(H, θ, p, args...) + end + + if f.hv === nothing + extras_hvp = nothing + hv = function (H, θ, v, args...) + if extras_hvp === nothing + global extras_hvp = prepare_hvp(_f, soadtype, x, v) + end + hvp!(_f, H, soadtype, θ, v, extras_hvp) + end + else + hv = f.hv + end + + if f.cons === nothing + cons = nothing + else + cons = (res, θ) -> f.cons(res, θ, p) + cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res) + end + + cons_jac_prototype = f.cons_jac_prototype + cons_jac_colorvec = f.cons_jac_colorvec + if cons !== nothing && f.cons_j === nothing + extras_jac = prepare_jacobian(cons_oop, adtype, x) + cons_j = function (J, θ) + jacobian!(cons_oop, J, adtype, θ, extras_jac) + if size(J, 1) == 1 + J = vec(J) + end + end + else + cons_j = (J, θ) -> f.cons_j(J, θ, p) + end + + conshess_sparsity = f.cons_hess_prototype + conshess_colors = f.cons_hess_colorvec + if cons !== nothing && f.cons_h === nothing + fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] + extras_cons_hess = prepare_hessian.(fncs, Ref(soadtype), Ref(x)) + + function cons_h(H, θ) + for i in 1:num_cons + hessian!(fncs[i], H[i], adtype, θ, extras_cons_hess[i]) + end + end + else + cons_h = (res, θ) -> f.cons_h(res, θ, p) + end + + if f.lag_h === nothing + lag_h = nothing # Consider implementing this + else + lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, p) + end + return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, + cons = cons, cons_j = cons_j, cons_h = cons_h, + hess_prototype = hess_sparsity, + hess_colorvec = hess_colors, + cons_jac_prototype = cons_jac_prototype, + cons_jac_colorvec = cons_jac_colorvec, + cons_hess_prototype = conshess_sparsity, + cons_hess_colorvec = conshess_colors, + lag_h, f.lag_hess_prototype) +end + +function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AbstractADType, num_cons = 0) + x = cache.u0 + p = cache.p + _f = (θ, args...) -> first(f.f(θ, p, args...)) + soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + if f.grad === nothing + extras_grad = prepare_gradient(_f, adtype, x) + function grad(res, θ) + gradient!(_f, res, adtype, θ, extras_grad) + end + else + grad = (G, θ, args...) -> f.grad(G, θ, p, args...) + end + + hess_sparsity = f.hess_prototype + hess_colors = f.hess_colorvec + if f.hess === nothing + extras_hess = prepare_hessian(_f, soadtype, x) + function hess(res, θ, args...) + hessian!(_f, res, soadtype, θ, extras_hess) + end + else + hess = (H, θ, args...) -> f.hess(H, θ, p, args...) + end + + if f.hv === nothing + extras_hvp = nothing + hv = function (H, θ, v, args...) + if extras_hvp === nothing + global extras_hvp = prepare_hvp(_f, soadtype, x, v) + end + hvp!(_f, H, soadtype, θ, v, extras_hvp) + end + else + hv = f.hv + end + + if f.cons === nothing + cons = nothing + else + cons = (res, θ) -> f.cons(res, θ, p) + cons_oop = (x) -> (_res = zeros(eltype(x), num_cons); cons(_res, x); _res) + end + + cons_jac_prototype = f.cons_jac_prototype + cons_jac_colorvec = f.cons_jac_colorvec + if cons !== nothing && f.cons_j === nothing + extras_jac = prepare_jacobian(cons_oop, adtype, x) + cons_j = function (J, θ) + jacobian!(cons_oop, J, adtype, θ, extras_jac) + if size(J, 1) == 1 + J = vec(J) + end + end + else + cons_j = (J, θ) -> f.cons_j(J, θ, p) + end + + conshess_sparsity = f.cons_hess_prototype + conshess_colors = f.cons_hess_colorvec + if cons !== nothing && f.cons_h === nothing + fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] + extras_cons_hess = prepare_hessian.(fncs, Ref(soadtype), Ref(x)) + + function cons_h(H, θ) + for i in 1:num_cons + hessian!(fncs[i], H[i], adtype, θ, extras_cons_hess[i]) + end + end + else + cons_h = (res, θ) -> f.cons_h(res, θ, p) + end + + if f.lag_h === nothing + lag_h = nothing # Consider implementing this + else + lag_h = (res, θ, σ, μ) -> f.lag_h(res, θ, σ, μ, p) + end + return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, + cons = cons, cons_j = cons_j, cons_h = cons_h, + hess_prototype = hess_sparsity, + hess_colorvec = hess_colors, + cons_jac_prototype = cons_jac_prototype, + cons_jac_colorvec = cons_jac_colorvec, + cons_hess_prototype = conshess_sparsity, + cons_hess_colorvec = conshess_colors, + lag_h, f.lag_hess_prototype) +end + + +function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, adtype::ADTypes.AbstractADType, p = SciMLBase.NullParameters(), num_cons = 0) + _f = (θ, args...) -> first(f.f(θ, p, args...)) + soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + if f.grad === nothing + extras_grad = prepare_gradient(_f, adtype, x) + function grad(θ) + gradient(_f, adtype, θ, extras_grad) + end + else + grad = (θ, args...) -> f.grad(θ, p, args...) + end + + hess_sparsity = f.hess_prototype + hess_colors = f.hess_colorvec + if f.hess === nothing + extras_hess = prepare_hessian(_f, soadtype, x) #placeholder logic, can be made much better + function hess(θ, args...) + hessian(_f, adtype, θ, extras_hess) + end + else + hess = (θ, args...) -> f.hess(θ, p, args...) + end + + if f.hv === nothing + extras_hvp = nothing + hv = function (θ, v, args...) + if extras_hvp === nothing + global extras_hvp = prepare_hvp(_f, soadtype, x, v) + end + hvp(_f, soadtype, θ, v, extras_hvp) + end + else + hv = f.hv + end + + if f.cons === nothing + cons = nothing + else + cons = (θ) -> f.cons(θ, p) + cons_oop = cons + end + + cons_jac_prototype = f.cons_jac_prototype + cons_jac_colorvec = f.cons_jac_colorvec + if cons !== nothing && f.cons_j === nothing + extras_jac = prepare_jacobian(cons_oop, adtype, x) + cons_j = function (θ) + J = jacobian(cons_oop, adtype, θ, extras_jac) + if size(J, 1) == 1 + J = vec(J) + end + return J + end + else + cons_j = (θ) -> f.cons_j(θ, p) + end + + conshess_sparsity = f.cons_hess_prototype + conshess_colors = f.cons_hess_colorvec + if cons !== nothing && f.cons_h === nothing + fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] + extras_cons_hess = prepare_hessian.(fncs, Ref(soadtype), Ref(x)) + + function cons_h(θ) + H = map(1:num_cons) do i + hessian(fncs[i], adtype, θ, extras_cons_hess[i]) + end + return H + end + else + cons_h = (res, θ) -> f.cons_h(res, θ, p) + end + + if f.lag_h === nothing + lag_h = nothing # Consider implementing this + else + lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) + end + return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, + cons = cons, cons_j = cons_j, cons_h = cons_h, + hess_prototype = hess_sparsity, + hess_colorvec = hess_colors, + cons_jac_prototype = cons_jac_prototype, + cons_jac_colorvec = cons_jac_colorvec, + cons_hess_prototype = conshess_sparsity, + cons_hess_colorvec = conshess_colors, + lag_h, f.lag_hess_prototype) +end + +function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AbstractADType, num_cons = 0) + x = cache.u0 + p = cache.p + _f = (θ, args...) -> first(f.f(θ, p, args...)) + soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + if f.grad === nothing + extras_grad = prepare_gradient(_f, adtype, x) + function grad(θ) + gradient(_f, adtype, θ, extras_grad) + end + else + grad = (θ, args...) -> f.grad(θ, p, args...) + end + + hess_sparsity = f.hess_prototype + hess_colors = f.hess_colorvec + if f.hess === nothing + extras_hess = prepare_hessian(_f, soadtype, x) #placeholder logic, can be made much better + function hess(θ, args...) + hessian(_f, soadtype, θ, extras_hess) + end + else + hess = (θ, args...) -> f.hess(θ, p, args...) + end + + if f.hv === nothing + extras_hvp = nothing + hv = function (θ, v, args...) + if extras_hvp === nothing + global extras_hvp = prepare_hvp(_f, soadtype, x, v) + end + hvp(_f, soadtype, θ, v, extras_hvp) + end + else + hv = f.hv + end + + if f.cons === nothing + cons = nothing + else + cons = (θ) -> f.cons(θ, p) + cons_oop = cons + end + + cons_jac_prototype = f.cons_jac_prototype + cons_jac_colorvec = f.cons_jac_colorvec + if cons !== nothing && f.cons_j === nothing + extras_jac = prepare_jacobian(cons_oop, adtype, x) + cons_j = function (θ) + J = jacobian(cons_oop, adtype, θ, extras_jac) + if size(J, 1) == 1 + J = vec(J) + end + return J + end + else + cons_j = (θ) -> f.cons_j(θ, p) + end + + conshess_sparsity = f.cons_hess_prototype + conshess_colors = f.cons_hess_colorvec + if cons !== nothing && f.cons_h === nothing + fncs = [(x) -> cons_oop(x)[i] for i in 1:num_cons] + extras_cons_hess = prepare_hessian.(fncs, Ref(soadtype), Ref(x)) + + function cons_h(θ) + H = map(1:num_cons) do i + hessian(fncs[i], adtype, θ, extras_cons_hess[i]) + end + return H + end + else + cons_h = (θ) -> f.cons_h(θ, p) + end + + if f.lag_h === nothing + lag_h = nothing # Consider implementing this + else + lag_h = (θ, σ, μ) -> f.lag_h(θ, σ, μ, p) + end + return OptimizationFunction{true}(f.f, adtype; grad = grad, hess = hess, hv = hv, + cons = cons, cons_j = cons_j, cons_h = cons_h, + hess_prototype = hess_sparsity, + hess_colorvec = hess_colors, + cons_jac_prototype = cons_jac_prototype, + cons_jac_colorvec = cons_jac_colorvec, + cons_hess_prototype = conshess_sparsity, + cons_hess_colorvec = conshess_colors, + lag_h, f.lag_hess_prototype) +end + +end \ No newline at end of file diff --git a/ext/OptimizationMTKExt.jl b/ext/OptimizationMTKExt.jl index 1b84f72..716fd87 100644 --- a/ext/OptimizationMTKExt.jl +++ b/ext/OptimizationMTKExt.jl @@ -4,7 +4,7 @@ import OptimizationBase, OptimizationBase.ArrayInterface import OptimizationBase.SciMLBase import OptimizationBase.SciMLBase: OptimizationFunction import OptimizationBase.ADTypes: AutoModelingToolkit, AutoSymbolics, AutoSparse -isdefined(Base, :get_extension) ? (using ModelingToolkit) : (using ..ModelingToolkit) +using ModelingToolkit function OptimizationBase.instantiate_function( f::OptimizationFunction{true}, x, adtype::AutoSparse{<:AutoSymbolics, S, C}, p, diff --git a/ext/OptimizationZygoteExt.jl b/ext/OptimizationZygoteExt.jl index fe348ca..b72bc71 100644 --- a/ext/OptimizationZygoteExt.jl +++ b/ext/OptimizationZygoteExt.jl @@ -3,8 +3,7 @@ module OptimizationZygoteExt import OptimizationBase import OptimizationBase.SciMLBase: OptimizationFunction import OptimizationBase.ADTypes: AutoZygote -isdefined(Base, :get_extension) ? (using Zygote, Zygote.ForwardDiff) : -(using ..Zygote, ..Zygote.ForwardDiff) +using Zygote, Zygote.ForwardDiff function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, adtype::AutoZygote, p, From 3951c88aa264f8277acef9a3b6681b85e574c938 Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Thu, 6 Jun 2024 22:41:09 -0400 Subject: [PATCH 04/14] EnzymeExt and fix mode checking --- Project.toml | 1 + ext/OptimizationDIExt.jl | 6 +++--- test/adtests.jl | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Project.toml b/Project.toml index fc524c2..7f3a805 100644 --- a/Project.toml +++ b/Project.toml @@ -30,6 +30,7 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [extensions] OptimizationDIExt = ["DifferentiationInterface", "ForwardDiff", "ReverseDiff"] +OptimizationEnzymeExt = "Enzyme" OptimizationMTKExt = "ModelingToolkit" OptimizationZygoteExt = ["Zygote", "DifferentiationInterface"] diff --git a/ext/OptimizationDIExt.jl b/ext/OptimizationDIExt.jl index a8c1c42..51d42a3 100644 --- a/ext/OptimizationDIExt.jl +++ b/ext/OptimizationDIExt.jl @@ -12,9 +12,9 @@ import ForwardDiff, ReverseDiff function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, adtype::ADTypes.AbstractADType, p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) - if adtype isa ADTypes.ForwardMode + if ADTypes.mode(adtype) isa ADTypes.ForwardMode soadtype = DifferentiationInterface.SecondOrder(adtype, AutoReverseDiff()) - elseif adtype isa ADTypes.ReverseMode + elseif ADTypes.mode(adtype) isa ADTypes.ReverseMode soadtype = DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype) end @@ -30,7 +30,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, hess_sparsity = f.hess_prototype hess_colors = f.hess_colorvec if f.hess === nothing - extras_hess = prepare_hessian(_f, soadtype, x) #placeholder logic, can be made much better + extras_hess = prepare_hessian(_f, soadtype, x) function hess(res, θ, args...) hessian!(_f, res, adtype, θ, extras_hess) end diff --git a/test/adtests.jl b/test/adtests.jl index 4dc2e00..cb2f67b 100644 --- a/test/adtests.jl +++ b/test/adtests.jl @@ -1,4 +1,4 @@ -using OptimizationBase, Test, DifferentiationInterface,SparseArrays, Symbolics +using OptimizationBase, Test, DifferentiationInterface, SparseArrays, Symbolics using ForwardDiff, Zygote, ReverseDiff, FiniteDiff, Tracker using ModelingToolkit, Enzyme, Random From ad7ca08f8f8de5cc1363b97ce1602368f5ef4920 Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Mon, 8 Jul 2024 22:31:34 -0400 Subject: [PATCH 05/14] Use the changes from main in enzyme --- ext/OptimizationEnzymeExt.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ext/OptimizationEnzymeExt.jl b/ext/OptimizationEnzymeExt.jl index 54e1140..c0f6e1a 100644 --- a/ext/OptimizationEnzymeExt.jl +++ b/ext/OptimizationEnzymeExt.jl @@ -535,4 +535,4 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, cons_hess_prototype = f.cons_hess_prototype) end -end +end \ No newline at end of file From 4fe1f54f313c587d71114621fb68e13307ae4d15 Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Mon, 15 Jul 2024 15:42:19 -0400 Subject: [PATCH 06/14] Flesh out sparsity and secondorder things more, change the extension structure --- Project.toml | 11 ++-- ext/OptimizationFiniteDiffExt.jl | 5 ++ ext/OptimizationForwardDiffExt.jl | 5 ++ ext/OptimizationMTKExt.jl | 6 +- ext/OptimizationReverseDiffExt.jl | 5 ++ src/OptimizationBase.jl | 2 + {ext => src}/OptimizationDIExt.jl | 49 ++++++++++------- {ext => src}/OptimizationDISparseExt.jl | 73 +++++++++++++++++-------- src/function.jl | 59 +------------------- test/adtests.jl | 10 ++-- 10 files changed, 112 insertions(+), 113 deletions(-) create mode 100644 ext/OptimizationFiniteDiffExt.jl create mode 100644 ext/OptimizationForwardDiffExt.jl create mode 100644 ext/OptimizationReverseDiffExt.jl rename {ext => src}/OptimizationDIExt.jl (86%) rename {ext => src}/OptimizationDISparseExt.jl (76%) diff --git a/Project.toml b/Project.toml index 7f3a805..97bec92 100644 --- a/Project.toml +++ b/Project.toml @@ -7,6 +7,7 @@ version = "1.3.3" ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" ArrayInterface = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" +DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" Manifolds = "1cead3c2-87b3-11e9-0ccd-23c62b72b94e" PDMats = "90014a1f-27ba-587c-ab20-58faa44d9150" @@ -21,18 +22,20 @@ SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35" [weakdeps] -DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" +FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [extensions] -OptimizationDIExt = ["DifferentiationInterface", "ForwardDiff", "ReverseDiff"] +OptimizationForwardDiffExt = "ForwardDiff" +OptimizationFiniteDiffExt = "FiniteDiff" +OptimizationReverseDiffExt = "ReverseDiff" OptimizationEnzymeExt = "Enzyme" OptimizationMTKExt = "ModelingToolkit" -OptimizationZygoteExt = ["Zygote", "DifferentiationInterface"] +OptimizationZygoteExt = "Zygote" [compat] ADTypes = "1.3" @@ -44,11 +47,9 @@ ModelingToolkit = "9" Reexport = "1.2" Requires = "1" SciMLBase = "2" -SparseDiffTools = "2.14" SymbolicAnalysis = "0.1, 0.2" SymbolicIndexingInterface = "0.3" Symbolics = "5.12" -Tracker = "0.2.29" Zygote = "0.6.67" julia = "1.10" diff --git a/ext/OptimizationFiniteDiffExt.jl b/ext/OptimizationFiniteDiffExt.jl new file mode 100644 index 0000000..b0a95a6 --- /dev/null +++ b/ext/OptimizationFiniteDiffExt.jl @@ -0,0 +1,5 @@ +module OptimizationFiniteDiffExt + +using DifferentiationInterface, FiniteDiff + +end \ No newline at end of file diff --git a/ext/OptimizationForwardDiffExt.jl b/ext/OptimizationForwardDiffExt.jl new file mode 100644 index 0000000..0da99fb --- /dev/null +++ b/ext/OptimizationForwardDiffExt.jl @@ -0,0 +1,5 @@ +module OptimizationForwardDiffExt + +using DifferentiationInterface, ForwardDiff + +end \ No newline at end of file diff --git a/ext/OptimizationMTKExt.jl b/ext/OptimizationMTKExt.jl index 716fd87..08ebef9 100644 --- a/ext/OptimizationMTKExt.jl +++ b/ext/OptimizationMTKExt.jl @@ -7,8 +7,8 @@ import OptimizationBase.ADTypes: AutoModelingToolkit, AutoSymbolics, AutoSparse using ModelingToolkit function OptimizationBase.instantiate_function( - f::OptimizationFunction{true}, x, adtype::AutoSparse{<:AutoSymbolics, S, C}, p, - num_cons = 0) where {S, C} + f::OptimizationFunction{true}, x, adtype::AutoSparse{<:AutoSymbolics}, p, + num_cons = 0) p = isnothing(p) ? SciMLBase.NullParameters() : p sys = complete(ModelingToolkit.modelingtoolkitize(OptimizationProblem(f, x, p; @@ -53,7 +53,7 @@ function OptimizationBase.instantiate_function( end function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, - adtype::AutoSparse{<:AutoSymbolics, S, C}, num_cons = 0) where {S, C} + adtype::AutoSparse{<:AutoSymbolics}, num_cons = 0) p = isnothing(cache.p) ? SciMLBase.NullParameters() : cache.p sys = complete(ModelingToolkit.modelingtoolkitize(OptimizationProblem(f, cache.u0, diff --git a/ext/OptimizationReverseDiffExt.jl b/ext/OptimizationReverseDiffExt.jl new file mode 100644 index 0000000..23ec82d --- /dev/null +++ b/ext/OptimizationReverseDiffExt.jl @@ -0,0 +1,5 @@ +module OptimizationReverseDiffExt + +using DifferentiationInterface, ReverseDiff + +end \ No newline at end of file diff --git a/src/OptimizationBase.jl b/src/OptimizationBase.jl index 33f738d..ee9486a 100644 --- a/src/OptimizationBase.jl +++ b/src/OptimizationBase.jl @@ -32,6 +32,8 @@ Base.length(::NullData) = 0 include("adtypes.jl") include("cache.jl") include("function.jl") +include("OptimizationDIExt.jl") +include("OptimizationDISparseExt.jl") export solve, OptimizationCache, DEFAULT_CALLBACK, DEFAULT_DATA diff --git a/ext/OptimizationDIExt.jl b/src/OptimizationDIExt.jl similarity index 86% rename from ext/OptimizationDIExt.jl rename to src/OptimizationDIExt.jl index 51d42a3..66bd88d 100644 --- a/ext/OptimizationDIExt.jl +++ b/src/OptimizationDIExt.jl @@ -1,20 +1,18 @@ -module OptimizationDIExt - -import OptimizationBase, OptimizationBase.ArrayInterface +using OptimizationBase +import OptimizationBase.ArrayInterface import OptimizationBase.SciMLBase: OptimizationFunction import OptimizationBase.LinearAlgebra: I import DifferentiationInterface import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp, prepare_jacobian, gradient!, hessian!, hvp!, jacobian!, gradient, hessian, hvp, jacobian -using ADTypes -import ForwardDiff, ReverseDiff +using ADTypes, SciMLBase function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, adtype::ADTypes.AbstractADType, p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) - if ADTypes.mode(adtype) isa ADTypes.ForwardMode - soadtype = DifferentiationInterface.SecondOrder(adtype, AutoReverseDiff()) - elseif ADTypes.mode(adtype) isa ADTypes.ReverseMode + if !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ForwardMode + soadtype = DifferentiationInterface.SecondOrder(adtype, AutoReverseDiff()) #make zygote? + elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode soadtype = DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype) end @@ -32,7 +30,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, if f.hess === nothing extras_hess = prepare_hessian(_f, soadtype, x) function hess(res, θ, args...) - hessian!(_f, res, adtype, θ, extras_hess) + hessian!(_f, res, soadtype, θ, extras_hess) end else hess = (H, θ, args...) -> f.hess(H, θ, p, args...) @@ -79,7 +77,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, function cons_h(H, θ) for i in 1:num_cons - hessian!(fncs[i], H[i], adtype, θ, extras_cons_hess[i]) + hessian!(fncs[i], H[i], soadtype, θ, extras_cons_hess[i]) end end else @@ -106,7 +104,12 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, ca x = cache.u0 p = cache.p _f = (θ, args...) -> first(f.f(θ, p, args...)) - soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + if !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ForwardMode + soadtype = DifferentiationInterface.SecondOrder(adtype, AutoReverseDiff()) #make zygote? + elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode + soadtype = DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype) + end if f.grad === nothing extras_grad = prepare_gradient(_f, adtype, x) @@ -169,7 +172,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, ca function cons_h(H, θ) for i in 1:num_cons - hessian!(fncs[i], H[i], adtype, θ, extras_cons_hess[i]) + hessian!(fncs[i], H[i], soadtype, θ, extras_cons_hess[i]) end end else @@ -195,7 +198,12 @@ end function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, adtype::ADTypes.AbstractADType, p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) - soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + if !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ForwardMode + soadtype = DifferentiationInterface.SecondOrder(adtype, AutoReverseDiff()) #make zygote? + elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode + soadtype = DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype) + end if f.grad === nothing extras_grad = prepare_gradient(_f, adtype, x) @@ -211,7 +219,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x if f.hess === nothing extras_hess = prepare_hessian(_f, soadtype, x) #placeholder logic, can be made much better function hess(θ, args...) - hessian(_f, adtype, θ, extras_hess) + hessian(_f, soadtype, θ, extras_hess) end else hess = (θ, args...) -> f.hess(θ, p, args...) @@ -259,7 +267,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x function cons_h(θ) H = map(1:num_cons) do i - hessian(fncs[i], adtype, θ, extras_cons_hess[i]) + hessian(fncs[i], soadtype, θ, extras_cons_hess[i]) end return H end @@ -287,7 +295,12 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, c x = cache.u0 p = cache.p _f = (θ, args...) -> first(f.f(θ, p, args...)) - soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + if !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ForwardMode + soadtype = DifferentiationInterface.SecondOrder(adtype, AutoReverseDiff()) #make zygote? + elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode + soadtype = DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype) + end if f.grad === nothing extras_grad = prepare_gradient(_f, adtype, x) @@ -351,7 +364,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, c function cons_h(θ) H = map(1:num_cons) do i - hessian(fncs[i], adtype, θ, extras_cons_hess[i]) + hessian(fncs[i], soadtype, θ, extras_cons_hess[i]) end return H end @@ -374,5 +387,3 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, c cons_hess_colorvec = conshess_colors, lag_h, f.lag_hess_prototype) end - -end \ No newline at end of file diff --git a/ext/OptimizationDISparseExt.jl b/src/OptimizationDISparseExt.jl similarity index 76% rename from ext/OptimizationDISparseExt.jl rename to src/OptimizationDISparseExt.jl index 6cff16b..0b22f7c 100644 --- a/ext/OptimizationDISparseExt.jl +++ b/src/OptimizationDISparseExt.jl @@ -1,6 +1,5 @@ -module OptimizationDIExt - -import OptimizationBase, OptimizationBase.ArrayInterface +using OptimizationBase +import OptimizationBase.ArrayInterface import OptimizationBase.SciMLBase: OptimizationFunction import OptimizationBase.LinearAlgebra: I import DifferentiationInterface @@ -9,21 +8,48 @@ import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp, using ADTypes using SparseConnectivityTracer, SparseMatrixColorings -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, adtype::ADTypes.AutoSparse, p = SciMLBase.NullParameters(), num_cons = 0) - _f = (θ, args...) -> first(f.f(θ, p, args...)) - +function generate_sparse_adtype(adtype) if adtype.sparsity_detector isa ADTypes.NoSparsityDetector && adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) - elseif adtype.sparsity_detector isa ADTypes.NoSparsityDetector && !(adtype.coloring_algorithm isa AbstractADTypes.NoColoringAlgorithm) + if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) #make zygote? + elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) + end + elseif adtype.sparsity_detector isa ADTypes.NoSparsityDetector && !(adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm) adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) + if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) + elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) + end elseif !(adtype.sparsity_detector isa ADTypes.NoSparsityDetector) && adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm adtype = AutoSparse(adtype.dense_ad; sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) + if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) + elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) + end + else + if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = adtype.coloring_algorithm) + elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = adtype.coloring_algorithm) + end end + return adtype,soadtype +end + + +function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, adtype::ADTypes.AutoSparse{<:AbstractADType}, p = SciMLBase.NullParameters(), num_cons = 0) + _f = (θ, args...) -> first(f.f(θ, p, args...)) + + adtype, soadtype = generate_sparse_adtype(adtype) if f.grad === nothing - extras_grad = prepare_gradient(_f, adtype, x) + extras_grad = prepare_gradient(_f, adtype.dense_ad, x) function grad(res, θ) - gradient!(_f, res, adtype, θ, extras_grad) + gradient!(_f, res, adtype.dense_ad, θ, extras_grad) end else grad = (G, θ, args...) -> f.grad(G, θ, p, args...) @@ -34,7 +60,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, if f.hess === nothing extras_hess = prepare_hessian(_f, soadtype, x) #placeholder logic, can be made much better function hess(res, θ, args...) - hessian!(_f, res, adtype, θ, extras_hess) + hessian!(_f, res, soadtype, θ, extras_hess) end else hess = (H, θ, args...) -> f.hess(H, θ, p, args...) @@ -81,7 +107,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, function cons_h(H, θ) for i in 1:num_cons - hessian!(fncs[i], H[i], adtype, θ, extras_cons_hess[i]) + hessian!(fncs[i], H[i], soadtype, θ, extras_cons_hess[i]) end end else @@ -104,11 +130,12 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, lag_h, f.lag_hess_prototype) end -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AbstractADType, num_cons = 0) +function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AutoSparse{<:AbstractADType}, num_cons = 0) x = cache.u0 p = cache.p _f = (θ, args...) -> first(f.f(θ, p, args...)) - soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + adtype, soadtype = generate_sparse_adtype(adtype) if f.grad === nothing extras_grad = prepare_gradient(_f, adtype, x) @@ -171,7 +198,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, ca function cons_h(H, θ) for i in 1:num_cons - hessian!(fncs[i], H[i], adtype, θ, extras_cons_hess[i]) + hessian!(fncs[i], H[i], soadtype, θ, extras_cons_hess[i]) end end else @@ -195,9 +222,10 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, ca end -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, adtype::ADTypes.AbstractADType, p = SciMLBase.NullParameters(), num_cons = 0) +function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, adtype::ADTypes.AutoSparse{<:AbstractADType}, p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) - soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + adtype, soadtype = generate_sparse_adtype(adtype) if f.grad === nothing extras_grad = prepare_gradient(_f, adtype, x) @@ -213,7 +241,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x if f.hess === nothing extras_hess = prepare_hessian(_f, soadtype, x) #placeholder logic, can be made much better function hess(θ, args...) - hessian(_f, adtype, θ, extras_hess) + hessian(_f, soadtype, θ, extras_hess) end else hess = (θ, args...) -> f.hess(θ, p, args...) @@ -261,7 +289,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x function cons_h(θ) H = map(1:num_cons) do i - hessian(fncs[i], adtype, θ, extras_cons_hess[i]) + hessian(fncs[i], soadtype, θ, extras_cons_hess[i]) end return H end @@ -285,11 +313,12 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x lag_h, f.lag_hess_prototype) end -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AbstractADType, num_cons = 0) +function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AutoSparse{<:AbstractADType}, num_cons = 0) x = cache.u0 p = cache.p _f = (θ, args...) -> first(f.f(θ, p, args...)) - soadtype = DifferentiationInterface.SecondOrder(adtype, adtype) + + adtype, soadtype = generate_sparse_adtype(adtype) if f.grad === nothing extras_grad = prepare_gradient(_f, adtype, x) @@ -353,7 +382,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, c function cons_h(θ) H = map(1:num_cons) do i - hessian(fncs[i], adtype, θ, extras_cons_hess[i]) + hessian(fncs[i], soadtype, θ, extras_cons_hess[i]) end return H end @@ -376,5 +405,3 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, c cons_hess_colorvec = conshess_colors, lag_h, f.lag_hess_prototype) end - -end \ No newline at end of file diff --git a/src/function.jl b/src/function.jl index 8f9dc46..53ab75e 100644 --- a/src/function.jl +++ b/src/function.jl @@ -43,65 +43,8 @@ function that is not defined, an error is thrown. For more information on the use of automatic differentiation, see the documentation of the `AbstractADType` types. """ -function instantiate_function(f, x, ::SciMLBase.NoAD, - p, num_cons = 0) - grad = f.grad === nothing ? nothing : (G, x, args...) -> f.grad(G, x, p, args...) - hess = f.hess === nothing ? nothing : (H, x, args...) -> f.hess(H, x, p, args...) - hv = f.hv === nothing ? nothing : (H, x, v, args...) -> f.hv(H, x, v, p, args...) - cons = f.cons === nothing ? nothing : (res, x) -> f.cons(res, x, p) - cons_j = f.cons_j === nothing ? nothing : (res, x) -> f.cons_j(res, x, p) - cons_h = f.cons_h === nothing ? nothing : (res, x) -> f.cons_h(res, x, p) - hess_prototype = f.hess_prototype === nothing ? nothing : - convert.(eltype(x), f.hess_prototype) - cons_jac_prototype = f.cons_jac_prototype === nothing ? nothing : - convert.(eltype(x), f.cons_jac_prototype) - cons_hess_prototype = f.cons_hess_prototype === nothing ? nothing : - [convert.(eltype(x), f.cons_hess_prototype[i]) - for i in 1:num_cons] - expr = symbolify(f.expr) - cons_expr = symbolify.(f.cons_expr) - - return OptimizationFunction{true}(f.f, SciMLBase.NoAD(); grad = grad, hess = hess, - hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_prototype, - cons_jac_prototype = cons_jac_prototype, - cons_hess_prototype = cons_hess_prototype, - expr = expr, cons_expr = cons_expr, - sys = f.sys, - observed = f.observed) -end - -function instantiate_function(f, cache::ReInitCache, ::SciMLBase.NoAD, - num_cons = 0) - grad = f.grad === nothing ? nothing : (G, x, args...) -> f.grad(G, x, cache.p, args...) - hess = f.hess === nothing ? nothing : (H, x, args...) -> f.hess(H, x, cache.p, args...) - hv = f.hv === nothing ? nothing : (H, x, v, args...) -> f.hv(H, x, v, cache.p, args...) - cons = f.cons === nothing ? nothing : (res, x) -> f.cons(res, x, cache.p) - cons_j = f.cons_j === nothing ? nothing : (res, x) -> f.cons_j(res, x, cache.p) - cons_h = f.cons_h === nothing ? nothing : (res, x) -> f.cons_h(res, x, cache.p) - hess_prototype = f.hess_prototype === nothing ? nothing : - convert.(eltype(cache.u0), f.hess_prototype) - cons_jac_prototype = f.cons_jac_prototype === nothing ? nothing : - convert.(eltype(cache.u0), f.cons_jac_prototype) - cons_hess_prototype = f.cons_hess_prototype === nothing ? nothing : - [convert.(eltype(cache.u0), f.cons_hess_prototype[i]) - for i in 1:num_cons] - expr = symbolify(f.expr) - cons_expr = symbolify.(f.cons_expr) - - return OptimizationFunction{true}(f.f, SciMLBase.NoAD(); grad = grad, hess = hess, - hv = hv, - cons = cons, cons_j = cons_j, cons_h = cons_h, - hess_prototype = hess_prototype, - cons_jac_prototype = cons_jac_prototype, - cons_hess_prototype = cons_hess_prototype, - expr = expr, cons_expr = cons_expr, - sys = f.sys, - observed = f.observed) -end -function instantiate_function(f, x, adtype::ADTypes.AbstractADType, +function instantiate_function(f::OptimizationFunction, x, adtype::ADTypes.AbstractADType, p, num_cons = 0) adtypestr = string(adtype) _strtind = findfirst('.', adtypestr) diff --git a/test/adtests.jl b/test/adtests.jl index cb2f67b..9caff7b 100644 --- a/test/adtests.jl +++ b/test/adtests.jl @@ -232,7 +232,7 @@ optprob = OptimizationBase.instantiate_function(optf, nothing) optprob.grad(G2, x0) @test G1 == G2 -@test_throws ErrorException optprob.hess(H2, x0) +@test_broken optprob.hess(H2, x0) prob = OptimizationProblem(optf, x0) @@ -270,8 +270,8 @@ optprob.cons_h(H3, x0) H4 = Array{Float64}(undef, 2, 2) μ = randn(1) σ = rand() -optprob.lag_h(H4, x0, σ, μ) -@test H4≈σ * H1 + μ[1] * H3[1] rtol=1e-6 +# optprob.lag_h(H4, x0, σ, μ) +# @test H4≈σ * H1 + μ[1] * H3[1] rtol=1e-6 cons_jac_proto = Float64.(sparse([1 1])) # Things break if you only use [1 1]; see FiniteDiff.jl cons_jac_colors = 1:2 @@ -361,10 +361,10 @@ optprob2.hess(sH, [5.0, 3.0]) optprob2.cons_j(sJ, [5.0, 3.0]) @test all(isapprox(sJ, [10.0 6.0; -0.149013 -0.958924]; rtol = 1e-3)) optprob2.cons_h(sH3, [5.0, 3.0]) -@test sH3 ≈ [ +@test Array.(sH3) ≈ [ [2.0 0.0; 0.0 2.0], [2.8767727327346804 0.2836621681849162; 0.2836621681849162 -6.622738308376736e-9] -] +] rtol = 1e-4 using SparseDiffTools From f0b8401980c4d8f53089a44f8f03ebdde929cccd Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Mon, 15 Jul 2024 15:53:50 -0400 Subject: [PATCH 07/14] bump adtypes --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 97bec92..c0ffd7d 100644 --- a/Project.toml +++ b/Project.toml @@ -38,7 +38,7 @@ OptimizationMTKExt = "ModelingToolkit" OptimizationZygoteExt = "Zygote" [compat] -ADTypes = "1.3" +ADTypes = "1.5" ArrayInterface = "7.6" DifferentiationInterface = "0.5.2" DocStringExtensions = "0.9" From 48ad2f46bcf73adde2e3f1a7fb1fc71396f72a21 Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Mon, 15 Jul 2024 19:00:49 -0400 Subject: [PATCH 08/14] Use only the dense_ad in gradient and misc ups --- src/OptimizationDISparseExt.jl | 20 ++++++++++---------- test/adtests.jl | 18 +++++++++--------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/OptimizationDISparseExt.jl b/src/OptimizationDISparseExt.jl index 0b22f7c..d11e98d 100644 --- a/src/OptimizationDISparseExt.jl +++ b/src/OptimizationDISparseExt.jl @@ -13,28 +13,28 @@ function generate_sparse_adtype(adtype) adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) #make zygote? - elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) + elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) end elseif adtype.sparsity_detector isa ADTypes.NoSparsityDetector && !(adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm) adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) - elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) + elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) end elseif !(adtype.sparsity_detector isa ADTypes.NoSparsityDetector) && adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm adtype = AutoSparse(adtype.dense_ad; sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) - elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) + elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) end else if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = adtype.coloring_algorithm) - elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = adtype.coloring_algorithm) + elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = adtype.coloring_algorithm) end end return adtype,soadtype @@ -228,9 +228,9 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x adtype, soadtype = generate_sparse_adtype(adtype) if f.grad === nothing - extras_grad = prepare_gradient(_f, adtype, x) + extras_grad = prepare_gradient(_f, adtype.dense_ad, x) function grad(θ) - gradient(_f, adtype, θ, extras_grad) + gradient(_f, adtype.dense_ad, θ, extras_grad) end else grad = (θ, args...) -> f.grad(θ, p, args...) diff --git a/test/adtests.jl b/test/adtests.jl index 9caff7b..51c1dcb 100644 --- a/test/adtests.jl +++ b/test/adtests.jl @@ -387,7 +387,7 @@ optprob.cons(res, [1.0, 2.0]) @test res ≈ [5.0, 0.682941969615793] J = Array{Float64}(undef, 2, 2) optprob.cons_j(J, [5.0, 3.0]) -@test all(isapprox(J, [10.0 6.0; -0.149013 -0.958924]; rtol = 1e-3)) +@test J ≈ [10.0 6.0; -0.149013 -0.958924] rtol=1e-3 H3 = [Array{Float64}(undef, 2, 2), Array{Float64}(undef, 2, 2)] optprob.cons_h(H3, x0) @test H3 ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] @@ -420,7 +420,7 @@ optprob.cons(res, [1.0, 2.0]) @test res ≈ [5.0, 0.682941969615793] J = Array{Float64}(undef, 2, 2) optprob.cons_j(J, [5.0, 3.0]) -@test all(isapprox(J, [10.0 6.0; -0.149013 -0.958924]; rtol = 1e-3)) +@test J ≈ [10.0 6.0; -0.149013 -0.958924] rtol=1e-3 H3 = [Array{Float64}(undef, 2, 2), Array{Float64}(undef, 2, 2)] optprob.cons_h(H3, x0) @test H3 ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] @@ -453,7 +453,7 @@ optprob.cons(res, [1.0, 2.0]) @test res ≈ [5.0, 0.682941969615793] J = Array{Float64}(undef, 2, 2) optprob.cons_j(J, [5.0, 3.0]) -@test all(isapprox(J, [10.0 6.0; -0.149013 -0.958924]; rtol = 1e-3)) +@test J ≈ [10.0 6.0; -0.149013 -0.958924] rtol=1e-3 H3 = [Array{Float64}(undef, 2, 2), Array{Float64}(undef, 2, 2)] optprob.cons_h(H3, x0) @test H3 ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] @@ -477,7 +477,7 @@ optprob.cons(res, [1.0, 2.0]) @test res ≈ [5.0, 0.682941969615793] J = Array{Float64}(undef, 2, 2) optprob.cons_j(J, [5.0, 3.0]) -@test all(isapprox(J, [10.0 6.0; -0.149013 -0.958924]; rtol = 1e-3)) +@test J ≈ [10.0 6.0; -0.149013 -0.958924] rtol=1e-3 H3 = [Array{Float64}(undef, 2, 2), Array{Float64}(undef, 2, 2)] optprob.cons_h(H3, x0) @test H3 ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] @@ -675,7 +675,7 @@ optprob.hess(H2, x0) @test optprob.grad(x0) == G1 @test Array(optprob.hess(x0)) ≈ H1 @test optprob.cons(x0) == [0.0, 0.0] - @test optprob.cons_j([5.0, 3.0])≈[10.0 6.0; -0.149013 -0.958924] rtol=1e-6 + @test Array(optprob.cons_j([5.0, 3.0]))≈[10.0 6.0; -0.149013 -0.958924] rtol=1e-6 @test Array.(optprob.cons_h(x0)) ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] cons = (x, p) -> [x[1]^2 + x[2]^2] @@ -686,7 +686,7 @@ optprob.hess(H2, x0) OptimizationBase.AutoSparseFiniteDiff(), nothing, 1) - @test optprob.grad(x0) ≈ G1 + @test optprob.grad(x0) ≈ G1 rtol=1e-4 @test Array(optprob.hess(x0)) ≈ H1 @test optprob.cons(x0) == [0.0] @@ -706,7 +706,7 @@ optprob.hess(H2, x0) @test optprob.grad(x0) == G1 @test Array(optprob.hess(x0)) ≈ H1 @test optprob.cons(x0) == [0.0, 0.0] - @test optprob.cons_j([5.0, 3.0])≈[10.0 6.0; -0.149013 -0.958924] rtol=1e-6 + @test Array(optprob.cons_j([5.0, 3.0]))≈[10.0 6.0; -0.149013 -0.958924] rtol=1e-6 @test Array.(optprob.cons_h(x0)) ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] cons = (x, p) -> [x[1]^2 + x[2]^2] @@ -737,7 +737,7 @@ optprob.hess(H2, x0) @test optprob.grad(x0) == G1 @test Array(optprob.hess(x0)) ≈ H1 @test optprob.cons(x0) == [0.0, 0.0] - @test optprob.cons_j([5.0, 3.0])≈[10.0 6.0; -0.149013 -0.958924] rtol=1e-6 + @test Array(optprob.cons_j([5.0, 3.0]))≈[10.0 6.0; -0.149013 -0.958924] rtol=1e-6 @test Array.(optprob.cons_h(x0)) ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] cons = (x, p) -> [x[1]^2 + x[2]^2] @@ -767,7 +767,7 @@ optprob.hess(H2, x0) @test optprob.grad(x0) == G1 @test Array(optprob.hess(x0)) ≈ H1 @test optprob.cons(x0) == [0.0, 0.0] - @test optprob.cons_j([5.0, 3.0])≈[10.0 6.0; -0.149013 -0.958924] rtol=1e-6 + @test Array(optprob.cons_j([5.0, 3.0]))≈[10.0 6.0; -0.149013 -0.958924] rtol=1e-6 @test Array.(optprob.cons_h(x0)) ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] cons = (x, p) -> [x[1]^2 + x[2]^2] From b9479d0621fcaf46fce84d03cec0c12107691430 Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Tue, 16 Jul 2024 14:26:02 -0400 Subject: [PATCH 09/14] Use global sparsity detector --- src/OptimizationDISparseExt.jl | 14 +++++++------- test/adtests.jl | 2 -- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/src/OptimizationDISparseExt.jl b/src/OptimizationDISparseExt.jl index d11e98d..7b9439d 100644 --- a/src/OptimizationDISparseExt.jl +++ b/src/OptimizationDISparseExt.jl @@ -10,18 +10,18 @@ using SparseConnectivityTracer, SparseMatrixColorings function generate_sparse_adtype(adtype) if adtype.sparsity_detector isa ADTypes.NoSparsityDetector && adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm - adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) + adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) #make zygote? + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = TracerSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) #make zygote? elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), sparsity_detector = TracerSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) end elseif adtype.sparsity_detector isa ADTypes.NoSparsityDetector && !(adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm) - adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) + adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = TracerSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), sparsity_detector = TracerLocalSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) + soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), sparsity_detector = TracerSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) end elseif !(adtype.sparsity_detector isa ADTypes.NoSparsityDetector) && adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm adtype = AutoSparse(adtype.dense_ad; sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) @@ -224,7 +224,7 @@ end function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, adtype::ADTypes.AutoSparse{<:AbstractADType}, p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) - + adtype, soadtype = generate_sparse_adtype(adtype) if f.grad === nothing diff --git a/test/adtests.jl b/test/adtests.jl index 51c1dcb..711f65c 100644 --- a/test/adtests.jl +++ b/test/adtests.jl @@ -366,8 +366,6 @@ optprob2.cons_h(sH3, [5.0, 3.0]) [2.8767727327346804 0.2836621681849162; 0.2836621681849162 -6.622738308376736e-9] ] rtol = 1e-4 -using SparseDiffTools - optf = OptimizationFunction(rosenbrock, OptimizationBase.AutoSparseFiniteDiff(), cons = con2_c) From 517c0610df27d00fd0283a64553cde6d1fdb9603 Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Tue, 16 Jul 2024 14:31:36 -0400 Subject: [PATCH 10/14] format --- ext/OptimizationEnzymeExt.jl | 2 +- ext/OptimizationFiniteDiffExt.jl | 2 +- ext/OptimizationForwardDiffExt.jl | 2 +- ext/OptimizationMTKExt.jl | 11 +-- ext/OptimizationReverseDiffExt.jl | 2 +- ext/OptimizationZygoteExt.jl | 2 +- src/OptimizationDIExt.jl | 23 +++++-- src/OptimizationDISparseExt.jl | 108 +++++++++++++++++++++--------- test/adtests.jl | 14 ++-- 9 files changed, 112 insertions(+), 54 deletions(-) diff --git a/ext/OptimizationEnzymeExt.jl b/ext/OptimizationEnzymeExt.jl index c0f6e1a..54e1140 100644 --- a/ext/OptimizationEnzymeExt.jl +++ b/ext/OptimizationEnzymeExt.jl @@ -535,4 +535,4 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, cons_hess_prototype = f.cons_hess_prototype) end -end \ No newline at end of file +end diff --git a/ext/OptimizationFiniteDiffExt.jl b/ext/OptimizationFiniteDiffExt.jl index b0a95a6..ed95f2a 100644 --- a/ext/OptimizationFiniteDiffExt.jl +++ b/ext/OptimizationFiniteDiffExt.jl @@ -2,4 +2,4 @@ module OptimizationFiniteDiffExt using DifferentiationInterface, FiniteDiff -end \ No newline at end of file +end diff --git a/ext/OptimizationForwardDiffExt.jl b/ext/OptimizationForwardDiffExt.jl index 0da99fb..0ff3e5f 100644 --- a/ext/OptimizationForwardDiffExt.jl +++ b/ext/OptimizationForwardDiffExt.jl @@ -2,4 +2,4 @@ module OptimizationForwardDiffExt using DifferentiationInterface, ForwardDiff -end \ No newline at end of file +end diff --git a/ext/OptimizationMTKExt.jl b/ext/OptimizationMTKExt.jl index 08ebef9..7bdda9a 100644 --- a/ext/OptimizationMTKExt.jl +++ b/ext/OptimizationMTKExt.jl @@ -52,7 +52,8 @@ function OptimizationBase.instantiate_function( observed = f.observed) end -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, +function OptimizationBase.instantiate_function( + f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, adtype::AutoSparse{<:AutoSymbolics}, num_cons = 0) p = isnothing(cache.p) ? SciMLBase.NullParameters() : cache.p @@ -98,7 +99,8 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, ca observed = f.observed) end -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, adtype::AutoSymbolics, p, +function OptimizationBase.instantiate_function( + f::OptimizationFunction{true}, x, adtype::AutoSymbolics, p, num_cons = 0) p = isnothing(p) ? SciMLBase.NullParameters() : p @@ -143,7 +145,8 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, observed = f.observed) end -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, +function OptimizationBase.instantiate_function( + f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, adtype::AutoSymbolics, num_cons = 0) p = isnothing(cache.p) ? SciMLBase.NullParameters() : cache.p @@ -189,4 +192,4 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, ca observed = f.observed) end -end \ No newline at end of file +end diff --git a/ext/OptimizationReverseDiffExt.jl b/ext/OptimizationReverseDiffExt.jl index 23ec82d..11e57cf 100644 --- a/ext/OptimizationReverseDiffExt.jl +++ b/ext/OptimizationReverseDiffExt.jl @@ -2,4 +2,4 @@ module OptimizationReverseDiffExt using DifferentiationInterface, ReverseDiff -end \ No newline at end of file +end diff --git a/ext/OptimizationZygoteExt.jl b/ext/OptimizationZygoteExt.jl index b72bc71..211db2d 100644 --- a/ext/OptimizationZygoteExt.jl +++ b/ext/OptimizationZygoteExt.jl @@ -341,4 +341,4 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, cons_expr = f.cons_expr) end -end \ No newline at end of file +end diff --git a/src/OptimizationDIExt.jl b/src/OptimizationDIExt.jl index 66bd88d..2ac030b 100644 --- a/src/OptimizationDIExt.jl +++ b/src/OptimizationDIExt.jl @@ -3,11 +3,15 @@ import OptimizationBase.ArrayInterface import OptimizationBase.SciMLBase: OptimizationFunction import OptimizationBase.LinearAlgebra: I import DifferentiationInterface -import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp, prepare_jacobian, - gradient!, hessian!, hvp!, jacobian!, gradient, hessian, hvp, jacobian +import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp, + prepare_jacobian, + gradient!, hessian!, hvp!, jacobian!, gradient, hessian, + hvp, jacobian using ADTypes, SciMLBase -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, adtype::ADTypes.AbstractADType, p = SciMLBase.NullParameters(), num_cons = 0) +function OptimizationBase.instantiate_function( + f::OptimizationFunction{true}, x, adtype::ADTypes.AbstractADType, + p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) if !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ForwardMode @@ -100,7 +104,9 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, lag_h, f.lag_hess_prototype) end -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AbstractADType, num_cons = 0) +function OptimizationBase.instantiate_function( + f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, + adtype::ADTypes.AbstractADType, num_cons = 0) x = cache.u0 p = cache.p _f = (θ, args...) -> first(f.f(θ, p, args...)) @@ -195,8 +201,9 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, ca lag_h, f.lag_hess_prototype) end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, adtype::ADTypes.AbstractADType, p = SciMLBase.NullParameters(), num_cons = 0) +function OptimizationBase.instantiate_function( + f::OptimizationFunction{false}, x, adtype::ADTypes.AbstractADType, + p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) if !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ForwardMode @@ -291,7 +298,9 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x lag_h, f.lag_hess_prototype) end -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AbstractADType, num_cons = 0) +function OptimizationBase.instantiate_function( + f::OptimizationFunction{false}, cache::OptimizationBase.ReInitCache, + adtype::ADTypes.AbstractADType, num_cons = 0) x = cache.u0 p = cache.p _f = (θ, args...) -> first(f.f(θ, p, args...)) diff --git a/src/OptimizationDISparseExt.jl b/src/OptimizationDISparseExt.jl index 7b9439d..2f2adda 100644 --- a/src/OptimizationDISparseExt.jl +++ b/src/OptimizationDISparseExt.jl @@ -3,45 +3,86 @@ import OptimizationBase.ArrayInterface import OptimizationBase.SciMLBase: OptimizationFunction import OptimizationBase.LinearAlgebra: I import DifferentiationInterface -import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp, prepare_jacobian, - gradient!, hessian!, hvp!, jacobian!, gradient, hessian, hvp, jacobian +import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp, + prepare_jacobian, + gradient!, hessian!, hvp!, jacobian!, gradient, hessian, + hvp, jacobian using ADTypes using SparseConnectivityTracer, SparseMatrixColorings function generate_sparse_adtype(adtype) - if adtype.sparsity_detector isa ADTypes.NoSparsityDetector && adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm - adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) - if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = TracerSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) #make zygote? - elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), sparsity_detector = TracerSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) + if adtype.sparsity_detector isa ADTypes.NoSparsityDetector && + adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm + adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerSparsityDetector(), + coloring_algorithm = GreedyColoringAlgorithm()) + if !(adtype.dense_ad isa SciMLBase.NoAD) && + ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode + soadtype = AutoSparse( + DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), + sparsity_detector = TracerSparsityDetector(), + coloring_algorithm = GreedyColoringAlgorithm()) #make zygote? + elseif !(adtype isa SciMLBase.NoAD) && + ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode + soadtype = AutoSparse( + DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), + sparsity_detector = TracerSparsityDetector(), + coloring_algorithm = GreedyColoringAlgorithm()) end - elseif adtype.sparsity_detector isa ADTypes.NoSparsityDetector && !(adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm) - adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) - if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = TracerSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) - elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), sparsity_detector = TracerSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) + elseif adtype.sparsity_detector isa ADTypes.NoSparsityDetector && + !(adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm) + adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerSparsityDetector(), + coloring_algorithm = adtype.coloring_algorithm) + if !(adtype.dense_ad isa SciMLBase.NoAD) && + ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode + soadtype = AutoSparse( + DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), + sparsity_detector = TracerSparsityDetector(), + coloring_algorithm = adtype.coloring_algorithm) + elseif !(adtype isa SciMLBase.NoAD) && + ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode + soadtype = AutoSparse( + DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), + sparsity_detector = TracerSparsityDetector(), + coloring_algorithm = adtype.coloring_algorithm) end - elseif !(adtype.sparsity_detector isa ADTypes.NoSparsityDetector) && adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm - adtype = AutoSparse(adtype.dense_ad; sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) - if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) - elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) + elseif !(adtype.sparsity_detector isa ADTypes.NoSparsityDetector) && + adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm + adtype = AutoSparse(adtype.dense_ad; sparsity_detector = adtype.sparsity_detector, + coloring_algorithm = GreedyColoringAlgorithm()) + if !(adtype.dense_ad isa SciMLBase.NoAD) && + ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode + soadtype = AutoSparse( + DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), + sparsity_detector = adtype.sparsity_detector, + coloring_algorithm = GreedyColoringAlgorithm()) + elseif !(adtype isa SciMLBase.NoAD) && + ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode + soadtype = AutoSparse( + DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), + sparsity_detector = adtype.sparsity_detector, + coloring_algorithm = GreedyColoringAlgorithm()) end else - if !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = adtype.coloring_algorithm) - elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode - soadtype = AutoSparse(DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), sparsity_detector = adtype.sparsity_detector, coloring_algorithm = adtype.coloring_algorithm) + if !(adtype.dense_ad isa SciMLBase.NoAD) && + ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode + soadtype = AutoSparse( + DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), + sparsity_detector = adtype.sparsity_detector, + coloring_algorithm = adtype.coloring_algorithm) + elseif !(adtype isa SciMLBase.NoAD) && + ADTypes.mode(adtype.dense_ad) isa ADTypes.ReverseMode + soadtype = AutoSparse( + DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype.dense_ad), + sparsity_detector = adtype.sparsity_detector, + coloring_algorithm = adtype.coloring_algorithm) end end - return adtype,soadtype + return adtype, soadtype end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, adtype::ADTypes.AutoSparse{<:AbstractADType}, p = SciMLBase.NullParameters(), num_cons = 0) +function OptimizationBase.instantiate_function( + f::OptimizationFunction{true}, x, adtype::ADTypes.AutoSparse{<:AbstractADType}, + p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) adtype, soadtype = generate_sparse_adtype(adtype) @@ -130,7 +171,9 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, lag_h, f.lag_hess_prototype) end -function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AutoSparse{<:AbstractADType}, num_cons = 0) +function OptimizationBase.instantiate_function( + f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, + adtype::ADTypes.AutoSparse{<:AbstractADType}, num_cons = 0) x = cache.u0 p = cache.p _f = (θ, args...) -> first(f.f(θ, p, args...)) @@ -221,8 +264,9 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, ca lag_h, f.lag_hess_prototype) end - -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x, adtype::ADTypes.AutoSparse{<:AbstractADType}, p = SciMLBase.NullParameters(), num_cons = 0) +function OptimizationBase.instantiate_function( + f::OptimizationFunction{false}, x, adtype::ADTypes.AutoSparse{<:AbstractADType}, + p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) adtype, soadtype = generate_sparse_adtype(adtype) @@ -313,7 +357,9 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, x lag_h, f.lag_hess_prototype) end -function OptimizationBase.instantiate_function(f::OptimizationFunction{false}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AutoSparse{<:AbstractADType}, num_cons = 0) +function OptimizationBase.instantiate_function( + f::OptimizationFunction{false}, cache::OptimizationBase.ReInitCache, + adtype::ADTypes.AutoSparse{<:AbstractADType}, num_cons = 0) x = cache.u0 p = cache.p _f = (θ, args...) -> first(f.f(θ, p, args...)) diff --git a/test/adtests.jl b/test/adtests.jl index 711f65c..d0fb1ec 100644 --- a/test/adtests.jl +++ b/test/adtests.jl @@ -361,10 +361,10 @@ optprob2.hess(sH, [5.0, 3.0]) optprob2.cons_j(sJ, [5.0, 3.0]) @test all(isapprox(sJ, [10.0 6.0; -0.149013 -0.958924]; rtol = 1e-3)) optprob2.cons_h(sH3, [5.0, 3.0]) -@test Array.(sH3) ≈ [ +@test Array.(sH3)≈[ [2.0 0.0; 0.0 2.0], [2.8767727327346804 0.2836621681849162; 0.2836621681849162 -6.622738308376736e-9] -] rtol = 1e-4 +] rtol=1e-4 optf = OptimizationFunction(rosenbrock, OptimizationBase.AutoSparseFiniteDiff(), @@ -385,7 +385,7 @@ optprob.cons(res, [1.0, 2.0]) @test res ≈ [5.0, 0.682941969615793] J = Array{Float64}(undef, 2, 2) optprob.cons_j(J, [5.0, 3.0]) -@test J ≈ [10.0 6.0; -0.149013 -0.958924] rtol=1e-3 +@test J≈[10.0 6.0; -0.149013 -0.958924] rtol=1e-3 H3 = [Array{Float64}(undef, 2, 2), Array{Float64}(undef, 2, 2)] optprob.cons_h(H3, x0) @test H3 ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] @@ -418,7 +418,7 @@ optprob.cons(res, [1.0, 2.0]) @test res ≈ [5.0, 0.682941969615793] J = Array{Float64}(undef, 2, 2) optprob.cons_j(J, [5.0, 3.0]) -@test J ≈ [10.0 6.0; -0.149013 -0.958924] rtol=1e-3 +@test J≈[10.0 6.0; -0.149013 -0.958924] rtol=1e-3 H3 = [Array{Float64}(undef, 2, 2), Array{Float64}(undef, 2, 2)] optprob.cons_h(H3, x0) @test H3 ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] @@ -451,7 +451,7 @@ optprob.cons(res, [1.0, 2.0]) @test res ≈ [5.0, 0.682941969615793] J = Array{Float64}(undef, 2, 2) optprob.cons_j(J, [5.0, 3.0]) -@test J ≈ [10.0 6.0; -0.149013 -0.958924] rtol=1e-3 +@test J≈[10.0 6.0; -0.149013 -0.958924] rtol=1e-3 H3 = [Array{Float64}(undef, 2, 2), Array{Float64}(undef, 2, 2)] optprob.cons_h(H3, x0) @test H3 ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] @@ -475,7 +475,7 @@ optprob.cons(res, [1.0, 2.0]) @test res ≈ [5.0, 0.682941969615793] J = Array{Float64}(undef, 2, 2) optprob.cons_j(J, [5.0, 3.0]) -@test J ≈ [10.0 6.0; -0.149013 -0.958924] rtol=1e-3 +@test J≈[10.0 6.0; -0.149013 -0.958924] rtol=1e-3 H3 = [Array{Float64}(undef, 2, 2), Array{Float64}(undef, 2, 2)] optprob.cons_h(H3, x0) @test H3 ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]] @@ -684,7 +684,7 @@ optprob.hess(H2, x0) OptimizationBase.AutoSparseFiniteDiff(), nothing, 1) - @test optprob.grad(x0) ≈ G1 rtol=1e-4 + @test optprob.grad(x0)≈G1 rtol=1e-4 @test Array(optprob.hess(x0)) ≈ H1 @test optprob.cons(x0) == [0.0] From 7beebae9beb2b77f4bf7fe5617016b8f512d0bff Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Wed, 17 Jul 2024 18:36:55 -0400 Subject: [PATCH 11/14] handle reinitcache dispatches better --- Project.toml | 6 +++--- ext/OptimizationEnzymeExt.jl | 4 ++-- src/OptimizationDIExt.jl | 8 ++++++++ src/OptimizationDISparseExt.jl | 8 ++++---- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/Project.toml b/Project.toml index c0ffd7d..7e7f5ef 100644 --- a/Project.toml +++ b/Project.toml @@ -30,11 +30,11 @@ ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [extensions] -OptimizationForwardDiffExt = "ForwardDiff" -OptimizationFiniteDiffExt = "FiniteDiff" -OptimizationReverseDiffExt = "ReverseDiff" OptimizationEnzymeExt = "Enzyme" +OptimizationFiniteDiffExt = "FiniteDiff" +OptimizationForwardDiffExt = "ForwardDiff" OptimizationMTKExt = "ModelingToolkit" +OptimizationReverseDiffExt = "ReverseDiff" OptimizationZygoteExt = "Zygote" [compat] diff --git a/ext/OptimizationEnzymeExt.jl b/ext/OptimizationEnzymeExt.jl index 54e1140..4439c8b 100644 --- a/ext/OptimizationEnzymeExt.jl +++ b/ext/OptimizationEnzymeExt.jl @@ -136,7 +136,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, x, end Enzyme.make_zero!(y) Enzyme.autodiff(Enzyme.Forward, f.cons, BatchDuplicated(y, Jaccache), - BatchDuplicated(θ, seeds), Const(p), Const.(args)...)[1] + BatchDuplicated(θ, seeds), Const(p), Const.(args)...) for i in 1:length(θ) if J isa Vector J[i] = Jaccache[i][1] @@ -257,7 +257,7 @@ function OptimizationBase.instantiate_function(f::OptimizationFunction{true}, end Enzyme.make_zero!(y) Enzyme.autodiff(Enzyme.Forward, f.cons, BatchDuplicated(y, Jaccache), - BatchDuplicated(θ, seeds), Const(p), Const.(args)...)[1] + BatchDuplicated(θ, seeds), Const(p), Const.(args)...) for i in 1:length(θ) if J isa Vector J[i] = Jaccache[i][1] diff --git a/src/OptimizationDIExt.jl b/src/OptimizationDIExt.jl index 2ac030b..c820355 100644 --- a/src/OptimizationDIExt.jl +++ b/src/OptimizationDIExt.jl @@ -18,6 +18,8 @@ function OptimizationBase.instantiate_function( soadtype = DifferentiationInterface.SecondOrder(adtype, AutoReverseDiff()) #make zygote? elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode soadtype = DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype) + else + soadtype = adtype end if f.grad === nothing @@ -115,6 +117,8 @@ function OptimizationBase.instantiate_function( soadtype = DifferentiationInterface.SecondOrder(adtype, AutoReverseDiff()) #make zygote? elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode soadtype = DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype) + else + soadtype = adtype end if f.grad === nothing @@ -210,6 +214,8 @@ function OptimizationBase.instantiate_function( soadtype = DifferentiationInterface.SecondOrder(adtype, AutoReverseDiff()) #make zygote? elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode soadtype = DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype) + else + soadtype = adtype end if f.grad === nothing @@ -309,6 +315,8 @@ function OptimizationBase.instantiate_function( soadtype = DifferentiationInterface.SecondOrder(adtype, AutoReverseDiff()) #make zygote? elseif !(adtype isa SciMLBase.NoAD) && ADTypes.mode(adtype) isa ADTypes.ReverseMode soadtype = DifferentiationInterface.SecondOrder(AutoForwardDiff(), adtype) + else + soadtype = adtype end if f.grad === nothing diff --git a/src/OptimizationDISparseExt.jl b/src/OptimizationDISparseExt.jl index 2f2adda..9775a84 100644 --- a/src/OptimizationDISparseExt.jl +++ b/src/OptimizationDISparseExt.jl @@ -181,9 +181,9 @@ function OptimizationBase.instantiate_function( adtype, soadtype = generate_sparse_adtype(adtype) if f.grad === nothing - extras_grad = prepare_gradient(_f, adtype, x) + extras_grad = prepare_gradient(_f, adtype.dense_ad, x) function grad(res, θ) - gradient!(_f, res, adtype, θ, extras_grad) + gradient!(_f, res, adtype.dense_ad, θ, extras_grad) end else grad = (G, θ, args...) -> f.grad(G, θ, p, args...) @@ -367,9 +367,9 @@ function OptimizationBase.instantiate_function( adtype, soadtype = generate_sparse_adtype(adtype) if f.grad === nothing - extras_grad = prepare_gradient(_f, adtype, x) + extras_grad = prepare_gradient(_f, adtype.dense_ad, x) function grad(θ) - gradient(_f, adtype, θ, extras_grad) + gradient(_f, adtype.dense_ad, θ, extras_grad) end else grad = (θ, args...) -> f.grad(θ, p, args...) From 27cd0d466fcd139671278369c6f404038c2003e9 Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Fri, 19 Jul 2024 13:23:34 -0400 Subject: [PATCH 12/14] try to get downstream running --- .github/workflows/Downstream.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Downstream.yml b/.github/workflows/Downstream.yml index cfd1eb2..80a7f26 100644 --- a/.github/workflows/Downstream.yml +++ b/.github/workflows/Downstream.yml @@ -17,7 +17,7 @@ jobs: julia-version: [1] os: [ubuntu-latest] package: - - {user: SciML, repo: Optimization.jl, group: Optimization} + - {user: SciML, repo: Optimization.jl, group: All} steps: - uses: actions/checkout@v4 From 35bd067fe66fd94c90ab98db5e7fde8524421399 Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Fri, 19 Jul 2024 14:02:44 -0400 Subject: [PATCH 13/14] hvpextras preparation with random v --- src/OptimizationDIExt.jl | 20 ++++---------------- src/OptimizationDISparseExt.jl | 20 ++++---------------- 2 files changed, 8 insertions(+), 32 deletions(-) diff --git a/src/OptimizationDIExt.jl b/src/OptimizationDIExt.jl index c820355..01fda62 100644 --- a/src/OptimizationDIExt.jl +++ b/src/OptimizationDIExt.jl @@ -43,11 +43,8 @@ function OptimizationBase.instantiate_function( end if f.hv === nothing - extras_hvp = nothing + extras_hvp = prepare_hvp(_f, soadtype, x, rand(size(x))) hv = function (H, θ, v, args...) - if extras_hvp === nothing - global extras_hvp = prepare_hvp(_f, soadtype, x, v) - end hvp!(_f, H, soadtype, θ, v, extras_hvp) end else @@ -142,11 +139,8 @@ function OptimizationBase.instantiate_function( end if f.hv === nothing - extras_hvp = nothing + extras_hvp = prepare_hvp(_f, soadtype, x, rand(size(x))) hv = function (H, θ, v, args...) - if extras_hvp === nothing - global extras_hvp = prepare_hvp(_f, soadtype, x, v) - end hvp!(_f, H, soadtype, θ, v, extras_hvp) end else @@ -239,11 +233,8 @@ function OptimizationBase.instantiate_function( end if f.hv === nothing - extras_hvp = nothing + extras_hvp = prepare_hvp(_f, soadtype, x, rand(size(x))) hv = function (θ, v, args...) - if extras_hvp === nothing - global extras_hvp = prepare_hvp(_f, soadtype, x, v) - end hvp(_f, soadtype, θ, v, extras_hvp) end else @@ -340,11 +331,8 @@ function OptimizationBase.instantiate_function( end if f.hv === nothing - extras_hvp = nothing + extras_hvp = prepare_hvp(_f, soadtype, x, rand(size(x))) hv = function (θ, v, args...) - if extras_hvp === nothing - global extras_hvp = prepare_hvp(_f, soadtype, x, v) - end hvp(_f, soadtype, θ, v, extras_hvp) end else diff --git a/src/OptimizationDISparseExt.jl b/src/OptimizationDISparseExt.jl index 9775a84..7acbb89 100644 --- a/src/OptimizationDISparseExt.jl +++ b/src/OptimizationDISparseExt.jl @@ -108,11 +108,8 @@ function OptimizationBase.instantiate_function( end if f.hv === nothing - extras_hvp = nothing + extras_hvp = prepare_hvp(_f, soadtype, x, rand(size(x))) hv = function (H, θ, v, args...) - if extras_hvp === nothing - global extras_hvp = prepare_hvp(_f, soadtype, x, v) - end hvp!(_f, H, soadtype, θ, v, extras_hvp) end else @@ -201,11 +198,8 @@ function OptimizationBase.instantiate_function( end if f.hv === nothing - extras_hvp = nothing + extras_hvp = prepare_hvp(_f, soadtype, x, rand(size(x))) hv = function (H, θ, v, args...) - if extras_hvp === nothing - global extras_hvp = prepare_hvp(_f, soadtype, x, v) - end hvp!(_f, H, soadtype, θ, v, extras_hvp) end else @@ -292,11 +286,8 @@ function OptimizationBase.instantiate_function( end if f.hv === nothing - extras_hvp = nothing + extras_hvp = prepare_hvp(_f, soadtype, x, rand(size(x))) hv = function (θ, v, args...) - if extras_hvp === nothing - global extras_hvp = prepare_hvp(_f, soadtype, x, v) - end hvp(_f, soadtype, θ, v, extras_hvp) end else @@ -387,11 +378,8 @@ function OptimizationBase.instantiate_function( end if f.hv === nothing - extras_hvp = nothing + extras_hvp = prepare_hvp(_f, soadtype, x, rand(size(x))) hv = function (θ, v, args...) - if extras_hvp === nothing - global extras_hvp = prepare_hvp(_f, soadtype, x, v) - end hvp(_f, soadtype, θ, v, extras_hvp) end else From c1a5e1f498df63de1ef875e4d9ab320003aad672 Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Fri, 19 Jul 2024 15:40:45 -0400 Subject: [PATCH 14/14] fix sparse adtype passed to hvp --- src/OptimizationBase.jl | 2 +- src/OptimizationDIExt.jl | 8 ++--- src/OptimizationDISparseExt.jl | 52 +++++++++++++++++++++---------- src/function.jl | 57 ++++++++++++++++++++++++++++++++++ 4 files changed, 98 insertions(+), 21 deletions(-) diff --git a/src/OptimizationBase.jl b/src/OptimizationBase.jl index ee9486a..f4cc0be 100644 --- a/src/OptimizationBase.jl +++ b/src/OptimizationBase.jl @@ -31,9 +31,9 @@ Base.length(::NullData) = 0 include("adtypes.jl") include("cache.jl") -include("function.jl") include("OptimizationDIExt.jl") include("OptimizationDISparseExt.jl") +include("function.jl") export solve, OptimizationCache, DEFAULT_CALLBACK, DEFAULT_DATA diff --git a/src/OptimizationDIExt.jl b/src/OptimizationDIExt.jl index 01fda62..e185d75 100644 --- a/src/OptimizationDIExt.jl +++ b/src/OptimizationDIExt.jl @@ -9,7 +9,7 @@ import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp, hvp, jacobian using ADTypes, SciMLBase -function OptimizationBase.instantiate_function( +function instantiate_function( f::OptimizationFunction{true}, x, adtype::ADTypes.AbstractADType, p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) @@ -103,7 +103,7 @@ function OptimizationBase.instantiate_function( lag_h, f.lag_hess_prototype) end -function OptimizationBase.instantiate_function( +function instantiate_function( f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AbstractADType, num_cons = 0) x = cache.u0 @@ -199,7 +199,7 @@ function OptimizationBase.instantiate_function( lag_h, f.lag_hess_prototype) end -function OptimizationBase.instantiate_function( +function instantiate_function( f::OptimizationFunction{false}, x, adtype::ADTypes.AbstractADType, p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) @@ -295,7 +295,7 @@ function OptimizationBase.instantiate_function( lag_h, f.lag_hess_prototype) end -function OptimizationBase.instantiate_function( +function instantiate_function( f::OptimizationFunction{false}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AbstractADType, num_cons = 0) x = cache.u0 diff --git a/src/OptimizationDISparseExt.jl b/src/OptimizationDISparseExt.jl index 7acbb89..375a903 100644 --- a/src/OptimizationDISparseExt.jl +++ b/src/OptimizationDISparseExt.jl @@ -15,7 +15,12 @@ function generate_sparse_adtype(adtype) adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerSparsityDetector(), coloring_algorithm = GreedyColoringAlgorithm()) - if !(adtype.dense_ad isa SciMLBase.NoAD) && + if adtype.dense_ad isa ADTypes.AutoFiniteDiff + soadtype = AutoSparse( + DifferentiationInterface.SecondOrder(adtype.dense_ad, adtype.dense_ad), + sparsity_detector = TracerSparsityDetector(), + coloring_algorithm = GreedyColoringAlgorithm()) + elseif !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode soadtype = AutoSparse( DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), @@ -32,7 +37,12 @@ function generate_sparse_adtype(adtype) !(adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm) adtype = AutoSparse(adtype.dense_ad; sparsity_detector = TracerSparsityDetector(), coloring_algorithm = adtype.coloring_algorithm) - if !(adtype.dense_ad isa SciMLBase.NoAD) && + if adtype.dense_ad isa ADTypes.AutoFiniteDiff + soadtype = AutoSparse( + DifferentiationInterface.SecondOrder(adtype.dense_ad, adtype.dense_ad), + sparsity_detector = TracerSparsityDetector(), + coloring_algorithm = adtype.coloring_algorithm) + elseif !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode soadtype = AutoSparse( DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), @@ -49,7 +59,12 @@ function generate_sparse_adtype(adtype) adtype.coloring_algorithm isa ADTypes.NoColoringAlgorithm adtype = AutoSparse(adtype.dense_ad; sparsity_detector = adtype.sparsity_detector, coloring_algorithm = GreedyColoringAlgorithm()) - if !(adtype.dense_ad isa SciMLBase.NoAD) && + if adtype.dense_ad isa ADTypes.AutoFiniteDiff + soadtype = AutoSparse( + DifferentiationInterface.SecondOrder(adtype.dense_ad, adtype.dense_ad), + sparsity_detector = adtype.sparsity_detector, + coloring_algorithm = GreedyColoringAlgorithm()) + elseif !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode soadtype = AutoSparse( DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), @@ -63,7 +78,12 @@ function generate_sparse_adtype(adtype) coloring_algorithm = GreedyColoringAlgorithm()) end else - if !(adtype.dense_ad isa SciMLBase.NoAD) && + if adtype.dense_ad isa ADTypes.AutoFiniteDiff + soadtype = AutoSparse( + DifferentiationInterface.SecondOrder(adtype.dense_ad, adtype.dense_ad), + sparsity_detector = adtype.sparsity_detector, + coloring_algorithm = adtype.coloring_algorithm) + elseif !(adtype.dense_ad isa SciMLBase.NoAD) && ADTypes.mode(adtype.dense_ad) isa ADTypes.ForwardMode soadtype = AutoSparse( DifferentiationInterface.SecondOrder(adtype.dense_ad, AutoReverseDiff()), @@ -80,7 +100,7 @@ function generate_sparse_adtype(adtype) return adtype, soadtype end -function OptimizationBase.instantiate_function( +function instantiate_function( f::OptimizationFunction{true}, x, adtype::ADTypes.AutoSparse{<:AbstractADType}, p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) @@ -108,9 +128,9 @@ function OptimizationBase.instantiate_function( end if f.hv === nothing - extras_hvp = prepare_hvp(_f, soadtype, x, rand(size(x))) + extras_hvp = prepare_hvp(_f, soadtype.dense_ad, x, rand(size(x))) hv = function (H, θ, v, args...) - hvp!(_f, H, soadtype, θ, v, extras_hvp) + hvp!(_f, H, soadtype.dense_ad, θ, v, extras_hvp) end else hv = f.hv @@ -168,7 +188,7 @@ function OptimizationBase.instantiate_function( lag_h, f.lag_hess_prototype) end -function OptimizationBase.instantiate_function( +function instantiate_function( f::OptimizationFunction{true}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AutoSparse{<:AbstractADType}, num_cons = 0) x = cache.u0 @@ -198,9 +218,9 @@ function OptimizationBase.instantiate_function( end if f.hv === nothing - extras_hvp = prepare_hvp(_f, soadtype, x, rand(size(x))) + extras_hvp = prepare_hvp(_f, soadtype.dense_ad, x, rand(size(x))) hv = function (H, θ, v, args...) - hvp!(_f, H, soadtype, θ, v, extras_hvp) + hvp!(_f, H, soadtype.dense_ad, θ, v, extras_hvp) end else hv = f.hv @@ -258,7 +278,7 @@ function OptimizationBase.instantiate_function( lag_h, f.lag_hess_prototype) end -function OptimizationBase.instantiate_function( +function instantiate_function( f::OptimizationFunction{false}, x, adtype::ADTypes.AutoSparse{<:AbstractADType}, p = SciMLBase.NullParameters(), num_cons = 0) _f = (θ, args...) -> first(f.f(θ, p, args...)) @@ -286,9 +306,9 @@ function OptimizationBase.instantiate_function( end if f.hv === nothing - extras_hvp = prepare_hvp(_f, soadtype, x, rand(size(x))) + extras_hvp = prepare_hvp(_f, soadtype.dense_ad, x, rand(size(x))) hv = function (θ, v, args...) - hvp(_f, soadtype, θ, v, extras_hvp) + hvp(_f, soadtype.dense_ad, θ, v, extras_hvp) end else hv = f.hv @@ -348,7 +368,7 @@ function OptimizationBase.instantiate_function( lag_h, f.lag_hess_prototype) end -function OptimizationBase.instantiate_function( +function instantiate_function( f::OptimizationFunction{false}, cache::OptimizationBase.ReInitCache, adtype::ADTypes.AutoSparse{<:AbstractADType}, num_cons = 0) x = cache.u0 @@ -378,9 +398,9 @@ function OptimizationBase.instantiate_function( end if f.hv === nothing - extras_hvp = prepare_hvp(_f, soadtype, x, rand(size(x))) + extras_hvp = prepare_hvp(_f, soadtype.dense_ad, x, rand(size(x))) hv = function (θ, v, args...) - hvp(_f, soadtype, θ, v, extras_hvp) + hvp(_f, soadtype.dense_ad, θ, v, extras_hvp) end else hv = f.hv diff --git a/src/function.jl b/src/function.jl index 53ab75e..257d680 100644 --- a/src/function.jl +++ b/src/function.jl @@ -43,6 +43,63 @@ function that is not defined, an error is thrown. For more information on the use of automatic differentiation, see the documentation of the `AbstractADType` types. """ +function instantiate_function(f::OptimizationFunction{true}, x, ::SciMLBase.NoAD, + p, num_cons = 0) + grad = f.grad === nothing ? nothing : (G, x, args...) -> f.grad(G, x, p, args...) + hess = f.hess === nothing ? nothing : (H, x, args...) -> f.hess(H, x, p, args...) + hv = f.hv === nothing ? nothing : (H, x, v, args...) -> f.hv(H, x, v, p, args...) + cons = f.cons === nothing ? nothing : (res, x) -> f.cons(res, x, p) + cons_j = f.cons_j === nothing ? nothing : (res, x) -> f.cons_j(res, x, p) + cons_h = f.cons_h === nothing ? nothing : (res, x) -> f.cons_h(res, x, p) + hess_prototype = f.hess_prototype === nothing ? nothing : + convert.(eltype(x), f.hess_prototype) + cons_jac_prototype = f.cons_jac_prototype === nothing ? nothing : + convert.(eltype(x), f.cons_jac_prototype) + cons_hess_prototype = f.cons_hess_prototype === nothing ? nothing : + [convert.(eltype(x), f.cons_hess_prototype[i]) + for i in 1:num_cons] + expr = symbolify(f.expr) + cons_expr = symbolify.(f.cons_expr) + + return OptimizationFunction{true}(f.f, SciMLBase.NoAD(); grad = grad, hess = hess, + hv = hv, + cons = cons, cons_j = cons_j, cons_h = cons_h, + hess_prototype = hess_prototype, + cons_jac_prototype = cons_jac_prototype, + cons_hess_prototype = cons_hess_prototype, + expr = expr, cons_expr = cons_expr, + sys = f.sys, + observed = f.observed) +end + +function instantiate_function(f::OptimizationFunction{true}, cache::ReInitCache, ::SciMLBase.NoAD, + num_cons = 0) + grad = f.grad === nothing ? nothing : (G, x, args...) -> f.grad(G, x, cache.p, args...) + hess = f.hess === nothing ? nothing : (H, x, args...) -> f.hess(H, x, cache.p, args...) + hv = f.hv === nothing ? nothing : (H, x, v, args...) -> f.hv(H, x, v, cache.p, args...) + cons = f.cons === nothing ? nothing : (res, x) -> f.cons(res, x, cache.p) + cons_j = f.cons_j === nothing ? nothing : (res, x) -> f.cons_j(res, x, cache.p) + cons_h = f.cons_h === nothing ? nothing : (res, x) -> f.cons_h(res, x, cache.p) + hess_prototype = f.hess_prototype === nothing ? nothing : + convert.(eltype(cache.u0), f.hess_prototype) + cons_jac_prototype = f.cons_jac_prototype === nothing ? nothing : + convert.(eltype(cache.u0), f.cons_jac_prototype) + cons_hess_prototype = f.cons_hess_prototype === nothing ? nothing : + [convert.(eltype(cache.u0), f.cons_hess_prototype[i]) + for i in 1:num_cons] + expr = symbolify(f.expr) + cons_expr = symbolify.(f.cons_expr) + + return OptimizationFunction{true}(f.f, SciMLBase.NoAD(); grad = grad, hess = hess, + hv = hv, + cons = cons, cons_j = cons_j, cons_h = cons_h, + hess_prototype = hess_prototype, + cons_jac_prototype = cons_jac_prototype, + cons_hess_prototype = cons_hess_prototype, + expr = expr, cons_expr = cons_expr, + sys = f.sys, + observed = f.observed) +end function instantiate_function(f::OptimizationFunction, x, adtype::ADTypes.AbstractADType, p, num_cons = 0)