2 changes: 1 addition & 1 deletion Project.toml
@@ -1,6 +1,6 @@
name = "AutoDiffOperators"
uuid = "6e1301d5-4f4d-4fb5-9679-7191e22f0e0e"
version = "0.1.6"
version = "0.1.7"

[deps]
ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
2 changes: 1 addition & 1 deletion docs/Project.toml
@@ -3,4 +3,4 @@ Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a"

[compat]
-Documenter = "~0.27"
+Documenter = "1"
2 changes: 1 addition & 1 deletion docs/make.jl
@@ -29,7 +29,7 @@ makedocs(
    ],
    doctest = ("fixdoctests" in ARGS) ? :fix : true,
    linkcheck = !("nonstrict" in ARGS),
-    strict = !("nonstrict" in ARGS),
+    warnonly = ("nonstrict" in ARGS),
)

deploydocs(
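Note on the docs/make.jl change: Documenter.jl 1.0 removed the `strict` keyword; builds now fail on documentation errors by default, and `warnonly` selectively downgrades them to warnings. A minimal sketch of what the repo's `nonstrict` convention enables (the invocation below is an assumption, not part of this diff):

    # Lenient local docs build (assumed to be run from the package root):
    #   julia --project=docs docs/make.jl nonstrict
    # With "nonstrict" in ARGS, warnonly = true and linkcheck = false, so
    # doctest/linkcheck failures no longer abort the build. Documenter 1.x
    # also accepts a list of error classes, e.g. warnonly = [:doctest, :linkcheck].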
31 changes: 30 additions & 1 deletion ext/AutoDiffOperatorsForwardDiffExt.jl
@@ -52,7 +52,36 @@ end


# ToDo: Use AD parameters
-AutoDiffOperators.with_gradient(f, x::AbstractVector{<:Real}, ad::ForwardDiffAD) = f(x), ForwardDiff.gradient(f, x)
+function AutoDiffOperators.with_gradient(f, x::AbstractVector{<:Real}, ad::ForwardDiffAD)
+    T = typeof(x)
+    U = Core.Compiler.return_type(f, Tuple{typeof(x)})
+    y = f(x)
+    R = promote_type(eltype(x), eltype(y))
+    n_y, n_x = length(y), length(x)
+    dy = similar(x, R)
+    dy .= ForwardDiff.gradient(f, x)
+    return y, dy
+end
+
+
+function AutoDiffOperators.only_gradient(f, x, ad::ForwardDiffAD)
+    T = eltype(x)
+    U = Core.Compiler.return_type(f, Tuple{typeof(x)})
+    R = promote_type(T, U)
+    _only_gradient_impl(f, x, ad, R)
+end
+
+function _only_gradient_impl(f, x, ad::ForwardDiffAD, ::Type{R}) where {R <: Real}
+    dy = similar(x, R)
+    dy .= ForwardDiff.gradient(f, x)
+    return dy
+end
+
+function _only_gradient_impl(f, x, ad::ForwardDiffAD, ::Type)
+    return ForwardDiff.gradient(f, x)
+end
+
+

# ToDo: Specialize `AutoDiffOperators.with_gradient!!(f, δx, x, ad::ForwardDiffAD)`

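Net effect of the extension changes above: the gradient buffer's element type is now derived from both the input element type and the (inferred) output type, rather than always matching `x`, and ForwardDiff gains a dedicated `only_gradient` path. A minimal usage sketch, assuming the package's module-based selector `ADSelector(ForwardDiff)`; the test function and values are hypothetical:

    using AutoDiffOperators, ForwardDiff

    f(x) = sum(abs2, x) / 2            # hypothetical scalar-valued function
    x = Float32[1, 2, 3]
    ad = ADSelector(ForwardDiff)       # assumed selector constructor

    y, dy = with_gradient(f, x, ad)    # y == 7.0f0, dy == Float32[1.0, 2.0, 3.0]
    # with_gradient allocates dy with promote_type(eltype(x), eltype(y)), so the
    # buffer only widens when f's output type is wider than eltype(x).

    dy2 = only_gradient(f, x, ad)      # just the gradient, no primal value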
15 changes: 14 additions & 1 deletion src/gradient.jl
@@ -43,6 +43,19 @@ export with_gradient!!
with_gradient!!(f, @nospecialize(δx), x, ad::ADSelector) = with_gradient(f, x, ad::ADSelector)


"""
only_gradient(f, x, ad::ADSelector)

Returns the gradient ∇f(x) of `f` at `x`.

See also [`with_gradient(f, x, ad)`](@ref).
"""
function only_gradient end
export only_gradient

only_gradient(f, x, ad::ADSelector) = with_gradient(f, x, ad)[2]



struct _ValGradFunc{F,AD} <: Function
    f::F
@@ -72,7 +85,7 @@ struct _GenericGradientFunc{F,AD} <: Function
end
_GenericGradientFunc(::Type{FT}, ad::AD) where {FT,AD<:ADSelector} = _GenericGradientFunc{Type{FT},AD}(FT, ad)

-(f::_GenericGradientFunc)(x) = with_gradient(f.f, x, f.ad)[2]
+(f::_GenericGradientFunc)(x) = only_gradient(f.f, x, f.ad)

"""
    gradient_func(f, ad::ADSelector)
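Usage sketch for the new `only_gradient` API (selector construction as in the previous sketch; `g` is hypothetical): it returns just ∇g(x), with a generic fallback that discards the primal value from `with_gradient`, and `gradient_func` now routes through it so backends with a specialized `only_gradient`, like the ForwardDiff extension above, can skip the extra work:

    using AutoDiffOperators, ForwardDiff

    g(x) = sum(exp, x)                 # hypothetical test function
    x = rand(3)
    ad = ADSelector(ForwardDiff)       # assumed, as above

    grad = only_gradient(g, x, ad)     # ∇g(x) only
    ∇g = gradient_func(g, ad)          # callable: x -> ∇g(x)
    @assert ∇g(x) ≈ grad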
2 changes: 1 addition & 1 deletion test/Project.toml
@@ -12,4 +12,4 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[compat]
-Documenter = "~0.27"
+Documenter = "1"
2 changes: 2 additions & 0 deletions test/testutils.jl
@@ -39,6 +39,7 @@ function test_adsel_functionality(ad::ADSelector)
    @test_deprecated jacobian_matrix(f, x, ad) ≈ J_f_ref
    @test with_gradient(g, x, ad)[1] ≈ y_g_ref
    @test with_gradient(g, x, ad)[2] ≈ grad_g_x_ref
+    @test only_gradient(g, x, ad) ≈ grad_g_x_ref

    let δx = similar(x)
        fill!(δx, NaN)
@@ -57,6 +58,7 @@

    if AutoDiffOperators.supports_structargs(reverse_ad_selector(ad))
        @test approx_cmp(with_gradient(f_nv, x_nv, ad), (y_nv_ref, grad_nv_ref))
+        @test approx_cmp(only_gradient(f_nv, x_nv, ad), grad_nv_ref)
        @test approx_cmp(valgrad_func(f_nv, ad)(x_nv), (y_nv_ref, grad_nv_ref))
        @test approx_cmp(gradient_func(f_nv, ad)(x_nv), grad_nv_ref)
    end