@@ -13,7 +13,7 @@ for common workflows of the package and give copy-pastable starting points.

```@example rosenbrock
# Define the problem to solve
- using Optimization, ForwardDiff, Zygote, Test, Random
+ using Optimization, ForwardDiff, Zygote

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
@@ -33,14 +33,14 @@ sol = solve(prob, SimulatedAnnealing())
prob = OptimizationProblem(f, x0, _p, lb=[-1.0, -1.0], ub=[0.8, 0.8])
sol = solve(prob, SAMIN())

- l1 = rosenbrock(x0)
- prob = OptimizationProblem(rosenbrock, x0)
+ l1 = rosenbrock(x0, _p )
+ prob = OptimizationProblem(rosenbrock, x0, _p )
sol = solve(prob, NelderMead())

# Now a gradient-based optimizer with forward-mode automatic differentiation

- optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= cons)
- prob = OptimizationProblem(optf, x0)
+ optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
+ prob = OptimizationProblem(optf, x0, _p)
sol = solve(prob, BFGS())

# Now a second order optimizer using Hessians generated by forward-mode automatic differentiation
@@ -53,39 +53,44 @@ sol = solve(prob, Optim.KrylovTrustRegion())

# Now derivative-based optimizers with various constraints

- cons = (x,p) -> [x[1]^2 + x[2]^2]
- optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= cons)
- prob = OptimizationProblem(optf, x0)
- sol = solve(prob, IPNewton())
+ cons = (x,p) -> [x[1]^2 + x[2]^2]
+ optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= cons)
+ # prob = OptimizationProblem(optf, x0, _p )
+ # sol = solve(prob, IPNewton()) # No lcons or rcons, so constraints not satisfied

- prob = OptimizationProblem(optf, x0, lcons = [-Inf], ucons = [Inf])
- sol = solve(prob, IPNewton())
+ prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [Inf])
+ sol = solve(prob, IPNewton()) # Note that -Inf < x[1]^2 + x[2]^2 < Inf is always true

- prob = OptimizationProblem(optf, x0, lcons = [-5.0], ucons = [10.0])
- sol = solve(prob, IPNewton())
+ prob = OptimizationProblem(optf, x0, _p, lcons = [-5.0], ucons = [10.0])
+ sol = solve(prob, IPNewton()) # Again, -5.0 < x[1]^2 + x[2]^2 < 10.0

- prob = OptimizationProblem(optf, x0, lcons = [-Inf], ucons = [Inf],
+ prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [Inf],
lb = [-500.0,-500.0], ub=[50.0,50.0])
sol = solve(prob, IPNewton())

+ prob = OptimizationProblem(optf, x0, _p, lcons = [0.5], ucons = [0.5],
+ lb = [-500.0,-500.0], ub=[50.0,50.0])
+ sol = solve(prob, IPNewton()) # Notice now that x[1]^2 + x[2]^2 ≈ 0.5:
+ # cons(sol.minimizer, _p) = 0.49999999999999994
+
function con2_c(x,p)
[x[1]^2 + x[2]^2, x[2]*sin(x[1])-x[1]]
end

optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= con2_c)
- prob = OptimizationProblem(optf, x0, lcons = [-Inf,-Inf], ucons = [Inf,Inf])
+ prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf,-Inf], ucons = [Inf,Inf])
sol = solve(prob, IPNewton())

cons_circ = (x,p) -> [x[1]^2 + x[2]^2]
optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= cons_circ)
- prob = OptimizationProblem(optf, x0, lcons = [-Inf], ucons = [0.25^2])
- sol = solve(prob, IPNewton())
+ prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [0.25^2])
+ sol = solve(prob, IPNewton()) # -Inf < cons_circ(sol.minimizer, _p) = 0.25^2

# Now let's switch over to OptimizationOptimisers with reverse-mode AD

using OptimizationOptimisers
optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
- prob = OptimizationProblem(optf, x0)
+ prob = OptimizationProblem(optf, x0, _p )
sol = solve(prob, Adam(0.05), maxiters = 1000, progress = false)

## Try out CMAEvolutionStrategy.jl's evolutionary methods
@@ -97,27 +102,27 @@ sol = solve(prob, CMAEvolutionStrategyOpt())

using OptimizationNLopt, ModelingToolkit
optf = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit())
- prob = OptimizationProblem(optf, x0)
+ prob = OptimizationProblem(optf, x0, _p )

sol = solve(prob, Opt(:LN_BOBYQA, 2))
sol = solve(prob, Opt(:LD_LBFGS, 2))

## Add some box constraints and solve with a few NLopt.jl methods

- prob = OptimizationProblem(optf, x0, lb=[-1.0, -1.0], ub=[0.8, 0.8])
+ prob = OptimizationProblem(optf, x0, _p, lb=[-1.0, -1.0], ub=[0.8, 0.8])
sol = solve(prob, Opt(:LD_LBFGS, 2))
- sol = solve(prob, Opt(:G_MLSL_LDS, 2), nstart=2, local_method = Opt(:LD_LBFGS, 2), maxiters=10000)
+ # sol = solve(prob, Opt(:G_MLSL_LDS, 2), nstart=2, local_method = Opt(:LD_LBFGS, 2), maxiters=10000)

## Evolutionary.jl Solvers

using OptimizationEvolutionary
- sol = solve(prob, CMAES(μ =40 , λ = 100),abstol=1e-15)
+ sol = solve(prob, CMAES(μ =40 , λ = 100),abstol=1e-15) # -1.0 ≤ x[1], x[2] ≤ 0.8

## BlackBoxOptim.jl Solvers

using OptimizationBBO
- prob = Optimization.OptimizationProblem(rosenbrock, x0, lb=[-1.0, -1.0 ], ub=[0.8, 0.8 ])
- sol = solve(prob, BBO ())
+ prob = Optimization.OptimizationProblem(rosenbrock, x0, _p, lb=[-1.0, 0.2 ], ub=[0.8, 0.43 ])
+ sol = solve(prob, BBO_adaptive_de_rand_1_bin ()) # -1.0 ≤ x[1] ≤ 0.8, 0.2 ≤ x[2] ≤ 0.43
```

- And this is only a small subset of what Optimization.jl has to offer!
+ And this is only a small subset of what Optimization.jl has to offer!
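
For readers who want to try the constrained example this diff arrives at without reassembling the `+` lines, here is a minimal, self-contained sketch of the parameterized IPNewton setup. It is a sketch, not the tutorial verbatim: the value `_p = [1.0, 100.0]` (the usual Rosenbrock parameters) and the `OptimizationOptimJL` import are assumptions, since neither appears in these hunks, and the out-of-place constraint signature simply mirrors the snippet above and may differ in later Optimization.jl releases.

```julia
# Sketch only: `_p = [1.0, 100.0]` and `using OptimizationOptimJL` are assumed,
# since the definition of `_p` and the solver import are outside the hunks above.
using Optimization, OptimizationOptimJL, ForwardDiff

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
_p = [1.0, 100.0]   # assumed Rosenbrock parameters

# Single scalar constraint, written out-of-place as in the diff above.
cons = (x, p) -> [x[1]^2 + x[2]^2]
optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = cons)

# Setting lcons == ucons turns the bound pair into an equality constraint,
# so the solution is pushed onto the circle x[1]^2 + x[2]^2 = 0.5.
prob = OptimizationProblem(optf, x0, _p, lcons = [0.5], ucons = [0.5])
sol = solve(prob, IPNewton())
cons(sol.minimizer, _p)   # ≈ [0.5]
```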