
Commit a9be183

Merge pull request #315 from DanielVandH/patch-1
Cleanup Rosenbrock tutorial
2 parents: 49b7ea5 + 9b6ed39

File tree: 1 file changed (+30, -25 lines)

docs/src/tutorials/rosenbrock.md
Lines changed: 30 additions & 25 deletions
@@ -13,7 +13,7 @@ for common workflows of the package and give copy-pastable starting points.
 
 ```@example rosenbrock
 # Define the problem to solve
-using Optimization, ForwardDiff, Zygote, Test, Random
+using Optimization, ForwardDiff, Zygote
 
 rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
 x0 = zeros(2)
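For orientation while reading the hunks below: the objective is the two-dimensional Rosenbrock function defined above, whose minimum lies at x = [p[1], p[1]^2] with value 0 whenever p[2] > 0. A standalone sanity check, using illustrative parameter values that are not part of this diff:

```julia
# Standalone sanity check (not part of the diff); the parameter values are illustrative.
rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2

p = [1.0, 100.0]                               # assumed/illustrative parameters
@assert rosenbrock([p[1], p[1]^2], p) == 0.0   # minimum at x = [p[1], p[1]^2]
```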
@@ -33,14 +33,14 @@ sol = solve(prob, SimulatedAnnealing())
 prob = OptimizationProblem(f, x0, _p, lb=[-1.0, -1.0], ub=[0.8, 0.8])
 sol = solve(prob, SAMIN())
 
-l1 = rosenbrock(x0)
-prob = OptimizationProblem(rosenbrock, x0)
+l1 = rosenbrock(x0, _p)
+prob = OptimizationProblem(rosenbrock, x0, _p)
 sol = solve(prob, NelderMead())
 
 # Now a gradient-based optimizer with forward-mode automatic differentiation
 
-optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= cons)
-prob = OptimizationProblem(optf, x0)
+optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
+prob = OptimizationProblem(optf, x0, _p)
 sol = solve(prob, BFGS())
 
 # Now a second order optimizer using Hessians generated by forward-mode automatic differentiation
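The recurring fix in this hunk (and those below) is threading the parameter vector `_p` through `OptimizationProblem`, so the solver evaluates `rosenbrock(x, _p)`; the old `rosenbrock(x0)` call had no matching method. The hunk also drops the `cons = cons` keyword from the unconstrained BFGS example, presumably because `cons` is only introduced later in the constrained section. A minimal sketch of the corrected parameter-passing pattern, assuming OptimizationOptimJL supplies `NelderMead` and using an illustrative value for `_p` (the tutorial defines it earlier, outside this diff):

```julia
# Minimal sketch of passing parameters through OptimizationProblem.
# `_p` is illustrative here; the tutorial defines it before this hunk.
using Optimization, OptimizationOptimJL

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
_p = [1.0, 100.0]

l1 = rosenbrock(x0, _p)                          # rosenbrock(x0) alone has no method
prob = OptimizationProblem(rosenbrock, x0, _p)   # `_p` is forwarded to the objective as `p`
sol = solve(prob, NelderMead())
```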
@@ -53,39 +53,44 @@ sol = solve(prob, Optim.KrylovTrustRegion())
 
 # Now derivative-based optimizers with various constraints
 
-cons = (x,p) -> [x[1]^2 + x[2]^2]
-optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= cons)
-prob = OptimizationProblem(optf, x0)
-sol = solve(prob, IPNewton())
+cons = (x,p) -> [x[1]^2 + x[2]^2]
+optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= cons)
+#prob = OptimizationProblem(optf, x0, _p)
+#sol = solve(prob, IPNewton()) # No lcons or rcons, so constraints not satisfied
 
-prob = OptimizationProblem(optf, x0, lcons = [-Inf], ucons = [Inf])
-sol = solve(prob, IPNewton())
+prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [Inf])
+sol = solve(prob, IPNewton()) # Note that -Inf < x[1]^2 + x[2]^2 < Inf is always true
 
-prob = OptimizationProblem(optf, x0, lcons = [-5.0], ucons = [10.0])
-sol = solve(prob, IPNewton())
+prob = OptimizationProblem(optf, x0, _p, lcons = [-5.0], ucons = [10.0])
+sol = solve(prob, IPNewton()) # Again, -5.0 < x[1]^2 + x[2]^2 < 10.0
 
-prob = OptimizationProblem(optf, x0, lcons = [-Inf], ucons = [Inf],
+prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [Inf],
                            lb = [-500.0,-500.0], ub=[50.0,50.0])
 sol = solve(prob, IPNewton())
 
+prob = OptimizationProblem(optf, x0, _p, lcons = [0.5], ucons = [0.5],
+                           lb = [-500.0,-500.0], ub=[50.0,50.0])
+sol = solve(prob, IPNewton()) # Notice now that x[1]^2 + x[2]^2 ≈ 0.5:
+# cons(sol.minimizer, _p) = 0.49999999999999994
+
 function con2_c(x,p)
     [x[1]^2 + x[2]^2, x[2]*sin(x[1])-x[1]]
 end
 
 optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= con2_c)
-prob = OptimizationProblem(optf, x0, lcons = [-Inf,-Inf], ucons = [Inf,Inf])
+prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf,-Inf], ucons = [Inf,Inf])
 sol = solve(prob, IPNewton())
 
 cons_circ = (x,p) -> [x[1]^2 + x[2]^2]
 optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();cons= cons_circ)
-prob = OptimizationProblem(optf, x0, lcons = [-Inf], ucons = [0.25^2])
-sol = solve(prob, IPNewton())
+prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [0.25^2])
+sol = solve(prob, IPNewton()) # -Inf < cons_circ(sol.minimizer, _p) = 0.25^2
 
 # Now let's switch over to OptimizationOptimisers with reverse-mode AD
 
 using OptimizationOptimisers
 optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
-prob = OptimizationProblem(optf, x0)
+prob = OptimizationProblem(optf, x0, _p)
 sol = solve(prob, Adam(0.05), maxiters = 1000, progress = false)
 
 ## Try out CMAEvolutionStrategy.jl's evolutionary methods
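The added lines above illustrate that `lcons` and `ucons` bound the constraint function componentwise, and that setting them equal turns the bound into an equality constraint. A sketch of just that equality-constrained case, reusing the hunk's definitions with an illustrative `_p` (and assuming OptimizationOptimJL provides `IPNewton`):

```julia
# Sketch: lcons == ucons makes x[1]^2 + x[2]^2 == 0.5 an equality constraint.
# `_p` is illustrative; the tutorial defines it before this hunk.
using Optimization, OptimizationOptimJL

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
_p = [1.0, 100.0]

cons = (x, p) -> [x[1]^2 + x[2]^2]
optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = cons)
prob = OptimizationProblem(optf, x0, _p, lcons = [0.5], ucons = [0.5],
                           lb = [-500.0, -500.0], ub = [50.0, 50.0])
sol = solve(prob, IPNewton())
cons(sol.minimizer, _p)   # ≈ [0.5]: the equality constraint is active at the solution
```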
@@ -97,27 +102,27 @@ sol = solve(prob, CMAEvolutionStrategyOpt())
 
 using OptimizationNLopt, ModelingToolkit
 optf = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit())
-prob = OptimizationProblem(optf, x0)
+prob = OptimizationProblem(optf, x0, _p)
 
 sol = solve(prob, Opt(:LN_BOBYQA, 2))
 sol = solve(prob, Opt(:LD_LBFGS, 2))
 
 ## Add some box constraints and solve with a few NLopt.jl methods
 
-prob = OptimizationProblem(optf, x0, lb=[-1.0, -1.0], ub=[0.8, 0.8])
+prob = OptimizationProblem(optf, x0, _p, lb=[-1.0, -1.0], ub=[0.8, 0.8])
 sol = solve(prob, Opt(:LD_LBFGS, 2))
-sol = solve(prob, Opt(:G_MLSL_LDS, 2), nstart=2, local_method = Opt(:LD_LBFGS, 2), maxiters=10000)
+# sol = solve(prob, Opt(:G_MLSL_LDS, 2), nstart=2, local_method = Opt(:LD_LBFGS, 2), maxiters=10000)
 
 ## Evolutionary.jl Solvers
 
 using OptimizationEvolutionary
-sol = solve(prob, CMAES(μ =40 , λ = 100),abstol=1e-15)
+sol = solve(prob, CMAES(μ =40 , λ = 100),abstol=1e-15) # -1.0 ≤ x[1], x[2] ≤ 0.8
 
 ## BlackBoxOptim.jl Solvers
 
 using OptimizationBBO
-prob = Optimization.OptimizationProblem(rosenbrock, x0, lb=[-1.0, -1.0], ub=[0.8, 0.8])
-sol = solve(prob, BBO())
+prob = Optimization.OptimizationProblem(rosenbrock, x0, _p, lb=[-1.0, 0.2], ub=[0.8, 0.43])
+sol = solve(prob, BBO_adaptive_de_rand_1_bin()) # -1.0 ≤ x[1] ≤ 0.8, 0.2 ≤ x[2] ≤ 0.43
 ```
 
-And this is only a small subset of what Optimization.jl has to offer!
+And this is only a small subset of what Optimization.jl has to offer!
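Besides threading `_p` through, this last hunk also swaps the old generic `BBO()` handle for an explicitly named BlackBoxOptim method and narrows the box bounds. A sketch of the updated call in isolation, again with an illustrative `_p`:

```julia
# Sketch of the updated BlackBoxOptim call; `_p` is illustrative.
using Optimization, OptimizationBBO

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
_p = [1.0, 100.0]

prob = Optimization.OptimizationProblem(rosenbrock, x0, _p,
                                        lb = [-1.0, 0.2], ub = [0.8, 0.43])
sol = solve(prob, BBO_adaptive_de_rand_1_bin())   # named method replaces the old BBO()
sol.minimizer                                     # lies within -1.0 ≤ x[1] ≤ 0.8, 0.2 ≤ x[2] ≤ 0.43
```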
