@@ -161,3 +161,103 @@ sol = solve(prob, Optim.KrylovTrustRegion())
sol = solve(prob, Optimisers.ADAM(0.1), maxiters = 1000)
@test 10 * sol.minimum < l1
+
+ # Test new constraints
+ cons = (x, p) -> [x[1]^2 + x[2]^2]
+ optf = OptimizationFunction(rosenbrock, Optimization.AutoFiniteDiff(), cons = cons)
+ optprob = Optimization.instantiate_function(optf, x0, Optimization.AutoFiniteDiff(), nothing, 1)
+ optprob.grad(G2, x0)
+ @test G1 ≈ G2 rtol = 1e-6
+ optprob.hess(H2, x0)
+ @test H1 ≈ H2 rtol = 1e-6
+ @test optprob.cons(x0) == [0.0]
+ @test optprob.cons([1.0, 4.0]) == [17.0]
+ J = zeros(1, 2)
+ optprob.cons_j(J, [5.0, 3.0])
+ @test J ≈ [10.0 6.0]
+ H3 = [Array{Float64}(undef, 2, 2)]
+ optprob.cons_h(H3, x0)
+ @test H3 ≈ [[2.0 0.0; 0.0 2.0]]
+
+ cons_jac_proto = Float64.(sparse([1 1])) # Things break if you only use [1 1]; see FiniteDiff.jl
+ cons_jac_colors = 1:2
+ optf = OptimizationFunction(rosenbrock, Optimization.AutoFiniteDiff(), cons = cons, cons_jac_prototype = cons_jac_proto, cons_jac_colorvec = cons_jac_colors)
+ optprob = Optimization.instantiate_function(optf, x0, Optimization.AutoFiniteDiff(), nothing, 1)
+ @test optprob.cons_jac_prototype == sparse([1.0 1.0]) # make sure it's still using it
+ @test optprob.cons_jac_colorvec == 1:2
+ J = zeros(1, 2)
+ optprob.cons_j(J, [5.0, 3.0])
+ @test J ≈ [10.0 6.0]
+
+ function con2_c(x, p)
+     [x[1]^2 + x[2]^2, x[2] * sin(x[1]) - x[1]]
+ end
+ optf = OptimizationFunction(rosenbrock, Optimization.AutoFiniteDiff(), cons = con2_c)
+ optprob = Optimization.instantiate_function(optf, x0, Optimization.AutoFiniteDiff(), nothing, 2)
+ optprob.grad(G2, x0)
+ @test G1 ≈ G2 rtol = 1e-6
+ optprob.hess(H2, x0)
+ @test H1 ≈ H2 rtol = 1e-6
+ @test optprob.cons(x0) == [0.0, 0.0]
+ @test optprob.cons([1.0, 2.0]) ≈ [5.0, 0.682941969615793]
+ J = Array{Float64}(undef, 2, 2)
+ optprob.cons_j(J, [5.0, 3.0])
+ @test all(isapprox(J, [10.0 6.0; -0.149013 -0.958924]; rtol = 1e-3))
+ H3 = [Array{Float64}(undef, 2, 2), Array{Float64}(undef, 2, 2)]
+ optprob.cons_h(H3, x0)
+ @test H3 ≈ [[2.0 0.0; 0.0 2.0], [-0.0 1.0; 1.0 0.0]]
+
+ cons_jac_proto = Float64.(sparse([1 1; 1 1]))
+ cons_jac_colors = 1:2
+ optf = OptimizationFunction(rosenbrock, Optimization.AutoFiniteDiff(), cons = con2_c, cons_jac_prototype = cons_jac_proto, cons_jac_colorvec = cons_jac_colors)
+ optprob = Optimization.instantiate_function(optf, x0, Optimization.AutoFiniteDiff(), nothing, 2)
+ @test optprob.cons_jac_prototype == sparse([1.0 1.0; 1.0 1.0]) # make sure it's still using it
+ @test optprob.cons_jac_colorvec == 1:2
+ J = Array{Float64}(undef, 2, 2)
+ optprob.cons_j(J, [5.0, 3.0])
+ @test all(isapprox(J, [10.0 6.0; -0.149013 -0.958924]; rtol = 1e-3))
+
+ # Can we solve problems? Using AutoForwardDiff to test since we know that works
+ for consf in [cons, con2_c]
+     optf1 = OptimizationFunction(rosenbrock, Optimization.AutoFiniteDiff(); cons = consf)
+     prob1 = OptimizationProblem(optf1, [0.3, 0.5], lb = [0.2, 0.4], ub = [0.6, 0.8])
+     sol1 = solve(prob1, BFGS())
+     optf2 = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = consf)
+     prob2 = OptimizationProblem(optf2, [0.3, 0.5], lb = [0.2, 0.4], ub = [0.6, 0.8])
+     sol2 = solve(prob2, BFGS())
+     @test sol1.minimum ≈ sol2.minimum
+     @test sol1.u ≈ sol2.u
+
+     optf1 = OptimizationFunction(rosenbrock, Optimization.AutoFiniteDiff(); cons = consf)
+     lcons = consf == cons ? [0.2] : [0.2, -0.81]
+     ucons = consf == cons ? [0.55] : [0.55, -0.1]
+     prob1 = OptimizationProblem(optf1, [0.3, 0.5], lb = [0.2, 0.4], ub = [0.6, 0.8], lcons = lcons, ucons = ucons)
+     sol1 = solve(prob1, Optim.SAMIN(), maxiters = 10000) # a lot of iterations... doesn't even converge actually
+     optf2 = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = consf)
+     prob2 = OptimizationProblem(optf2, [0.3, 0.5], lb = [0.2, 0.4], ub = [0.6, 0.8], lcons = lcons, ucons = ucons)
+     sol2 = solve(prob2, Optim.SAMIN(), maxiters = 10000)
+     @test sol1.minimum ≈ sol2.minimum rtol = 1e-4
+     @test sol1.u ≈ sol2.u
+     @test lcons[1] ≤ consf(sol1.u, nothing)[1] ≤ ucons[1]
+     if consf == con2_c
+         @test lcons[2] ≤ consf(sol1.u, nothing)[2] ≤ ucons[2]
+     end
+
+     # --- These equality constraints are so fiddly. Can't get it to pass with consf(sol1.u, nothing)[1] ≈ lcons[1] rtol = 0.1 being true
+     # (I can get sol1.minimum ≈ sol2.minimum and sol1.u ≈ sol2.u, though, just not the constraint - or I can get the constraint and not
+     # sol1.minimum ≈ sol2.minimum, sol1.u ≈ sol2.u)
+     lcons = consf == cons ? [0.2] : [0.2, 0.5]
+     ucons = consf == cons ? [0.2] : [0.2, 0.5]
+     optf1 = OptimizationFunction(rosenbrock, Optimization.AutoFiniteDiff(); cons = consf)
+     prob1 = OptimizationProblem(optf1, [0.5, 0.5], lcons = lcons, ucons = ucons)
+     sol1 = solve(prob1, Optim.IPNewton())
+     optf2 = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = consf)
+     prob2 = OptimizationProblem(optf2, [0.5, 0.5], lcons = lcons, ucons = ucons)
+     sol2 = solve(prob2, Optim.IPNewton())
+     @test_broken sol1.minimum ≈ sol2.minimum
+     @test_broken sol1.u ≈ sol2.u
+     @test consf(sol1.u, nothing)[1] ≈ lcons[1] rtol = 0.1
+     if consf == con2_c
+         @test_broken consf(sol1.u, nothing)[2] ≈ lcons[2]
+     end
+ end
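A note on the hard-coded derivative values above. For the single constraint c(x) = [x1^2 + x2^2], the Jacobian is [2*x1 2*x2], which is [10.0 6.0] at (5, 3), and the Hessian of its only component is the constant matrix 2I; those are exactly the values the cons_j and cons_h tests assert. A minimal hand check in plain Julia (the analytic_* names are illustrative, not part of Optimization.jl):

using Test

# Analytic derivatives of c(x) = x1^2 + x2^2, the single constraint above.
analytic_jac(x) = [2x[1] 2x[2]]        # 1×2 Jacobian of c
analytic_hess(x) = [2.0 0.0; 0.0 2.0]  # constant Hessian of the only component

@test analytic_jac([5.0, 3.0]) == [10.0 6.0]           # matches the cons_j assertion
@test analytic_hess([0.0, 0.0]) == [2.0 0.0; 0.0 2.0]  # matches the cons_h assertion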
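The two-component constraint works the same way. For con2_c(x) = [x1^2 + x2^2, x2*sin(x1) - x1], the Jacobian is [2*x1 2*x2; x2*cos(x1) - 1 sin(x1)], whose second row at (5, 3) is approximately [-0.149013 -0.958924], and the Hessian of the second component is [-x2*sin(x1) cos(x1); cos(x1) 0], which reduces to [0 1; 1 0] at the origin. A hand check of those constants (again with illustrative names):

using Test

# Analytic derivatives of con2_c(x) = [x1^2 + x2^2, x2*sin(x1) - x1].
con2_jac(x) = [2x[1] 2x[2]; x[2]*cos(x[1])-1 sin(x[1])]
con2_hess2(x) = [-x[2]*sin(x[1]) cos(x[1]); cos(x[1]) 0.0]  # Hessian of component 2

@test con2_jac([5.0, 3.0]) ≈ [10.0 6.0; -0.149013 -0.958924] rtol = 1e-3
@test con2_hess2([0.0, 0.0]) == [0.0 1.0; 1.0 0.0]
@test 2 * sin(1.0) - 1 ≈ 0.682941969615793  # second entry of cons([1.0, 2.0]) above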
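On the Float64.(sparse([1 1])) conversion flagged in the comment above: a plausible reading is that the sparsity prototype's element type is inherited by the Jacobian buffer used during finite differencing, and an integer buffer cannot store real-valued difference quotients. The sketch below only demonstrates that eltype issue; the actual mechanism lives inside FiniteDiff.jl:

using SparseArrays

proto_int = sparse([1 1])            # SparseMatrixCSC{Int64}: unsafe as a buffer
proto_f64 = Float64.(sparse([1 1]))  # SparseMatrixCSC{Float64}: safe

J_int = similar(proto_int)           # integer buffer with the same sparsity pattern
# J_int[1, 1] = 10.000001            # would throw InexactError
J_f64 = similar(proto_f64)
J_f64[1, 1] = 10.000001              # fine: Float64 storage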