 from botorch.acquisition.decoupled import DecoupledAcquisitionFunction
 from botorch.acquisition.knowledge_gradient import ProjectedAcquisitionFunction
 from botorch.acquisition.multi_objective.base import MultiObjectiveMCAcquisitionFunction
+from botorch.acquisition.multi_objective.logei import qLogExpectedHypervolumeImprovement
 from botorch.acquisition.multi_objective.monte_carlo import (
     qExpectedHypervolumeImprovement,
 )
 from botorch.acquisition.multi_objective.objective import MCMultiOutputObjective
-from botorch.exceptions.errors import UnsupportedError
+from botorch.exceptions.errors import BotorchError, UnsupportedError
 from botorch.exceptions.warnings import NumericsWarning
 from botorch.models.deterministic import PosteriorMeanModel
 from botorch.models.model import Model
 from botorch.utils.multi_objective.box_decompositions.non_dominated import (
     FastNondominatedPartitioning,
 )
+from botorch.utils.safe_math import logdiffexp, logmeanexp
 from botorch.utils.transforms import (
     average_over_ensemble_models,
     match_batch_shape,
@@ -91,6 +93,7 @@ def __init__(
         current_value: Tensor | None = None,
         use_posterior_mean: bool = True,
         cost_aware_utility: CostAwareUtility | None = None,
+        log: bool = False,
     ) -> None:
         r"""q-Hypervolume Knowledge Gradient.

@@ -133,6 +136,9 @@ def __init__(
                 [Daulton2023hvkg]_ for details.
             cost_aware_utility: A CostAwareUtility specifying the cost function for
                 evaluating the `X` on the objectives indicated by `evaluation_mask`.
+            log: If True, returns the log of the HVKG value. In that case,
+                `current_value` is expected to be in log-space and
+                `cost_aware_utility` must output log utilities.
         """
         if sampler is None:
             # base samples should be fixed for joint optimization over X, X_fantasies
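
(For orientation: a hypothetical usage sketch, not part of this diff. The `model`, `ref_point`, and `cost_aware_utility` objects are assumed to be set up elsewhere, with the cost-aware utility configured to return log utilities, and the class is assumed to live in `botorch.acquisition.multi_objective.hypervolume_knowledge_gradient` as in current BoTorch.)

    # Hypothetical sketch: constructing HVKG in log-space with the new flag.
    from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
        qHypervolumeKnowledgeGradient,
    )

    acqf = qHypervolumeKnowledgeGradient(
        model=model,                            # assumed multi-output surrogate
        ref_point=ref_point,                    # hypervolume reference point
        num_fantasies=8,
        cost_aware_utility=cost_aware_utility,  # must emit log utilities when log=True
        log=True,  # acquisition values (and any current_value) are in log-space
    )
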
@@ -170,6 +176,8 @@ def __init__(
         self.cost_aware_utility = cost_aware_utility
         self._cost_sampler = None

+        self._log = log
+
     @property
     def cost_sampler(self):
         if self._cost_sampler is None:
@@ -242,6 +250,7 @@ def forward(self, X: Tensor) -> Tensor:
             objective=self.objective,
             sampler=self.inner_sampler,
             use_posterior_mean=self.use_posterior_mean,
+            log=self._log,
         )

         # make sure to propagate gradients to the fantasy model train inputs
@@ -259,9 +268,23 @@ def forward(self, X: Tensor) -> Tensor:
         values = value_function(X=X_fantasies.reshape(shape))  # num_fantasies x b

         if self.current_value is not None:
-            values = values - self.current_value
+            if self._log:
+                values = logdiffexp(self.current_value, values)
+            else:
+                values = values - self.current_value

         if self.cost_aware_utility is not None:
+            if self._log:
+                # check whether cost_aware_utility has a _log flag;
+                # raise an error if it does not or if _log is False
+                if (
+                    not hasattr(self.cost_aware_utility, "_log")
+                    or not self.cost_aware_utility._log
+                ):
+                    raise BotorchError(
+                        "Cost-aware HVKG has _log=True and requires "
+                        "cost_aware_utility to output log utilities."
+                    )
             values = self.cost_aware_utility(
                 # exclude pending points
                 X=X_actual[..., :q, :],
@@ -271,7 +294,10 @@ def forward(self, X: Tensor) -> Tensor:
             )

         # return average over the fantasy samples
-        return values.mean(dim=0)
+        if self._log:
+            return logmeanexp(values, dim=0)
+        else:
+            return values.mean(dim=0)

     def get_augmented_q_batch_size(self, q: int) -> int:
         r"""Get augmented q batch size for one-shot optimization.
@@ -329,6 +355,7 @@ def __init__(
         valfunc_cls: type[AcquisitionFunction] | None = None,
         valfunc_argfac: Callable[[Model], dict[str, Any]] | None = None,
         use_posterior_mean: bool = True,
+        log: bool = False,
         **kwargs: Any,
     ) -> None:
         r"""Multi-Fidelity q-Knowledge Gradient (one-shot optimization).
@@ -376,6 +403,9 @@ def __init__(
             valfunc_argfac: An argument factory, i.e. callable that maps a `Model`
                 to a dictionary of kwargs for the terminal value function (e.g.
                 `best_f` for `ExpectedImprovement`).
+            log: If True, returns the log of the HVKG value. In that case,
+                `current_value` is expected to be in log-space and
+                `cost_aware_utility` must output log utilities.
         """

         super().__init__(
@@ -392,6 +422,7 @@ def __init__(
             current_value=current_value,
             use_posterior_mean=use_posterior_mean,
             cost_aware_utility=cost_aware_utility,
+            log=log,
         )
         self.project = project
         if kwargs.get("expand") is not None:
@@ -465,6 +496,7 @@ def forward(self, X: Tensor) -> Tensor:
             valfunc_cls=self.valfunc_cls,
             valfunc_argfac=self.valfunc_argfac,
             use_posterior_mean=self.use_posterior_mean,
+            log=self._log,
         )

         # make sure to propagate gradients to the fantasy model train inputs
@@ -481,9 +513,24 @@ def forward(self, X: Tensor) -> Tensor:
         )
         values = value_function(X=X_fantasies.reshape(shape))  # num_fantasies x b
         if self.current_value is not None:
-            values = values - self.current_value
+            if self._log:
+                # assumes current_value is in log-space
+                values = logdiffexp(self.current_value, values)
+            else:
+                values = values - self.current_value

         if self.cost_aware_utility is not None:
+            if self._log:
+                # check whether cost_aware_utility has a _log flag;
+                # raise an error if it does not or if _log is False
+                if (
+                    not hasattr(self.cost_aware_utility, "_log")
+                    or not self.cost_aware_utility._log
+                ):
+                    raise BotorchError(
+                        "Cost-aware HVKG has _log=True and requires "
+                        "cost_aware_utility to output log utilities."
+                    )
             values = self.cost_aware_utility(
                 # exclude pending points
                 X=X_actual[..., :q, :],
@@ -493,7 +540,7 @@ def forward(self, X: Tensor) -> Tensor:
             )

         # return average over the fantasy samples
-        return values.mean(dim=0)
+        return logmeanexp(values, dim=0) if self._log else values.mean(dim=0)


 def _get_hv_value_function(
@@ -505,6 +552,7 @@ def _get_hv_value_function(
     valfunc_cls: type[AcquisitionFunction] | None = None,
     valfunc_argfac: Callable[[Model], dict[str, Any]] | None = None,
     use_posterior_mean: bool = False,
+    log: bool = False,
 ) -> AcquisitionFunction:
     r"""Construct value function (i.e. inner acquisition function).
     This is a method for computing hypervolume.
518
566
action = "ignore" ,
519
567
category = NumericsWarning ,
520
568
)
521
- base_value_function = qExpectedHypervolumeImprovement (
569
+
570
+ base_value_function_class = (
571
+ qLogExpectedHypervolumeImprovement
572
+ if log
573
+ else qExpectedHypervolumeImprovement
574
+ )
575
+ base_value_function = base_value_function_class (
522
576
model = model ,
523
577
ref_point = ref_point ,
524
578
partitioning = FastNondominatedPartitioning (
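
A quick numerical sanity check of the log-space arithmetic relied on above, assuming (as the substitutions in the diff imply) that `logdiffexp(log_a, log_b)` returns `log(exp(log_b) - exp(log_a))` and `logmeanexp(x, dim)` returns `log(mean(exp(x), dim))`:

    # Sketch only: verifies the identities behind the log-space branches.
    import torch
    from botorch.utils.safe_math import logdiffexp, logmeanexp

    values = torch.tensor([3.0, 5.0, 9.0]).log()  # fantasy values in log-space
    current = torch.tensor(2.0).log()             # current value in log-space

    improvement = logdiffexp(current, values)     # log(exp(values) - exp(current))
    assert torch.allclose(improvement.exp(), torch.tensor([1.0, 3.0, 7.0]))

    avg = logmeanexp(improvement, dim=0)          # log of the mean improvement
    assert torch.allclose(avg.exp(), torch.tensor(11.0 / 3.0))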