import functools
import logging
import warnings
- from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
+ from typing import Callable, Dict, Iterable, Optional, Tuple, Union

from gpytorch.models import ExactGP

- from blackboxopt import (
-     ConstraintsError,
-     Evaluation,
-     EvaluationSpecification,
+ from blackboxopt.base import (
    Objective,
    OptimizerNotReady,
-     sort_evaluations,
- )
- from blackboxopt.base import (
    SingleObjectiveOptimizer,
    call_functions_with_evaluations_and_collect_errors,
    validate_objectives,
)
+ from blackboxopt.evaluation import Evaluation, EvaluationSpecification
+ from blackboxopt.utils import sort_evaluations

try:
    import numpy as np
    from botorch.models.model import Model
    from botorch.optim import optimize_acqf, optimize_acqf_discrete
    from botorch.sampling.samplers import IIDNormalSampler
-     from sklearn.impute import SimpleImputer
+
+     from blackboxopt.optimizers.botorch_utils import (
+         filter_y_nans,
+         impute_nans_with_constant,
+         to_numerical,
+     )

except ImportError as e:
    raise ImportError(
    ) from e


- def impute_nans_with_constant(x: torch.Tensor, c: float = -1.0) -> torch.Tensor:
-     """Impute `NaN` values with given constant value.
-
-     Args:
-         x: Input tensor of shape `n x d` or `b x n x d`.
-         c: Constant used as fill value to replace `NaNs`.
-
-     Returns:
-         - x_i - `x` where all `NaN`s are replaced with given constant.
-     """
-     if x.numel() == 0:  # empty tensor, nothing to impute
-         return x
-     x_i = x.clone()
-
-     # cast n x d to 1 x n x d (cover non-batch case)
-     if len(x.shape) == 2:
-         x_i = x_i.reshape(torch.Size((1,)) + x_i.shape)
-
-     for b in range(x_i.shape[0]):
-         x_1 = x_i[b, :, :]
-         x_1 = torch.tensor(
-             SimpleImputer(
-                 missing_values=np.nan, strategy="constant", fill_value=c
-             ).fit_transform(x_1),
-             dtype=x.dtype,
-         )
-         x_i[b, :, :] = x_1
-
-     # cast 1 x n x d back to n x d if originally non-batch
-     if len(x.shape) == 2:
-         x_i = x_i.reshape(x.shape)
-     return x_i
-
-
- def to_numerical(
-     evaluations: Iterable[Evaluation],
-     search_space: ps.ParameterSpace,
-     objective: Objective,
-     constraint_names: Optional[List[str]] = None,
-     batch_shape: torch.Size = torch.Size(),
-     torch_dtype: torch.dtype = torch.float32,
- ) -> Tuple[torch.Tensor, torch.Tensor]:
-     """Convert evaluations to one `(#batch, #evaluations, #parameters)` tensor
-     containing the numerical representations of the configurations and
-     one `(#batch, #evaluations, 1)` tensor containing the loss representation of
-     the evaluations' objective value (flips the sign for objective value
-     if `objective.greater_is_better=True`) and optionally constraints value.
-
-     Args:
-         evaluations: List of evaluations that were collected during optimization.
-         search_space: Search space used during optimization.
-         objective: Objective that was used for optimization.
-         constraint_names: Name of constraints that are used for optimization.
-         batch_shape: Batch dimension(s) used for batched models.
-         torch_dtype: Type of returned tensors.
-
-     Returns:
-         - X: Numerical representation of the configurations
-         - Y: Numerical representation of the objective values and optionally constraints
-
-     Raises:
-         ValueError: If one of configurations is not valid w.r.t. search space.
-         ValueError: If one of configurations includes parameters that are not part of
-             the search space.
-         ConstraintError: If one of the constraint names is not defined in evaluations.
-     """
-     # validate configuration values and dimensions
-     parameter_names = search_space.get_parameter_names() + list(
-         search_space.get_constant_names()
-     )
-     for e in evaluations:
-         with warnings.catch_warnings():
-             # we already raise error if search space not valid, thus can ignore warnings
-             warnings.filterwarnings(
-                 "ignore", category=RuntimeWarning, message="Parameter"
-             )
-             if not search_space.check_validity(e.configuration):
-                 raise ValueError(
-                     f"The provided configuration {e.configuration} is not valid."
-                 )
-         if not set(parameter_names) >= set(e.configuration.keys()):
-             raise ValueError(
-                 f"Mismatch in parameter names from search space {parameter_names} and "
-                 + f"configuration {e.configuration}"
-             )
-
-     X = torch.tensor(
-         np.array([search_space.to_numerical(e.configuration) for e in evaluations]),
-         dtype=torch_dtype,
-     )
-     X = X.reshape(*batch_shape + X.shape)
-     Y = torch.tensor(
-         np.array([[e.objectives[objective.name]] for e in evaluations], dtype=float),
-         dtype=torch_dtype,
-     )
-
-     if objective.greater_is_better:
-         Y *= -1
-
-     if constraint_names is not None:
-         try:
-             Y_constraints = torch.tensor(
-                 np.array(
-                     [[e.constraints[c] for c in constraint_names] for e in evaluations],
-                     dtype=float,
-                 ),
-                 dtype=torch_dtype,
-             )
-             Y = torch.cat((Y, Y_constraints), dim=1)
-         except KeyError as e:
-             raise ConstraintsError(
-                 f"Constraint name {e} is not defined in input evaluations."
-             )
-         except TypeError:
-             raise ConstraintsError(
-                 f"Constraint name(s) {constraint_names} are not defined in input evaluations."
-             )
-
-     Y = Y.reshape(*batch_shape + Y.shape)
-
-     return X, Y
-
-
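Note: the helpers removed here (`impute_nans_with_constant`, `to_numerical`, and, further below, `filter_y_nans`) are not deleted from the code base; the new import block at the top of the file pulls them from `blackboxopt.optimizers.botorch_utils` instead. A minimal sketch of an updated call site, assuming the signatures stay as documented above; `evaluations`, `search_space`, and `objective` are hypothetical stand-ins:

from blackboxopt.optimizers.botorch_utils import impute_nans_with_constant, to_numerical

# Convert collected evaluations into model inputs/targets, then replace any NaNs
# in X with the helper's default fill value of -1.0 (hypothetical call site):
X, Y = to_numerical(evaluations, search_space, objective)
X = impute_nans_with_constant(X)
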
def _acquisition_function_optimizer_factory(
    search_space: ps.ParameterSpace,
    af_opt_kwargs: Optional[dict],
    torch_dtype: torch.dtype,
) -> Callable[[AcquisitionFunction], Tuple[torch.Tensor, torch.Tensor]]:
    """Prepare either BoTorch's `optimize_acqf_discrete` or `optimize_acqf` depending
    on whether the search space is fully discrete or not and set required defaults if
-     not overridden by `af_opt_kwargs`.
+     not overridden by `af_opt_kwargs`. If any of the optimizer-specific required
+     kwargs are set, this overrides the automatic discrete-space detection.

    Args:
        search_space: Search space used for optimization.
        af_opt_kwargs: Acquisition function optimizer configuration, e.g. containing
-             values for `n_samples` for discrete optimization, and `num_restarts`,
-             `raw_samples` for the continuous optimization case.
+             values for `num_random_choices` for discrete optimization, and
+             `num_restarts`, `raw_samples` for the continuous optimization case.
        torch_dtype: Torch tensor type.

    Returns:
@@ -188,64 +67,32 @@ def _acquisition_function_optimizer_factory(
    """
    kwargs = {} if af_opt_kwargs is None else af_opt_kwargs.copy()

-     is_fully_discrete_space = not any(
+     space_has_continuous_parameters = any(
        search_space[n]["parameter"].is_continuous
        for n in search_space.get_parameter_names()
    )
-     if is_fully_discrete_space:
-         choices = torch.Tensor(
-             [
-                 search_space.to_numerical(search_space.sample())
-                 for _ in range(kwargs.pop("n_samples", 5_000))
-             ]
-         ).to(dtype=torch_dtype)
-         return functools.partial(optimize_acqf_discrete, q=1, choices=choices, **kwargs)
-
-     return functools.partial(
-         optimize_acqf,
-         q=1,
-         # The numerical representation always lives on the unit hypercube
-         bounds=torch.tensor([[0, 1]] * len(search_space), dtype=torch_dtype).T,
-         num_restarts=kwargs.pop("num_restarts", 4),
-         raw_samples=kwargs.pop("raw_samples", 1024),
-         **kwargs,
-     )
-
-
- def filter_y_nans(
-     x: torch.Tensor, y: torch.Tensor
- ) -> Tuple[torch.Tensor, torch.Tensor]:
-     """Filter rows jointly for `x` and `y`, where `y` is `NaN`.
-
-     Args:
-         x: Input tensor of shape `n x d` or `1 x n x d`.
-         y: Input tensor of shape `n x m` or `1 x n x m`.
-
-     Returns:
-         - x_f: Filtered `x`.
-         - y_f: Filtered `y`.
-
-     Raises:
-         ValueError: If input is 3D (batched representation) with first dimension not
-             `1` (multiple batches).
-     """
-     if (len(x.shape) == 3 and x.shape[0] > 1) or (len(y.shape) == 3 and y.shape[0] > 1):
-         raise ValueError("Multiple batches are not supported for now.")
-
-     x_f = x.clone()
-     y_f = y.clone()
-
-     # filter rows jointly where y is NaN
-     x_f = x_f[~torch.any(y_f.isnan(), dim=-1)]
-     y_f = y_f[~torch.any(y_f.isnan(), dim=-1)]
-
-     # cast n x d back to 1 x n x d if originally batch case
-     if len(x.shape) == 3:
-         x_f = x_f.reshape(torch.Size((1,)) + x_f.shape)
-     if len(y.shape) == 3:
-         y_f = y_f.reshape(torch.Size((1,)) + y_f.shape)
+     if "num_random_choices" not in kwargs and (
+         "num_restarts" in kwargs
+         or "raw_samples" in kwargs
+         or space_has_continuous_parameters
+     ):
+         return functools.partial(
+             optimize_acqf,
+             q=1,
+             # The numerical representation always lives on the unit hypercube
+             bounds=torch.tensor([[0, 1]] * len(search_space), dtype=torch_dtype).T,
+             num_restarts=kwargs.pop("num_restarts", 4),
+             raw_samples=kwargs.pop("raw_samples", 1024),
+             **kwargs,
+         )

-     return x_f, y_f
+     choices = torch.Tensor(
+         [
+             search_space.to_numerical(search_space.sample())
+             for _ in range(kwargs.pop("num_random_choices", 5_000))
+         ]
+     ).to(dtype=torch_dtype)
+     return functools.partial(optimize_acqf_discrete, q=1, choices=choices, **kwargs)
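Note: the following is a minimal usage sketch, not part of the changed file. `space` and `acquisition_function` are hypothetical stand-ins (any `ps.ParameterSpace` and BoTorch `AcquisitionFunction`); the kwarg names and dispatch behaviour come from the change above, and the module's existing imports are assumed.

# Passing `num_random_choices` selects the discrete path (optimize_acqf_discrete),
# even if `space` contains continuous parameters (values are illustrative):
af_opt = _acquisition_function_optimizer_factory(
    search_space=space,
    af_opt_kwargs={"num_random_choices": 1_000},
    torch_dtype=torch.float32,
)
# Passing `num_restarts` and/or `raw_samples` without `num_random_choices` selects
# the continuous path (optimize_acqf), even for a fully discrete space:
af_opt = _acquisition_function_optimizer_factory(
    search_space=space,
    af_opt_kwargs={"num_restarts": 8, "raw_samples": 512},
    torch_dtype=torch.float32,
)
candidates, af_values = af_opt(acquisition_function)  # candidate tensor and its acquisition values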


class SingleObjectiveBOTorchOptimizer(SingleObjectiveOptimizer):
@@ -280,7 +127,9 @@ def __init__(
                `functools.partial(UpperConfidenceBound, beta=6.0, maximize=False)`.
            af_optimizer_kwargs: Settings for acquisition function optimizer,
                see `botorch.optim.optimize_acqf` and in case the whole search space
-                 is discrete: `botorch.optim.optimize_acqf_discrete`.
+                 is discrete: `botorch.optim.optimize_acqf_discrete`. The former can be
+                 enforced by providing `raw_samples` or `num_restarts`, the latter by
+                 providing `num_random_choices`.
            num_initial_random_samples: Size of the initial space-filling design that
                is used before starting BO. The points are sampled randomly in the
                search space. If no random sampling is required, set it to 0.