@@ -26,10 +26,11 @@ def run_optimization_loop(
     optimizer: Union[SingleObjectiveOptimizer, MultiObjectiveOptimizer],
     evaluation_function: Callable[[EvaluationSpecification], Evaluation],
     timeout_s: float = float("inf"),
-    max_evaluations: int = None,
+    max_evaluations: Optional[int] = None,
     catch_exceptions_from_evaluation_function: bool = False,
+    pre_evaluation_callback: Optional[Callable[[EvaluationSpecification], Any]] = None,
     post_evaluation_callback: Optional[Callable[[Evaluation], Any]] = None,
-    logger: logging.Logger = None,
+    logger: Optional[logging.Logger] = None,
 ) -> List[Evaluation]:
     """Convenience wrapper for an optimization loop that sequentially fetches evaluation
     specifications until a given timeout or maximum number of evaluations is reached.
@@ -53,9 +54,12 @@ def run_optimization_loop(
             exception raised by the evaluation function or instead store their stack
             trace in the evaluation's `stacktrace` attribute. Set to True if there are
             spurious errors due to e.g. numerical instability that should not halt the
-            optimization loop.
+            optimization loop. For more details, see the wrapper that is used internally
+            `blackboxopt.optimization_loops.utils.evaluation_function_wrapper`
+        pre_evaluation_callback: Reference to a callable that is invoked before each
+            evaluation and takes a `blackboxopt.EvaluationSpecification` as an argument.
         post_evaluation_callback: Reference to a callable that is invoked after each
-            evaluation and takes a `blackboxopt.Evaluation` as its argument.
+            evaluation and takes a `blackboxopt.Evaluation` as an argument.
         logger: The logger to use for logging progress. Default: `blackboxopt.logger`
 
     Returns:
@@ -82,10 +86,13 @@ def run_optimization_loop(
 
         try:
             evaluation_specification = optimizer.generate_evaluation_specification()
+
             logger.info(
-                "The optimizer proposed a specification for evaluation:\n"
-                + f"{json.dumps(evaluation_specification.to_dict(), indent=2)}"
+                "The optimizer proposed the following evaluation specification:\n%s",
+                json.dumps(evaluation_specification.to_dict(), indent=2),
             )
+            if pre_evaluation_callback is not None:
+                pre_evaluation_callback(evaluation_specification)
 
             evaluation = evaluation_function_wrapper(
                 evaluation_function=evaluation_function,
@@ -94,16 +101,25 @@ def run_optimization_loop(
                 objectives=objectives,
                 catch_exceptions_from_evaluation_function=catch_exceptions_from_evaluation_function,
             )
+
             logger.info(
-                "Reporting the result from the evaluation function to the optimizer:\n"
-                + f"{json.dumps(evaluation.to_dict(), indent=2)}"
+                "Reporting the following evaluation result to the optimizer:\n%s",
+                json.dumps(
+                    {
+                        # Stringify the user_info because it is not guaranteed to be
+                        # json serializable
+                        k: str(v) if k == "user_info" else v
+                        for k, v in evaluation.to_dict().items()
+                    },
+                    indent=2,
+                ),
             )
-            optimizer.report(evaluation)
-            evaluations.append(evaluation)
-
             if post_evaluation_callback is not None:
                 post_evaluation_callback(evaluation)
 
+            optimizer.report(evaluation)
+            evaluations.append(evaluation)
+
         except OptimizerNotReady:
             logger.info("Optimizer is not ready yet, retrying in two seconds")
             time.sleep(2)
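
For reference, a minimal usage sketch of the loop after this change, wiring up the new `pre_evaluation_callback` next to the existing `post_evaluation_callback`. The search space setup, the `RandomSearch` constructor, `create_evaluation`, and the `sequential` import path are assumptions based on typical blackboxopt usage rather than part of this diff; adapt them to the optimizer actually in use.

import parameterspace as ps

from blackboxopt import Evaluation, EvaluationSpecification, Objective
from blackboxopt.optimization_loops.sequential import run_optimization_loop
from blackboxopt.optimizers.random_search import RandomSearch  # assumed import path

# Hypothetical search space with a single continuous parameter.
space = ps.ParameterSpace()
space.add(ps.ContinuousParameter(name="x", bounds=(-5.0, 5.0)))


def evaluation_function(eval_spec: EvaluationSpecification) -> Evaluation:
    # Toy objective: minimize x^2 for the proposed configuration.
    x = eval_spec.configuration["x"]
    return eval_spec.create_evaluation(objectives={"loss": x**2})


evaluations = run_optimization_loop(
    # Constructor signature assumed; any Single-/MultiObjectiveOptimizer works here.
    optimizer=RandomSearch(space, [Objective("loss", greater_is_better=False)], max_steps=20),
    evaluation_function=evaluation_function,
    max_evaluations=20,
    # New in this change: invoked with the EvaluationSpecification right
    # before evaluation_function runs.
    pre_evaluation_callback=lambda spec: print("evaluating", spec.configuration),
    # Invoked with the resulting Evaluation before it is reported.
    post_evaluation_callback=lambda ev: print("result", ev.objectives),
)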