@@ -124,7 +124,7 @@ class UpdateMethod(enum.Enum):
124124
125125def filter_top_directions (
126126 perturbations : FloatArray2D , function_values : FloatArray ,
127- est_type : EstimatorType ,
127+ estimator_type : EstimatorType ,
128128 num_top_directions : int ) -> Tuple [FloatArray , FloatArray ]:
129129 """Select the subset of top-performing perturbations.
130130
@@ -134,7 +134,7 @@ def filter_top_directions(
134134 p, -p in the even/odd entries, so the directions p_1,...,p_n
135135 will be ordered (p_1, -p_1, p_2, -p_2,...)
136136 function_values: np array of reward values (maximization)
137- est_type : (forward_fd | antithetic)
137+ estimator_type : (forward_fd | antithetic)
138138 num_top_directions: the number of top directions to include
139139 For antithetic, the total number of perturbations will
140140 be 2* this number, because we count p, -p as a single
@@ -148,16 +148,16 @@ def filter_top_directions(
148148 """
149149 if not num_top_directions > 0 :
150150 return (perturbations , function_values )
151- if est_type == EstimatorType .FORWARD_FD :
151+ if estimator_type == EstimatorType .FORWARD_FD :
152152 top_index = np .argsort (- function_values )
153- elif est_type == EstimatorType .ANTITHETIC :
153+ elif estimator_type == EstimatorType .ANTITHETIC :
154154 top_index = np .argsort (- np .abs (function_values [0 ::2 ] -
155155 function_values [1 ::2 ]))
156156 top_index = top_index [:num_top_directions ]
157- if est_type == EstimatorType .FORWARD_FD :
157+ if estimator_type == EstimatorType .FORWARD_FD :
158158 perturbations = perturbations [top_index ]
159159 function_values = function_values [top_index ]
160- elif est_type == EstimatorType .ANTITHETIC :
160+ elif estimator_type == EstimatorType .ANTITHETIC :
161161 perturbations = np .concatenate (
162162 (perturbations [2 * top_index ], perturbations [2 * top_index + 1 ]),
163163 axis = 0 )
@@ -245,11 +245,11 @@ class StatefulOptimizer(BlackboxOptimizer):
245245 Class contains common methods for handling the state.
246246 """
247247
248- def __init__ (self , est_type : EstimatorType , normalize_fvalues : bool ,
248+ def __init__ (self , estimator_type : EstimatorType , normalize_fvalues : bool ,
249249 hyperparameters_update_method : UpdateMethod ,
250250 extra_params : Optional [Sequence [int ]]):
251251
252- self .est_type = est_type
252+ self .estimator_type = estimator_type
253253 self .normalize_fvalues = normalize_fvalues
254254 self .hyperparameters_update_method = hyperparameters_update_method
255255 if hyperparameters_update_method == UpdateMethod .STATE_NORMALIZATION :
@@ -321,7 +321,7 @@ class MonteCarloBlackboxOptimizer(StatefulOptimizer):
321321
322322 def __init__ (self ,
323323 precision_parameter : float ,
324- est_type : EstimatorType ,
324+ estimator_type : EstimatorType ,
325325 normalize_fvalues : bool ,
326326 hyperparameters_update_method : UpdateMethod ,
327327 extra_params : Optional [Sequence [int ]],
@@ -342,8 +342,8 @@ def __init__(self,
342342 self .precision_parameter = precision_parameter
343343 self .num_top_directions = num_top_directions
344344 self .gradient_ascent_optimizer = gradient_ascent_optimizer
345- super ().__init__ (est_type , normalize_fvalues , hyperparameters_update_method ,
346- extra_params )
345+ super ().__init__ (estimator_type , normalize_fvalues ,
346+ hyperparameters_update_method , extra_params )
347347
348348 # TODO: Issue #285
349349 def run_step (self , perturbations : FloatArray2D , function_values : FloatArray ,
@@ -358,14 +358,14 @@ def run_step(self, perturbations: FloatArray2D, function_values: FloatArray,
358358 function_values = np .array (normalized_values [:- 1 ])
359359 current_value = normalized_values [- 1 ]
360360 top_ps , top_fs = filter_top_directions (perturbations , function_values ,
361- self .est_type ,
361+ self .estimator_type ,
362362 self .num_top_directions )
363363 gradient = np .zeros (dim )
364364 for i , perturbation in enumerate (top_ps ):
365365 function_value = top_fs [i ]
366- if self .est_type == EstimatorType .FORWARD_FD :
366+ if self .estimator_type == EstimatorType .FORWARD_FD :
367367 gradient_sample = (function_value - current_value ) * perturbation
368- elif self .est_type == EstimatorType .ANTITHETIC :
368+ elif self .estimator_type == EstimatorType .ANTITHETIC :
369369 gradient_sample = function_value * perturbation
370370 gradient_sample /= self .precision_parameter ** 2
371371 gradient += gradient_sample
@@ -374,7 +374,7 @@ def run_step(self, perturbations: FloatArray2D, function_values: FloatArray,
374374 # in that code, the denominator for antithetic was num_top_directions.
375375 # we maintain compatibility for now so that the same hyperparameters
376376 # currently used in Toaster will have the same effect
377- if self .est_type == EstimatorType .ANTITHETIC and \
377+ if self .estimator_type == EstimatorType .ANTITHETIC and \
378378 len (top_ps ) < len (perturbations ):
379379 gradient *= 2
380380 # Use the gradient ascent optimizer to compute the next parameters with the
@@ -396,7 +396,7 @@ class SklearnRegressionBlackboxOptimizer(StatefulOptimizer):
396396 def __init__ (self ,
397397 regression_method : RegressionType ,
398398 regularizer : float ,
399- est_type : EstimatorType ,
399+ estimator_type : EstimatorType ,
400400 normalize_fvalues : bool ,
401401 hyperparameters_update_method : UpdateMethod ,
402402 extra_params : Optional [Sequence [int ]],
@@ -422,8 +422,8 @@ def __init__(self,
422422 else :
423423 raise ValueError ('Optimization procedure option not available' )
424424 self .gradient_ascent_optimizer = gradient_ascent_optimizer
425- super ().__init__ (est_type , normalize_fvalues , hyperparameters_update_method ,
426- extra_params )
425+ super ().__init__ (estimator_type , normalize_fvalues ,
426+ hyperparameters_update_method , extra_params )
427427
428428 def run_step (self , perturbations : FloatArray2D , function_values : FloatArray ,
429429 current_input : FloatArray , current_value : float ) -> FloatArray :
@@ -439,11 +439,11 @@ def run_step(self, perturbations: FloatArray2D, function_values: FloatArray,
439439
440440 matrix = None
441441 b_vector = None
442- if self .est_type == EstimatorType .FORWARD_FD :
442+ if self .estimator_type == EstimatorType .FORWARD_FD :
443443 matrix = np .array (perturbations )
444444 b_vector = (
445445 function_values - np .array ([current_value ] * len (function_values )))
446- elif self .est_type == EstimatorType .ANTITHETIC :
446+ elif self .estimator_type == EstimatorType .ANTITHETIC :
447447 matrix = np .array (perturbations [::2 ])
448448 function_even_values = np .array (function_values .tolist ()[::2 ])
449449 function_odd_values = np .array (function_values .tolist ()[1 ::2 ])
@@ -495,20 +495,20 @@ def normalize_function_values(
495495
496496
497497def monte_carlo_gradient (precision_parameter : float ,
498- est_type : EstimatorType ,
498+ estimator_type : EstimatorType ,
499499 perturbations : FloatArray2D ,
500500 function_values : FloatArray ,
501501 current_value : float ,
502502 energy : Optional [float ] = 0 ) -> FloatArray :
503503 """Calculates Monte Carlo gradient.
504504
505505 There are several ways of estimating the gradient. This is specified by the
506- attribute self.est_type . Currently, forward finite difference (FFD) and
506+ estimator_type argument. Currently, forward finite difference (FFD) and
507507 antithetic are supported.
508508
509509 Args:
510510 precision_parameter: sd of Gaussian perturbations
511- est_type : 'forward_fd' (FFD) or 'antithetic'
511+ estimator_type : 'forward_fd' (FFD) or 'antithetic'
512512 perturbations: the simulated perturbations
513513 function_values: reward from perturbations (possibly normalized)
514514 current_value: estimated reward at current point (possibly normalized)
@@ -522,11 +522,11 @@ def monte_carlo_gradient(precision_parameter: float,
522522 """
523523 dim = len (perturbations [0 ])
524524 b_vector = None
525- if est_type == EstimatorType .FORWARD_FD :
525+ if estimator_type == EstimatorType .FORWARD_FD :
526526 b_vector = (function_values -
527527 np .array ([current_value ] * len (function_values ))) / (
528528 precision_parameter * precision_parameter )
529- elif est_type == EstimatorType .ANTITHETIC :
529+ elif estimator_type == EstimatorType .ANTITHETIC :
530530 b_vector = function_values / (2.0 * precision_parameter *
531531 precision_parameter )
532532 else :
@@ -543,15 +543,15 @@ def monte_carlo_gradient(precision_parameter: float,
543543 return gradient
544544
545545
546- def sklearn_regression_gradient (clf : LinearModel , est_type : EstimatorType ,
546+ def sklearn_regression_gradient (clf : LinearModel , estimator_type : EstimatorType ,
547547 perturbations : FloatArray2D ,
548548 function_values : FloatArray ,
549549 current_value : float ) -> FloatArray :
550550 """Calculates gradient by function difference regression.
551551
552552 Args:
553553 clf: an object (SkLearn linear model) which fits Ax = b
554- est_type : 'forward_fd' (FFD) or 'antithetic'
554+ estimator_type : 'forward_fd' (FFD) or 'antithetic'
555555 perturbations: the simulated perturbations
556556 function_values: reward from perturbations (possibly normalized)
557557 current_value: estimated reward at current point (possibly normalized)
@@ -565,11 +565,11 @@ def sklearn_regression_gradient(clf: LinearModel, est_type: EstimatorType,
565565 matrix = None
566566 b_vector = None
567567 dim = perturbations [0 ].size
568- if est_type == EstimatorType .FORWARD_FD :
568+ if estimator_type == EstimatorType .FORWARD_FD :
569569 matrix = np .array (perturbations )
570570 b_vector = (
571571 function_values - np .array ([current_value ] * len (function_values )))
572- elif est_type == EstimatorType .ANTITHETIC :
572+ elif estimator_type == EstimatorType .ANTITHETIC :
573573 matrix = np .array (perturbations [::2 ])
574574 function_even_values = np .array (function_values .tolist ()[::2 ])
575575 function_odd_values = np .array (function_values .tolist ()[1 ::2 ])
@@ -903,14 +903,14 @@ class TrustRegionOptimizer(StatefulOptimizer):
903903 schedule that would have to be tuned.
904904 """
905905
906- def __init__ (self , precision_parameter : float , est_type : EstimatorType ,
906+ def __init__ (self , precision_parameter : float , estimator_type : EstimatorType ,
907907 normalize_fvalues : bool ,
908908 hyperparameters_update_method : UpdateMethod ,
909909 extra_params : Optional [Sequence [int ]], tr_params : Mapping [str ,
910910 Any ]):
911911 self .precision_parameter = precision_parameter
912- super ().__init__ (est_type , normalize_fvalues , hyperparameters_update_method ,
913- extra_params )
912+ super ().__init__ (estimator_type , normalize_fvalues ,
913+ hyperparameters_update_method , extra_params )
914914
915915 self .accepted_quadratic_model = None
916916 self .accepted_function_value = None
@@ -1147,12 +1147,12 @@ def update_quadratic_model(self, perturbations: FloatArray2D,
11471147 current_value = normalized_values [1 ]
11481148 self .normalized_current_value = current_value
11491149 if self .params ['grad_type' ] == GradientType .REGRESSION :
1150- new_gradient = sklearn_regression_gradient (self .clf , self .est_type ,
1150+ new_gradient = sklearn_regression_gradient (self .clf , self .estimator_type ,
11511151 perturbations , function_values ,
11521152 current_value )
11531153 else :
11541154 new_gradient = monte_carlo_gradient (self .precision_parameter ,
1155- self .est_type , perturbations ,
1155+ self .estimator_type , perturbations ,
11561156 function_values , current_value )
11571157 new_gradient *= - 1 # TR subproblem solver performs minimization
11581158 if not is_update :
0 commit comments