@@ -31,22 +31,22 @@ def default_tensor_dtype(dtype):
3131 torch .set_default_dtype (old_dtype )
3232
3333class Space :
34- def __init__ (self , min , max , scale , mean , is_integer = False ):
34+ def __init__ (self , min , max , scale , is_integer = False ):
3535 self .min = min
3636 self .max = max
3737 self .scale = scale
38- self .mean = mean # TODO: awkward to have just this normalized
3938 self .norm_min = self .normalize (min )
4039 self .norm_max = self .normalize (max )
41- self .norm_mean = self .normalize (mean )
40+ # Since min/max are normalized from -1 to 1, just use 0 as a mean
41+ self .norm_mean = 0
4242 self .is_integer = is_integer
4343
4444class Linear (Space ):
45- def __init__ (self , min , max , scale , mean , is_integer = False ):
45+ def __init__ (self , min , max , scale , is_integer = False ):
4646 if scale == 'auto' :
4747 scale = 0.5
4848
49- super ().__init__ (min , max , scale , mean , is_integer )
49+ super ().__init__ (min , max , scale , is_integer )
5050
5151 def normalize (self , value ):
5252 #assert isinstance(value, (int, float))
@@ -61,12 +61,12 @@ def unnormalize(self, value):
6161 return value
6262
6363class Pow2 (Space ):
64- def __init__ (self , min , max , scale , mean , is_integer = False ):
64+ def __init__ (self , min , max , scale , is_integer = False ):
6565 if scale == 'auto' :
6666 scale = 0.5
6767 #scale = 2 / (np.log2(max) - np.log2(min))
6868
69- super ().__init__ (min , max , scale , mean , is_integer )
69+ super ().__init__ (min , max , scale , is_integer )
7070
7171 def normalize (self , value ):
7272 #assert isinstance(value, (int, float))
@@ -83,14 +83,14 @@ def unnormalize(self, value):
8383class Log (Space ):
8484 base : int = 10
8585
86- def __init__ (self , min , max , scale , mean , is_integer = False ):
86+ def __init__ (self , min , max , scale , is_integer = False ):
8787 if scale == 'time' :
8888 # TODO: Set scaling param intuitively based on number of jumps from min to max
8989 scale = 1 / (np .log2 (max ) - np .log2 (min ))
9090 elif scale == 'auto' :
9191 scale = 0.5
9292
93- super ().__init__ (min , max , scale , mean , is_integer )
93+ super ().__init__ (min , max , scale , is_integer )
9494
9595 def normalize (self , value ):
9696 #assert isinstance(value, (int, float))
@@ -109,11 +109,11 @@ def unnormalize(self, value):
109109class Logit (Space ):
110110 base : int = 10
111111
112- def __init__ (self , min , max , scale , mean , is_integer = False ):
112+ def __init__ (self , min , max , scale , is_integer = False ):
113113 if scale == 'auto' :
114114 scale = 0.5
115115
116- super ().__init__ (min , max , scale , mean , is_integer )
116+ super ().__init__ (min , max , scale , is_integer )
117117
118118 def normalize (self , value ):
119119 #assert isinstance(value, (int, float))
@@ -147,12 +147,10 @@ def _params_from_puffer_sweep(sweep_config, only_include=None):
147147
148148 assert 'distribution' in param
149149 distribution = param ['distribution' ]
150- search_center = param ['mean' ]
151150 kwargs = dict (
152151 min = param ['min' ],
153152 max = param ['max' ],
154153 scale = param ['scale' ],
155- mean = search_center ,
156154 )
157155 if distribution == 'uniform' :
158156 space = Linear (** kwargs )
@@ -432,7 +430,6 @@ def __init__(self,
432430 num_random_samples = 10 ,
433431 global_search_scale = 1 ,
434432 suggestions_per_pareto = 256 ,
435- seed_with_search_center = True ,
436433 expansion_rate = 0.25 ,
437434 gp_training_iter = 50 ,
438435 gp_learning_rate = 0.001 ,
@@ -452,7 +449,6 @@ def __init__(self,
452449 self .hyperparameters = Hyperparameters (sweep_config )
453450 self .global_search_scale = global_search_scale
454451 self .suggestions_per_pareto = suggestions_per_pareto
455- self .seed_with_search_center = seed_with_search_center
456452 self .resample_frequency = resample_frequency
457453 self .max_suggestion_cost = _max_suggestion_cost
458454 self .expansion_rate = expansion_rate
@@ -641,8 +637,8 @@ def suggest(self, fill):
641637
642638 ### Sample suggestions
643639 search_centers = np .stack ([e ['input' ] for e in candidates ])
644- num_sample = len ( candidates ) * self .suggestions_per_pareto
645- suggestions = self .hyperparameters . sample ( num_sample , mu = search_centers )
640+ suggestions = self .hyperparameters . sample (
641+ len ( candidates ) * self .suggestions_per_pareto , mu = search_centers )
646642
647643 dedup_indices = self ._filter_near_duplicates (suggestions )
648644 suggestions = suggestions [dedup_indices ]