 try:
     import torch
     from torch import Tensor
+
     torch_available = True
 except ImportError:
     torch_available = False
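The guarded import above makes torch an optional dependency: the rest of the module is expected to check `torch_available` before touching `Tensor`. A minimal sketch of that pattern (the helper below is illustrative and not part of this module):

def maybe_to_tensor(values):
    # Illustrative: use torch when it is installed, otherwise fall back to a plain tuple.
    if torch_available:
        return torch.tensor(values)
    return tuple(values)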
@@ -42,7 +43,7 @@ def __init__(
         block_size_names=default_block_size_names,
         build_neighbors_index=False,
         neighbor_method=None,
-        from_cache: dict=None,
+        from_cache: dict = None,
         framework="PythonConstraint",
         solver_method="PC_OptimizedBacktrackingSolver",
         path_to_ATF_cache: Path = None,
@@ -58,10 +59,14 @@ def __init__(
         """
         # check the arguments
         if from_cache is not None:
-            assert tune_params is None and restrictions is None and max_threads is None, "When `from_cache` is used, the positional arguments must be set to None."
+            assert (
+                tune_params is None and restrictions is None and max_threads is None
+            ), "When `from_cache` is used, the positional arguments must be set to None."
             tune_params = from_cache["tune_params"]
         if from_cache is None:
-            assert tune_params is not None and restrictions is not None and max_threads is not None, "Must specify positional arugments."
+            assert (
+                tune_params is not None and restrictions is not None and max_threads is not None
+            ), "Must specify positional arugments."

         # set the object attributes using the arguments
         framework_l = framework.lower()
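For context, a minimal construction sketch covering the two paths the assertions above distinguish (assuming this is the `Searchspace` class of kernel_tuner; the import path and all values are illustrative):

from kernel_tuner.searchspace import Searchspace  # assumed import path

tune_params = {"block_size_x": [32, 64, 128], "block_size_y": [1, 2, 4]}
restrictions = ["block_size_x * block_size_y <= 1024"]

# regular path: tune_params, restrictions and max_threads must all be provided
searchspace = Searchspace(tune_params, restrictions, max_threads=1024)

# cached path (not shown here): pass from_cache=... and set the three positional arguments to None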
@@ -77,9 +82,9 @@ def __init__(
         self._tensorspace_param_config_structure = []
         self._map_tensor_to_param = {}
         self._map_param_to_tensor = {}
-        self.restrictions = restrictions.copy() if hasattr(restrictions, 'copy') else restrictions
+        self.restrictions = restrictions.copy() if hasattr(restrictions, "copy") else restrictions
         # the searchspace can add commonly used constraints (e.g. maxprod(blocks) <= maxthreads)
-        self._modified_restrictions = restrictions.copy() if hasattr(restrictions, 'copy') else restrictions
+        self._modified_restrictions = restrictions.copy() if hasattr(restrictions, "copy") else restrictions
         self.param_names = list(self.tune_params.keys())
         self.params_values = tuple(tuple(param_vals) for param_vals in self.tune_params.values())
         self.params_values_indices = None
@@ -93,8 +98,12 @@ def __init__(
         restrictions = [restrictions] if not isinstance(restrictions, list) else restrictions
         if (
             len(restrictions) > 0
-            and (any(isinstance(restriction, str) for restriction in restrictions)
-            or any(isinstance(restriction[0], str) for restriction in restrictions if isinstance(restriction, tuple)))
+            and (
+                any(isinstance(restriction, str) for restriction in restrictions)
+                or any(
+                    isinstance(restriction[0], str) for restriction in restrictions if isinstance(restriction, tuple)
+                )
+            )
             and not (framework_l == "pysmt" or framework_l == "bruteforce")
         ):
             self.restrictions = compile_restrictions(
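The condition above only routes restrictions through `compile_restrictions` when at least one of them is a plain string, or a tuple whose first element is a string, and the framework is neither pysmt nor bruteforce. Illustrative inputs (the tuple's second element is a guess at its shape, not taken from the source):

restrictions = [
    "block_size_x * block_size_y <= 1024",         # plain string: triggers compilation
    ("block_size_x % 32 == 0", ["block_size_x"]),  # tuple with a string first element: also triggers it
]
# with framework="pysmt" or framework="bruteforce" the same inputs are left uncompiled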
@@ -609,14 +618,14 @@ def get_param_configs_at_indices(self, indices: List[int]) -> List[tuple]:
         # map(get) is ~40% faster than numpy[indices] (average based on six searchspaces with 10000, 100000 and 1000000 configs and 10 or 100 random indices)
         return list(map(self.list.__getitem__, indices))

-    def get_param_config_index(self, param_config: Union[tuple, Tensor]):
+    def get_param_config_index(self, param_config: Union[tuple, any]):
         """Lookup the index for a parameter configuration, returns None if not found."""
         if torch_available and isinstance(param_config, Tensor):
             param_config = self.tensor_to_param_config(param_config)
         # constant time O(1) access - much faster than any other method, but needs a shadow dict of the search space
         return self.__dict.get(param_config, None)
-
-    def initialize_tensorspace(self, dtype = None, device = None):
+
+    def initialize_tensorspace(self, dtype=None, device=None):
         """Encode the searchspace in a Tensor. Save the mapping. Call this function directly to control the precision or device used."""
         assert self._tensorspace is None, "Tensorspace is already initialized"
         skipped_count = 0
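Two usage notes on the methods in this hunk, as a hedged sketch (assuming an already constructed instance `searchspace` and an installed torch):

# O(1) index lookup via the shadow dict; returns None when the configuration is not in the space
index = searchspace.get_param_config_index((64, 2))  # illustrative configuration tuple

# calling initialize_tensorspace directly, before any tensorspace accessor, selects dtype and device
import torch
searchspace.initialize_tensorspace(dtype=torch.float64, device=torch.device("cpu"))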
@@ -642,16 +651,16 @@ def initialize_tensorspace(self, dtype = None, device = None):
             if all(isinstance(v, numbers.Real) for v in param_values):
                 tensor_values = torch.tensor(param_values, dtype=self.tensor_dtype)
             else:
-                self._tensorspace_categorical_dimensions.append(index-skipped_count)
+                self._tensorspace_categorical_dimensions.append(index - skipped_count)
                 # tensor_values = np.arange(len(param_values))
                 tensor_values = torch.arange(len(param_values), dtype=self.tensor_dtype)

             # write the mappings to the object
-            self._map_param_to_tensor[index] = (dict(zip(param_values, tensor_values.tolist())))
-            self._map_tensor_to_param[index] = (dict(zip(tensor_values.tolist(), param_values)))
+            self._map_param_to_tensor[index] = dict(zip(param_values, tensor_values.tolist()))
+            self._map_tensor_to_param[index] = dict(zip(tensor_values.tolist(), param_values))
             bounds.append((tensor_values.min(), tensor_values.max()))
             if tensor_values.min() < tensor_values.max():
-                self._tensorspace_bounds_indices.append(index-skipped_count)
+                self._tensorspace_bounds_indices.append(index - skipped_count)

         # do some checks
         assert len(self.params_values) == len(self._tensorspace_param_config_structure)
@@ -666,26 +675,26 @@ def initialize_tensorspace(self, dtype = None, device = None):

         # set the bounds in the correct format (one array for the min, one for the max)
         bounds = torch.tensor(bounds, **self.tensor_kwargs)
-        self._tensorspace_bounds = torch.cat([bounds[:,0], bounds[:,1]]).reshape((2, bounds.shape[0]))
-
+        self._tensorspace_bounds = torch.cat([bounds[:, 0], bounds[:, 1]]).reshape((2, bounds.shape[0]))
+
     def get_tensorspace(self):
         """Get the searchspace encoded in a Tensor. To use a non-default dtype or device, call `initialize_tensorspace` first."""
         if self._tensorspace is None:
             self.initialize_tensorspace()
         return self._tensorspace
-
+
     def get_tensorspace_categorical_dimensions(self):
         """Get the a list of the categorical dimensions in the tensorspace."""
         return self._tensorspace_categorical_dimensions
-
+
     def param_config_to_tensor(self, param_config: tuple):
         """Convert from a parameter configuration to a Tensor."""
         if len(self._map_param_to_tensor) == 0:
             self.initialize_tensorspace()
         array = []
         for i, param in enumerate(param_config):
             if self._tensorspace_param_config_structure[i] is not None:
-                continue # skip over parameters not in the tensorspace
+                continue  # skip over parameters not in the tensorspace
             mapping = self._map_param_to_tensor[i]
             conversions = [None, str, float, int, bool]
             for c in conversions:
@@ -697,7 +706,7 @@ def param_config_to_tensor(self, param_config: tuple):
                     if c == conversions[-1]:
                         raise KeyError(f"No variant of {param} could be found in {mapping}") from e
         return torch.tensor(array, **self.tensor_kwargs)
-
+
     def tensor_to_param_config(self, tensor: Tensor):
         """Convert from a Tensor to a parameter configuration."""
         assert tensor.dim() == 1, f"Parameter configuration tensor must be 1-dimensional, is {tensor.dim()} ({tensor})"
@@ -709,10 +718,10 @@ def tensor_to_param_config(self, tensor: Tensor):
             if param is not None:
                 skip_counter += 1
             else:
-                value = tensor[i-skip_counter].item()
+                value = tensor[i - skip_counter].item()
                 config[i] = self._map_tensor_to_param[i][value]
         return tuple(config)
-
+
     def get_tensorspace_bounds(self):
         """Get the bounds to the tensorspace parameters, returned as a 2 x d dimensional tensor, and the indices of the parameters."""
         if self._tensorspace is None:
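A hedged round-trip sketch for the two conversion helpers above (assuming an instance `searchspace`; the configuration tuple is illustrative and must exist in the space):

config = (64, 2)  # illustrative; one value per tunable parameter
encoded = searchspace.param_config_to_tensor(config)          # categorical values become numeric indices
assert searchspace.tensor_to_param_config(encoded) == config  # decoding should recover the original tuple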
@@ -929,7 +938,7 @@ def order_param_configs(
             f"The number of ordered parameter configurations ({len(ordered_param_configs)}) differs from the original number of parameter configurations ({len(param_configs)})"
         )
         return ordered_param_configs
-
+
     def to_ax_searchspace(self):
         """Convert this searchspace to an Ax SearchSpace."""
         from ax import ChoiceParameter, FixedParameter, ParameterType, SearchSpace
@@ -943,12 +952,14 @@ def to_ax_searchspace(self):
                 continue

             # convert the types
-            assert all(isinstance(param_values[0], type(v)) for v in param_values), f"Parameter values of mixed types are not supported: {param_values}"
+            assert all(
+                isinstance(param_values[0], type(v)) for v in param_values
+            ), f"Parameter values of mixed types are not supported: {param_values}"
             param_type_mapping = {
                 str: ParameterType.STRING,
                 int: ParameterType.INT,
                 float: ParameterType.FLOAT,
-                bool: ParameterType.BOOL
+                bool: ParameterType.BOOL,
             }
             param_type = param_type_mapping[type(param_values[0])]

@@ -959,6 +970,8 @@ def to_ax_searchspace(self):
             ax_searchspace.add_parameter(ChoiceParameter(param_name, param_type, param_values))

         # add the constraints
-        raise NotImplementedError("Conversion to Ax SearchSpace has not been fully implemented as Ax Searchspaces can't capture full complexity.")
+        raise NotImplementedError(
+            "Conversion to Ax SearchSpace has not been fully implemented as Ax Searchspaces can't capture full complexity."
+        )

         return ax_searchspace