@@ -44,7 +44,7 @@ class Fastfood(BaseEstimator, TransformerMixin):
     Notes
     -----
     See "Fastfood | Approximating Kernel Expansions in Loglinear Time" by
-    Quoc Le, Tamas Sarl and Alex Smola.
+    Quoc Le, Tamas Sarl and Alex Smola.

     Examples
     ----
@@ -62,11 +62,9 @@ def __init__(self,
         self.sigma = sigma
         self.n_components = n_components
         self.random_state = random_state
-        self.rng = check_random_state(self.random_state)
         # map to 2*n_components features or to n_components features with less
         # accuracy
-        self.tradeoff_mem_accuracy = \
-            tradeoff_mem_accuracy
+        self.tradeoff_mem_accuracy = tradeoff_mem_accuracy

     @staticmethod
     def is_number_power_of_two(n):
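The hunk above drops the `check_random_state` call from `__init__`, so the constructor only stores its parameters; the RNG is now derived in `fit` (see the later hunk). A minimal sketch of that scikit-learn convention, using a toy estimator that is not part of this PR:

# Minimal sketch of the convention this hunk follows (illustrative class,
# not part of this PR): __init__ stores parameters verbatim and derived
# state such as the RNG is created in fit().
from sklearn.base import BaseEstimator, clone
from sklearn.utils import check_random_state


class ToyEstimator(BaseEstimator):
    def __init__(self, random_state=None):
        self.random_state = random_state          # stored untouched

    def fit(self, X, y=None):
        # RNG derived here, so clone()/get_params() round-trip cleanly
        self._rng = check_random_state(self.random_state)
        return self


est = ToyEstimator(random_state=0)
print(clone(est).get_params())                    # {'random_state': 0}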
@@ -87,13 +85,14 @@ def enforce_dimensionality_constraints(d, n):

     def pad_with_zeros(self, X):
         try:
-            X_padded = np.pad(X,
-                              ((0, 0),
-                               (0, self.number_of_features_to_pad_with_zeros)),
-                              'constant')
+            X_padded = np.pad(
+                X,
+                ((0, 0),
+                 (0, self._number_of_features_to_pad_with_zeros)),
+                'constant')
         except AttributeError:
             zeros = np.zeros((X.shape[0],
-                              self.number_of_features_to_pad_with_zeros))
+                              self._number_of_features_to_pad_with_zeros))
             X_padded = np.concatenate((X, zeros), axis=1)

         return X_padded
@@ -108,38 +107,38 @@ def l2norm_along_axis1(X):

     def uniform_vector(self):
         if self.tradeoff_mem_accuracy != 'accuracy':
-            return self.rng.uniform(0, 2 * np.pi, size=self.n)
+            return self._rng.uniform(0, 2 * np.pi, size=self._n)
         else:
             return None

     def apply_approximate_gaussian_matrix(self, B, G, P, X):
         """ Create mapping of all x_i by applying B, G and P step-wise """
         num_examples = X.shape[0]

-        result = np.multiply(B, X.reshape((1, num_examples, 1, self.d)))
-        result = result.reshape((num_examples * self.times_to_stack_v, self.d))
+        result = np.multiply(B, X.reshape((1, num_examples, 1, self._d)))
+        result = result.reshape((num_examples * self._times_to_stack_v, self._d))
         Fastfood.approx_fourier_transformation_multi_dim(result)
         result = result.reshape((num_examples, -1))
         np.take(result, P, axis=1, mode='wrap', out=result)
-        np.multiply(np.ravel(G), result.reshape(num_examples, self.n),
+        np.multiply(np.ravel(G), result.reshape(num_examples, self._n),
                     out=result)
-        result = result.reshape(num_examples * self.times_to_stack_v, self.d)
+        result = result.reshape(num_examples * self._times_to_stack_v, self._d)
         Fastfood.approx_fourier_transformation_multi_dim(result)
         return result

     def scale_transformed_data(self, S, VX):
         """ Scale mapped data VX to match kernel(e.g. RBF-Kernel) """
-        VX = VX.reshape(-1, self.times_to_stack_v * self.d)
+        VX = VX.reshape(-1, self._times_to_stack_v * self._d)

-        return (1 / (self.sigma * np.sqrt(self.d)) *
+        return (1 / (self.sigma * np.sqrt(self._d)) *
                 np.multiply(np.ravel(S), VX))

     def phi(self, X):
         if self.tradeoff_mem_accuracy == 'accuracy':
             return (1 / np.sqrt(X.shape[1])) * \
                 np.hstack([np.cos(X), np.sin(X)])
         else:
-            np.cos(X + self.U, X)
+            np.cos(X + self._U, X)
             return X * np.sqrt(2. / X.shape[1])

     def fit(self, X, y=None):
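The `scale_transformed_data` and `phi` steps above produce random features whose inner products approximate the RBF kernel exp(-||x - y||^2 / (2 * sigma^2)). A hedged usage sketch; the import path and the check against `rbf_kernel` are assumptions, not part of this PR:

# Hedged sketch: Fastfood features should approximate the RBF kernel
# exp(-||x - y||^2 / (2 * sigma^2)). The import path below is an assumption;
# point it at wherever this module lives in your checkout.
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
from sklearn_extra.kernel_approximation import Fastfood  # assumed location

rng = np.random.RandomState(0)
X = rng.normal(size=(20, 16))
sigma = 2.0

Z = Fastfood(sigma=sigma, n_components=1024, random_state=0).fit_transform(X)

K_approx = Z @ Z.T                                    # approximate kernel
K_exact = rbf_kernel(X, gamma=1.0 / (2 * sigma ** 2))
print(np.abs(K_approx - K_exact).max())               # small for large n_components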
@@ -159,28 +158,29 @@ def fit(self, X, y=None):
         self : object
             Returns the transformer.
         """
-        X = check_array(X)
+        X = check_array(X, dtype=np.float64)

         d_orig = X.shape[1]
+        self._rng = check_random_state(self.random_state)

-        self.d, self.n, self.times_to_stack_v = \
+        self._d, self._n, self._times_to_stack_v = \
             Fastfood.enforce_dimensionality_constraints(d_orig,
                                                         self.n_components)
-        self.number_of_features_to_pad_with_zeros = self.d - d_orig
+        self._number_of_features_to_pad_with_zeros = self._d - d_orig

-        self.G = self.rng.normal(size=(self.times_to_stack_v, self.d))
-        self.B = self.rng.choice(
+        self._G = self._rng.normal(size=(self._times_to_stack_v, self._d))
+        self._B = self._rng.choice(
             [-1, 1],
-            size=(self.times_to_stack_v, self.d),
+            size=(self._times_to_stack_v, self._d),
             replace=True)
-        self.P = np.hstack([(i * self.d) + self.rng.permutation(self.d)
-                            for i in range(self.times_to_stack_v)])
-        self.S = np.multiply(1 / self.l2norm_along_axis1(self.G)
-                             .reshape((-1, 1)),
-                             chi.rvs(self.d,
-                                     size=(self.times_to_stack_v, self.d)))
+        self._P = np.hstack([(i * self._d) + self._rng.permutation(self._d)
+                             for i in range(self._times_to_stack_v)])
+        self._S = np.multiply(1 / self.l2norm_along_axis1(self._G)
+                              .reshape((-1, 1)),
+                              chi.rvs(self._d,
+                                      size=(self._times_to_stack_v, self._d)))

-        self.U = self.uniform_vector()
+        self._U = self.uniform_vector()

         return self

@@ -197,11 +197,14 @@ def transform(self, X):
         -------
         X_new : array-like, shape (n_samples, n_components)
         """
-        X = check_array(X)
+        X = check_array(X, dtype=np.float64)
         X_padded = self.pad_with_zeros(X)
-        HGPHBX = self.apply_approximate_gaussian_matrix(self.B,
-                                                        self.G,
-                                                        self.P,
+        HGPHBX = self.apply_approximate_gaussian_matrix(self._B,
+                                                        self._G,
+                                                        self._P,
                                                         X_padded)
-        VX = self.scale_transformed_data(self.S, HGPHBX)
+        VX = self.scale_transformed_data(self._S, HGPHBX)
         return self.phi(VX)
+
+    def _more_tags(self):
+        return {'non_deterministic': True}
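With `check_random_state` moved into `fit` and the new `'non_deterministic'` tag, refitting with a fixed integer `random_state` should still be reproducible. A hedged sketch of that behaviour; the import path is assumed, as above:

# Hedged sketch of the behaviour after this change: fit() rebuilds the RNG
# from random_state, so two fits with the same integer seed give identical
# features, while random_state=None remains non-deterministic (hence the tag).
import numpy as np
from sklearn_extra.kernel_approximation import Fastfood  # assumed location

X = np.random.RandomState(42).normal(size=(5, 8))

ff = Fastfood(sigma=1.0, n_components=64, random_state=7)
Z1 = ff.fit(X).transform(X)
Z2 = ff.fit(X).transform(X)      # refit with the same seed
assert np.allclose(Z1, Z2)       # reproducible for a fixed random_state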