 import sys
 import numpy as np
 import pandas as pd
+from distutils.version import LooseVersion as Version
 from numpy.testing import assert_almost_equal
 from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
 from sklearn.ensemble import RandomForestClassifier
 from sklearn.datasets import load_boston
 from mlxtend.utils import assert_raises
 from sklearn.model_selection import GroupKFold
+from sklearn import __version__ as sklearn_version
 
 
 def dict_compare_utility(d1, d2):
@@ -161,27 +163,37 @@ def test_knn_cv3():
     expect = {0: {'avg_score': 0.9391025641025641,
                   'feature_idx': (0, 1, 2),
                   'feature_names': ('0', '1', '2'),
-                  'cv_scores': np.array([0.97435897, 0.94871795,
-                                         0.88888889, 0.94444444])},
-              1: {'avg_score': 0.94017094017094016,
+                  'cv_scores': np.array([0.974, 0.947, 0.892, 0.946])},
+              1: {'avg_score': 0.9400782361308677,
                   'feature_idx': (0, 1, 3),
                   'feature_names': ('0', '1', '3'),
-                  'cv_scores': np.array([0.92307692, 0.94871795,
-                                         0.91666667, 0.97222222])},
+                  'cv_scores': np.array([0.921, 0.947, 0.919, 0.973])},
               2: {'avg_score': 0.95299145299145294,
                   'feature_idx': (0, 2, 3),
                   'feature_names': ('0', '2', '3'),
-                  'cv_scores': np.array([0.97435897, 0.94871795,
-                                         0.91666667, 0.97222222])},
+                  'cv_scores': np.array([0.974, 0.947, 0.919, 0.973])},
               3: {'avg_score': 0.97275641025641035,
                   'feature_idx': (1, 2, 3),
                   'feature_names': ('1', '2', '3'),
-                  'cv_scores': np.array([0.97435897, 1.,
-                                         0.94444444, 0.97222222])}}
+                  'cv_scores': np.array([0.974, 1., 0.946, 0.973])}}
+
+    if Version(sklearn_version) < Version("0.22"):
+        expect[0]['cv_scores'] = np.array([0.97435897, 0.94871795,
+                                           0.88888889, 0.94444444])
+        expect[1]['cv_scores'] = np.array([0.92307692, 0.94871795,
+                                           0.91666667, 0.97222222])
+        expect[2]['cv_scores'] = np.array([0.97435897, 0.94871795,
+                                           0.91666667, 0.97222222])
+        expect[3]['cv_scores'] = np.array([0.97435897, 0.94871795,
+                                           0.91666667, 0.97222222])
+        expect[1]['avg_score'] = 0.94017094017094016
+        assert round(efs1.best_score_, 4) == 0.9728
+    else:
+        assert round(efs1.best_score_, 4) == 0.9732
+
     dict_compare_utility(d1=expect, d2=efs1.subsets_)
     assert efs1.best_idx_ == (1, 2, 3)
     assert efs1.best_feature_names_ == ('1', '2', '3')
-    assert round(efs1.best_score_, 4) == 0.9728
 
 
 def test_knn_cv3_groups():
@@ -198,7 +210,7 @@ def test_knn_cv3_groups():
     np.random.seed(1630672634)
     groups = np.random.randint(0, 6, size=len(y))
     efs1 = efs1.fit(X, y, groups=groups)
-    # print(efs1.subsets_)
+
     expect = {0: {'cv_scores': np.array([0.97916667, 0.93877551, 0.9245283]),
                   'feature_idx': (0, 1, 2),
                   'avg_score': 0.9474901595858469,
@@ -233,27 +245,40 @@ def test_fit_params():
     efs1 = efs1.fit(X, y, sample_weight=sample_weight)
     expect = {0: {'feature_idx': (0, 1, 2),
                   'feature_names': ('0', '1', '2'),
-                  'cv_scores': np.array([0.94871795, 0.92307692,
-                                         0.91666667, 0.97222222]),
-                  'avg_score': 0.9401709401709402},
+                  'cv_scores': np.array([0.947, 0.868, 0.919, 0.973]),
+                  'avg_score': 0.9269203413940257},
               1: {'feature_idx': (0, 1, 3),
                   'feature_names': ('0', '1', '3'),
-                  'cv_scores': np.array([0.92307692, 0.92307692,
-                                         0.88888889, 1.]),
+                  'cv_scores': np.array([0.921, 0.921, 0.892, 1.]),
                   'avg_score': 0.9337606837606838},
               2: {'feature_idx': (0, 2, 3),
                   'feature_names': ('0', '2', '3'),
-                  'cv_scores': np.array([0.97435897, 0.94871795,
-                                         0.94444444, 0.97222222]),
-                  'avg_score': 0.9599358974358974},
+                  'cv_scores': np.array([0.974, 0.947, 0.919, 0.973]),
+                  'avg_score': 0.9532361308677098},
               3: {'feature_idx': (1, 2, 3),
                   'feature_names': ('1', '2', '3'),
-                  'cv_scores': np.array([0.97435897, 0.94871795,
-                                         0.91666667, 1.]),
-                  'avg_score': 0.9599358974358974}}
+                  'cv_scores': np.array([0.974, 0.947, 0.892, 1.]),
+                  'avg_score': 0.9532361308677098}}
+
+    if Version(sklearn_version) < Version("0.22"):
+        expect[0]['avg_score'] = 0.9401709401709402
+        expect[0]['cv_scores'] = np.array([0.94871795, 0.92307692,
+                                           0.91666667, 0.97222222])
+        expect[1]['cv_scores'] = np.array([0.94871795, 0.92307692,
+                                           0.91666667, 0.97222222])
+        expect[2]['cv_scores'] = np.array([0.94871795, 0.92307692,
+                                           0.91666667, 0.97222222])
+        expect[2]['avg_score'] = 0.9599358974358974
+        expect[3]['avg_score'] = 0.9599358974358974
+        expect[3]['cv_scores'] = np.array([0.97435897, 0.94871795,
+                                           0.91666667, 1.])
+        assert round(efs1.best_score_, 4) == 0.9599
+
+    else:
+        assert round(efs1.best_score_, 4) == 0.9532
+
     dict_compare_utility(d1=expect, d2=efs1.subsets_)
     assert efs1.best_idx_ == (0, 2, 3)
-    assert round(efs1.best_score_, 4) == 0.9599
 
 
 def test_regression():