
Commit 1308f6b

Remove redundant line in bias_variance_decomp (#652)
* remove redundant line in bias_variance_decomp
* add version print to travis test script
* fix unit tests for sklearn 0.22
* update travis
1 parent aa47008 commit 1308f6b

File tree: 10 files changed (+125, -64 lines)

.appveyor.yml

Lines changed: 2 additions & 2 deletions
@@ -16,8 +16,8 @@ install:
   - conda create -q -n test-environment python=%PYTHON_VERSION% numpy scipy scikit-learn pandas joblib pytest
   - activate test-environment
   - conda install -c conda-forge dlib=19.17
-  - pip install imageio
-  - pip install scikit-image
+  - conda install imageio
+  - conda install scikit-image
 
 test_script:
   - set PYTHONPATH=%PYTHONPATH%;%CD%

.travis.yml

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ matrix:
   - os: linux
     sudo: required
     python: 3.8
-    env: LATEST="false" IMAGE="true" COVERAGE="false" NUMPY_VERSION="1.16.2" SCIPY_VERSION="1.2.1" SKLEARN_VERSION="0.20.3" JOBLIB_VERSION=0.13.1 PANDAS_VERSION="0.24.2" IMAGEIO_VERSION="2.5.0" SKIMAGE_VERSION="0.15.0" DLIB_VERSION="19.17.0" MINICONDA_PYTHON_VERSION=3.7
+    env: LATEST="false" IMAGE="true" COVERAGE="false" NUMPY_VERSION="1.17.4" SCIPY_VERSION="1.3.1" SKLEARN_VERSION="0.22.0" JOBLIB_VERSION=0.13.1 PANDAS_VERSION="0.25.3" IMAGEIO_VERSION="2.5.0" SKIMAGE_VERSION="0.15.0" DLIB_VERSION="19.17.0" MINICONDA_PYTHON_VERSION=3.7
   - os: linux
     python: 3.8
     env: LATEST="true" IMAGE="true" COVERAGE="true" NOTEBOOKS="true" MINICONDA_PYTHON_VERSION=3.7

ci/.travis_install.sh

Lines changed: 3 additions & 1 deletion
@@ -33,7 +33,9 @@ source activate test-environment
 if [ "${LATEST}" = "true" ]; then
   conda install numpy scipy pandas scikit-learn joblib
 else
-  conda install numpy="$NUMPY_VERSION" scipy="$SCIPY_VERSION" pandas="$PANDAS_VERSION" scikit-learn="$SKLEARN_VERSION" joblib="$JOBLIB_VERSION"
+  conda install numpy="$NUMPY_VERSION" scipy="$SCIPY_VERSION" pandas="$PANDAS_VERSION" joblib="$JOBLIB_VERSION"
+  # temporary fix because 0.22 cannot be installed from the main conda branch
+  conda install scikit-learn="$SKLEARN_VERSION" -c conda-forge
 fi

ci/.travis_test.sh

Lines changed: 6 additions & 0 deletions
@@ -2,6 +2,12 @@
 
 set -e
 
+python --version
+python -c "import pandas; print('pandas %s' % pandas.__version__)"
+python -c "import numpy; print('numpy %s' % numpy.__version__)"
+python -c "import scipy; print('scipy %s' % scipy.__version__)"
+python -c "import sklearn; print('sklearn %s' % sklearn.__version__)"
+python -c "import mlxtend; print('mlxtend %s' % mlxtend.__version__)"
 
 if [[ "$TRAVIS_OS_NAME" != "osx" ]]; then
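
The added lines report the interpreter and each dependency's version at the start of the CI run, which makes version-specific failures (such as the scikit-learn 0.22 differences handled in the tests below) easy to diagnose from the build log. For reference, the same report could be produced from a single Python snippet; this is a sketch, not part of the commit, with the module list taken from the lines above:

import importlib

# print "<name> <version>" for each dependency exercised by the CI matrix
for name in ("pandas", "numpy", "scipy", "sklearn", "mlxtend"):
    module = importlib.import_module(name)
    print("%s %s" % (name, module.__version__))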

mlxtend/classifier/tests/test_stacking_classifier.py

Lines changed: 6 additions & 6 deletions
@@ -112,7 +112,7 @@ def test_weight_unsupported():
     w = np.array([random.random() for _ in range(len(y))])
 
     with pytest.raises(TypeError):
-        sclf.fit(X, y, sample_seight=w)
+        sclf.fit(X, y, sample_weight=w)
 
 
 def test_weight_unsupported_no_weight():
@@ -312,7 +312,7 @@ def test_not_fitted():
                               use_probas=True,
                               meta_classifier=meta)
 
-    X, y = iris_data()
+    X, _ = iris_data()
 
     assert_raises(NotFittedError,
                   "This StackingClassifier instance is not fitted yet."
@@ -496,7 +496,7 @@ def test_predict_meta_features():
                              multi_class='ovr',
                              random_state=1)
     gnb = GaussianNB()
-    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
+    X_train, X_test, y_train, _ = train_test_split(X, y, test_size=0.3)
 
     # test default (class labels)
     stclf = StackingClassifier(classifiers=[knn, gnb],
@@ -552,15 +552,15 @@ def test_decision_function():
     sclf = StackingClassifier(classifiers=[clf1, clf2],
                               use_probas=True,
                               meta_classifier=meta)
-
+
     scores = cross_val_score(sclf,
                              X,
                              y2,
                              cv=5,
                              scoring='roc_auc')
     scores_mean = (round(scores.mean(), 2))
 
-    if Version(sklearn_version) < Version("0.21"):
+    if Version(sklearn_version) < Version("0.22"):
         assert scores_mean == 0.95, scores_mean
     else:
-        assert scores_mean == 0.95, scores_mean
+        assert scores_mean == 0.94, scores_mean
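
Most of the score updates in this commit, here and in the files below, follow one pattern: the expected value is selected from the installed scikit-learn release using distutils' LooseVersion, imported as Version (the feature-selection tests below add exactly these imports). A minimal sketch of the pattern, with placeholder numbers rather than values from the suite:

from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version

# placeholder scores for illustration only -- the real values are in the diffs
if Version(sklearn_version) < Version("0.21"):
    expected = 0.95
elif Version(sklearn_version) < Version("0.22"):
    expected = 0.96
else:
    expected = 0.94
print("expecting a mean CV score of", expected)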

mlxtend/classifier/tests/test_stacking_cv_classifier.py

Lines changed: 20 additions & 10 deletions
@@ -429,8 +429,10 @@ def test_meta_feat_reordering():
 
     if Version(sklearn_version) < Version("0.21"):
         expected_value = 0.86
-    else:
+    elif Version(sklearn_version) < Version("0.22"):
         expected_value = 0.87
+    else:
+        expected_value = 0.85
 
     assert round(roc_auc_score(y_train,
                  stclf.train_meta_features_[:, 1]), 2) == expected_value, \
@@ -480,10 +482,7 @@ def test_sparse_inputs_with_features_in_secondary():
     # dense
     stclf.fit(X_train, y_train)
 
-    if Version(sklearn_version) < Version("0.21"):
-        expected_value = 1.0
-    else:
-        expected_value = 0.99
+    expected_value = 1.0
 
     assert round(stclf.score(X_train, y_train), 2) == expected_value, \
         round(stclf.score(X_train, y_train), 2)
@@ -493,8 +492,12 @@ def test_sparse_inputs_with_features_in_secondary():
 
     if Version(sklearn_version) < Version("0.21"):
         expected_value = 1.0
-    else:
+    if Version(sklearn_version) < Version("0.22"):
         expected_value = 0.99
+    else:
+        expected_value = 1.00
+
+
     assert round(stclf.score(X_train, y_train), 2) == expected_value, \
         round(stclf.score(X_train, y_train), 2)
 
@@ -567,8 +570,13 @@ def test_works_with_df_if_fold_indexes_missing():
 
     # dense
     stclf.fit(X_train, y_train)
-    assert round(stclf.score(X_train, y_train), 2) == 0.99, \
-        round(stclf.score(X_train, y_train), 2)
+
+    if Version(sklearn_version) < Version("0.22"):
+        assert round(stclf.score(X_train, y_train), 2) == 0.99, \
+            round(stclf.score(X_train, y_train), 2)
+    else:
+        assert round(stclf.score(X_train, y_train), 2) == 0.98, \
+            round(stclf.score(X_train, y_train), 2)
 
 
 def test_decision_function():
@@ -596,7 +604,7 @@ def test_decision_function():
     sclf = StackingCVClassifier(classifiers=[clf1, clf2],
                                 use_probas=True,
                                 meta_classifier=meta)
-
+
     scores = cross_val_score(sclf,
                              X_breast,
                              y_breast,
@@ -606,5 +614,7 @@ def test_decision_function():
 
     if Version(sklearn_version) < Version("0.21"):
         assert scores_mean == 0.94, scores_mean
-    else:
+    elif Version(sklearn_version) < Version("0.22"):
         assert scores_mean == 0.96, scores_mean
+    else:
+        assert scores_mean == 0.90, scores_mean

mlxtend/evaluate/bias_variance_decomp.py

Lines changed: 0 additions & 1 deletion
@@ -86,7 +86,6 @@ def bias_variance_decomp(estimator, X_train, y_train, X_test, y_test,
                          arr=all_pred).mean()
 
     avg_bias = np.sum(main_predictions != y_test) / y_test.size
-    avg_var = np.sum(main_predictions != all_pred) / all_pred.size
 
     var = np.zeros(pred.shape)
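
Context for the deleted line: in the 0-1 loss branch, avg_var is recomputed a few lines further down from the per-round disagreement with the majority-vote predictions, so the one-shot assignment removed here was immediately overwritten and never used. A simplified sketch of that computation (paraphrased from the surrounding code, not a verbatim quote; integer class labels assumed):

import numpy as np

def zero_one_bias_variance(all_pred, y_test):
    # all_pred: (num_rounds, n_test) integer predictions, one row per
    # bootstrap round; y_test: (n_test,) true labels
    main_predictions = np.apply_along_axis(
        lambda x: np.argmax(np.bincount(x)), axis=0, arr=all_pred)

    # bias term: how often the majority vote misses the true label
    avg_bias = np.sum(main_predictions != y_test) / y_test.size

    # variance term: how often individual rounds disagree with the
    # majority vote, accumulated round by round -- this yields the same
    # quantity as the deleted one-liner, which is why that line was redundant
    var = np.zeros(y_test.shape, dtype=float)
    for pred in all_pred:
        var += (pred != main_predictions)
    var /= all_pred.shape[0]
    avg_var = var.mean()

    return avg_bias, avg_var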

mlxtend/feature_selection/tests/test_exhaustive_feature_selector.py

Lines changed: 48 additions & 23 deletions
@@ -7,6 +7,7 @@
 import sys
 import numpy as np
 import pandas as pd
+from distutils.version import LooseVersion as Version
 from numpy.testing import assert_almost_equal
 from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
 from sklearn.ensemble import RandomForestClassifier
@@ -17,6 +18,7 @@
 from sklearn.datasets import load_boston
 from mlxtend.utils import assert_raises
 from sklearn.model_selection import GroupKFold
+from sklearn import __version__ as sklearn_version
 
 
 def dict_compare_utility(d1, d2):
@@ -161,27 +163,37 @@ def test_knn_cv3():
     expect = {0: {'avg_score': 0.9391025641025641,
                   'feature_idx': (0, 1, 2),
                   'feature_names': ('0', '1', '2'),
-                  'cv_scores': np.array([0.97435897, 0.94871795,
-                                         0.88888889, 0.94444444])},
-              1: {'avg_score': 0.94017094017094016,
+                  'cv_scores': np.array([0.974, 0.947, 0.892, 0.946])},
+              1: {'avg_score': 0.9400782361308677,
                   'feature_idx': (0, 1, 3),
                   'feature_names': ('0', '1', '3'),
-                  'cv_scores': np.array([0.92307692, 0.94871795,
-                                         0.91666667, 0.97222222])},
+                  'cv_scores': np.array([0.921, 0.947, 0.919, 0.973])},
               2: {'avg_score': 0.95299145299145294,
                   'feature_idx': (0, 2, 3),
                   'feature_names': ('0', '2', '3'),
-                  'cv_scores': np.array([0.97435897, 0.94871795,
-                                         0.91666667, 0.97222222])},
+                  'cv_scores': np.array([0.974, 0.947, 0.919, 0.973])},
               3: {'avg_score': 0.97275641025641035,
                   'feature_idx': (1, 2, 3),
                   'feature_names': ('1', '2', '3'),
-                  'cv_scores': np.array([0.97435897, 1.,
-                                         0.94444444, 0.97222222])}}
+                  'cv_scores': np.array([0.974, 1. , 0.946, 0.973])}}
+
+    if Version(sklearn_version) < Version("0.22"):
+        expect[0]['cv_scores'] = np.array([0.97435897, 0.94871795,
+                                           0.88888889, 0.94444444])
+        expect[1]['cv_scores'] = np.array([0.92307692, 0.94871795,
+                                           0.91666667, 0.97222222])
+        expect[2]['cv_scores'] = np.array([0.97435897, 0.94871795,
+                                           0.91666667, 0.97222222])
+        expect[3]['cv_scores'] = np.array([0.97435897, 0.94871795,
+                                           0.91666667, 0.97222222])
+        expect[1]['avg_score'] = 0.94017094017094016
+        assert round(efs1.best_score_, 4) == 0.9728
+    else:
+        assert round(efs1.best_score_, 4) == 0.9732
+
     dict_compare_utility(d1=expect, d2=efs1.subsets_)
     assert efs1.best_idx_ == (1, 2, 3)
     assert efs1.best_feature_names_ == ('1', '2', '3')
-    assert round(efs1.best_score_, 4) == 0.9728
 
 
 def test_knn_cv3_groups():
@@ -198,7 +210,7 @@ def test_knn_cv3_groups():
     np.random.seed(1630672634)
     groups = np.random.randint(0, 6, size=len(y))
     efs1 = efs1.fit(X, y, groups=groups)
-    # print(efs1.subsets_)
+
     expect = {0: {'cv_scores': np.array([0.97916667, 0.93877551, 0.9245283]),
                   'feature_idx': (0, 1, 2),
                   'avg_score': 0.9474901595858469,
@@ -233,27 +245,40 @@ def test_fit_params():
     efs1 = efs1.fit(X, y, sample_weight=sample_weight)
     expect = {0: {'feature_idx': (0, 1, 2),
                   'feature_names': ('0', '1', '2'),
-                  'cv_scores': np.array([0.94871795, 0.92307692,
-                                         0.91666667, 0.97222222]),
-                  'avg_score': 0.9401709401709402},
+                  'cv_scores': np.array([0.947, 0.868, 0.919, 0.973]),
+                  'avg_score': 0.9269203413940257},
              1: {'feature_idx': (0, 1, 3),
                   'feature_names': ('0', '1', '3'),
-                  'cv_scores': np.array([0.92307692, 0.92307692,
-                                         0.88888889, 1.]),
+                  'cv_scores': np.array([0.921, 0.921, 0.892, 1.]),
                   'avg_score': 0.9337606837606838},
              2: {'feature_idx': (0, 2, 3),
                   'feature_names': ('0', '2', '3'),
-                  'cv_scores': np.array([0.97435897, 0.94871795,
-                                         0.94444444, 0.97222222]),
-                  'avg_score': 0.9599358974358974},
+                  'cv_scores': np.array([0.974, 0.947, 0.919, 0.973]),
+                  'avg_score': 0.9532361308677098},
              3: {'feature_idx': (1, 2, 3),
                   'feature_names': ('1', '2', '3'),
-                  'cv_scores': np.array([0.97435897, 0.94871795,
-                                         0.91666667, 1.]),
-                  'avg_score': 0.9599358974358974}}
+                  'cv_scores': np.array([0.974, 0.947, 0.892, 1.]),
+                  'avg_score': 0.9532361308677098}}
+
+    if Version(sklearn_version) < Version("0.22"):
+        expect[0]['avg_score'] = 0.9401709401709402
+        expect[0]['cv_scores'] = np.array([0.94871795, 0.92307692,
+                                           0.91666667, 0.97222222])
+        expect[1]['cv_scores'] = np.array([0.94871795, 0.92307692,
+                                           0.91666667, 0.97222222])
+        expect[2]['cv_scores'] = np.array([0.94871795, 0.92307692,
+                                           0.91666667, 0.97222222])
+        expect[2]['avg_score'] = 0.9599358974358974
+        expect[3]['avg_score'] = 0.9599358974358974
+        expect[3]['cv_scores'] = np.array([0.97435897, 0.94871795,
+                                           0.91666667, 1.])
+        assert round(efs1.best_score_, 4) == 0.9599
+
+    else:
+        assert round(efs1.best_score_, 4) == 0.9532
+
     dict_compare_utility(d1=expect, d2=efs1.subsets_)
     assert efs1.best_idx_ == (0, 2, 3)
-    assert round(efs1.best_score_, 4) == 0.9599
 
 
 def test_regression():
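
The rewritten expected cv_scores above are given to three decimals, so the comparison performed by dict_compare_utility has to tolerate rounding. Its body is outside this diff; the following is a hypothetical sketch of the kind of check such a helper performs (the decimal tolerance is assumed, not taken from the repository):

import numpy as np

def dict_compare_utility(d1, d2, decimal=2):
    # hypothetical sketch -- the real helper is defined in the test module
    assert d1.keys() == d2.keys()
    for key in d1:
        assert d1[key]['feature_idx'] == d2[key]['feature_idx']
        np.testing.assert_almost_equal(d1[key]['avg_score'],
                                       d2[key]['avg_score'],
                                       decimal=decimal)
        np.testing.assert_almost_equal(d1[key]['cv_scores'],
                                       d2[key]['cv_scores'],
                                       decimal=decimal)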

mlxtend/feature_selection/tests/test_sequential_feature_selector.py

Lines changed: 36 additions & 19 deletions
@@ -203,23 +203,30 @@ def test_knn_cv3():
     sfs1 = sfs1.fit(X, y)
     sfs1.subsets_
     expect = {1: {'avg_score': 0.95299145299145294,
-                  'cv_scores': np.array([0.97435897,
-                                         0.94871795,
-                                         0.88888889,
-                                         1.0]),
+                  'cv_scores': np.array([0.974, 0.947, 0.892, 1.]),
                   'feature_idx': (3,)},
              2: {'avg_score': 0.95993589743589736,
-                  'cv_scores': np.array([0.97435897,
-                                         0.94871795,
-                                         0.91666667,
-                                         1.0]),
+                  'cv_scores': np.array([0.974, 0.947, 0.919, 1.]),
                   'feature_idx': (2, 3)},
-             3: {'avg_score': 0.97275641025641035,
-                  'cv_scores': np.array([0.97435897,
-                                         1.0,
-                                         0.94444444,
-                                         0.97222222]),
+             3: {'avg_score': 0.9732,
+                  'cv_scores': np.array([0.974, 1., 0.946, 0.973]),
                   'feature_idx': (1, 2, 3)}}
+
+    if Version(sklearn_version) < Version("0.22"):
+        expect[1]['cv_scores'] = np.array([0.97435897,
+                                           0.94871795,
+                                           0.88888889,
+                                           1.0])
+        expect[2]['cv_scores'] = np.array([0.97435897,
+                                           0.94871795,
+                                           0.91666667,
+                                           1.0])
+        expect[2]['avg_score'] = 0.97275641025641035
+        expect[3]['cv_scores'] = np.array([0.97435897,
+                                           1.0,
+                                           0.94444444,
+                                           0.97222222])
+
     dict_compare_utility(d_actual=sfs1.subsets_, d_desired=expect)
 
 
@@ -454,7 +461,11 @@ def test_knn_scoring_metric():
                cv=4,
                verbose=0)
     sfs5 = sfs5.fit(X, y)
-    assert round(sfs5.k_score_, 4) == 0.9728
+
+    if Version(sklearn_version) < '0.22':
+        assert round(sfs5.k_score_, 4) == 0.9728
+    else:
+        assert round(sfs5.k_score_, 4) == 0.9732
 
     sfs6 = SFS(knn,
                k_features=3,
@@ -463,7 +474,10 @@ def test_knn_scoring_metric():
                cv=4,
                verbose=0)
     sfs6 = sfs6.fit(X, y)
-    assert round(sfs6.k_score_, 4) == 0.9728
+    if Version(sklearn_version) < '0.22':
+        assert round(sfs5.k_score_, 4) == 0.9728
+    else:
+        assert round(sfs5.k_score_, 4) == 0.9732
 
     sfs7 = SFS(knn,
                k_features=3,
@@ -472,7 +486,10 @@ def test_knn_scoring_metric():
                scoring='f1_macro',
                cv=4)
     sfs7 = sfs7.fit(X, y)
-    assert round(sfs7.k_score_, 4) == 0.9727, sfs7.k_score_
+    if Version(sklearn_version) < '0.22':
+        assert round(sfs5.k_score_, 4) == 0.9727
+    else:
+        assert round(sfs5.k_score_, 4) == 0.9732
 
 
 def test_regression():
@@ -588,9 +605,9 @@ def _fit(self, X, y, init_params=True):
                 errors += int(update != 0.0)
 
             if self.print_progress:
-                self._print_progress(iteration=i + 1,
-                                     n_iter=self.epochs,
-                                     cost=errors)
+                self.print_progress(iteration=i + 1,
+                                    n_iter=self.epochs,
+                                    cost=errors)
             self.cost_.append(errors)
         return self
