@@ -26,7 +26,7 @@ def test_celer_path_logreg(solver):
     tol = 1e-11
     coefs, Cs, n_iters = _logistic_regression_path(
         X, y, Cs=1. / alphas, fit_intercept=False, penalty='l1',
-        solver='liblinear', tol=tol)
+        solver='liblinear', tol=tol, max_iter=1000, random_state=0)
 
     _, coefs_c, gaps = celer_path(
         X, y, "logreg", alphas=alphas, tol=tol, verbose=0,
@@ -38,7 +38,7 @@ def test_celer_path_logreg(solver):
 
 
 @pytest.mark.parametrize("sparse_X", [True, False])
-def test_LogisticRegression(sparse_X):
+def test_binary(sparse_X):
     np.random.seed(1409)
     X, y = build_dataset(
         n_samples=30, n_features=60, sparse_X=sparse_X)
@@ -49,28 +49,34 @@ def test_LogisticRegression(sparse_X):
     clf = LogisticRegression(C=-1)
     np.testing.assert_raises(ValueError, clf.fit, X, y)
     tol = 1e-8
-    clf1 = LogisticRegression(C=C, tol=tol, verbose=0)
-    clf1.fit(X, y)
+    clf = LogisticRegression(C=C, tol=tol, verbose=0)
+    clf.fit(X, y)
 
-    clf2 = sklearn_Logreg(
+    clf_sk = sklearn_Logreg(
         C=C, penalty='l1', solver='liblinear', fit_intercept=False, tol=tol)
-    clf2.fit(X, y)
-    assert_allclose(clf1.coef_, clf2.coef_, rtol=1e-3, atol=1e-5)
+    clf_sk.fit(X, y)
+    assert_allclose(clf.coef_, clf_sk.coef_, rtol=1e-3, atol=1e-5)
 
-    # this uses float32 so we increase the tol else there are precision issues
-    clf1.tol = 1e-4
-    check_estimator(clf1)
 
-    # multinomial test, need to have a slightly lower tol
-    # for results to be comparable
+@pytest.mark.parametrize("sparse_X", [True, False])
+def test_multinomial(sparse_X):
+    np.random.seed(1409)
+    X, y = build_dataset(
+        n_samples=30, n_features=60, sparse_X=sparse_X)
     y = np.random.choice(4, len(y))
-    clf3 = LogisticRegression(C=C, tol=tol, verbose=0)
-    clf3.fit(X, y)
+    tol = 1e-8
+    clf = LogisticRegression(C=1, tol=tol, verbose=0)
+    clf.fit(X, y)
+
+    clf_sk = sklearn_Logreg(
+        C=1, penalty='l1', solver='liblinear', fit_intercept=False, tol=tol)
+    clf_sk.fit(X, y)
+    assert_allclose(clf.coef_, clf_sk.coef_, rtol=1e-3, atol=1e-3)
 
-    clf4 = sklearn_Logreg(
-        C=C, penalty='l1', solver='liblinear', fit_intercept=False, tol=tol)
-    clf4.fit(X, y)
-    assert_allclose(clf3.coef_, clf4.coef_, rtol=1e-3, atol=1e-3)
 
-    clf3.tol = 1e-3
-    check_estimator(clf3)
+@pytest.mark.parametrize("solver", ["celer-pn"])
+def test_check_estimator(solver):
+    # sklearn fits on unnormalized data for which there are convergence issues
+    # fix with increased tolerance:
+    clf = LogisticRegression(C=1, solver=solver, tol=0.1)
+    check_estimator(clf)
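
For reference, the new test_binary boils down to the comparison sketched below: celer's LogisticRegression should closely match scikit-learn's liblinear L1 solver on a small dense problem. This is a minimal standalone sketch, not part of the diff; make_classification stands in for the repo's build_dataset helper, and the fixed C = 1. is an assumption replacing the data-dependent value the test computes on lines outside these hunks.

from numpy.testing import assert_allclose
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression as sklearn_Logreg

from celer import LogisticRegression

# A small, wide problem, similar in shape to the one built by the tests.
X, y = make_classification(n_samples=30, n_features=60, random_state=1409)

C = 1.      # assumption: the test derives C from the data instead
tol = 1e-8  # same tolerance as in test_binary

# celer's L1-penalized logistic regression vs. scikit-learn's liblinear solver
clf = LogisticRegression(C=C, tol=tol).fit(X, y)
clf_sk = sklearn_Logreg(
    C=C, penalty='l1', solver='liblinear', fit_intercept=False,
    tol=tol).fit(X, y)

# coefficients should agree up to solver tolerance, as asserted in the test
assert_allclose(clf.coef_, clf_sk.coef_, rtol=1e-3, atol=1e-5)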