diff --git a/deselected_tests.yaml b/deselected_tests.yaml index 77a35d2b85..4d56cc0e14 100755 --- a/deselected_tests.yaml +++ b/deselected_tests.yaml @@ -376,6 +376,10 @@ deselected_tests: - model_selection/tests/test_search.py::test_search_cv_sample_weight_equivalence[estimator0] - tests/test_common.py::test_estimators[StackingClassifier(estimators=[('est1',LogisticRegression(C=0.1)),('est2',LogisticRegression(C=1))])-check_sample_weights_invariance(kind=ones)] <1.1 - tests/test_common.py::test_estimators[StackingClassifier(estimators=[('est1',LogisticRegression(C=0.1)),('est2',LogisticRegression(C=1))])-check_sample_weights_invariance(kind=zeros)] <1.1 + # This test does not reach convergence within the default maximum number of iterations, and the test + # turns the convergence warning into an error. Note that this implementation has numeric differences w.r.t. + # stock scikit-learn, so it is not guaranteed to reach the same optimum after each iteration. + - linear_model/tests/test_logistic.py::test_multinomial_identifiability_on_iris[42-False-newton-cg] # Scikit-learn does not constraint multinomial logistic intercepts to sum to zero. # Softmax function is invariant to additions by a constant, so even though the numbers