@@ -314,6 +314,9 @@ class Lasso(LinearModel, RegressorMixin):
     tol : float, optional
         Stopping criterion for the optimization.
 
+    positive : bool, optional
+        When set to ``True``, forces the coefficient vector to be positive.
+
     fit_intercept : bool, optional (default=True)
         Whether or not to fit an intercept.
 
@@ -345,14 +348,16 @@ class Lasso(LinearModel, RegressorMixin):
     """
 
     def __init__(self, alpha=1., max_iter=50, max_epochs=50_000, p0=10, verbose=0,
-                 tol=1e-4, fit_intercept=True, warm_start=False, ws_strategy="subdiff"):
+                 tol=1e-4, positive=False, fit_intercept=True, warm_start=False,
+                 ws_strategy="subdiff"):
         super().__init__()
         self.alpha = alpha
         self.tol = tol
         self.max_iter = max_iter
         self.max_epochs = max_epochs
         self.p0 = p0
         self.ws_strategy = ws_strategy
+        self.positive = positive
         self.fit_intercept = fit_intercept
         self.warm_start = warm_start
         self.verbose = verbose
@@ -378,7 +383,7 @@ def fit(self, X, y):
             self.max_iter, self.max_epochs, self.p0, tol=self.tol,
             ws_strategy=self.ws_strategy, fit_intercept=self.fit_intercept,
             warm_start=self.warm_start, verbose=self.verbose)
-        return _glm_fit(X, y, self, Quadratic(), L1(self.alpha), solver)
+        return _glm_fit(X, y, self, Quadratic(), L1(self.alpha, self.positive), solver)
 
     def path(self, X, y, alphas, coef_init=None, return_n_iter=True, **params):
         """Compute Lasso path.
@@ -417,7 +422,7 @@ def path(self, X, y, alphas, coef_init=None, return_n_iter=True, **params):
         n_iters : array, shape (n_alphas,), optional
             The number of iterations along the path. If return_n_iter is set to `True`.
         """
-        penalty = compiled_clone(L1(self.alpha))
+        penalty = compiled_clone(L1(self.alpha, self.positive))
         datafit = compiled_clone(Quadratic(), to_float32=X.dtype == np.float32)
         solver = AndersonCD(
             self.max_iter, self.max_epochs, self.p0, tol=self.tol,
@@ -457,6 +462,9 @@ class WeightedLasso(LinearModel, RegressorMixin):
     tol : float, optional
         Stopping criterion for the optimization.
 
+    positive : bool, optional
+        When set to ``True``, forces the coefficient vector to be positive.
+
     fit_intercept : bool, optional (default=True)
         Whether or not to fit an intercept.
 
@@ -492,8 +500,8 @@ class WeightedLasso(LinearModel, RegressorMixin):
     """
 
     def __init__(self, alpha=1., weights=None, max_iter=50, max_epochs=50_000, p0=10,
-                 verbose=0, tol=1e-4, fit_intercept=True, warm_start=False,
-                 ws_strategy="subdiff"):
+                 verbose=0, tol=1e-4, positive=False, fit_intercept=True,
+                 warm_start=False, ws_strategy="subdiff"):
         super().__init__()
         self.alpha = alpha
         self.weights = weights
@@ -502,6 +510,7 @@ def __init__(self, alpha=1., weights=None, max_iter=50, max_epochs=50_000, p0=10,
         self.max_epochs = max_epochs
         self.p0 = p0
         self.ws_strategy = ws_strategy
+        self.positive = positive
         self.fit_intercept = fit_intercept
         self.warm_start = warm_start
         self.verbose = verbose
@@ -548,7 +557,7 @@ def path(self, X, y, alphas, coef_init=None, return_n_iter=True, **params):
             raise ValueError("The number of weights must match the number of \
                 features. Got %s, expected %s." % (
                 len(weights), X.shape[1]))
-        penalty = compiled_clone(WeightedL1(self.alpha, weights))
+        penalty = compiled_clone(WeightedL1(self.alpha, weights, self.positive))
         datafit = compiled_clone(Quadratic(), to_float32=X.dtype == np.float32)
         solver = AndersonCD(
             self.max_iter, self.max_epochs, self.p0, tol=self.tol,
@@ -574,9 +583,9 @@ def fit(self, X, y):
         """
         if self.weights is None:
            warnings.warn('Weights are not provided, fitting with Lasso penalty')
-            penalty = L1(self.alpha)
+            penalty = L1(self.alpha, self.positive)
         else:
-            penalty = WeightedL1(self.alpha, self.weights)
+            penalty = WeightedL1(self.alpha, self.weights, self.positive)
         solver = AndersonCD(
             self.max_iter, self.max_epochs, self.p0, tol=self.tol,
             ws_strategy=self.ws_strategy, fit_intercept=self.fit_intercept,
@@ -618,6 +627,9 @@ class ElasticNet(LinearModel, RegressorMixin):
     tol : float, optional
         Stopping criterion for the optimization.
 
+    positive : bool, optional
+        When set to ``True``, forces the coefficient vector to be positive.
+
     fit_intercept : bool, optional (default=True)
         Whether or not to fit an intercept.
 
@@ -648,8 +660,8 @@ class ElasticNet(LinearModel, RegressorMixin):
     """
 
     def __init__(self, alpha=1., l1_ratio=0.5, max_iter=50, max_epochs=50_000, p0=10,
-                 verbose=0, tol=1e-4, fit_intercept=True, warm_start=False,
-                 ws_strategy="subdiff"):
+                 verbose=0, tol=1e-4, positive=False, fit_intercept=True,
+                 warm_start=False, ws_strategy="subdiff"):
         super().__init__()
         self.alpha = alpha
         self.l1_ratio = l1_ratio
@@ -659,6 +671,7 @@ def __init__(self, alpha=1., l1_ratio=0.5, max_iter=50, max_epochs=50_000, p0=10,
         self.p0 = p0
         self.ws_strategy = ws_strategy
         self.fit_intercept = fit_intercept
+        self.positive = positive
         self.warm_start = warm_start
         self.verbose = verbose
 
@@ -699,7 +712,7 @@ def path(self, X, y, alphas, coef_init=None, return_n_iter=True, **params):
         n_iters : array, shape (n_alphas,), optional
             The number of iterations along the path. If return_n_iter is set to `True`.
         """
-        penalty = compiled_clone(L1_plus_L2(self.alpha, self.l1_ratio))
+        penalty = compiled_clone(L1_plus_L2(self.alpha, self.l1_ratio, self.positive))
         datafit = compiled_clone(Quadratic(), to_float32=X.dtype == np.float32)
         solver = AndersonCD(
             self.max_iter, self.max_epochs, self.p0, tol=self.tol,
@@ -728,7 +741,7 @@ def fit(self, X, y):
             ws_strategy=self.ws_strategy, fit_intercept=self.fit_intercept,
             warm_start=self.warm_start, verbose=self.verbose)
         return _glm_fit(X, y, self, Quadratic(),
-                        L1_plus_L2(self.alpha, self.l1_ratio), solver)
+                        L1_plus_L2(self.alpha, self.l1_ratio, self.positive), solver)
 
 
 class MCPRegression(LinearModel, RegressorMixin):
0 commit comments