@@ -431,15 +431,15 @@ def params_to_dict(self):
         return dict()

     def initialize(self, X, y):
-        if np.any(y <= 0):
+        if np.any(y < 0):
             raise ValueError(
-                "Target vector `y` should only take positive values " +
+                "Target vector `y` should only take positive values "
                 "when fitting a Poisson model.")

     def initialize_sparse(self, X_data, X_indptr, X_indices, y):
-        if np.any(y <= 0):
+        if np.any(y < 0):
             raise ValueError(
-                "Target vector `y` should only take positive values " +
+                "Target vector `y` should only take positive values "
                 "when fitting a Poisson model.")

     def raw_grad(self, y, Xw):
@@ -453,6 +453,9 @@ def raw_hessian(self, y, Xw):
     def value(self, y, w, Xw):
         return np.sum(np.exp(Xw) - y * Xw) / len(y)

+    def gradient(self, X, y, Xw):
+        return X.T @ self.raw_grad(y, Xw)
+
     def gradient_scalar(self, X, y, w, Xw, j):
         return (X[:, j] @ (np.exp(Xw) - y)) / len(y)

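The new `gradient` method builds the full gradient vector by applying the design matrix to `raw_grad`. Below is a minimal standalone sketch (not part of the diff or the library's test suite) illustrating the consistency between this full gradient and the existing `gradient_scalar`; it assumes `raw_grad(y, Xw)` returns `(np.exp(Xw) - y) / len(y)`, which matches the `value` and `gradient_scalar` bodies shown in the hunk, and drops `self` and the unused `w` argument for brevity.

# Standalone sketch: check that X.T @ raw_grad(y, Xw) matches the
# per-coordinate gradient_scalar from the diff.
import numpy as np

def raw_grad(y, Xw):
    # assumed body, consistent with value() and gradient_scalar() above
    return (np.exp(Xw) - y) / len(y)

def gradient(X, y, Xw):
    # the full gradient added in this diff
    return X.T @ raw_grad(y, Xw)

def gradient_scalar(X, y, Xw, j):
    # existing per-coordinate gradient from the diff (self and w dropped)
    return (X[:, j] @ (np.exp(Xw) - y)) / len(y)

rng = np.random.default_rng(0)
n_samples, n_features = 20, 5
X = rng.standard_normal((n_samples, n_features))
w = rng.standard_normal(n_features)
Xw = X @ w
# Poisson targets: non-negative counts, zeros allowed, which is why the
# check above is relaxed from `y <= 0` to `y < 0`.
y = rng.poisson(lam=np.exp(Xw))

grad = gradient(X, y, Xw)
for j in range(n_features):
    np.testing.assert_allclose(grad[j], gradient_scalar(X, y, Xw, j))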