@@ -514,12 +514,13 @@ def _variable_projection(self, H, t, init_alpha, Phi, dPhi):
 
         def compute_error(B, alpha):
             """
-            Compute the current residual and relative error.
+            Compute the current residual, objective, and relative error.
             """
             residual = H - Phi(alpha, t).dot(B)
+            objective = 0.5 * np.linalg.norm(residual, "fro") ** 2
             error = np.linalg.norm(residual, "fro") / np.linalg.norm(H, "fro")
 
-            return residual, error
+            return residual, objective, error
 
         def compute_B(alpha):
             """
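For reference, a minimal standalone sketch (with made-up matrices) of the two quantities `compute_error` now returns: the quadratic objective that the Levenberg-Marquardt step actually minimizes, and the scale-invariant relative error used for reporting and convergence checks.

```python
import numpy as np

# Illustrative stand-ins for the snapshot matrix H and the current residual.
rng = np.random.default_rng(0)
H = rng.standard_normal((8, 5))
residual = 0.1 * rng.standard_normal((8, 5))

objective = 0.5 * np.linalg.norm(residual, "fro") ** 2              # minimized by the solver
error = np.linalg.norm(residual, "fro") / np.linalg.norm(H, "fro")  # reported to the user
```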
@@ -551,7 +552,7 @@ def compute_B(alpha):
         scales = np.zeros(IA)
 
         # Initialize iteration progress indicators.
-        residual, error = compute_error(B, alpha)
+        residual, objective, error = compute_error(B, alpha)
 
         for itr in range(maxiter):
             # Build Jacobian matrix, looping over alpha indices.
@@ -610,10 +611,10 @@ def step(_lambda, scales_pvt=scales_pvt, rhs=rhs, ij_pvt=ij_pvt):
             # Take a step using our initial step size init_lambda.
             delta_0, alpha_0 = step(_lambda)
             B_0 = compute_B(alpha_0)
-            residual_0, error_0 = compute_error(B_0, alpha_0)
+            residual_0, objective_0, error_0 = compute_error(B_0, alpha_0)
 
             # Check actual improvement vs predicted improvement.
-            actual_improvement = error - error_0
+            actual_improvement = objective - objective_0
             pred_improvement = (
                 0.5
                 * np.linalg.multi_dot(
@@ -625,14 +626,15 @@ def step(_lambda, scales_pvt=scales_pvt, rhs=rhs, ij_pvt=ij_pvt):
             if error_0 < error:
                 # Rescale lambda based on the improvement ratio.
                 _lambda *= max(1 / 3, 1 - (2 * improvement_ratio - 1) ** 3)
-                alpha, B, residual, error = alpha_0, B_0, residual_0, error_0
+                alpha, B = alpha_0, B_0
+                residual, objective, error = residual_0, objective_0, error_0
             else:
                 # Increase lambda until something works.
                 for _ in range(maxlam):
                     _lambda *= lamup
                     delta_0, alpha_0 = step(_lambda)
                     B_0 = compute_B(alpha_0)
-                    residual_0, error_0 = compute_error(B_0, alpha_0)
+                    residual_0, objective_0, error_0 = compute_error(B_0, alpha_0)
 
                     if error_0 < error:
                         break
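The branch above follows the standard Levenberg-Marquardt heuristic: after an accepted step, the damping parameter is rescaled according to the ratio of actual to predicted objective decrease; after a rejected step, it is inflated by `lamup` and the step is retried up to `maxlam` times. A small standalone sketch of the rescale rule (the function name is illustrative, not part of the code):

```python
def damping_rescale(improvement_ratio):
    """Multiplier applied to _lambda after an accepted step."""
    return max(1 / 3, 1 - (2 * improvement_ratio - 1) ** 3)

# Ratios near 1 (the step behaved as predicted) shrink _lambda by up to a
# factor of 3, pushing toward a Gauss-Newton step; ratios at or below 1/2
# leave it unchanged or grow it, keeping the step closer to gradient descent.
for ratio in (0.1, 0.5, 0.9, 1.0):
    print(ratio, damping_rescale(ratio))
```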
@@ -649,7 +651,11 @@ def step(_lambda, scales_pvt=scales_pvt, rhs=rhs, ij_pvt=ij_pvt):
                     return B, alpha, converged
 
                 # ...otherwise, update and proceed.
-                alpha, B, residual, error = alpha_0, B_0, residual_0, error_0
+                alpha, B = alpha_0, B_0
+                residual, objective, error = residual_0, objective_0, error_0
+
+            # Update SVD information.
+            U, S, Vh = self._compute_irank_svd(Phi(alpha, t), tolrank)
 
             # Record the current relative error.
             all_error[itr] = error
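Moving the SVD update here means `U`, `S`, and `Vh` already reflect the newly accepted `alpha` before the next Jacobian build. The diff does not show `_compute_irank_svd` itself; as a rough sketch, a tolerance-ranked SVD of `Phi(alpha, t)` along the following lines is what such a helper is typically expected to return (the name and truncation rule below are assumptions, not PyDMD's implementation):

```python
import numpy as np

def tolrank_svd(A, tolrank):
    # Thin SVD, then keep only singular values above a relative tolerance.
    U, S, Vh = np.linalg.svd(A, full_matrices=False)
    rank = int(np.sum(S > tolrank * S[0]))
    return U[:, :rank], S[:rank], Vh[:rank]
```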
@@ -682,8 +688,6 @@ def step(_lambda, scales_pvt=scales_pvt, rhs=rhs, ij_pvt=ij_pvt):
                     print(msg.format(eps_stall, itr + 1, error))
                 return B, alpha, converged
 
-            U, S, Vh = self._compute_irank_svd(Phi(alpha, t), tolrank)
-
         # Failed to meet tolerance in maxiter steps.
         if verbose:
             msg = (
@@ -778,9 +782,9 @@ def compute_operator(self, H, t):
 
         # We'll consider non-converged trials successful if the user didn't
         # request a positive amount of bagging trials at which to terminate.
-        use_bad_bags = self._bag_maxfail <= 0.0
+        keep_bad_bags = self._bag_maxfail <= 0.0
         if verbose:
-            if use_bad_bags:
+            if keep_bad_bags:
                 print("Using all bag trial results...\n")
             else:
                 print("Non-converged trial results will be removed...\n")
@@ -811,7 +815,7 @@ def compute_operator(self, H, t):
             self._varpro_opts[-1] = verbose
 
             # Incorporate trial results into the running average if successful.
-            if converged or use_bad_bags:
+            if converged or keep_bad_bags:
                 sorted_inds = self._argsort_eigenvalues(e_i)
 
                 # Add to iterative sums.
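A sketch of the "iterative sums" pattern the bagging loop relies on: each kept trial's eigenvalues are put into a consistent order before being accumulated, so that per-mode averages (and spreads) can be formed afterwards. The sort key and accumulator below are illustrative assumptions, not PyDMD internals:

```python
import numpy as np

def argsort_eigenvalues(eigs):
    # Any deterministic key works; sorting by imaginary part keeps
    # complex-conjugate mode pairs aligned across trials.
    return np.argsort(eigs.imag)

e_sum, num_kept = np.zeros(3, dtype=complex), 0
for seed in range(10):                      # stand-in for the bagging loop
    rng = np.random.default_rng(seed)
    e_i = rng.standard_normal(3) + 1j * rng.standard_normal(3)
    e_sum += e_i[argsort_eigenvalues(e_i)]  # add to iterative sums
    num_kept += 1
e_mean = e_sum / num_kept                   # per-mode average over kept trials
```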
@@ -839,10 +843,10 @@ def compute_operator(self, H, t):
                         "Consider loosening the tol requirements "
                         "of the variable projection routine."
                     )
-                    print(msg)
+                    warnings.warn(msg)
                     runtime_warning_given = True
 
-                if num_consecutive_fails >= self._bag_maxfail and not use_bad_bags:
+                if not keep_bad_bags and num_consecutive_fails >= self._bag_maxfail:
                     msg = (
                         "Terminating the bagging routine due to "
                         "{} many trials without convergence."
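Switching from `print` to `warnings.warn` routes the convergence notice through Python's standard warning machinery, so callers can filter, log, or escalate it instead of only seeing it on stdout. For example (the `fit` call is a hypothetical trigger, not a call shown in the diff):

```python
import warnings

# Turn the convergence warning into a hard error for a strict run.
with warnings.catch_warnings():
    warnings.simplefilter("error")
    # model.fit(X)  # hypothetical call that would emit the warning
    pass
```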
@@ -869,6 +873,7 @@ def compute_operator(self, H, t):
         self._Atilde = np.linalg.multi_dot(
             [w_proj, np.diag(self._eigenvalues), np.linalg.pinv(w_proj)]
         )
+
         # Compute A if requested.
         if self._compute_A:
             self._A = np.linalg.multi_dot(
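For context, the reduced operator assembled here is an eigendecomposition written backwards, `Atilde = W diag(eigs) W^+`, with `W` the projected eigenvectors. A standalone sketch with illustrative shapes:

```python
import numpy as np

rng = np.random.default_rng(0)
w_proj = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))
eigenvalues = rng.standard_normal(4) + 1j * rng.standard_normal(4)

Atilde = np.linalg.multi_dot(
    [w_proj, np.diag(eigenvalues), np.linalg.pinv(w_proj)]
)

# The eigenvalues of Atilde recover the averaged eigenvalues (up to ordering).
np.testing.assert_allclose(
    np.sort_complex(np.linalg.eigvals(Atilde)),
    np.sort_complex(eigenvalues),
)
```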