@@ -73,7 +73,7 @@ class BOPDMDOperator(DMDOperator):
         of the variable projection routine.
     :type eig_constraints: set(str) or function
     :param mode_prox: Optional proximal operator function to apply to the DMD
-        modes at every iteration of variable projection routine.
+        modes at every iteration of the variable projection routine.
     :type mode_prox: function
     :param bag_maxfail: Number of consecutive non-converged trials of BOP-DMD
         at which to terminate the fit. Set this parameter to infinity for no
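The `mode_prox` hook accepts any function that maps the mode matrix to a regularized version of itself. A minimal sketch of one common choice, complex soft-thresholding to promote sparse modes (the function name and the threshold `gamma` are illustrative placeholders, not part of the library):

```python
import numpy as np

def soft_threshold_prox(B, gamma=1e-3):
    """Illustrative proximal operator: complex soft-thresholding.

    Shrinks each entry of the mode matrix B toward zero by gamma,
    promoting sparsity in the fitted DMD modes. The threshold value
    here is an arbitrary placeholder.
    """
    mag = np.abs(B)
    # Shrink magnitudes by gamma, guarding against division by zero.
    scale = np.maximum(mag - gamma, 0.0) / np.where(mag > 0, mag, 1.0)
    return B * scale

# Hypothetical usage: pass as the mode_prox argument when constructing
# the model, e.g. BOPDMD(svd_rank=r, mode_prox=soft_threshold_prox).
```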
@@ -494,30 +494,10 @@ def _variable_projection(self, H, t, init_alpha, Phi, dPhi):
         Computational Optimization and Applications 54.3 (2013): 579-593
         by Dianne P. O'Leary and Bert W. Rust.
         """
-
-        def compute_residual(alpha):
-            """
-            Helper function that, given alpha, and using H, t, Phi as they are
-            passed to the _variable_projection function, computes and returns
-            the matrix Phi(alpha,t), B from the expression H = Phi(alpha,t)B,
-            the residual H - Phi(alpha,t)B, and 0.5*norm(residual,'fro')^2,
-            which is used as an error indicator.
-            """
-            Phi_matrix = Phi(alpha, t)
-
-            # Update B matrix.
-            B = np.linalg.lstsq(Phi_matrix, H, rcond=None)[0]
-            if self._mode_prox is not None:
-                B = self._mode_prox(B)
-
-            residual = H - Phi_matrix.dot(B)
-            error = 0.5 * np.linalg.norm(residual) ** 2
-
-            return B, residual, error
-
         # Define M, IS, and IA.
         M, IS = H.shape
         IA = len(init_alpha)
+        tolrank = M * np.finfo(float).eps

         # Unpack all variable projection parameters stored in varpro_opts.
         (
@@ -532,12 +512,32 @@ def compute_residual(alpha):
             verbose,
         ) = self._varpro_opts

+        def compute_error(B, alpha):
+            """
+            Compute the current residual and relative error.
+            """
+            residual = H - Phi(alpha, t).dot(B)
+            error = np.linalg.norm(residual, "fro") / np.linalg.norm(H, "fro")
+
+            return residual, error
+
+        def compute_B(alpha):
+            """
+            Update B for the current alpha.
+            """
+            # Compute B using least squares.
+            B = np.linalg.lstsq(Phi(alpha, t), H, rcond=None)[0]
+
+            # Apply proximal operator if given.
+            if self._mode_prox is not None:
+                B = self._mode_prox(B)
+
+            return B
+
         # Initialize values.
-        tolrank = M * np.finfo(float).eps
         _lambda = init_lambda
         alpha = self._push_eigenvalues(init_alpha)
-        B, residual, error = compute_residual(alpha)
-        relative_error = np.linalg.norm(residual) / np.linalg.norm(H)
+        B = compute_B(alpha)
         U, S, Vh = self._compute_irank_svd(Phi(alpha, t), tolrank)

         # Initialize termination flags.
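This refactor splits the old `compute_residual` helper in two: `compute_B` solves the linear subproblem for a fixed alpha, and `compute_error` reports the relative Frobenius error, replacing the earlier `0.5 * norm(residual)**2` indicator. That separation is the core of variable projection: once alpha is fixed, B is an ordinary least-squares solve, so only alpha needs nonlinear optimization. A self-contained sketch of the same split, assuming the usual BOP-DMD basis of exponentials `Phi[i, j] = exp(alpha[j] * t[i])` (toy data, not the library's internals):

```python
import numpy as np

def Phi(alpha, t):
    # Basis matrix of exponentials: Phi[i, j] = exp(alpha[j] * t[i]).
    return np.exp(np.outer(t, alpha))

def compute_B(alpha, t, H):
    # Linear subproblem: fit H ~= Phi(alpha, t) @ B by least squares.
    return np.linalg.lstsq(Phi(alpha, t), H, rcond=None)[0]

def compute_error(B, alpha, t, H):
    # Relative Frobenius error of the current fit.
    residual = H - Phi(alpha, t).dot(B)
    return np.linalg.norm(residual, "fro") / np.linalg.norm(H, "fro")

# Toy data: two damped oscillations sampled at 100 time points.
t = np.linspace(0, 1, 100)
true_alpha = np.array([-1.0 + 10.0j, -0.5 - 4.0j])
H = Phi(true_alpha, t).dot(np.array([[1.0], [0.5]]))

B = compute_B(true_alpha, t, H)
print(compute_error(B, true_alpha, t, H))  # ~0 at the true alpha
```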
@@ -550,6 +550,9 @@ def compute_residual(alpha):
         rjac = np.zeros((2 * IA, IA), dtype="complex")
         scales = np.zeros(IA)

+        # Initialize iteration progress indicators.
+        residual, error = compute_error(B, alpha)
+
         for itr in range(maxiter):
             # Build Jacobian matrix, looping over alpha indices.
             for i in range(IA):
@@ -588,26 +591,26 @@ def compute_residual(alpha):
                 (rhs_top[:IA], np.zeros(IA, dtype="complex")), axis=None
             )

-            def step(_lambda, rhs, scales_pvt, ij_pvt):
+            def step(_lambda, scales_pvt=scales_pvt, rhs=rhs, ij_pvt=ij_pvt):
                 """
-                Helper function that, given a step size _lambda and the current
-                right-hand side and pivots, computes and returns delta, the
-                amount in which we update alpha, and the updated alpha vector.
-                Note that this function uses rjac and alpha as they are defined
-                outside of this function.
+                Helper function that, when given a step size _lambda,
+                computes and returns the updated step and alpha vectors.
                 """
                 # Compute the step delta.
                 rjac[IA:] = _lambda * np.diag(scales_pvt)
                 delta = np.linalg.lstsq(rjac, rhs, rcond=None)[0]
                 delta = delta[ij_pvt]
+
                 # Compute the updated alpha vector.
                 alpha_updated = alpha.ravel() + delta.ravel()
                 alpha_updated = self._push_eigenvalues(alpha_updated)
+
                 return delta, alpha_updated

             # Take a step using our initial step size init_lambda.
-            delta_0, alpha_0 = step(_lambda, rhs, scales_pvt, ij_pvt)
-            B_0, residual_0, error_0 = compute_residual(alpha_0)
+            delta_0, alpha_0 = step(_lambda)
+            B_0 = compute_B(alpha_0)
+            residual_0, error_0 = compute_error(B_0, alpha_0)

             # Check actual improvement vs predicted improvement.
             actual_improvement = error - error_0
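The redefined `step` performs a damped (Levenberg-Marquardt) least-squares solve: the top block of `rjac` holds the current Jacobian while the bottom block is `_lambda * diag(scales_pvt)`, so a larger `_lambda` shrinks the step. Binding `scales_pvt`, `rhs`, and `ij_pvt` as default arguments snapshots the current iteration's values at definition time and shortens the call sites. A simplified standalone sketch of the damped step, without the column pivoting used in the actual routine:

```python
import numpy as np

def lm_step(J, r, _lambda, scales):
    """One damped least-squares step: solve
        min_delta || [J; _lambda * diag(scales)] @ delta - [r; 0] ||
    As _lambda -> 0 this approaches the Gauss-Newton step; as _lambda
    grows, the step shrinks toward a scaled gradient direction.
    """
    IA = J.shape[1]
    # Stack the Jacobian on top of the damping block.
    A = np.vstack([J, _lambda * np.diag(scales)])
    b = np.concatenate([r, np.zeros(IA, dtype=J.dtype)])
    return np.linalg.lstsq(A, b, rcond=None)[0]
```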
@@ -627,8 +630,9 @@ def step(_lambda, rhs, scales_pvt, ij_pvt):
                 # Increase lambda until something works.
                 for _ in range(maxlam):
                     _lambda *= lamup
-                    delta_0, alpha_0 = step(_lambda, rhs, scales_pvt, ij_pvt)
-                    B_0, residual_0, error_0 = compute_residual(alpha_0)
+                    delta_0, alpha_0 = step(_lambda)
+                    B_0 = compute_B(alpha_0)
+                    residual_0, error_0 = compute_error(B_0, alpha_0)

                     if error_0 < error:
                         break
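If the first step fails to reduce the error, `_lambda` is grown geometrically by `lamup` for at most `maxlam` retries. The retry loop in isolation, where `evaluate_step` is a hypothetical stand-in for `step` followed by `compute_B` and `compute_error`:

```python
def escalate_damping(_lambda, error, lamup, maxlam, evaluate_step):
    # Grow the damping parameter until a step improves the error,
    # or give up after maxlam attempts.
    for _ in range(maxlam):
        _lambda *= lamup
        error_0 = evaluate_step(_lambda)
        if error_0 < error:
            return _lambda, error_0, True
    return _lambda, error, False
```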
@@ -641,23 +645,22 @@ def step(_lambda, rhs, scales_pvt, ij_pvt):
                     "iteration {}. Current error {}. "
                     "Consider increasing maxlam or lamup."
                 )
-                print(msg.format(itr + 1, relative_error))
+                print(msg.format(itr + 1, error))
                 return B, alpha, converged

             # ...otherwise, update and proceed.
             alpha, B, residual, error = alpha_0, B_0, residual_0, error_0

             # Record the current relative error.
-            relative_error = np.linalg.norm(residual) / np.linalg.norm(H)
-            all_error[itr] = relative_error
+            all_error[itr] = error

             # Print iterative progress if the verbose flag is turned on.
             if verbose:
                 update_msg = "Step {} Error {} Lambda {}"
-                print(update_msg.format(itr + 1, relative_error, _lambda))
+                print(update_msg.format(itr + 1, error, _lambda))

             # Update termination status and terminate if converged or stalled.
-            converged = relative_error < tol
+            converged = error < tol
             error_reduction = all_error[itr - 1] - all_error[itr]
             stalled = (itr > 0) and (
                 error_reduction < eps_stall * all_error[itr - 1]
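With `error` now a relative quantity, termination keys off it directly: `converged` tests it against `tol`, and the stall test flags runs where the per-iteration improvement falls below an `eps_stall` fraction of the previous error. The same logic in isolation:

```python
def check_termination(all_error, itr, tol, eps_stall):
    # Converged: relative error is below the tolerance.
    converged = all_error[itr] < tol
    # Stalled: improvement over the previous iteration is negligible.
    stalled = itr > 0 and (
        all_error[itr - 1] - all_error[itr] < eps_stall * all_error[itr - 1]
    )
    return converged, stalled
```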
@@ -676,7 +679,7 @@ def step(_lambda, rhs, scales_pvt, ij_pvt):
                     "Iteration {}. Current error {}. Consider "
                     "increasing tol or decreasing eps_stall."
                 )
-                print(msg.format(eps_stall, itr + 1, relative_error))
+                print(msg.format(eps_stall, itr + 1, error))
                 return B, alpha, converged

             U, S, Vh = self._compute_irank_svd(Phi(alpha, t), tolrank)
@@ -687,7 +690,7 @@ def step(_lambda, rhs, scales_pvt, ij_pvt):
             "Failed to reach tolerance after maxiter = {} iterations. "
             "Current error {}."
         )
-        print(msg.format(maxiter, relative_error))
+        print(msg.format(maxiter, error))

         return B, alpha, converged

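On exit the routine hands back the linear coefficients `B`, the nonlinear parameters `alpha` (the continuous-time eigenvalues), and the convergence flag; the fitted model reconstructs the data as `H ~= Phi(alpha, t) @ B`. A toy sketch of consuming that triple, reusing the exponential `Phi` from the earlier example with placeholder values:

```python
import numpy as np

def Phi(alpha, t):
    # Same illustrative exponential basis as above.
    return np.exp(np.outer(t, alpha))

# Placeholder values standing in for the returned triple:
# B, alpha, converged = self._variable_projection(H, t, init_alpha, Phi, dPhi)
t = np.linspace(0, 1, 50)
alpha = np.array([-1.0 + 10.0j])   # continuous-time eigenvalue
B = np.array([[2.0 - 1.0j]])       # linear coefficients (mode amplitudes)
converged = True

H_fit = Phi(alpha, t).dot(B)       # low-rank reconstruction of the data
print(H_fit.shape, converged)      # (50, 1) True
```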