@@ -417,10 +417,6 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
 
     if (bool_checking) {
       ComputeAverageIterate(Ax_avg, ATy_avg);
-      // Compute matrix-vector products for current and average iterates
-      linalg::Ax(lp, x_current_, Ax_cache_);
-      linalg::ATy(lp, y_current_, ATy_cache_);
-
      // Reset the average iterate accumulation
      int inner_iter = iter - restart_scheme_.GetLastRestartIter();
 
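Dropping the two products on the checking path is sound because averaging commutes with `A`: `ComputeAverageIterate` can build `Ax_avg` and `ATy_avg` from running sums of the already-cached products, so no fresh mat-vec is needed here. A minimal standalone sketch of that identity, with a dense matrix and uniform weights (all names illustrative, not the solver's API):

```cpp
#include <cassert>
#include <cmath>
#include <vector>

// Illustrative dense mat-vec standing in for linalg::Ax.
std::vector<double> matvec(const std::vector<std::vector<double>>& A,
                           const std::vector<double>& x) {
  std::vector<double> r(A.size(), 0.0);
  for (size_t i = 0; i < A.size(); ++i)
    for (size_t j = 0; j < x.size(); ++j) r[i] += A[i][j] * x[j];
  return r;
}

int main() {
  std::vector<std::vector<double>> A = {{1, 2}, {3, 4}};
  std::vector<double> iterates[] = {{1.0, 0.0}, {0.5, 0.5}, {0.0, 1.0}};

  // Accumulate running sums of x_k and of the cached A*x_k.
  std::vector<double> x_sum(2, 0.0), Ax_sum(2, 0.0);
  for (const auto& x : iterates) {
    auto Ax = matvec(A, x);  // in the solver this product is already cached
    for (size_t j = 0; j < 2; ++j) x_sum[j] += x[j];
    for (size_t i = 0; i < 2; ++i) Ax_sum[i] += Ax[i];
  }

  // By linearity, avg(A*x_k) == A * avg(x_k): no extra mat-vec needed.
  std::vector<double> x_avg = {x_sum[0] / 3, x_sum[1] / 3};
  auto Ax_of_avg = matvec(A, x_avg);
  for (size_t i = 0; i < 2; ++i)
    assert(std::abs(Ax_sum[i] / 3 - Ax_of_avg[i]) < 1e-12);
}
```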
@@ -476,15 +472,17 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
        // Restart from the average iterate
        restart_x = x_avg_;
        restart_y = y_avg_;
+
+        Ax_cache_ = Ax_avg;
+        ATy_cache_ = ATy_avg;
      } else {
        // Restart from the current iterate
        restart_x = x_current_;
        restart_y = y_current_;
      }
 
      // Perform the primal weight update using z^{n,0} and z^{n-1,0}
-      PDHG_Compute_Step_Size_Ratio(working_params, restart_x, restart_y,
-                                   x_at_last_restart_, y_at_last_restart_);
+      PDHG_Compute_Step_Size_Ratio(working_params);
      current_eta_ = working_params.eta;
      restart_scheme_.passParams(&working_params);
      restart_scheme_.UpdateBeta(working_params.omega * working_params.omega);
@@ -506,13 +504,9 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
        linalg::ATy(lp, y_current_, ATy_cache_);
        debug_pdlp_data_.aty_norm = linalg::vector_norm(ATy_cache_);
        restart_scheme_.SetLastRestartIter(iter);
+
+        // continue;  // same logic as cuPDLP
      }
-    } else {
-      // If not checking, still need Ax and ATy for the update
-      linalg::Ax(lp, x_current_, Ax_cache_);
-      debug_pdlp_data_.ax_norm = linalg::vector_norm(Ax_cache_);
-      linalg::ATy(lp, y_current_, ATy_cache_);
-      debug_pdlp_data_.aty_norm = linalg::vector_norm(ATy_cache_);
    }
 
    // --- 5. Core PDHG Update Step ---
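With the `else` branch gone, both paths now rely on the invariant that `Ax_cache_` and `ATy_cache_` already match `x_current_` and `y_current_` when the PDHG step begins; it is maintained by the `Ax_cache_ = Ax_avg` assignment on restart and the `Ax_cache_ = Ax_next_` hand-off further down. A debug-only guard along these lines could catch regressions. It reuses the `linalg` calls already present in this file, but the guard itself is a sketch, not existing code:

```cpp
#ifndef NDEBUG
    // Hypothetical guard: recompute A*x_current_ and compare with the cache.
    // Costs one extra mat-vec, so it is compiled out of release builds.
    std::vector<double> ax_check(lp.num_row_);
    linalg::Ax(lp, x_current_, ax_check);
    assert(linalg::diffTwoNorm(ax_check, Ax_cache_) <
           1e-9 * (1.0 + linalg::vector_norm(Ax_cache_)));
#endif
```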
@@ -523,6 +517,9 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
    x_next_ = x_current_;
    y_next_ = y_current_;
 
+    debug_pdlp_data_.ax_norm = linalg::vector_norm(Ax_cache_);
+    debug_pdlp_data_.aty_norm = linalg::vector_norm(ATy_cache_);
+
    switch (params_.step_size_strategy) {
      case StepSizeStrategy::FIXED:
        UpdateIteratesFixed();
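For orientation, the `FIXED` strategy is plain PDHG with constant steps: a projected primal gradient step, then a dual step at the extrapolated point `2*x_new - x`. A self-contained sketch of one such iteration for `min c'x s.t. Ax = b, l <= x <= u`, using a dense mat-vec and illustrative names; `tau`/`sigma` play the roles of `stepsize_.primal_step`/`stepsize_.dual_step`, and this is not the literal body of `UpdateIteratesFixed`:

```cpp
#include <algorithm>
#include <vector>

using Vec = std::vector<double>;
using Mat = std::vector<Vec>;

Vec matvec(const Mat& A, const Vec& v) {
  Vec r(A.size(), 0.0);
  for (size_t i = 0; i < A.size(); ++i)
    for (size_t j = 0; j < v.size(); ++j) r[i] += A[i][j] * v[j];
  return r;
}

void pdhg_step(const Mat& A, const Vec& b, const Vec& c, const Vec& l,
               const Vec& u, double tau, double sigma, Vec& x, Vec& y) {
  // A^T y for the primal gradient c - A^T y.
  Vec ATy(x.size(), 0.0);
  for (size_t i = 0; i < A.size(); ++i)
    for (size_t j = 0; j < x.size(); ++j) ATy[j] += A[i][j] * y[i];

  // Projected primal step onto the bound box [l, u].
  Vec x_new(x.size());
  for (size_t j = 0; j < x.size(); ++j)
    x_new[j] = std::clamp(x[j] - tau * (c[j] - ATy[j]), l[j], u[j]);

  // Dual ascent step at the extrapolated point 2*x_new - x.
  Vec x_bar(x.size());
  for (size_t j = 0; j < x.size(); ++j) x_bar[j] = 2.0 * x_new[j] - x[j];
  Vec Axbar = matvec(A, x_bar);
  for (size_t i = 0; i < y.size(); ++i) y[i] += sigma * (b[i] - Axbar[i]);

  x = x_new;
}
```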
@@ -547,6 +544,7 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
    }
 
    // Compute ATy for the new iterate
+    Ax_cache_ = Ax_next_;
    linalg::ATy(lp, y_next_, ATy_cache_);
 
    // --- 6. Update Average Iterates ---
@@ -604,17 +602,10 @@ void PDLPSolver::Initialize() {
 
// Update primal weight
void PDLPSolver::PDHG_Compute_Step_Size_Ratio(
-    PrimalDualParams& working_params, const std::vector<double>& x_n_0,
-    const std::vector<double>& y_n_0, const std::vector<double>& x_n_minus_1_0,
-    const std::vector<double>& y_n_minus_1_0) {
+    PrimalDualParams& working_params) {
  // 1. Calculate the L2 norm of the difference between current and last-restart
  // iterates.
-  // double primal_diff_norm = linalg::diffTwoNorm(x_n_0, x_n_minus_1_0);
  double primal_diff_norm = linalg::diffTwoNorm(x_at_last_restart_, x_current_);
-  double norm_xLastRestart = linalg::vector_norm(x_at_last_restart_);
-  double norm_xCurrent = linalg::vector_norm(x_current_);
-
-  // double dual_diff_norm = linalg::diffTwoNorm(y_n_0, y_n_minus_1_0);
  double dual_diff_norm = linalg::diffTwoNorm(y_at_last_restart_, y_current_);
 
  double dMeanStepSize = std::sqrt(stepsize_.primal_step *
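The hunk ends at `dMeanStepSize`, the geometric mean eta = sqrt(primal_step * dual_step). The usual PDLP primal weight update that this function's name and the two norms above point to is the log-space smoothing omega <- exp(theta * log(||dy|| / ||dx||) + (1 - theta) * log(omega)), typically with theta = 0.5 and a guard against near-zero movement, after which the steps split as primal = eta / omega and dual = eta * omega. A hedged sketch of that rule, not necessarily the exact remainder of this body:

```cpp
#include <cmath>

// Sketch of the classic PDLP primal weight update (theta = 0.5). Inputs are
// the movement norms computed above; omega plays working_params.omega.
double UpdatePrimalWeight(double primal_diff_norm, double dual_diff_norm,
                          double omega, double theta = 0.5) {
  // Only update when both movements are meaningful; otherwise keep omega.
  if (primal_diff_norm > 1e-10 && dual_diff_norm > 1e-10) {
    double log_ratio = std::log(dual_diff_norm / primal_diff_norm);
    omega = std::exp(theta * log_ratio + (1.0 - theta) * std::log(omega));
  }
  return omega;
}
```

Under the eta/omega split, omega squared equals dual_step / primal_step, which is consistent with the `restart_scheme_.UpdateBeta(working_params.omega * working_params.omega)` call at the restart site above.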
@@ -1190,7 +1181,6 @@ StepSizeConfig PDLPSolver::InitializeStepSizesPowerMethod(double op_norm_sq) {
 
std::vector<double> PDLPSolver::UpdateX() {
  std::vector<double> x_new(lp_.num_col_);
-  linalg::ATy(lp_, y_current_, ATy_cache_);
  debug_pdlp_data_.aty_norm = linalg::vector_norm(ATy_cache_);
  for (HighsInt i = 0; i < lp_.num_col_; i++) {
    double gradient = lp_.col_cost_[i] - ATy_cache_[i];
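With the redundant `linalg::ATy` call removed, `UpdateX` becomes a pure consumer of `ATy_cache_`, which the caller now keeps current. The loop presumably finishes with the standard projected gradient step onto the column bounds; a sketch of the remaining body under that assumption (`stepsize_.primal_step` appears elsewhere in this file, while the clamping details are assumed, not taken from the source):

```cpp
    // Projected gradient step onto [col_lower_, col_upper_] (assumed form,
    // not the verbatim body; free-variable handling may differ).
    x_new[i] = std::clamp(x_current_[i] - stepsize_.primal_step * gradient,
                          lp_.col_lower_[i], lp_.col_upper_[i]);
  }
  return x_new;
}
```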