Skip to content

Commit 5e74cbf

Browse files
committed
wip
1 parent 8aa3cd9 commit 5e74cbf

File tree

3 files changed

+17
-27
lines changed

3 files changed

+17
-27
lines changed

highs/pdlp/cupdlp/cupdlp_step.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -410,17 +410,20 @@ void PDHG_Compute_Average_Iterate(CUPDLPwork *work) {
410410
// ATyCPU(work, iterates->atyAverage, iterates->yAverage);
411411
Ax(work, iterates->axAverage, iterates->xAverage);
412412
ATy(work, iterates->atyAverage, iterates->yAverage);
413+
// print norm of x_average
414+
cupdlp_float debug_pdlp_data_x_average_norm = 0.0;
415+
cupdlp_twoNormSquared(work, lp->nCols, iterates->xAverage
416+
->data, &debug_pdlp_data_x_average_norm);
417+
work->debug_pdlp_data_.x_average_norm = debug_pdlp_data_x_average_norm;
413418

414419
cupdlp_float debug_pdlp_data_ax_average_norm = 0.0;
415420
cupdlp_twoNormSquared(work, lp->nCols, iterates->axAverage->data, &debug_pdlp_data_ax_average_norm);
416421
work->debug_pdlp_data_.ax_average_norm = debug_pdlp_data_ax_average_norm;
417422

418-
/*
419423
// Uncomment this once Yanyu is computing aty_average_norm
420424
cupdlp_float debug_pdlp_data_aty_average_norm = 0.0;
421425
cupdlp_twoNormSquared(work, lp->nCols, iterates->atyAverage->data, &debug_pdlp_data_aty_average_norm);
422426
work->debug_pdlp_data_.aty_average_norm = debug_pdlp_data_aty_average_norm;
423-
*/
424427
}
425428

426429
void PDHG_Update_Average(CUPDLPwork *work) {

highs/pdlp/hipdlp/pdhg.cc

Lines changed: 11 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -417,10 +417,6 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
417417

418418
if (bool_checking) {
419419
ComputeAverageIterate(Ax_avg, ATy_avg);
420-
// Compute matrix-vector products for current and average iterates
421-
linalg::Ax(lp, x_current_, Ax_cache_);
422-
linalg::ATy(lp, y_current_, ATy_cache_);
423-
424420
// Reset the average iterate accumulation
425421
int inner_iter = iter - restart_scheme_.GetLastRestartIter();
426422

@@ -476,15 +472,17 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
476472
// Restart from the average iterate
477473
restart_x = x_avg_;
478474
restart_y = y_avg_;
475+
476+
Ax_cache_ = Ax_avg;
477+
ATy_cache_ = ATy_avg;
479478
} else {
480479
// Restart from the current iterate
481480
restart_x = x_current_;
482481
restart_y = y_current_;
483482
}
484483

485484
// Perform the primal weight update using z^{n,0} and z^{n-1,0}
486-
PDHG_Compute_Step_Size_Ratio(working_params, restart_x, restart_y,
487-
x_at_last_restart_, y_at_last_restart_);
485+
PDHG_Compute_Step_Size_Ratio(working_params);
488486
current_eta_ = working_params.eta;
489487
restart_scheme_.passParams(&working_params);
490488
restart_scheme_.UpdateBeta(working_params.omega * working_params.omega);
@@ -506,13 +504,9 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
506504
linalg::ATy(lp, y_current_, ATy_cache_);
507505
debug_pdlp_data_.aty_norm = linalg::vector_norm(ATy_cache_);
508506
restart_scheme_.SetLastRestartIter(iter);
507+
508+
//continue; //same logic as cupdlp
509509
}
510-
} else {
511-
// If not checking, still need Ax and ATy for the update
512-
linalg::Ax(lp, x_current_, Ax_cache_);
513-
debug_pdlp_data_.ax_norm = linalg::vector_norm(Ax_cache_);
514-
linalg::ATy(lp, y_current_, ATy_cache_);
515-
debug_pdlp_data_.aty_norm = linalg::vector_norm(ATy_cache_);
516510
}
517511

518512
// --- 5. Core PDHG Update Step ---
@@ -523,6 +517,9 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
523517
x_next_ = x_current_;
524518
y_next_ = y_current_;
525519

520+
debug_pdlp_data_.ax_norm = linalg::vector_norm(Ax_cache_);
521+
debug_pdlp_data_.aty_norm = linalg::vector_norm(ATy_cache_);
522+
526523
switch (params_.step_size_strategy) {
527524
case StepSizeStrategy::FIXED:
528525
UpdateIteratesFixed();
@@ -547,6 +544,7 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
547544
}
548545

549546
// Compute ATy for the new iterate
547+
Ax_cache_ = Ax_next_;
550548
linalg::ATy(lp, y_next_, ATy_cache_);
551549

552550
// --- 6. Update Average Iterates ---
@@ -604,17 +602,10 @@ void PDLPSolver::Initialize() {
604602

605603
// Update primal weight
606604
void PDLPSolver::PDHG_Compute_Step_Size_Ratio(
607-
PrimalDualParams& working_params, const std::vector<double>& x_n_0,
608-
const std::vector<double>& y_n_0, const std::vector<double>& x_n_minus_1_0,
609-
const std::vector<double>& y_n_minus_1_0) {
605+
PrimalDualParams& working_params) {
610606
// 1. Calculate the L2 norm of the difference between current and last-restart
611607
// iterates.
612-
//double primal_diff_norm = linalg::diffTwoNorm(x_n_0, x_n_minus_1_0);
613608
double primal_diff_norm = linalg::diffTwoNorm(x_at_last_restart_, x_current_);
614-
double norm_xLastRestart = linalg::vector_norm(x_at_last_restart_);
615-
double norm_xCurrent = linalg::vector_norm(x_current_);
616-
617-
//double dual_diff_norm = linalg::diffTwoNorm(y_n_0, y_n_minus_1_0);
618609
double dual_diff_norm = linalg::diffTwoNorm(y_at_last_restart_, y_current_);
619610

620611
double dMeanStepSize = std::sqrt(stepsize_.primal_step *
@@ -1190,7 +1181,6 @@ StepSizeConfig PDLPSolver::InitializeStepSizesPowerMethod(double op_norm_sq) {
11901181

11911182
std::vector<double> PDLPSolver::UpdateX() {
11921183
std::vector<double> x_new(lp_.num_col_);
1193-
linalg::ATy(lp_, y_current_, ATy_cache_);
11941184
debug_pdlp_data_.aty_norm = linalg::vector_norm(ATy_cache_);
11951185
for (HighsInt i = 0; i < lp_.num_col_; i++) {
11961186
double gradient = lp_.col_cost_[i] - ATy_cache_[i];

highs/pdlp/hipdlp/pdhg.hpp

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -95,10 +95,7 @@ class PDLPSolver {
9595
std::tuple<double, double, double, double, double> ComputeDualityGap(
9696
const std::vector<double>& x, const std::vector<double>& y,
9797
const std::vector<double>& lambda);
98-
void PDHG_Compute_Step_Size_Ratio(
99-
PrimalDualParams& working_params, const std::vector<double>& x_n_0,
100-
const std::vector<double>& y_n_0, const std::vector<double>& x_n_minus_1_0,
101-
const std::vector<double>& y_n_minus_1_0);
98+
void PDHG_Compute_Step_Size_Ratio(PrimalDualParams& working_params);
10299

103100
// --- Problem Data and Parameters ---
104101
HighsLp lp_;

0 commit comments

Comments (0)