
Commit c00fd82

Author: jajhall

Now using #if PDLP_DEBUG_LOG in C++
1 parent da2ae7e commit c00fd82
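The change this commit makes: cupdlp_defs.h defines PDLP_DEBUG_LOG with a value (now (0)), but most of the guards in the C++ code were written as #ifdef PDLP_DEBUG_LOG, which tests only whether the macro is defined, so the debug-logging code stayed compiled in even with the value 0. Rewriting the guards as #if PDLP_DEBUG_LOG makes the value itself the switch. A minimal standalone sketch of the distinction, not HiGHS code:

#include <cstdio>

#define PDLP_DEBUG_LOG (0)  // defined, but with value 0

int main() {
#ifdef PDLP_DEBUG_LOG
  // Compiled in: #ifdef only asks whether the macro is defined at all.
  std::printf("#ifdef branch runs even though the value is 0\n");
#endif
#if PDLP_DEBUG_LOG
  // Compiled out: #if evaluates the macro's value, and (0) is false.
  std::printf("#if branch is removed by the preprocessor\n");
#endif
  return 0;
}

With the macro at (0), only the first printf survives preprocessing; changing the definition to (1) enables both branches.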

6 files changed (+63 lines, -48 lines)

check/TestPdlp.cpp

Lines changed: 9 additions & 9 deletions
@@ -342,10 +342,11 @@ TEST_CASE("pdlp-restart-add-row", "[pdlp]") {
 }
 
 TEST_CASE("hi-pdlp", "[pdlp]") {
-  std::string model = "afiro"; //"adlittle";//"afiro";// shell// stair
-  ////25fv47 //fit2p //avgas //neso-2245 //neso-2005
+  std::string model =
+      "afiro"; //"adlittle";//"afiro";// shell// stair
+  ////25fv47 //fit2p //avgas //neso-2245 //neso-2005
   std::string model_file =
-      //std::string(HIGHS_DIR) + "/srv/" + model + ".mps.gz";
+      // std::string(HIGHS_DIR) + "/srv/" + model + ".mps.gz";
       "/srv/mps_da/" + model + ".mps.gz";
   Highs h;
   // h.setOptionValue("output_flag", dev_run);
@@ -355,10 +356,9 @@ TEST_CASE("hi-pdlp", "[pdlp]") {
   h.setOptionValue("presolve", "off");
 
   HighsInt pdlp_features_off = 0
-      //+kPdlpScalingOff
-      //+kPdlpRestartOff
-      +kPdlpAdaptiveStepSizeOff
-      ;
+      //+kPdlpScalingOff
+      //+kPdlpRestartOff
+      + kPdlpAdaptiveStepSizeOff;
   h.setOptionValue("pdlp_features_off", pdlp_features_off);
 
   HighsInt pdlp_scaling =  // 0;
@@ -369,8 +369,8 @@ TEST_CASE("hi-pdlp", "[pdlp]") {
   h.setOptionValue("pdlp_step_size_strategy", 0);
   h.setOptionValue("pdlp_restart_strategy", 2);
   h.setOptionValue("pdlp_iteration_limit", 20);
-  //h.setOptionValue("pdlp_time_limit", 60);
-  // h.setOptionValue("log_dev_level", kHighsLogDevLevelVerbose);
+  // h.setOptionValue("pdlp_time_limit", 60);
+  // h.setOptionValue("log_dev_level", kHighsLogDevLevelVerbose);
   auto start_hipdlp = std::chrono::high_resolution_clock::now();
   HighsStatus run_status = h.run();
   auto end_hipdlp = std::chrono::high_resolution_clock::now();

highs/pdlp/HiPdlpWrapper.cpp

Lines changed: 2 additions & 0 deletions
@@ -70,11 +70,13 @@ HighsStatus solveLpHiPdlp(const HighsOptions& options, HighsTimer& timer,
     return HighsStatus::kError;
   }
 
+#if PDLP_DEBUG_LOG
   // print col_value and row_dual
   debugPdlpFinalSolutionLog(pdlp.debug_pdlp_log_file_,
                             pdlp_solution.col_value.data(), lp.num_col_,
                             pdlp_solution.row_dual.data(), lp.num_row_);
   pdlp.closeDebugLog();
+#endif
 
   // Report profiling
   pdlp.reportHipdlpTimer();

highs/pdlp/cupdlp/cupdlp_defs.h

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ extern "C" {
 #define CUPDLP_DUMP_LINESEARCH_STATS (1)
 #define CUPDLP_INEXACT_EPS (1e-4)
 
-#define PDLP_DEBUG_LOG (1)
+#define PDLP_DEBUG_LOG (0)
 
 typedef struct CUPDLP_CUDA_DENSE_VEC CUPDLPvec;
 typedef struct CUPDLP_DENSE_MATRIX CUPDLPdense;
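With the guards rewritten as #if, this single definition is now the compile-time switch for all of the debug logging: building with (1) re-enables it, while (0) strips it out entirely, including the FILE* member and closeDebugLog() declared in pdhg.hpp below.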

highs/pdlp/hipdlp/pdhg.cc

Lines changed: 44 additions & 35 deletions
@@ -492,10 +492,10 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
   Timer solver_timer;
   const HighsLp& lp = lp_;
 
-  if (PDLP_DEBUG_LOG) {
-    debug_pdlp_log_file_ = fopen("HiPDLP.log", "w");
-    assert(debug_pdlp_log_file_);
-  }
+#if PDLP_DEBUG_LOG
+  debug_pdlp_log_file_ = fopen("HiPDLP.log", "w");
+  assert(debug_pdlp_log_file_);
+#endif
 
   // --- 0. Using PowerMethod to estimate the largest eigenvalue ---
 #ifdef CUPDLP_GPU
@@ -514,7 +514,7 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
   // --- 1. Initialization ---
   restart_scheme_.passLogOptions(&working_params.log_options_);
 
-#ifdef PDLP_DEBUG_LOG
+#if PDLP_DEBUG_LOG
   restart_scheme_.passDebugPdlpLogFile(debug_pdlp_log_file_);
   restart_scheme_.passDebugPdlpData(&debug_pdlp_data_);
 #endif
@@ -554,7 +554,7 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
   logger_.print_iteration_header();
 
   // --- 2. Main PDHG Loop ---
-#ifdef PDLP_DEBUG_LOG
+#if PDLP_DEBUG_LOG
   debugPdlpIterHeaderLog(debug_pdlp_log_file_);
   debugPdlpDataInitialise(&debug_pdlp_data_);
   debug_pdlp_data_.ax_average_norm = 0.0;
@@ -565,7 +565,7 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
 
   for (int iter = 0; iter < params_.max_iterations; ++iter) {
     std::cout << "PDHG Iteration " << iter << std::endl;
-#ifdef PDLP_DEBUG_LOG
+#if PDLP_DEBUG_LOG
     debugPdlpIterLog(debug_pdlp_log_file_, iter, &debug_pdlp_data_,
                      restart_scheme_.getBeta(), stepsize_.primal_step,
                      stepsize_.dual_step);
@@ -632,8 +632,9 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
                      average_results, "[A]", dSlackPosAvg_, dSlackNegAvg_);
     hipdlpTimerStop(kHipdlpClockConvergenceCheck);
 #endif
+#if PDLP_DEBUG_LOG
     debugPdlpIterHeaderLog(debug_pdlp_log_file_);
-
+#endif
     // Print iteration statistics
     logger_.print_iteration_stats(iter, average_results, current_eta_);
 
@@ -818,7 +819,7 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
                         cudaMemcpyDeviceToDevice));
   CUDA_CHECK(cudaMemcpy(d_y_next_, d_y_current_, a_num_rows_ * sizeof(double),
                         cudaMemcpyDeviceToDevice));
-#ifdef PDLP_DEBUG_LOG
+#if PDLP_DEBUG_LOG
   // Copy Ax and ATy cache to host
   CUDA_CHECK(cudaMemcpy(Ax_cache_.data(), d_ax_current_,
                         a_num_rows_ * sizeof(double),
@@ -828,15 +829,15 @@ void PDLPSolver::solve(std::vector<double>& x, std::vector<double>& y) {
                         cudaMemcpyDeviceToHost));
   debug_pdlp_data_.ax_norm = linalg::vector_norm(Ax_cache_);
   debug_pdlp_data_.aty_norm = linalg::vector_norm(ATy_cache_);
-  #endif
+#endif
 #else
   x_next_ = x_current_;
   y_next_ = y_current_;
 
-#ifdef PDLP_DEBUG_LOG
+#if PDLP_DEBUG_LOG
   debug_pdlp_data_.ax_norm = linalg::vector_norm(Ax_cache_);
   debug_pdlp_data_.aty_norm = linalg::vector_norm(ATy_cache_);
-  #endif
+#endif
 #endif
 
   switch (params_.step_size_strategy) {
@@ -1006,7 +1007,7 @@ void PDLPSolver::computeAverageIterate(std::vector<double>& ax_avg,
   for (size_t i = 0; i < y_avg_.size(); ++i) y_avg_[i] = y_sum_[i] * dDualScale;
   hipdlpTimerStop(kHipdlpClockAverageIterateComputeY);
 
-#ifdef PDLP_DEBUG_LOG
+#if PDLP_DEBUG_LOG
   debug_pdlp_data_.x_average_norm = linalg::vector_norm_squared(x_avg_);
 #endif
 
@@ -1018,7 +1019,7 @@ void PDLPSolver::computeAverageIterate(std::vector<double>& ax_avg,
   linalg::ATy(lp_, y_avg_, aty_avg);
   hipdlpTimerStop(kHipdlpClockAverageIterateMatrixTransposeMultiply);
 
-#ifdef PDLP_DEBUG_LOG
+#if PDLP_DEBUG_LOG
   debug_pdlp_data_.ax_average_norm = linalg::vector_norm_squared(ax_avg);
   debug_pdlp_data_.aty_average_norm = linalg::vector_norm_squared(aty_avg);
 #endif
@@ -1242,7 +1243,7 @@ bool PDLPSolver::checkConvergence(
       std::abs(duality_gap) / (1.0 + std::abs(primal_obj) + std::abs(dual_obj));
   results.relative_obj_gap = relative_obj_gap;
 
-#ifdef PDLP_DEBUG_LOG
+#if PDLP_DEBUG_LOG
   debugPdlpFeasOptLog(debug_pdlp_log_file_, iter, primal_obj, dual_obj,
                       relative_obj_gap,
                       primal_feasibility / (1.0 + unscaled_rhs_norm_),
@@ -1687,14 +1688,16 @@ void PDLPSolver::updateIteratesFixed() {
   CUDA_CHECK(cudaMemcpy(aty_next_gpu.data(), d_aty_next_,
                         a_num_cols_ * sizeof(double), cudaMemcpyDeviceToHost));
 
+#if PDLP_DEBUG_LOG
   bool x_match = vecDiff(x_next_gpu, x_next_, 1e-10, "UpdateIteratesFixed x");
   bool y_match = vecDiff(y_next_gpu, y_next_, 1e-10, "UpdateIteratesFixed y");
   bool ax_match =
       vecDiff(ax_next_gpu, Ax_next_, 1e-10, "UpdateIteratesFixed Ax");
   bool aty_match =
       vecDiff(aty_next_gpu, ATy_next_, 1e-10, "UpdateIteratesFixed ATy");
+#endif
 
-  #else
+#else
 
 #endif
 }
@@ -1922,9 +1925,11 @@ void PDLPSolver::hipdlpTimerStop(const HighsInt hipdlp_clock) {
   hipdlp_clocks_.timer_pointer_->stop(highs_timer_clock);
 }
 
+#if PDLP_DEBUG_LOG
 void PDLPSolver::closeDebugLog() {
   if (debug_pdlp_log_file_) fclose(debug_pdlp_log_file_);
 }
+#endif
 
 // =============================================================================
 // SECTION 5: GPU Part
@@ -2021,10 +2026,14 @@ void PDLPSolver::setupGpu() {
   CUDA_CHECK(cudaMalloc(&d_col_scale_, a_num_cols_ * sizeof(double)));
   CUDA_CHECK(cudaMalloc(&d_row_scale_, a_num_rows_ * sizeof(double)));
 
-  CUSPARSE_CHECK(cusparseCreateDnVec(&vec_x_desc_, a_num_cols_, d_x_current_, CUDA_R_64F));
-  CUSPARSE_CHECK(cusparseCreateDnVec(&vec_y_desc_, a_num_rows_, d_y_current_, CUDA_R_64F));
-  CUSPARSE_CHECK(cusparseCreateDnVec(&vec_ax_desc_, a_num_rows_, d_ax_current_, CUDA_R_64F));
-  CUSPARSE_CHECK(cusparseCreateDnVec(&vec_aty_desc_, a_num_cols_, d_aty_current_, CUDA_R_64F));
+  CUSPARSE_CHECK(
+      cusparseCreateDnVec(&vec_x_desc_, a_num_cols_, d_x_current_, CUDA_R_64F));
+  CUSPARSE_CHECK(
+      cusparseCreateDnVec(&vec_y_desc_, a_num_rows_, d_y_current_, CUDA_R_64F));
+  CUSPARSE_CHECK(cusparseCreateDnVec(&vec_ax_desc_, a_num_rows_, d_ax_current_,
+                                     CUDA_R_64F));
+  CUSPARSE_CHECK(cusparseCreateDnVec(&vec_aty_desc_, a_num_cols_,
+                                     d_aty_current_, CUDA_R_64F));
 
   CUDA_CHECK(cudaMemcpy(d_col_cost_, lp_.col_cost_.data(),
                         a_num_cols_ * sizeof(double), cudaMemcpyHostToDevice));
@@ -2178,30 +2187,30 @@ void PDLPSolver::linalgGpuAx(const double* d_x_in, double* d_ax_out) {
     CUDA_CHECK(cudaMalloc(&d_spmv_buffer_ax_, spmv_buffer_size_ax_));
   }
 
-  CUSPARSE_CHECK(cusparseSpMV(cusparse_handle_,
-                              CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha,
-                              mat_a_csr_, vec_x_desc_, &beta, vec_ax_desc_, CUDA_R_64F,
-                              CUSPARSE_SPMV_CSR_ALG2, d_spmv_buffer_ax_));
+  CUSPARSE_CHECK(
+      cusparseSpMV(cusparse_handle_, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha,
+                   mat_a_csr_, vec_x_desc_, &beta, vec_ax_desc_, CUDA_R_64F,
+                   CUSPARSE_SPMV_CSR_ALG2, d_spmv_buffer_ax_));
 }
 
 void PDLPSolver::linalgGpuATy(const double* d_y_in, double* d_aty_out) {
   // ATy = 1.0 * A^T * y + 0.0 * aty
   double alpha = 1.0;
   double beta = 0.0;
 
   CUSPARSE_CHECK(cusparseDnVecSetValues(vec_y_desc_, (void*)d_y_in));
-  CUSPARSE_CHECK(cusparseDnVecSetValues(vec_aty_desc_, (void*)d_aty_out));
+  CUSPARSE_CHECK(cusparseDnVecSetValues(vec_aty_desc_, (void*)d_aty_out));
   if (spmv_buffer_size_aty_ == 0) {
     CUSPARSE_CHECK(cusparseSpMV_bufferSize(
         cusparse_handle_, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha,
-        mat_a_T_csr_, vec_y_desc_, &beta, vec_aty_desc_, CUDA_R_64F, CUSPARSE_SPMV_CSR_ALG2,
-        &spmv_buffer_size_aty_));
+        mat_a_T_csr_, vec_y_desc_, &beta, vec_aty_desc_, CUDA_R_64F,
+        CUSPARSE_SPMV_CSR_ALG2, &spmv_buffer_size_aty_));
     CUDA_CHECK(cudaMalloc(&d_spmv_buffer_aty_, spmv_buffer_size_aty_));
   }
-  CUSPARSE_CHECK(cusparseSpMV(cusparse_handle_,
-                              CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha,
-                              mat_a_T_csr_, vec_y_desc_, &beta, vec_aty_desc_, CUDA_R_64F,
-                              CUSPARSE_SPMV_CSR_ALG2, d_spmv_buffer_aty_));
+  CUSPARSE_CHECK(
+      cusparseSpMV(cusparse_handle_, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha,
+                   mat_a_T_csr_, vec_y_desc_, &beta, vec_aty_desc_, CUDA_R_64F,
+                   CUSPARSE_SPMV_CSR_ALG2, d_spmv_buffer_aty_));
 }
 
 void PDLPSolver::launchKernelUpdateX(double primal_step) {
@@ -2260,7 +2269,7 @@ bool PDLPSolver::checkConvergenceGpu(const int iter, const double* d_x,
   results.relative_obj_gap =
       std::abs(duality_gap) / (1.0 + std::abs(primal_obj) + std::abs(dual_obj));
 
-#ifdef PDLP_DEBUG_LOG
+#if PDLP_DEBUG_LOG
   debugPdlpFeasOptLog(debug_pdlp_log_file_, iter, primal_obj, dual_obj,
                       results.relative_obj_gap,
                       results.primal_feasibility / (1.0 + unscaled_rhs_norm_),
@@ -2326,7 +2335,7 @@ void PDLPSolver::computeAverageIterateGpu() {
   linalgGpuAx(d_x_avg_, d_ax_avg_);
   linalgGpuATy(d_y_avg_, d_aty_avg_);
 
-#ifdef PDLP_DEBUG_LOG
+#if PDLP_DEBUG_LOG
   // copy x_avg to host
   CUDA_CHECK(cudaMemcpy(x_avg_.data(), d_x_avg_, a_num_cols_ * sizeof(double),
                         cudaMemcpyDeviceToHost));
@@ -2395,4 +2404,4 @@ double PDLPSolver::computeDiffNormCuBLAS(const double* d_a, const double* d_b,
 
   return norm;
 }
-  #endif
+#endif

highs/pdlp/hipdlp/pdhg.hpp

Lines changed: 2 additions & 2 deletions
@@ -55,13 +55,13 @@ class PDLPSolver {
   int getnCol() const { return lp_.num_col_; }
   int getnRow() const { return lp_.num_row_; }
 
-#ifdef PDLP_DEBUG_LOG
+#if PDLP_DEBUG_LOG
   // --- Debugging ---
   FILE* debug_pdlp_log_file_ = nullptr;
   DebugPdlpData debug_pdlp_data_;
+  void closeDebugLog();
 #endif
   void reportHipdlpTimer();
-  void closeDebugLog();
 
  private:
   // --- Core Algorithm Logic ---
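Moving the closeDebugLog() declaration inside the guard means the function exists only when PDLP_DEBUG_LOG is nonzero, so every call site needs the same guard (as the HiPdlpWrapper.cpp hunk above adds); an unguarded call would fail to compile in a (0) build. A minimal sketch of the pattern, with hypothetical names (Solver, closeLog) rather than the HiGHS declarations:

#include <cstdio>

#define DEBUG_LOG (1)  // flip to (0): the member, function and calls all vanish

class Solver {
 public:
  void run() { /* solve... */ }
#if DEBUG_LOG
  FILE* log_file_ = nullptr;  // exists only when DEBUG_LOG is nonzero
  void closeLog() {
    if (log_file_) fclose(log_file_);
  }
#endif
};

int main() {
  Solver s;
#if DEBUG_LOG
  s.log_file_ = fopen("debug.log", "w");
#endif
  s.run();
#if DEBUG_LOG
  s.closeLog();  // guarded to match the guarded declaration
#endif
  return 0;
}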

highs/pdlp/hipdlp/pdlp_gpu_debug.hpp

Lines changed: 5 additions & 1 deletion
@@ -6,6 +6,8 @@
 #include <string>
 #include <vector>
 
+#if PDLP_DEBUG_LOG
+
 bool vecDiff(const std::vector<double>& x1, const std::vector<double>& x2,
              double tol, const std::string& func_name = "vecDiff") {
   if (x1.size() != x2.size()) {
@@ -55,4 +57,6 @@ bool vecDiff(const std::vector<double>& x1, const std::vector<double>& x2,
   return true;
 }
 
-#endif  // PDLP_GPU_DEBUG_HPP__
+#endif
+
+#endif  // PDLP_GPU_DEBUG_HPP__
