@@ -505,6 +505,7 @@ bool HighsCutGeneration::separateLiftedMixedIntegerCover() {
 // Lifted flow cover inequalities for mixed 0-1 integer programs.
 // Mathematical Programming, 85(3), 439-467.
 bool HighsCutGeneration::separateLiftedFlowCover() {
+  const double vubEpsilon = 1e-8;
   // Compute the lifting data (ld) first
   struct LiftingData {
     std::vector<double> m;
@@ -525,7 +526,7 @@ bool HighsCutGeneration::separateLiftedFlowCover() {
     // col is in N- \ C-
     if (snfr.flowCoverStatus[i] == -1 && snfr.coef[i] == -1) {
       assert(snfr.vubCoef[i] >= 0);
-      if (snfr.vubCoef[i] > snfr.lambda + 1e-8) {
+      if (snfr.vubCoef[i] > snfr.lambda + vubEpsilon) {
         ld.m[ld.r] = snfr.vubCoef[i];
         ++ld.r;
       } else {
@@ -538,7 +539,7 @@ bool HighsCutGeneration::separateLiftedFlowCover() {
     } else if (snfr.flowCoverStatus[i] == 1 && snfr.coef[i] == 1) {
       // col is in C+
       assert(snfr.vubCoef[i] > 0);
-      if (snfr.vubCoef[i] > snfr.lambda + 1e-8) {
+      if (snfr.vubCoef[i] > snfr.lambda + vubEpsilon) {
         ld.m[ld.r] = snfr.vubCoef[i];
         ++ld.r;
         ld.mp = std::min(ld.mp, snfr.vubCoef[i]);
@@ -582,11 +583,11 @@ bool HighsCutGeneration::separateLiftedFlowCover() {
     double vubcoefpluslambda = vubcoef + snfr.lambda;
 
     HighsInt i = 0;
-    while (i < ld.r && vubcoefpluslambda >= ld.M[i + 1] + 1e-8) {
+    while (i < ld.r && vubcoefpluslambda >= ld.M[i + 1] + vubEpsilon) {
       ++i;
     }
 
-    if (vubcoef <= ld.M[i] - 1e-8) {
+    if (vubcoef <= ld.M[i] - vubEpsilon) {
       assert(ld.M[i] < vubcoefpluslambda);
       alpha = 1;
       beta = -i * HighsCDouble(snfr.lambda) + ld.M[i];
@@ -599,25 +600,24 @@ bool HighsCutGeneration::separateLiftedFlowCover() {
 
   auto evaluateLiftingFunction = [&](double vubcoef) {
     HighsInt i = 0;
-    while (i < ld.r && vubcoef + snfr.lambda >= ld.M[i + 1] + 1e-8) {
+    while (i < ld.r && vubcoef + snfr.lambda >= ld.M[i + 1] + vubEpsilon) {
       ++i;
     }
     if (i < ld.t) {
       HighsCDouble liftedcoef = i * HighsCDouble(snfr.lambda);
-      if (ld.M[i] < vubcoef + 1e-8) {
+      if (ld.M[i] < vubcoef + vubEpsilon) {
         return static_cast<double>(liftedcoef);
       }
-      assert(i > 0 && ld.M[i] < vubcoef + snfr.lambda - 1e-8 &&
+      assert(i > 0 && ld.M[i] < vubcoef + snfr.lambda - vubEpsilon &&
              vubcoef <= ld.M[i]);
       liftedcoef += vubcoef;
       liftedcoef -= ld.M[i];
       return static_cast<double>(liftedcoef);
-    }
-    if (i < ld.r) {
+    } else if (i < ld.r) {
       HighsCDouble tmp = HighsCDouble(ld.m[i]) - ld.mp - ld.ml + snfr.lambda;
       if (tmp < 0) tmp = 0;
       tmp += ld.M[i] + ld.ml;
-      if (tmp < vubcoef + snfr.lambda - 1e-8) {
+      if (tmp < vubcoef + snfr.lambda - vubEpsilon) {
         return static_cast<double>(i * HighsCDouble(snfr.lambda));
       }
       assert(ld.M[i] <= vubcoef + snfr.lambda + feastol &&
@@ -630,7 +630,7 @@ bool HighsCutGeneration::separateLiftedFlowCover() {
       return static_cast<double>(i * HighsCDouble(snfr.lambda) + vubcoef -
                                  ld.M[i]);
     }
-    assert(i == ld.r && ld.M[i] <= vubcoef + snfr.lambda + 1e-8);
+    assert(i == ld.r && ld.M[i] <= vubcoef + snfr.lambda + vubEpsilon);
     return static_cast<double>(ld.r * HighsCDouble(snfr.lambda) + vubcoef -
                                ld.M[ld.r]);
   };
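For intuition, the following is a minimal standalone sketch (not part of the commit) of the breakpoint search that both the alpha/beta branch above and evaluateLiftingFunction perform: locate the interval of the piecewise lifting function, defined by the breakpoints ld.M, into which vubcoef + lambda falls, using the same vubEpsilon tolerance introduced at the top of the function. The names M, r, lambda, and vubEpsilon mirror the diff; everything else is hypothetical.

// Hypothetical sketch, not the HiGHS implementation: find the lifting-function
// interval for a given variable-upper-bound coefficient.
#include <cstddef>
#include <vector>

std::size_t liftingInterval(const std::vector<double>& M, std::size_t r,
                            double vubcoef, double lambda,
                            double vubEpsilon = 1e-8) {
  // Walk the breakpoints M[1] <= M[2] <= ... <= M[r] until vubcoef + lambda
  // no longer clears the next breakpoint (within the tolerance).
  std::size_t i = 0;
  while (i < r && vubcoef + lambda >= M[i + 1] + vubEpsilon) ++i;
  return i;
}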
@@ -640,37 +640,37 @@ bool HighsCutGeneration::separateLiftedFlowCover() {
   // L-- = N- \ (L- union C-)
   HighsCDouble tmpRhs = ld.d1;
   rowlen = 0;
+
+  auto addCutNonZero = [&](const double& coef, const HighsInt& index,
+                           const bool complement) -> void {
+    vals[rowlen] = coef;
+    inds[rowlen] = index;
+    if (complement) {
+      tmpRhs -= vals[rowlen];
+      vals[rowlen] = -vals[rowlen];
+    }
+    rowlen++;
+  };
+
   for (HighsInt i = 0; i != snfr.numNnzs; ++i) {
     // col is in N- \ C-
     if (snfr.flowCoverStatus[i] == -1 && snfr.coef[i] == -1) {
-      if (snfr.vubCoef[i] > snfr.lambda + 1e-8) {
+      if (snfr.vubCoef[i] > snfr.lambda + vubEpsilon) {
         if (snfr.origBinCols[i] != -1) {
           // col is in L-
-          vals[rowlen] = -snfr.lambda;
-          inds[rowlen] = snfr.origBinCols[i];
-          if (snfr.complementation[i]) {
-            tmpRhs -= vals[rowlen];
-            vals[rowlen] = -vals[rowlen];
-          }
-          rowlen++;
+          addCutNonZero(-snfr.lambda, snfr.origBinCols[i],
+                        snfr.complementation[i]);
         } else {
           tmpRhs += snfr.lambda;
         }
       } else {
         // col is in L--
         if (snfr.origContCols[i] != -1 && snfr.aggrContCoef[i] != 0) {
-          vals[rowlen] = -snfr.aggrContCoef[i];
-          inds[rowlen] = snfr.origContCols[i];
-          rowlen++;
+          addCutNonZero(-snfr.aggrContCoef[i], snfr.origContCols[i], false);
         }
         if (snfr.origBinCols[i] != -1 && snfr.aggrBinCoef[i] != 0) {
-          vals[rowlen] = -snfr.aggrBinCoef[i];
-          inds[rowlen] = snfr.origBinCols[i];
-          if (snfr.complementation[i]) {
-            tmpRhs -= vals[rowlen];
-            vals[rowlen] = -vals[rowlen];
-          }
-          rowlen++;
+          addCutNonZero(-snfr.aggrBinCoef[i], snfr.origBinCols[i],
+                        snfr.complementation[i]);
         }
         tmpRhs += snfr.aggrConstant[i];
       }
@@ -679,13 +679,8 @@ bool HighsCutGeneration::separateLiftedFlowCover() {
       if (snfr.origBinCols[i] != -1) {
         double liftedbincoef = evaluateLiftingFunction(snfr.vubCoef[i]);
         if (liftedbincoef != 0) {
-          vals[rowlen] = -liftedbincoef;
-          inds[rowlen] = snfr.origBinCols[i];
-          if (snfr.complementation[i]) {
-            tmpRhs -= vals[rowlen];
-            vals[rowlen] = -vals[rowlen];
-          }
-          rowlen++;
+          addCutNonZero(-liftedbincoef, snfr.origBinCols[i],
+                        snfr.complementation[i]);
           tmpRhs -= liftedbincoef;
         }
       }
@@ -697,20 +692,13 @@ bool HighsCutGeneration::separateLiftedFlowCover() {
       if (alphabeta.first == 1) {
         assert(alphabeta.second > 0);
         if (snfr.origContCols[i] != -1 && snfr.aggrContCoef[i] != 0) {
-          vals[rowlen] = snfr.aggrContCoef[i];
-          inds[rowlen] = snfr.origContCols[i];
-          rowlen++;
+          addCutNonZero(snfr.aggrContCoef[i], snfr.origContCols[i], false);
         }
         HighsCDouble binvarcoef = snfr.aggrBinCoef[i] - alphabeta.second;
         if (snfr.origBinCols[i] != -1) {
           if (binvarcoef != 0) {
-            vals[rowlen] = static_cast<double>(binvarcoef);
-            inds[rowlen] = snfr.origBinCols[i];
-            if (snfr.complementation[i]) {
-              tmpRhs -= binvarcoef;
-              vals[rowlen] = -vals[rowlen];
-            }
-            rowlen++;
+            addCutNonZero(static_cast<double>(binvarcoef), snfr.origBinCols[i],
+                          snfr.complementation[i]);
           }
         } else {
           tmpRhs -= binvarcoef;
@@ -722,24 +710,18 @@ bool HighsCutGeneration::separateLiftedFlowCover() {
       assert(snfr.flowCoverStatus[i] == 1 && snfr.coef[i] == 1);
       HighsCDouble bincoef = snfr.aggrBinCoef[i];
       HighsCDouble constant = snfr.aggrConstant[i];
-      if (snfr.origBinCols[i] != -1 && snfr.vubCoef[i] >= snfr.lambda + 1e-8) {
+      if (snfr.origBinCols[i] != -1 &&
+          snfr.vubCoef[i] >= snfr.lambda + vubEpsilon) {
         // col is in C++
         constant += HighsCDouble(snfr.vubCoef[i]) - snfr.lambda;
         bincoef -= HighsCDouble(snfr.vubCoef[i]) - snfr.lambda;
       }
       if (snfr.origBinCols[i] != -1 && bincoef != 0) {
-        vals[rowlen] = static_cast<double>(bincoef);
-        inds[rowlen] = snfr.origBinCols[i];
-        if (snfr.complementation[i]) {
-          tmpRhs -= bincoef;
-          vals[rowlen] = -vals[rowlen];
-        }
-        rowlen++;
+        addCutNonZero(static_cast<double>(bincoef), snfr.origBinCols[i],
+                      snfr.complementation[i]);
       }
       if (snfr.origContCols[i] != -1 && snfr.aggrContCoef[i] != 0) {
-        vals[rowlen] = snfr.aggrContCoef[i];
-        inds[rowlen] = snfr.origContCols[i];
-        rowlen++;
+        addCutNonZero(snfr.aggrContCoef[i], snfr.origContCols[i], false);
       }
       tmpRhs -= constant;
     }
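As a self-contained illustration of the pattern the addCutNonZero lambda centralizes: when a binary column is stored complemented (x' = 1 - x), a cut term a*x' in a "<=" cut equals a - a*x, so the constant a moves into the right-hand side and the coefficient flips sign. The sketch below is hypothetical; only the complementation rule itself is taken from the diff, and vals, inds, and rhs stand in for the vals, inds, and tmpRhs buffers used there.

#include <vector>

// Hypothetical cut buffer, not the HiGHS data structure.
struct CutBuffer {
  std::vector<double> vals;
  std::vector<int> inds;
  double rhs = 0.0;

  void addCutNonZero(double coef, int index, bool complement) {
    if (complement) {
      // a * (1 - x) = a - a * x: shift the constant a into the rhs and
      // negate the coefficient, as the lambda in the diff does.
      rhs -= coef;
      coef = -coef;
    }
    vals.push_back(coef);
    inds.push_back(index);
  }
};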