@@ -612,7 +612,6 @@ bool CodeGenPrepare::_run(Function &F) {
       // bypassSlowDivision may create new BBs, but we don't want to reapply the
       // optimization to those blocks.
       BasicBlock *Next = BB->getNextNode();
-      // F.hasOptSize is already checked in the outer if statement.
       if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
         EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
       BB = Next;
@@ -2608,7 +2607,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
   // cold block. This interacts with our handling for loads and stores to
   // ensure that we can fold all uses of a potential addressing computation
   // into their uses. TODO: generalize this to work over profiling data
-  if (CI->hasFnAttr(Attribute::Cold) && !OptSize &&
+  if (CI->hasFnAttr(Attribute::Cold) &&
       !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
     for (auto &Arg : CI->args()) {
       if (!Arg->getType()->isPointerTy())
@@ -5505,9 +5504,7 @@ static bool FindAllMemoryUses(
     if (CI->hasFnAttr(Attribute::Cold)) {
       // If this is a cold call, we can sink the addressing calculation into
       // the cold path. See optimizeCallInst
-      bool OptForSize =
-          OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
-      if (!OptForSize)
+      if (!llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI))
         continue;
     }
 
@@ -7402,7 +7399,7 @@ bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
     SelectKind = TargetLowering::ScalarValSelect;
 
   if (TLI->isSelectSupported(SelectKind) &&
-      (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize ||
+      (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) ||
        llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
     return false;
 
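All four hunks apply the same simplification: the explicit `OptSize` term next to a `llvm::shouldOptimizeForSize` call is dropped (in the first hunk, together with the stale comment about the outer if statement). Below is a minimal standalone sketch of the invariant this relies on, assuming the helper already folds the function-level `hasOptSize()` attribute into its result; the types and the `profileSaysCold` helper are stand-ins for this sketch only, not the real LLVM classes.

```cpp
#include <cassert>

// Stand-in types for this sketch only; the real llvm::Function and
// llvm::BasicBlock are far richer.
struct Function {
  bool OptSize = false;
  bool hasOptSize() const { return OptSize; }
};

struct BasicBlock {
  const Function *F = nullptr;
  const Function *getParent() const { return F; }
};

// Hypothetical stand-in for the profile-guided (PGSO) part of the query:
// true when profile data marks the block as cold.
static bool profileSaysCold(const BasicBlock &) { return false; }

// Models llvm::shouldOptimizeForSize(BB, PSI, BFI) under the assumption
// above: true if the enclosing function is optsize OR the profile marks
// the block cold.
static bool shouldOptimizeForSize(const BasicBlock &BB) {
  return BB.getParent()->hasOptSize() || profileSaysCold(BB);
}

int main() {
  Function F;
  BasicBlock BB{&F};
  for (bool OptSize : {false, true}) {
    F.OptSize = OptSize; // CodeGenPrepare's OptSize mirrors F.hasOptSize()
    // Old guard: OptSize || shouldOptimizeForSize(BB)
    // New guard: shouldOptimizeForSize(BB)
    // They agree for every input once hasOptSize() is folded into the
    // helper, which is why each hunk can drop the explicit OptSize term.
    assert((OptSize || shouldOptimizeForSize(BB)) ==
           shouldOptimizeForSize(BB));
  }
  return 0;
}
```

The negated `!OptSize &&` form in the second hunk is the same identity after De Morgan: `!(OptSize || x) == !OptSize && !x`, so it likewise collapses to the bare `!llvm::shouldOptimizeForSize(...)` test.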