Commit c40f6c6

PR155818 (llvm#4006)
2 parents 4bb890f + ebde50c commit c40f6c6

8 files changed (+210, -38 lines)

clang/include/clang/Driver/Options.td

Lines changed: 20 additions & 4 deletions
@@ -3936,10 +3936,26 @@ let Visibility = [ClangOption, CC1Option, FC1Option, FlangOption] in {
 let Group = f_Group in {

 def fopenmp_target_debug_EQ : Joined<["-"], "fopenmp-target-debug=">;
-def fopenmp_assume_teams_oversubscription : Flag<["-"], "fopenmp-assume-teams-oversubscription">;
-def fopenmp_assume_threads_oversubscription : Flag<["-"], "fopenmp-assume-threads-oversubscription">;
-def fno_openmp_assume_teams_oversubscription : Flag<["-"], "fno-openmp-assume-teams-oversubscription">;
-def fno_openmp_assume_threads_oversubscription : Flag<["-"], "fno-openmp-assume-threads-oversubscription">;
+def fopenmp_assume_teams_oversubscription : Flag<["-"], "fopenmp-assume-teams-oversubscription">,
+  HelpText<"Allow the optimizer to discretely increase the number of "
+           "teams. May cause environment variables that set "
+           "the number of teams to be ignored. The combination of "
+           "-fopenmp-assume-teams-oversubscription "
+           "and -fopenmp-assume-threads-oversubscription "
+           "may allow the conversion of loops into sequential code by "
+           "ensuring that each team/thread executes at most one iteration.">;
+def fopenmp_assume_threads_oversubscription : Flag<["-"], "fopenmp-assume-threads-oversubscription">,
+  HelpText<"Allow the optimizer to discretely increase the number of "
+           "threads. May cause environment variables that set "
+           "the number of threads to be ignored. The combination of "
+           "-fopenmp-assume-teams-oversubscription "
+           "and -fopenmp-assume-threads-oversubscription "
+           "may allow the conversion of loops into sequential code by "
+           "ensuring that each team/thread executes at most one iteration.">;
+def fno_openmp_assume_teams_oversubscription : Flag<["-"], "fno-openmp-assume-teams-oversubscription">,
+  HelpText<"Do not assume teams oversubscription.">;
+def fno_openmp_assume_threads_oversubscription : Flag<["-"], "fno-openmp-assume-threads-oversubscription">,
+  HelpText<"Do not assume threads oversubscription.">;
 def fno_openmp_assume_no_thread_state : Flag<["-"], "fno-openmp-assume-no-thread-state">, Group<f_Group>,
   Flags<[NoArgumentUnused, HelpHidden]>, Visibility<[ClangOption, CC1Option]>,
   HelpText<"Assert that a thread in a parallel region may modify an ICV">,

llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h

Lines changed: 6 additions & 2 deletions
@@ -1080,11 +1080,13 @@ class OpenMPIRBuilder {
   /// preheader of the loop.
   /// \param LoopType Information about type of loop worksharing.
   ///                 It corresponds to type of loop workshare OpenMP pragma.
+  /// \param NoLoop If true, no-loop code is generated.
   ///
   /// \returns Point where to insert code after the workshare construct.
   InsertPointTy applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI,
                                          InsertPointTy AllocaIP,
-                                         omp::WorksharingLoopType LoopType);
+                                         omp::WorksharingLoopType LoopType,
+                                         bool NoLoop);

   /// Modifies the canonical loop to be a statically-scheduled workshare loop.
   ///
@@ -1204,6 +1206,7 @@ class OpenMPIRBuilder {
   /// present.
   /// \param LoopType Information about type of loop worksharing.
   ///                 It corresponds to type of loop workshare OpenMP pragma.
+  /// \param NoLoop If true, no-loop code is generated.
   ///
   /// \returns Point where to insert code after the workshare construct.
   LLVM_ABI InsertPointOrErrorTy applyWorkshareLoop(
@@ -1214,7 +1217,8 @@ class OpenMPIRBuilder {
       bool HasMonotonicModifier = false, bool HasNonmonotonicModifier = false,
       bool HasOrderedClause = false,
       omp::WorksharingLoopType LoopType =
-          omp::WorksharingLoopType::ForStaticLoop);
+          omp::WorksharingLoopType::ForStaticLoop,
+      bool NoLoop = false);

   /// Tile a loop nest.
   ///
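A minimal sketch of how a caller might request no-loop lowering through the extended interface (the builder, debug location, canonical loop info, alloca insertion point, and schedule kind are assumed to come from the usual OpenMPIRBuilder lowering flow; only the trailing NoLoop argument is new, and the values shown are illustrative):

    // Assumes OMPBuilder, DL, CLI, AllocaIP and SchedKind were set up by the
    // surrounding lowering code.
    llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
        OMPBuilder.applyWorkshareLoop(
            DL, CLI, AllocaIP, /*NeedsBarrier=*/true, SchedKind,
            /*ChunkSize=*/nullptr, /*HasSimdModifier=*/false,
            /*HasMonotonicModifier=*/false, /*HasNonmonotonicModifier=*/false,
            /*HasOrderedClause=*/false,
            llvm::omp::WorksharingLoopType::DistributeForStaticLoop,
            /*NoLoop=*/true);

On a target device this forwards to applyWorkshareLoopTarget, which threads NoLoop through to the outlining callback and ultimately to the generated device runtime call, as the OMPIRBuilder.cpp changes below show.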

llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp

Lines changed: 12 additions & 11 deletions
@@ -5010,7 +5010,7 @@ static void createTargetLoopWorkshareCall(OpenMPIRBuilder *OMPBuilder,
                                           WorksharingLoopType LoopType,
                                           BasicBlock *InsertBlock, Value *Ident,
                                           Value *LoopBodyArg, Value *TripCount,
-                                          Function &LoopBodyFn) {
+                                          Function &LoopBodyFn, bool NoLoop) {
   Type *TripCountTy = TripCount->getType();
   Module &M = OMPBuilder->M;
   IRBuilder<> &Builder = OMPBuilder->Builder;
@@ -5038,16 +5038,18 @@ static void createTargetLoopWorkshareCall(OpenMPIRBuilder *OMPBuilder,
   RealArgs.push_back(ConstantInt::get(TripCountTy, 0));
   if (LoopType == WorksharingLoopType::DistributeForStaticLoop) {
     RealArgs.push_back(ConstantInt::get(TripCountTy, 0));
+    RealArgs.push_back(ConstantInt::get(Builder.getInt8Ty(), NoLoop));
+  } else {
+    RealArgs.push_back(ConstantInt::get(Builder.getInt8Ty(), 0));
   }
-  RealArgs.push_back(ConstantInt::get(Builder.getInt8Ty(), 0));

   Builder.CreateCall(RTLFn, RealArgs);
 }

 static void workshareLoopTargetCallback(
     OpenMPIRBuilder *OMPIRBuilder, CanonicalLoopInfo *CLI, Value *Ident,
     Function &OutlinedFn, const SmallVector<Instruction *, 4> &ToBeDeleted,
-    WorksharingLoopType LoopType) {
+    WorksharingLoopType LoopType, bool NoLoop) {
   IRBuilder<> &Builder = OMPIRBuilder->Builder;
   BasicBlock *Preheader = CLI->getPreheader();
   Value *TripCount = CLI->getTripCount();
@@ -5094,17 +5096,16 @@ static void workshareLoopTargetCallback(
   OutlinedFnCallInstruction->eraseFromParent();

   createTargetLoopWorkshareCall(OMPIRBuilder, LoopType, Preheader, Ident,
-                                LoopBodyArg, TripCount, OutlinedFn);
+                                LoopBodyArg, TripCount, OutlinedFn, NoLoop);

   for (auto &ToBeDeletedItem : ToBeDeleted)
     ToBeDeletedItem->eraseFromParent();
   CLI->invalidate();
 }

-OpenMPIRBuilder::InsertPointTy
-OpenMPIRBuilder::applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI,
-                                          InsertPointTy AllocaIP,
-                                          WorksharingLoopType LoopType) {
+OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyWorkshareLoopTarget(
+    DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
+    WorksharingLoopType LoopType, bool NoLoop) {
   uint32_t SrcLocStrSize;
   Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
   Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
@@ -5187,7 +5188,7 @@ OpenMPIRBuilder::applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI,
   OI.PostOutlineCB = [=, ToBeDeletedVec =
                              std::move(ToBeDeleted)](Function &OutlinedFn) {
     workshareLoopTargetCallback(this, CLI, Ident, OutlinedFn, ToBeDeletedVec,
-                                LoopType);
+                                LoopType, NoLoop);
   };
   addOutlineInfo(std::move(OI));
   return CLI->getAfterIP();
@@ -5198,9 +5199,9 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::applyWorkshareLoop(
     bool NeedsBarrier, omp::ScheduleKind SchedKind, Value *ChunkSize,
     bool HasSimdModifier, bool HasMonotonicModifier,
     bool HasNonmonotonicModifier, bool HasOrderedClause,
-    WorksharingLoopType LoopType) {
+    WorksharingLoopType LoopType, bool NoLoop) {
   if (Config.isTargetDevice())
-    return applyWorkshareLoopTarget(DL, CLI, AllocaIP, LoopType);
+    return applyWorkshareLoopTarget(DL, CLI, AllocaIP, LoopType, NoLoop);
   OMPScheduleType EffectiveScheduleType = computeOpenMPScheduleType(
       SchedKind, ChunkSize, HasSimdModifier, HasMonotonicModifier,
       HasNonmonotonicModifier, HasOrderedClause);
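The net effect of these changes is that the trailing i8 argument of the generated device runtime call now carries the NoLoop value for distribute-for-static loops, while remaining hard-coded to 0 for the other worksharing loop types.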

mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td

Lines changed: 12 additions & 2 deletions
@@ -232,14 +232,24 @@ def TargetRegionFlagsNone : I32BitEnumAttrCaseNone<"none">;
 def TargetRegionFlagsGeneric : I32BitEnumAttrCaseBit<"generic", 0>;
 def TargetRegionFlagsSpmd : I32BitEnumAttrCaseBit<"spmd", 1>;
 def TargetRegionFlagsTripCount : I32BitEnumAttrCaseBit<"trip_count", 2>;
+def TargetRegionFlagsNoLoop : I32BitEnumAttrCaseBit<"no_loop", 3>;

 def TargetRegionFlags : OpenMP_BitEnumAttr<
     "TargetRegionFlags",
-    "target region property flags", [
+    "These flags describe properties of the target kernel. "
+    "TargetRegionFlagsGeneric - denotes generic kernel. "
+    "TargetRegionFlagsSpmd - denotes SPMD kernel. "
+    "TargetRegionFlagsNoLoop - denotes kernel where "
+    "num_teams * num_threads >= loop_trip_count. It allows the conversion "
+    "of loops into sequential code by ensuring that each team/thread "
+    "executes at most one iteration. "
+    "TargetRegionFlagsTripCount - checks if the loop trip count should be "
+    "calculated.", [
      TargetRegionFlagsNone,
      TargetRegionFlagsGeneric,
      TargetRegionFlagsSpmd,
-     TargetRegionFlagsTripCount
+     TargetRegionFlagsTripCount,
+     TargetRegionFlagsNoLoop
   ]>;

//===----------------------------------------------------------------------===//
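To make the no_loop property concrete: when num_teams * num_threads is known to cover the whole iteration space, the device-side worksharing loop can collapse into a single guarded iteration per thread. A conceptual sketch in C++-style pseudocode (not the actual DeviceRTL implementation; global_thread_id() and total_threads() are hypothetical helpers):

    // Regular SPMD worksharing: every thread strides through the iteration space.
    void spmd_workshare(int trip_count, void (*body)(int)) {
      for (int i = global_thread_id(); i < trip_count; i += total_threads())
        body(i);
    }

    // No-loop mode: the oversubscription assumption guarantees that
    // total_threads() >= trip_count, so each thread runs at most one iteration.
    void no_loop_workshare(int trip_count, void (*body)(int)) {
      int i = global_thread_id();
      if (i < trip_count)
        body(i);
    }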

mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp

Lines changed: 36 additions & 5 deletions
@@ -2116,6 +2116,31 @@ Operation *TargetOp::getInnermostCapturedOmpOp() {
   });
 }

+/// Check if we can promote SPMD kernel to No-Loop kernel.
+static bool canPromoteToNoLoop(Operation *capturedOp, TeamsOp teamsOp,
+                               WsloopOp *wsLoopOp) {
+  // num_teams clause can break no-loop teams/threads assumption.
+  if (teamsOp.getNumTeamsUpper())
+    return false;
+
+  // Reduction kernels are slower in no-loop mode.
+  if (teamsOp.getNumReductionVars())
+    return false;
+  if (wsLoopOp->getNumReductionVars())
+    return false;
+
+  // Check if the user allows the promotion of kernels to no-loop mode.
+  OffloadModuleInterface offloadMod =
+      capturedOp->getParentOfType<omp::OffloadModuleInterface>();
+  if (!offloadMod)
+    return false;
+  auto ompFlags = offloadMod.getFlags();
+  if (!ompFlags)
+    return false;
+  return ompFlags.getAssumeTeamsOversubscription() &&
+         ompFlags.getAssumeThreadsOversubscription();
+}
+
 TargetRegionFlags TargetOp::getKernelExecFlags(Operation *capturedOp) {
   // A non-null captured op is only valid if it resides inside of a TargetOp
   // and is the result of calling getInnermostCapturedOmpOp() on it.
@@ -2144,7 +2169,8 @@ TargetRegionFlags TargetOp::getKernelExecFlags(Operation *capturedOp) {

   // Detect target-teams-distribute-parallel-wsloop[-simd].
   if (numWrappers == 2) {
-    if (!isa<WsloopOp>(innermostWrapper))
+    WsloopOp *wsloopOp = dyn_cast<WsloopOp>(innermostWrapper);
+    if (!wsloopOp)
       return TargetRegionFlags::generic;

     innermostWrapper = std::next(innermostWrapper);
@@ -2155,12 +2181,17 @@ TargetRegionFlags TargetOp::getKernelExecFlags(Operation *capturedOp) {
     if (!isa_and_present<ParallelOp>(parallelOp))
       return TargetRegionFlags::generic;

-    Operation *teamsOp = parallelOp->getParentOp();
-    if (!isa_and_present<TeamsOp>(teamsOp))
+    TeamsOp teamsOp = dyn_cast<TeamsOp>(parallelOp->getParentOp());
+    if (!teamsOp)
       return TargetRegionFlags::generic;

-    if (teamsOp->getParentOp() == targetOp.getOperation())
-      return TargetRegionFlags::spmd | TargetRegionFlags::trip_count;
+    if (teamsOp->getParentOp() == targetOp.getOperation()) {
+      TargetRegionFlags result =
+          TargetRegionFlags::spmd | TargetRegionFlags::trip_count;
+      if (canPromoteToNoLoop(capturedOp, teamsOp, wsloopOp))
+        result = result | TargetRegionFlags::no_loop;
+      return result;
+    }
   }
   // Detect target-teams-distribute[-simd] and target-teams-loop.
   else if (isa<DistributeOp, LoopOp>(innermostWrapper)) {
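In short, a target-teams-distribute-parallel-wsloop kernel is promoted to no-loop only when the teams construct has no num_teams upper bound, neither the teams construct nor the wsloop carries a reduction, and the enclosing module was compiled with both -fopenmp-assume-teams-oversubscription and -fopenmp-assume-threads-oversubscription.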

mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp

Lines changed: 28 additions & 1 deletion
@@ -2593,13 +2593,34 @@ convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder,
   }

   builder.SetInsertPoint(*regionBlock, (*regionBlock)->begin());
+
+  // Check if we can generate no-loop kernel
+  bool noLoopMode = false;
+  omp::TargetOp targetOp = wsloopOp->getParentOfType<mlir::omp::TargetOp>();
+  if (targetOp) {
+    Operation *targetCapturedOp = targetOp.getInnermostCapturedOmpOp();
+    // We need this check because, without it, noLoopMode would be set to true
+    // for every omp.wsloop nested inside a no-loop SPMD target region, even if
+    // that loop is not the top-level SPMD one.
+    if (loopOp == targetCapturedOp) {
+      omp::TargetRegionFlags kernelFlags =
+          targetOp.getKernelExecFlags(targetCapturedOp);
+      if (omp::bitEnumContainsAll(kernelFlags,
+                                  omp::TargetRegionFlags::spmd |
+                                      omp::TargetRegionFlags::no_loop) &&
+          !omp::bitEnumContainsAny(kernelFlags,
+                                   omp::TargetRegionFlags::generic))
+        noLoopMode = true;
+    }
+  }
+
   llvm::OpenMPIRBuilder::InsertPointOrErrorTy wsloopIP =
       ompBuilder->applyWorkshareLoop(
           ompLoc.DL, loopInfo, allocaIP, loopNeedsBarrier,
           convertToScheduleKind(schedule), chunk, isSimd,
           scheduleMod == omp::ScheduleModifier::monotonic,
           scheduleMod == omp::ScheduleModifier::nonmonotonic, isOrdered,
-          workshareLoopType);
+          workshareLoopType, noLoopMode);

   if (failed(handleError(wsloopIP, opInst)))
     return failure();
@@ -5683,6 +5704,12 @@ initTargetDefaultAttrs(omp::TargetOp targetOp, Operation *capturedOp,
               ? llvm::omp::OMP_TGT_EXEC_MODE_GENERIC_SPMD
               : llvm::omp::OMP_TGT_EXEC_MODE_GENERIC
           : llvm::omp::OMP_TGT_EXEC_MODE_SPMD;
+  if (omp::bitEnumContainsAll(kernelFlags,
+                              omp::TargetRegionFlags::spmd |
+                                  omp::TargetRegionFlags::no_loop) &&
+      !omp::bitEnumContainsAny(kernelFlags, omp::TargetRegionFlags::generic))
+    attrs.ExecFlags = llvm::omp::OMP_TGT_EXEC_MODE_SPMD_NO_LOOP;
+
   attrs.MinTeams = minTeamsVal;
   attrs.MaxTeams.front() = maxTeamsVal;
   attrs.MinThreads = 1;
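At runtime, kernels lowered with OMP_TGT_EXEC_MODE_SPMD_NO_LOOP are reported by the offload plugin as launching in SPMD-No-Loop mode when LIBOMPTARGET_INFO output is enabled, which is exactly what the new Fortran offloading test below checks for.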
Lines changed: 96 additions & 0 deletions
@@ -0,0 +1,96 @@
! REQUIRES: flang

! RUN: %libomptarget-compile-fortran-generic -O3 -fopenmp-assume-threads-oversubscription -fopenmp-assume-teams-oversubscription
! RUN: env LIBOMPTARGET_INFO=16 OMP_NUM_TEAMS=16 OMP_TEAMS_THREAD_LIMIT=16 %libomptarget-run-generic 2>&1 | %fcheck-generic
function check_errors(array) result (errors)
  integer, intent(in) :: array(1024)
  integer :: errors
  integer :: i
  errors = 0
  do i = 1, 1024
    if ( array( i) .ne. (i) ) then
      errors = errors + 1
    end if
  end do
end function

program main
  use omp_lib
  implicit none
  integer :: i,j,red
  integer :: array(1024), errors = 0
  array = 1

  ! No-loop kernel
  !$omp target teams distribute parallel do
  do i = 1, 1024
    array(i) = i
  end do
  errors = errors + check_errors(array)

  ! SPMD kernel (num_teams clause blocks promotion to no-loop)
  array = 1
  !$omp target teams distribute parallel do num_teams(3)
  do i = 1, 1024
    array(i) = i
  end do

  errors = errors + check_errors(array)

  ! No-loop kernel
  array = 1
  !$omp target teams distribute parallel do num_threads(64)
  do i = 1, 1024
    array(i) = i
  end do

  errors = errors + check_errors(array)

  ! SPMD kernel
  array = 1
  !$omp target parallel do
  do i = 1, 1024
    array(i) = i
  end do

  errors = errors + check_errors(array)

  ! Generic kernel
  array = 1
  !$omp target teams distribute
  do i = 1, 1024
    array(i) = i
  end do

  errors = errors + check_errors(array)

  ! SPMD kernel (reduction clause blocks promotion to no-loop)
  array = 1
  red = 0
  !$omp target teams distribute parallel do reduction(+:red)
  do i = 1, 1024
    red = red + array(i)
  end do

  if (red .ne. 1024) then
    errors = errors + 1
  end if

  print *,"number of errors: ", errors

end program main

! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD-No-Loop mode
! CHECK: info: #Args: 3 Teams x Thrds: 64x 16
! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD mode
! CHECK: info: #Args: 3 Teams x Thrds: 3x 16 {{.*}}
! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD-No-Loop mode
! CHECK: info: #Args: 3 Teams x Thrds: 64x 16 {{.*}}
! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD mode
! CHECK: info: #Args: 3 Teams x Thrds: 1x 16
! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} Generic mode
! CHECK: info: #Args: 3 Teams x Thrds: 16x 16 {{.*}}
! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD mode
! CHECK: info: #Args: 4 Teams x Thrds: 16x 16 {{.*}}
! CHECK: number of errors: 0
