Commit ccad1a3

review comments
1 parent c87e60e commit ccad1a3

2 files changed: +87 -48 lines changed


flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp

Lines changed: 52 additions & 48 deletions
@@ -201,6 +201,52 @@ static void localizeLoopLocalValue(mlir::Value local, mlir::Region &allocRegion,
 
 class DoConcurrentConversion
     : public mlir::OpConversionPattern<fir::DoConcurrentOp> {
+private:
+  struct TargetDeclareShapeCreationInfo {
+    // Note: We use `std::vector` (rather than `llvm::SmallVector` as usual) to
+    // interface more easily with `ShapeShiftOp::getOrigins()`, which returns a
+    // `std::vector`.
+    std::vector<mlir::Value> startIndices;
+    std::vector<mlir::Value> extents;
+
+    TargetDeclareShapeCreationInfo(mlir::Value liveIn) {
+      mlir::Value shape = nullptr;
+      mlir::Operation *liveInDefiningOp = liveIn.getDefiningOp();
+      auto declareOp =
+          mlir::dyn_cast_if_present<hlfir::DeclareOp>(liveInDefiningOp);
+
+      if (declareOp != nullptr)
+        shape = declareOp.getShape();
+
+      if (!shape)
+        return;
+
+      auto shapeOp =
+          mlir::dyn_cast_if_present<fir::ShapeOp>(shape.getDefiningOp());
+      auto shapeShiftOp =
+          mlir::dyn_cast_if_present<fir::ShapeShiftOp>(shape.getDefiningOp());
+
+      if (!shapeOp && !shapeShiftOp)
+        TODO(liveIn.getLoc(),
+             "Shapes not defined by `fir.shape` or `fir.shape_shift` ops are "
+             "not supported yet.");
+
+      if (shapeShiftOp != nullptr)
+        startIndices = shapeShiftOp.getOrigins();
+
+      extents = shapeOp != nullptr
+                    ? std::vector<mlir::Value>(shapeOp.getExtents().begin(),
+                                               shapeOp.getExtents().end())
+                    : shapeShiftOp.getExtents();
+    }
+
+    bool isShapedValue() const { return !extents.empty(); }
+    bool isShapeShiftedValue() const { return !startIndices.empty(); }
+  };
+
+  using LiveInShapeInfoMap =
+      llvm::DenseMap<mlir::Value, TargetDeclareShapeCreationInfo>;
+
 public:
   using mlir::OpConversionPattern<fir::DoConcurrentOp>::OpConversionPattern;
 

@@ -325,51 +371,6 @@ class DoConcurrentConversion
   }
 
 private:
-  struct TargetDeclareShapeCreationInfo {
-    // Note: We use `std::vector` (rather than `llvm::SmallVector` as usual) to
-    // interface more easily `ShapeShiftOp::getOrigins()` which returns
-    // `std::vector`.
-    std::vector<mlir::Value> startIndices{};
-    std::vector<mlir::Value> extents{};
-
-    TargetDeclareShapeCreationInfo(mlir::Value liveIn) {
-      mlir::Value shape = nullptr;
-      mlir::Operation *liveInDefiningOp = liveIn.getDefiningOp();
-      auto declareOp =
-          mlir::dyn_cast_if_present<hlfir::DeclareOp>(liveInDefiningOp);
-
-      if (declareOp != nullptr)
-        shape = declareOp.getShape();
-
-      if (shape == nullptr)
-        return;
-
-      auto shapeOp =
-          mlir::dyn_cast_if_present<fir::ShapeOp>(shape.getDefiningOp());
-      auto shapeShiftOp =
-          mlir::dyn_cast_if_present<fir::ShapeShiftOp>(shape.getDefiningOp());
-
-      if (shapeOp == nullptr && shapeShiftOp == nullptr)
-        TODO(liveIn.getLoc(),
-             "Shapes not defined by `fir.shape` or `fir.shape_shift` op's are"
-             "not supported yet.");
-
-      if (shapeShiftOp != nullptr)
-        startIndices = shapeShiftOp.getOrigins();
-
-      extents = shapeOp != nullptr
-                    ? std::vector<mlir::Value>(shapeOp.getExtents().begin(),
-                                               shapeOp.getExtents().end())
-                    : shapeShiftOp.getExtents();
-    }
-
-    bool isShapedValue() const { return !extents.empty(); }
-    bool isShapeShiftedValue() const { return !startIndices.empty(); }
-  };
-
-  using LiveInShapeInfoMap =
-      llvm::DenseMap<mlir::Value, TargetDeclareShapeCreationInfo>;
-
   mlir::omp::ParallelOp
   genParallelOp(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 looputils::InductionVariableInfos &ivInfos,
@@ -673,8 +674,8 @@ class DoConcurrentConversion
         rewriter,
         fir::getKindMapping(targetOp->getParentOfType<mlir::ModuleOp>()));
 
-    // Within the loop, it possible that we discover other values that need to
-    // mapped to the target region (the shape info values for arrays, for
+    // Within the loop, it is possible that we discover other values that need
+    // to be mapped to the target region (the shape info values for arrays, for
     // example). Therefore, the map block args might be extended and resized.
     // Hence, we invoke `argIface.getMapBlockArgs()` every iteration to make
     // sure we access the proper vector of data.
@@ -687,10 +688,13 @@ class DoConcurrentConversion
                                miOp, liveInShapeInfoMap.at(mappedVar));
       ++idx;
 
-      // TODO If `mappedVar.getDefiningOp()` is a `fir::BoxAddrOp`, we probably
+      // If `mappedVar.getDefiningOp()` is a `fir::BoxAddrOp`, we probably
       // need to "unpack" the box by getting the defining op of it's value.
       // However, we did not hit this case in reality yet so leaving it as a
       // todo for now.
+      if (mlir::isa<fir::BoxAddrOp>(mappedVar.getDefiningOp()))
+        TODO(mappedVar.getLoc(),
+             "Mapped variables defined by `BoxAddrOp` are not supported yet");
 
       auto mapHostValueToDevice = [&](mlir::Value hostValue,
                                       mlir::Value deviceValue) {
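
For context on the hunks above: `TargetDeclareShapeCreationInfo` walks from a mapped live-in value to its defining `hlfir.declare` and then to the `fir.shape` or `fir.shape_shift` op feeding that declare, recording the extents and, for shifted arrays, the start indices. A rough sketch of the two IR forms it distinguishes is shown below; the value names, `uniq_name`s, and exact result types are illustrative and not taken from this commit:

    // Default lower bounds: the declare's shape comes from `fir.shape`, so
    // only `extents` is populated and isShapedValue() holds.
    %c10 = arith.constant 10 : index
    %shape = fir.shape %c10 : (index) -> !fir.shape<1>
    %a:2 = hlfir.declare %a_mem(%shape) {uniq_name = "_QFEa"}
        : (!fir.ref<!fir.array<10xf32>>, !fir.shape<1>)
       -> (!fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>)

    // Explicit lower bound (e.g. `real :: b(2:11)`): the shape comes from
    // `fir.shape_shift`, so `startIndices` is populated as well and
    // isShapeShiftedValue() holds.
    %c2 = arith.constant 2 : index
    %ss = fir.shape_shift %c2, %c10 : (index, index) -> !fir.shapeshift<1>
    %b:2 = hlfir.declare %b_mem(%ss) {uniq_name = "_QFEb"}
        : (!fir.ref<!fir.array<10xf32>>, !fir.shapeshift<1>)
       -> (!fir.box<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>)
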
Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
+! Tests that when a loop bound is used in the body, the mapped version of the
+! loop bound (rather than the host-eval one) is the one used inside the loop.
+
+! RUN: %flang_fc1 -emit-hlfir -fopenmp -fdo-concurrent-to-openmp=device %s -o - \
+! RUN: | FileCheck %s
+! RUN: bbc -emit-hlfir -fopenmp -fdo-concurrent-to-openmp=device %s -o - \
+! RUN: | FileCheck %s
+
+subroutine foo(a, n)
+  implicit none
+  integer :: i, n
+  real, dimension(n) :: a
+
+  do concurrent (i=1:n)
+    a(i) = n
+  end do
+end subroutine
+
+! CHECK: omp.target host_eval(%{{.*}} -> %{{.*}}, %{{.*}} -> %[[N_HOST_EVAL:.*]], %{{.*}} -> %{{.*}} : {{.*}}) map_entries({{.*}}) {
+! CHECK: %[[N_MAPPED:.*]]:2 = hlfir.declare %arg{{.*}} {uniq_name = "_QFfooEn"}
+! CHECK: omp.teams {
+! CHECK: omp.parallel {
+! CHECK: omp.distribute {
+! CHECK: omp.wsloop {
+! CHECK: omp.loop_nest (%{{.*}}) : index = (%{{.*}}) to (%[[N_HOST_EVAL]]) inclusive step (%{{.*}}) {
+! CHECK: %[[N_VAL:.*]] = fir.load %[[N_MAPPED]]#0 : !fir.ref<i32>
+! CHECK: %[[N_VAL_CVT:.*]] = fir.convert %[[N_VAL]] : (i32) -> f32
+! CHECK: hlfir.assign %[[N_VAL_CVT]] to {{.*}}
+! CHECK-NEXT: omp.yield
+! CHECK: }
+! CHECK: }
+! CHECK: }
+! CHECK: }
+! CHECK: }
+! CHECK: }
