Commit 2ed8c5d

[flang][OpenMP] Fix handling of nested loop wrappers in LowerWorkshare (#117275)
1 parent: c1a3960

2 files changed: +26 −5 lines

flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp

Lines changed: 4 additions & 5 deletions
@@ -126,9 +126,9 @@ static bool mustParallelizeOp(Operation *op) {
 // omp.workshare.loop_wrapper {}
 //
 // Therefore, we skip if we encounter a nested omp.workshare.
-if (isa<omp::WorkshareOp>(op))
+if (isa<omp::WorkshareOp>(nested))
   return WalkResult::skip();
-if (isa<omp::WorkshareLoopWrapperOp>(op))
+if (isa<omp::WorkshareLoopWrapperOp>(nested))
   return WalkResult::interrupt();
 return WalkResult::advance();
 })
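
This hunk is the core of the fix. The walk callback receives the operation currently being visited (`nested`), but the old checks tested the outer argument `op` captured by the lambda, so every visit took the same branch regardless of what the walk actually encountered, and nested loop wrappers went undetected. A minimal sketch of how such a walk behaves, assuming a pre-order walk and the usual wasInterrupted() plumbing (the hunk does not show the enclosing call):

// Sketch only: the enclosing walk call and its order are assumptions,
// not shown in the hunk. In a pre-order walk, skip() prevents descent
// into the visited op's regions, and interrupt() ends the walk early.
WalkResult result = op->walk<WalkOrder::PreOrder>([&](Operation *nested) {
  if (isa<omp::WorkshareOp>(nested))
    return WalkResult::skip();      // inner workshare: its wrappers bind to it
  if (isa<omp::WorkshareLoopWrapperOp>(nested))
    return WalkResult::interrupt(); // found a wrapper binding to `op`
  return WalkResult::advance();
});
bool mustParallelize = result.wasInterrupted();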
@@ -253,8 +253,7 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,
 // Either we have already remapped it
 bool remapped = rootMapping.contains(opr);
 // Or it is available because it dominates `sr`
-bool dominates =
-    di.properlyDominates(opr.getDefiningOp(), &*sr.begin);
+bool dominates = di.properlyDominates(opr, &*sr.begin);
 return remapped || dominates;
 })) {
 // Safe to parallelize operations which have all operands available in
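
This one-line change is more than tidying: `opr` is an mlir::Value, and Value::getDefiningOp() returns nullptr when the value is a block argument, so the old call could feed a null Operation* into the dominance query. The Value overload of DominanceInfo::properlyDominates covers both kinds of values. A hedged sketch of the distinction; only `opr`, `di`, and `sr` come from the diff:

// A Value is either an op result or a block argument.
if (Operation *def = opr.getDefiningOp()) {
  // Op result: dominance follows from the position of the defining op.
  (void)def;
} else {
  // Block argument: no defining op (nullptr), which the old
  // Operation*-based call did not account for; dominance follows from
  // the argument's owner block instead.
}
// The Value overload folds both cases into a single, safe query:
bool dominates = di.properlyDominates(opr, &*sr.begin);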
@@ -405,7 +404,7 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion,

 if (sourceRegion.hasOneBlock()) {
   handleOneBlock(sourceRegion.front());
-} else {
+} else if (!sourceRegion.empty()) {
   auto &domTree = di.getDomTree(&sourceRegion);
   for (auto node : llvm::breadth_first(domTree.getRootNode())) {
     handleOneBlock(*node->getBlock());
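
Finally, the dominator-tree path is now reached only when the region actually has blocks: hasOneBlock() is false both for multi-block regions and for empty ones, and there is no meaningful dominator tree for a region with no blocks. The three cases after the fix, sketched with the names from the diff:

if (sourceRegion.hasOneBlock()) {
  // single block: handled directly
} else if (!sourceRegion.empty()) {
  // several blocks: visit them in dominance order via the dom tree
} else {
  // zero blocks (e.g. an if with no else region): nothing to lower
}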
New test file — Lines changed: 22 additions & 0 deletions
@@ -0,0 +1,22 @@
+// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s | FileCheck %s
+
+// Checks that the nested loop_wrapper gets parallelized
+func.func @wsfunc(%cond : i1) {
+  omp.workshare {
+    %c1 = arith.constant 1 : index
+    %c42 = arith.constant 42 : index
+    fir.if %cond {
+      omp.workshare.loop_wrapper {
+        omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) {
+          "test.test1"() : () -> ()
+          omp.yield
+        }
+      }
+    }
+    omp.terminator
+  }
+  return
+}
+
+// CHECK: fir.if
+// CHECK: omp.wsloop nowait
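
The two CHECK lines capture exactly what the fix enables: after --lower-workshare the fir.if must survive and be followed by an omp.wsloop nowait, i.e. the pass now rewrites a loop wrapper that is nested inside another construct rather than sitting directly under omp.workshare.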
