12 changes: 7 additions & 5 deletions clang/lib/CodeGen/CGStmt.cpp
@@ -331,29 +331,31 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
break;
case Stmt::OMPMaskedTaskLoopDirectiveClass:
llvm_unreachable("masked taskloop directive not supported yet.");
EmitOMPMaskedTaskLoopDirective(cast<OMPMaskedTaskLoopDirective>(*S));
break;
case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
EmitOMPMasterTaskLoopSimdDirective(
cast<OMPMasterTaskLoopSimdDirective>(*S));
break;
case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
llvm_unreachable("masked taskloop simd directive not supported yet.");
EmitOMPMaskedTaskLoopSimdDirective(
cast<OMPMaskedTaskLoopSimdDirective>(*S));
break;
case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
EmitOMPParallelMasterTaskLoopDirective(
cast<OMPParallelMasterTaskLoopDirective>(*S));
break;
case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
llvm_unreachable("parallel masked taskloop directive not supported yet.");
EmitOMPParallelMaskedTaskLoopDirective(
cast<OMPParallelMaskedTaskLoopDirective>(*S));
break;
case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
EmitOMPParallelMasterTaskLoopSimdDirective(
cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
break;
case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
llvm_unreachable(
"parallel masked taskloop simd directive not supported yet.");
EmitOMPParallelMaskedTaskLoopSimdDirective(
cast<OMPParallelMaskedTaskLoopSimdDirective>(*S));
break;
case Stmt::OMPDistributeDirectiveClass:
EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
60 changes: 60 additions & 0 deletions clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -7994,6 +7994,18 @@ void CodeGenFunction::EmitOMPMasterTaskLoopDirective(
CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPMaskedTaskLoopDirective(
const OMPMaskedTaskLoopDirective &S) {
auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
EmitOMPTaskLoopBasedDirective(S);
};
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S, std::nullopt, /*EmitPreInitStmt=*/false);
CGM.getOpenMPRuntime().emitMaskedRegion(*this, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective(
const OMPMasterTaskLoopSimdDirective &S) {
auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
@@ -8006,6 +8018,18 @@ void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective(
CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPMaskedTaskLoopSimdDirective(
const OMPMaskedTaskLoopSimdDirective &S) {
auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
EmitOMPTaskLoopBasedDirective(S);
};
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S);
CGM.getOpenMPRuntime().emitMaskedRegion(*this, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective(
const OMPParallelMasterTaskLoopDirective &S) {
auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
@@ -8024,6 +8048,24 @@ void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective(
emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPParallelMaskedTaskLoopDirective(
const OMPParallelMaskedTaskLoopDirective &S) {
auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
PrePostActionTy &Action) {
Action.Enter(CGF);
CGF.EmitOMPTaskLoopBasedDirective(S);
};
OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
CGM.getOpenMPRuntime().emitMaskedRegion(CGF, TaskLoopCodeGen,
S.getBeginLoc());
};
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
emitCommonOMPParallelDirective(*this, S, OMPD_masked_taskloop, CodeGen,
emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective(
const OMPParallelMasterTaskLoopSimdDirective &S) {
auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
@@ -8042,6 +8084,24 @@ void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective(
emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPParallelMaskedTaskLoopSimdDirective(
const OMPParallelMaskedTaskLoopSimdDirective &S) {
auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
PrePostActionTy &Action) {
Action.Enter(CGF);
CGF.EmitOMPTaskLoopBasedDirective(S);
};
OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
CGM.getOpenMPRuntime().emitMaskedRegion(CGF, TaskLoopCodeGen,
S.getBeginLoc());
};
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
emitCommonOMPParallelDirective(*this, S, OMPD_masked_taskloop_simd, CodeGen,
emitEmptyBoundParameters);
}

// Generate the instructions for '#pragma omp target update' directive.
void CodeGenFunction::EmitOMPTargetUpdateDirective(
const OMPTargetUpdateDirective &S) {
7 changes: 7 additions & 0 deletions clang/lib/CodeGen/CodeGenFunction.h
@@ -3863,12 +3863,19 @@ class CodeGenFunction : public CodeGenTypeCache {
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S);
void EmitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &S);
void
EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S);
void
EmitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &S);
void EmitOMPParallelMasterTaskLoopDirective(
const OMPParallelMasterTaskLoopDirective &S);
void EmitOMPParallelMaskedTaskLoopDirective(
const OMPParallelMaskedTaskLoopDirective &S);
void EmitOMPParallelMasterTaskLoopSimdDirective(
const OMPParallelMasterTaskLoopSimdDirective &S);
void EmitOMPParallelMaskedTaskLoopSimdDirective(
const OMPParallelMaskedTaskLoopSimdDirective &S);
void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
void EmitOMPDistributeParallelForDirective(
const OMPDistributeParallelForDirective &S);
188 changes: 188 additions & 0 deletions clang/test/OpenMP/combined_masked.c
@@ -0,0 +1,188 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --prefix-filecheck-ir-name _ --version 5
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -fopenmp-version=52 -x c -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#define N 100
void parallel_masked_taskloop(){
#pragma omp parallel masked taskloop
for( int i = 0; i < N; i++)
;

}
void parallel_masked_taskloop_simd(){
#pragma omp parallel masked taskloop simd
for( int i = 0; i < N; i++)
;

}
void masked_taskloop(){
#pragma omp masked taskloop
for( int i = 0; i < N; i++)
;

}
void masked_taskloop_simd(){
#pragma omp masked taskloop simd
for( int i = 0; i < N; i++)
;

}


int main()
{
parallel_masked_taskloop();
parallel_masked_taskloop_simd();
masked_taskloop();
masked_taskloop_simd();

}
// CHECK-LABEL: define dso_local void @parallel_masked_taskloop(
// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1:[0-9]+]], i32 0, ptr @parallel_masked_taskloop.omp_outlined)
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define internal void @parallel_masked_taskloop.omp_outlined(
// CHECK-SAME: ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_masked(ptr @[[GLOB1]], i32 [[TMP1]], i32 0)
// CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK-NEXT: br i1 [[TMP3]], label %[[OMP_IF_THEN:.*]], label %[[OMP_IF_END:.*]]
// CHECK: [[OMP_IF_THEN]]:
// CHECK-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 80, i64 0, ptr @.omp_task_entry.)
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP4]], i32 0, i32 0
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP5]], i32 0, i32 5
// CHECK-NEXT: store i64 0, ptr [[TMP6]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP5]], i32 0, i32 6
// CHECK-NEXT: store i64 99, ptr [[TMP7]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP5]], i32 0, i32 7
// CHECK-NEXT: store i64 1, ptr [[TMP8]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP5]], i32 0, i32 9
// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 8, i1 false)
// CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP8]], align 8
// CHECK-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP4]], i32 1, ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP10]], i32 1, i32 0, i64 0, ptr null)
// CHECK-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NEXT: call void @__kmpc_end_masked(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NEXT: br label %[[OMP_IF_END]]
// CHECK: [[OMP_IF_END]]:
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define dso_local void @parallel_masked_taskloop_simd(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @parallel_masked_taskloop_simd.omp_outlined)
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define internal void @parallel_masked_taskloop_simd.omp_outlined(
// CHECK-SAME: ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_masked(ptr @[[GLOB1]], i32 [[TMP1]], i32 0)
// CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK-NEXT: br i1 [[TMP3]], label %[[OMP_IF_THEN:.*]], label %[[OMP_IF_END:.*]]
// CHECK: [[OMP_IF_THEN]]:
// CHECK-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 80, i64 0, ptr @.omp_task_entry..2)
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP4]], i32 0, i32 0
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP5]], i32 0, i32 5
// CHECK-NEXT: store i64 0, ptr [[TMP6]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP5]], i32 0, i32 6
// CHECK-NEXT: store i64 99, ptr [[TMP7]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP5]], i32 0, i32 7
// CHECK-NEXT: store i64 1, ptr [[TMP8]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP5]], i32 0, i32 9
// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 8, i1 false)
// CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP8]], align 8
// CHECK-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP4]], i32 1, ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP10]], i32 1, i32 0, i64 0, ptr null)
// CHECK-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NEXT: call void @__kmpc_end_masked(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NEXT: br label %[[OMP_IF_END]]
// CHECK: [[OMP_IF_END]]:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define dso_local void @masked_taskloop(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 1
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_masked(ptr @[[GLOB1]], i32 [[TMP0]], i32 0)
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK-NEXT: br i1 [[TMP2]], label %[[OMP_IF_THEN:.*]], label %[[OMP_IF_END:.*]]
// CHECK: [[OMP_IF_THEN]]:
// CHECK-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 80, i64 0, ptr @.omp_task_entry..4)
// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 5
// CHECK-NEXT: store i64 0, ptr [[TMP5]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6
// CHECK-NEXT: store i64 99, ptr [[TMP6]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7
// CHECK-NEXT: store i64 1, ptr [[TMP7]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9
// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP8]], i8 0, i64 8, i1 false)
// CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP7]], align 8
// CHECK-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP3]], i32 1, ptr [[TMP5]], ptr [[TMP6]], i64 [[TMP9]], i32 1, i32 0, i64 0, ptr null)
// CHECK-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK-NEXT: call void @__kmpc_end_masked(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK-NEXT: br label %[[OMP_IF_END]]
// CHECK: [[OMP_IF_END]]:
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define dso_local void @masked_taskloop_simd(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 1
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_masked(ptr @[[GLOB1]], i32 [[TMP0]], i32 0)
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK-NEXT: br i1 [[TMP2]], label %[[OMP_IF_THEN:.*]], label %[[OMP_IF_END:.*]]
// CHECK: [[OMP_IF_THEN]]:
// CHECK-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 80, i64 0, ptr @.omp_task_entry..6)
// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 5
// CHECK-NEXT: store i64 0, ptr [[TMP5]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6
// CHECK-NEXT: store i64 99, ptr [[TMP6]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7
// CHECK-NEXT: store i64 1, ptr [[TMP7]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9
// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP8]], i8 0, i64 8, i1 false)
// CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP7]], align 8
// CHECK-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP3]], i32 1, ptr [[TMP5]], ptr [[TMP6]], i64 [[TMP9]], i32 1, i32 0, i64 0, ptr null)
// CHECK-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK-NEXT: call void @__kmpc_end_masked(ptr @[[GLOB1]], i32 [[TMP0]])
// CHECK-NEXT: br label %[[OMP_IF_END]]
// CHECK: [[OMP_IF_END]]:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define dso_local i32 @main(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: call void @parallel_masked_taskloop()
// CHECK-NEXT: call void @parallel_masked_taskloop_simd()
// CHECK-NEXT: call void @masked_taskloop()
// CHECK-NEXT: call void @masked_taskloop_simd()
// CHECK-NEXT: ret i32 0