
Commit 6f54821

merge main into amd-staging (llvm#729)
2 parents: 9c4df8e + 3330509

121 files changed: +6408 -3359 lines


clang-tools-extra/docs/ReleaseNotes.rst

Lines changed: 2 additions & 1 deletion
@@ -447,7 +447,8 @@ Changes in existing checks
   positives when pointers is transferred to non-const references
   and avoid false positives of function pointer and fix false
   positives on return of non-const pointer and fix false positives on
-  pointer-to-member operator.
+  pointer-to-member operator and avoid false positives when the address
+  of a variable is taken to be passed to a function.

 - Improved :doc:`misc-coroutine-hostile-raii
   <clang-tidy/checks/misc/coroutine-hostile-raii>` check by adding the option

clang-tools-extra/test/clang-tidy/checkers/misc/const-correctness-pointer-as-pointers.cpp

Lines changed: 15 additions & 0 deletions
@@ -73,3 +73,18 @@ void ignoreNonConstRefOps() {
   int* p2 {nullptr};
   int*& r2 = (int*&)p2;
 }
+
+void pointer_to_pointer_param(int**);
+void pass_address_to_pointer_to_pointer() {
+  int i = 0;
+  int* ip = &i;
+  // CHECK-NOT: warning
+  pointer_to_pointer_param(&ip);
+}
+
+void void_pointer_to_pointer_param(void**);
+void pass_address_to_void_pointer_to_pointer() {
+  void* ptr = nullptr;
+  // CHECK-NOT: warning
+  void_pointer_to_pointer_param(&ptr);
+}
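
For contrast, a minimal counterpart sketch (not part of this commit, behavior described is my reading of the check): when the pointer's own address never escapes to a callee, misc-const-correctness in its pointers-as-pointers mode can still suggest making the pointer itself const.

void pointer_never_escapes() {
  int i = 0;
  int* ip = &i;  // no `&ip` is passed anywhere, so the check may still
  int x = *ip;   // suggest declaring `ip` as `int *const` (illustrative only)
  (void)x;
}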

clang/include/clang/CIR/Dialect/IR/CIROps.td

Lines changed: 10 additions & 0 deletions
@@ -4722,6 +4722,16 @@ def CIR_FAbsOp : CIR_UnaryFPToFPBuiltinOp<"fabs", "FAbsOp"> {
   }];
 }

+def CIR_FloorOp : CIR_UnaryFPToFPBuiltinOp<"floor", "FloorOp"> {
+  let summary = "Computes the floating-point floor value";
+  let description = [{
+    `cir.floor` computes the floor of a floating-point operand and returns
+    a result of the same type.
+
+    Floating-point exceptions are ignored, and it does not set `errno`.
+  }];
+}
+
 //===----------------------------------------------------------------------===//
 // Variadic Operations
 //===----------------------------------------------------------------------===//

clang/include/clang/CIR/MissingFeatures.h

Lines changed: 2 additions & 0 deletions
@@ -153,6 +153,8 @@ struct MissingFeatures {
   static bool coroEndBuiltinCall() { return false; }
   static bool emitBodyAndFallthrough() { return false; }
   static bool coroOutsideFrameMD() { return false; }
+  static bool coroCoReturn() { return false; }
+  static bool coroCoYield() { return false; }

   // Various handling of deferred processing in CIRGenModule.
   static bool cgmRelease() { return false; }

clang/lib/Analysis/ExprMutationAnalyzer.cpp

Lines changed: 5 additions & 0 deletions
@@ -135,6 +135,11 @@ class ExprPointeeResolve {
     if (const auto *PE = dyn_cast<ParenExpr>(E))
       return resolveExpr(PE->getSubExpr());

+    if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
+      if (UO->getOpcode() == UO_AddrOf)
+        return resolveExpr(UO->getSubExpr());
+    }
+
     if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E)) {
       // only implicit cast needs to be treated as resolvable.
       // explicit cast will be checked in `findPointeeToNonConst`

clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp

Lines changed: 10 additions & 0 deletions
@@ -321,6 +321,16 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
   case Builtin::BI__builtin_fabsf128:
     return emitUnaryMaybeConstrainedFPBuiltin<cir::FAbsOp>(*this, *e);

+  case Builtin::BIfloor:
+  case Builtin::BIfloorf:
+  case Builtin::BIfloorl:
+  case Builtin::BI__builtin_floor:
+  case Builtin::BI__builtin_floorf:
+  case Builtin::BI__builtin_floorf16:
+  case Builtin::BI__builtin_floorl:
+  case Builtin::BI__builtin_floorf128:
+    return emitUnaryMaybeConstrainedFPBuiltin<cir::FloorOp>(*this, *e);
+
   case Builtin::BI__assume:
   case Builtin::BI__builtin_assume: {
     if (e->getArg(0)->HasSideEffects(getContext()))
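
As an illustration, a small source sketch that should take this new path; the compiler flags and exact output are assumptions based on the FloorOp definition and the CIRToLLVMFloorOpLowering pattern added later in this commit, not copied from the commit's tests.

// Hypothetical example, compiled with ClangIR enabled (e.g. -fclangir,
// flag name assumed): each call maps to a Builtin::BI*floor* case above,
// emits a cir.floor op, and is expected to lower to llvm.intr.floor.
#include <math.h>

double floor_d(double x) { return floor(x); }
float  floor_f(float x)  { return __builtin_floorf(x); }
long double floor_l(long double x) { return __builtin_floorl(x); }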

clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp

Lines changed: 111 additions & 6 deletions
@@ -115,6 +115,21 @@ static mlir::Value emitX86MaskLogic(CIRGenBuilderTy &builder,
                                     ops[0].getType());
 }

+static mlir::Value emitVecInsert(CIRGenBuilderTy &builder, mlir::Location loc,
+                                 mlir::Value vec, mlir::Value value,
+                                 mlir::Value indexOp) {
+  unsigned numElts = cast<cir::VectorType>(vec.getType()).getSize();
+
+  uint64_t index =
+      indexOp.getDefiningOp<cir::ConstantOp>().getIntValue().getZExtValue();
+
+  index &= numElts - 1;
+
+  cir::ConstantOp indexVal = builder.getUInt64(index, loc);
+
+  return cir::VecInsertOp::create(builder, loc, vec, value, indexVal);
+}
+
 mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
                                                const CallExpr *expr) {
   if (builtinID == Builtin::BI__builtin_cpu_is) {
@@ -238,11 +253,11 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
   case X86::BI__builtin_ia32_vec_set_v32qi:
   case X86::BI__builtin_ia32_vec_set_v16hi:
   case X86::BI__builtin_ia32_vec_set_v8si:
-  case X86::BI__builtin_ia32_vec_set_v4di:
-    cgm.errorNYI(expr->getSourceRange(),
-                 std::string("unimplemented X86 builtin call: ") +
-                     getContext().BuiltinInfo.getName(builtinID));
-    return {};
+  case X86::BI__builtin_ia32_vec_set_v4di: {
+    return emitVecInsert(builder, getLoc(expr->getExprLoc()), ops[0], ops[1],
+                         ops[2]);
+  }
+
   case X86::BI_mm_setcsr:
   case X86::BI__builtin_ia32_ldmxcsr: {
     mlir::Location loc = getLoc(expr->getExprLoc());
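
A hypothetical caller of the newly handled builtin (not taken from the commit's tests), assuming compilation with AVX enabled; the constant lane index requirement matches emitVecInsert reading the index from a cir::ConstantOp.

// Illustrative only: __builtin_ia32_vec_set_v4di takes a constant lane
// index, which is why emitVecInsert can fold it at compile time.
typedef long long v4di __attribute__((vector_size(32)));

__attribute__((target("avx")))
v4di set_lane2(v4di v, long long x) {
  return __builtin_ia32_vec_set_v4di(v, x, 2); // expected to become a vector insert op
}
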
@@ -487,6 +502,10 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
   case X86::BI__builtin_ia32_compressqi128_mask:
   case X86::BI__builtin_ia32_compressqi256_mask:
   case X86::BI__builtin_ia32_compressqi512_mask:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented X86 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return {};
   case X86::BI__builtin_ia32_gather3div2df:
   case X86::BI__builtin_ia32_gather3div2di:
   case X86::BI__builtin_ia32_gather3div4df:
@@ -510,7 +529,93 @@ mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
   case X86::BI__builtin_ia32_gathersiv8di:
   case X86::BI__builtin_ia32_gathersiv16si:
   case X86::BI__builtin_ia32_gatherdiv8di:
-  case X86::BI__builtin_ia32_gatherdiv16si:
+  case X86::BI__builtin_ia32_gatherdiv16si: {
+    StringRef intrinsicName;
+    switch (builtinID) {
+    default:
+      llvm_unreachable("Unexpected builtin");
+    case X86::BI__builtin_ia32_gather3div2df:
+      intrinsicName = "x86.avx512.mask.gather3div2.df";
+      break;
+    case X86::BI__builtin_ia32_gather3div2di:
+      intrinsicName = "x86.avx512.mask.gather3div2.di";
+      break;
+    case X86::BI__builtin_ia32_gather3div4df:
+      intrinsicName = "x86.avx512.mask.gather3div4.df";
+      break;
+    case X86::BI__builtin_ia32_gather3div4di:
+      intrinsicName = "x86.avx512.mask.gather3div4.di";
+      break;
+    case X86::BI__builtin_ia32_gather3div4sf:
+      intrinsicName = "x86.avx512.mask.gather3div4.sf";
+      break;
+    case X86::BI__builtin_ia32_gather3div4si:
+      intrinsicName = "x86.avx512.mask.gather3div4.si";
+      break;
+    case X86::BI__builtin_ia32_gather3div8sf:
+      intrinsicName = "x86.avx512.mask.gather3div8.sf";
+      break;
+    case X86::BI__builtin_ia32_gather3div8si:
+      intrinsicName = "x86.avx512.mask.gather3div8.si";
+      break;
+    case X86::BI__builtin_ia32_gather3siv2df:
+      intrinsicName = "x86.avx512.mask.gather3siv2.df";
+      break;
+    case X86::BI__builtin_ia32_gather3siv2di:
+      intrinsicName = "x86.avx512.mask.gather3siv2.di";
+      break;
+    case X86::BI__builtin_ia32_gather3siv4df:
+      intrinsicName = "x86.avx512.mask.gather3siv4.df";
+      break;
+    case X86::BI__builtin_ia32_gather3siv4di:
+      intrinsicName = "x86.avx512.mask.gather3siv4.di";
+      break;
+    case X86::BI__builtin_ia32_gather3siv4sf:
+      intrinsicName = "x86.avx512.mask.gather3siv4.sf";
+      break;
+    case X86::BI__builtin_ia32_gather3siv4si:
+      intrinsicName = "x86.avx512.mask.gather3siv4.si";
+      break;
+    case X86::BI__builtin_ia32_gather3siv8sf:
+      intrinsicName = "x86.avx512.mask.gather3siv8.sf";
+      break;
+    case X86::BI__builtin_ia32_gather3siv8si:
+      intrinsicName = "x86.avx512.mask.gather3siv8.si";
+      break;
+    case X86::BI__builtin_ia32_gathersiv8df:
+      intrinsicName = "x86.avx512.mask.gather.dpd.512";
+      break;
+    case X86::BI__builtin_ia32_gathersiv16sf:
+      intrinsicName = "x86.avx512.mask.gather.dps.512";
+      break;
+    case X86::BI__builtin_ia32_gatherdiv8df:
+      intrinsicName = "x86.avx512.mask.gather.qpd.512";
+      break;
+    case X86::BI__builtin_ia32_gatherdiv16sf:
+      intrinsicName = "x86.avx512.mask.gather.qps.512";
+      break;
+    case X86::BI__builtin_ia32_gathersiv8di:
+      intrinsicName = "x86.avx512.mask.gather.dpq.512";
+      break;
+    case X86::BI__builtin_ia32_gathersiv16si:
+      intrinsicName = "x86.avx512.mask.gather.dpi.512";
+      break;
+    case X86::BI__builtin_ia32_gatherdiv8di:
+      intrinsicName = "x86.avx512.mask.gather.qpq.512";
+      break;
+    case X86::BI__builtin_ia32_gatherdiv16si:
+      intrinsicName = "x86.avx512.mask.gather.qpi.512";
+      break;
+    }
+
+    mlir::Location loc = getLoc(expr->getExprLoc());
+    unsigned minElts =
+        std::min(cast<cir::VectorType>(ops[0].getType()).getSize(),
+                 cast<cir::VectorType>(ops[2].getType()).getSize());
+    ops[3] = getMaskVecValue(builder, loc, ops[3], minElts);
+    return emitIntrinsicCallOp(builder, loc, intrinsicName.str(),
+                               convertType(expr->getType()), ops);
+  }
   case X86::BI__builtin_ia32_scattersiv8df:
   case X86::BI__builtin_ia32_scattersiv16sf:
   case X86::BI__builtin_ia32_scatterdiv8df:
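
For reference, a hypothetical use of one of these gathers (not from the commit). The intrinsic-to-builtin mapping is an assumption: I would expect _mm512_i32gather_pd to reach __builtin_ia32_gathersiv8df and hence the x86.avx512.mask.gather.dpd.512 intrinsic selected above.

// Illustrative sketch, assuming AVX-512F is enabled (e.g. -mavx512f).
#include <immintrin.h>

__m512d gather8(const double *base, __m256i idx) {
  // Expected to funnel through the masked-gather path handled above,
  // with the mask operand normalized by getMaskVecValue.
  return _mm512_i32gather_pd(idx, base, /*scale=*/8);
}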

clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp

Lines changed: 39 additions & 0 deletions
@@ -32,6 +32,9 @@ struct clang::CIRGen::CGCoroData {

   // Stores the result of __builtin_coro_begin call.
   mlir::Value coroBegin = nullptr;
+
+  // The promise type's 'unhandled_exception' handler, if it defines one.
+  Stmt *exceptionHandler = nullptr;
 };

 // Defining these here allows to keep CGCoroData private to this file.
@@ -272,6 +275,17 @@ CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &s) {
   }
   return mlir::success();
 }
+
+static bool memberCallExpressionCanThrow(const Expr *e) {
+  if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
+    if (const auto *proto =
+            ce->getMethodDecl()->getType()->getAs<FunctionProtoType>())
+      if (isNoexceptExceptionSpec(proto->getExceptionSpecType()) &&
+          proto->canThrow() == CT_Cannot)
+        return false;
+  return true;
+}
+
 // Given a suspend expression which roughly looks like:
 //
 //   auto && x = CommonExpr();
@@ -333,6 +347,31 @@ emitSuspendExpression(CIRGenFunction &cgf, CGCoroData &coro,
       },
       /*resumeBuilder=*/
       [&](mlir::OpBuilder &b, mlir::Location loc) {
+        // Exception handling requires additional IR. If the 'await_resume'
+        // function is marked as 'noexcept', we avoid generating this additional
+        // IR.
+        CXXTryStmt *tryStmt = nullptr;
+        if (coro.exceptionHandler && kind == cir::AwaitKind::Init &&
+            memberCallExpressionCanThrow(s.getResumeExpr()))
+          cgf.cgm.errorNYI("Coro resume Exception");
+
+        // FIXME(cir): the alloca for the resume expr should be placed in the
+        // enclosing cir.scope instead.
+        if (forLValue) {
+          assert(!cir::MissingFeatures::coroCoYield());
+        } else {
+          awaitRes.rv =
+              cgf.emitAnyExpr(s.getResumeExpr(), aggSlot, ignoreResult);
+          if (!awaitRes.rv.isIgnored())
+            // Create the alloca in the block before the scope wrapping
+            // cir.await.
+            assert(!cir::MissingFeatures::coroCoReturn());
+        }
+
+        if (tryStmt)
+          cgf.cgm.errorNYI("Coro tryStmt");
+
+        // Returns control back to parent.
         cir::YieldOp::create(builder, loc);
       });
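
To make memberCallExpressionCanThrow concrete, here is an illustrative pair of awaiters (hypothetical types, not from the commit). For the first, the await_resume callee carries a noexcept spec and canThrow() is CT_Cannot, so the helper returns false and no extra exception-handling IR is requested; for the second it returns true.

#include <coroutine>

// Hypothetical awaiters for illustration only.
struct QuietAwaiter {
  bool await_ready() const noexcept { return false; }
  void await_suspend(std::coroutine_handle<>) noexcept {}
  void await_resume() noexcept {}   // noexcept spec + CT_Cannot -> returns false
};

struct NoisyAwaiter {
  bool await_ready() const { return false; }
  void await_suspend(std::coroutine_handle<>) {}
  int await_resume() { return 42; } // may throw -> returns true (extra IR needed)
};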

clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp

Lines changed: 9 additions & 0 deletions
@@ -210,6 +210,15 @@ mlir::LogicalResult CIRToLLVMExp2OpLowering::matchAndRewrite(
   return mlir::success();
 }

+mlir::LogicalResult CIRToLLVMFloorOpLowering::matchAndRewrite(
+    cir::FloorOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  mlir::Type resTy = typeConverter->convertType(op.getType());
+  rewriter.replaceOpWithNewOp<mlir::LLVM::FFloorOp>(op, resTy,
+                                                    adaptor.getSrc());
+  return mlir::success();
+}
+
 static mlir::Value getLLVMIntCast(mlir::ConversionPatternRewriter &rewriter,
                                   mlir::Value llvmSrc, mlir::Type llvmDstIntTy,
                                   bool isUnsigned, uint64_t cirSrcWidth,

clang/test/CIR/CodeGen/coro-task.cpp

Lines changed: 15 additions & 3 deletions
@@ -203,11 +203,21 @@ VoidTask silly_task() {
 // CIR: %[[CoroHandleVoidReload:.*]] = cir.load{{.*}} %[[CoroHandleVoidAddr]] : !cir.ptr<![[CoroHandleVoid]]>, ![[CoroHandleVoid]]
 // CIR: cir.call @_ZNSt14suspend_always13await_suspendESt16coroutine_handleIvE(%[[SuspendAlwaysAddr]], %[[CoroHandleVoidReload]])
 // CIR: cir.yield
+
+// Third region `resume` handles coroutine resuming logic.
+
 // CIR: }, resume : {
+// CIR: cir.call @_ZNSt14suspend_always12await_resumeEv(%[[SuspendAlwaysAddr]])
 // CIR: cir.yield
 // CIR: },)
 // CIR: }

+// Since we already tested cir.await guts above, the remaining checks for:
+// - The actual user written co_await
+// - The promise call
+// - The final suspend co_await
+// - Return
+
 folly::coro::Task<int> byRef(const std::string& s) {
   co_return s.size();
 }
@@ -245,6 +255,8 @@ folly::coro::Task<int> byRef(const std::string& s) {
 // CIR: %[[CoroHandleVoidReload:.*]] = cir.load{{.*}} %[[CoroHandleVoidAddr]] : !cir.ptr<![[CoroHandleVoid]]>, ![[CoroHandleVoid]]
 // CIR: cir.call @_ZNSt14suspend_always13await_suspendESt16coroutine_handleIvE(%[[SuspendAlwaysAddr]], %[[CoroHandleVoidReload]])
 // CIR: cir.yield
-// CIR: }, resume : {
-// CIR: cir.yield
-// CIR: },)
+// CIR: }, resume : {
+// CIR: cir.call @_ZNSt14suspend_always12await_resumeEv(%[[SuspendAlwaysAddr]])
+// CIR: cir.yield
+// CIR: },)
+// CIR: }
