@@ -135,6 +135,13 @@ mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
135135 return mlir::FusedLoc::get (locs, metadata, &getMLIRContext ());
136136}
137137
138+ void CIRGenFunction::emitAndUpdateRetAlloca (QualType type, mlir::Location loc,
139+ CharUnits alignment) {
140+ if (!type->isVoidType ()) {
141+ fnRetAlloca = emitAlloca (" __retval" , convertType (type), loc, alignment);
142+ }
143+ }
144+
138145void CIRGenFunction::declare (mlir::Value addrVal, const Decl *var, QualType ty,
139146 mlir::Location loc, CharUnits alignment,
140147 bool isParam) {
@@ -149,14 +156,125 @@ void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
149156 allocaOp.setConstantAttr (mlir::UnitAttr::get (&getMLIRContext ()));
150157}
151158
/// Finish off the current lexical scope: emit the pending return block (if
/// any), drop empty trailing blocks, and terminate the scope with either a
/// return/branch (function depth) or a `cir.yield` (nested scopes).
void CIRGenFunction::LexicalScope::cleanup() {
  CIRGenBuilderTy &builder = cgf.builder;
  LexicalScope *localScope = cgf.curLexScope;

  if (returnBlock != nullptr) {
    // Write out the return block, which loads the value from `__retval` and
    // issues the `cir.return`.
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(returnBlock);
    (void)emitReturn(*returnLoc);
  }

  mlir::Block *curBlock = builder.getBlock();
  // Global initializers may legitimately leave no current block behind.
  if (isGlobalInit() && !curBlock)
    return;
  // Already terminated: nothing more to emit for this scope.
  // NOTE(review): curBlock is dereferenced here; this assumes every
  // non-global-init scope still has an insertion block — confirm.
  if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
    return;

  // Get rid of any empty block at the end of the scope.
  bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
  if (!entryBlock && curBlock->empty()) {
    curBlock->erase();
    // The return block is only worth keeping if something branched to it.
    if (returnBlock != nullptr && returnBlock->getUses().empty())
      returnBlock->erase();
    return;
  }

  // Reached the end of the scope.
  {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(curBlock);

    if (localScope->depth == 0) {
      // Reached the end of the function.
      if (returnBlock != nullptr) {
        // An unused return block is dead; otherwise branch into it so the
        // shared return sequence (load `__retval`, `cir.return`) runs.
        if (returnBlock->getUses().empty())
          returnBlock->erase();
        else {
          builder.create<cir::BrOp>(*returnLoc, returnBlock);
          return;
        }
      }
      emitImplicitReturn();
      return;
    }
    // Reached the end of a non-function scope. Some scopes, such as those
    // used with the ?: operator, can return a value.
    if (!localScope->isTernary() && !curBlock->mightHaveTerminator()) {
      !retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
              : builder.create<cir::YieldOp>(localScope->endLoc, retVal);
    }
  }
}
212+
213+ cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn (mlir::Location loc) {
214+ CIRGenBuilderTy &builder = cgf.getBuilder ();
215+
216+ if (!cgf.curFn .getFunctionType ().hasVoidReturn ()) {
217+ // Load the value from `__retval` and return it via the `cir.return` op.
218+ auto value = builder.create <cir::LoadOp>(
219+ loc, cgf.curFn .getFunctionType ().getReturnType (), *cgf.fnRetAlloca );
220+ return builder.create <cir::ReturnOp>(loc,
221+ llvm::ArrayRef (value.getResult ()));
222+ }
223+ return builder.create <cir::ReturnOp>(loc);
224+ }
225+
226+ // This is copied from CodeGenModule::MayDropFunctionReturn. This is a
227+ // candidate for sharing between CIRGen and CodeGen.
228+ static bool mayDropFunctionReturn (const ASTContext &astContext,
229+ QualType returnType) {
230+ // We can't just discard the return value for a record type with a complex
231+ // destructor or a non-trivially copyable type.
232+ if (const RecordType *recordType =
233+ returnType.getCanonicalType ()->getAs <RecordType>()) {
234+ if (const auto *classDecl = dyn_cast<CXXRecordDecl>(recordType->getDecl ()))
235+ return classDecl->hasTrivialDestructor ();
236+ }
237+ return returnType.isTriviallyCopyableType (astContext);
238+ }
239+
240+ void CIRGenFunction::LexicalScope::emitImplicitReturn () {
241+ CIRGenBuilderTy &builder = cgf.getBuilder ();
242+ LexicalScope *localScope = cgf.curLexScope ;
243+
244+ const auto *fd = cast<clang::FunctionDecl>(cgf.curGD .getDecl ());
245+
246+ // In C++, flowing off the end of a non-void function is always undefined
247+ // behavior. In C, flowing off the end of a non-void function is undefined
248+ // behavior only if the non-existent return value is used by the caller.
249+ // That influences whether the terminating op is trap, unreachable, or
250+ // return.
251+ if (cgf.getLangOpts ().CPlusPlus && !fd->hasImplicitReturnZero () &&
252+ !cgf.sawAsmBlock && !fd->getReturnType ()->isVoidType () &&
253+ builder.getInsertionBlock ()) {
254+ bool shouldEmitUnreachable =
255+ cgf.cgm .getCodeGenOpts ().StrictReturn ||
256+ !mayDropFunctionReturn (fd->getASTContext (), fd->getReturnType ());
257+
258+ if (shouldEmitUnreachable) {
259+ if (cgf.cgm .getCodeGenOpts ().OptimizationLevel == 0 )
260+ builder.create <cir::TrapOp>(localScope->endLoc );
261+ else
262+ builder.create <cir::UnreachableOp>(localScope->endLoc );
263+ builder.clearInsertionPoint ();
264+ return ;
265+ }
266+ }
267+
268+ (void )emitReturn (localScope->endLoc );
269+ }
270+
152271void CIRGenFunction::startFunction (GlobalDecl gd, QualType returnType,
153272 cir::FuncOp fn, cir::FuncType funcType,
154273 FunctionArgList args, SourceLocation loc,
155274 SourceLocation startLoc) {
156275 assert (!curFn &&
157276 " CIRGenFunction can only be used for one function at a time" );
158277
159- fnRetTy = returnType;
160278 curFn = fn;
161279
162280 const auto *fd = dyn_cast_or_null<FunctionDecl>(gd.getDecl ());
@@ -194,6 +312,12 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
194312 builder.CIRBaseBuilderTy ::createStore (fnBodyBegin, paramVal, addrVal);
195313 }
196314 assert (builder.getInsertionBlock () && " Should be valid" );
315+
316+ // When the current function is not void, create an address to store the
317+ // result value.
318+ if (!returnType->isVoidType ())
319+ emitAndUpdateRetAlloca (returnType, getLoc (fd->getBody ()->getEndLoc ()),
320+ getContext ().getTypeAlignInChars (returnType));
197321}
198322
// Currently a no-op: scope/return finalization is handled elsewhere (see
// LexicalScope::cleanup()).
// NOTE(review): intentionally empty placeholder — confirm more finalization
// work is expected to land here.
void CIRGenFunction::finishFunction (SourceLocation endLoc) {}
@@ -208,9 +332,24 @@ mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
208332 return result;
209333}
210334
335+ static void eraseEmptyAndUnusedBlocks (cir::FuncOp func) {
336+ // Remove any leftover blocks that are unreachable and empty, since they do
337+ // not represent unreachable code useful for warnings nor anything deemed
338+ // useful in general.
339+ SmallVector<mlir::Block *> blocksToDelete;
340+ for (mlir::Block &block : func.getBlocks ()) {
341+ if (block.empty () && block.getUses ().empty ())
342+ blocksToDelete.push_back (&block);
343+ }
344+ for (mlir::Block *block : blocksToDelete)
345+ block->erase ();
346+ }
347+
211348cir::FuncOp CIRGenFunction::generateCode (clang::GlobalDecl gd, cir::FuncOp fn,
212349 cir::FuncType funcType) {
213350 const auto funcDecl = cast<FunctionDecl>(gd.getDecl ());
351+ curGD = gd;
352+
214353 SourceLocation loc = funcDecl->getLocation ();
215354 Stmt *body = funcDecl->getBody ();
216355 SourceRange bodyRange =
@@ -219,55 +358,53 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
219358 SourceLocRAIIObject fnLoc{*this , loc.isValid () ? getLoc (loc)
220359 : builder.getUnknownLoc ()};
221360
222- // This will be used once more code is upstreamed.
223- [[maybe_unused]] mlir::Block *entryBB = fn.addEntryBlock ();
361+ auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
362+ return clangLoc.isValid () ? getLoc (clangLoc) : builder.getUnknownLoc ();
363+ };
364+ const mlir::Location fusedLoc = mlir::FusedLoc::get (
365+ &getMLIRContext (),
366+ {validMLIRLoc (bodyRange.getBegin ()), validMLIRLoc (bodyRange.getEnd ())});
367+ mlir::Block *entryBB = fn.addEntryBlock ();
224368
225369 FunctionArgList args;
226370 QualType retTy = buildFunctionArgList (gd, args);
227371
228- startFunction (gd, retTy, fn, funcType, args, loc, bodyRange.getBegin ());
229-
230- if (isa<CXXDestructorDecl>(funcDecl))
231- getCIRGenModule ().errorNYI (bodyRange, " C++ destructor definition" );
232- else if (isa<CXXConstructorDecl>(funcDecl))
233- getCIRGenModule ().errorNYI (bodyRange, " C++ constructor definition" );
234- else if (getLangOpts ().CUDA && !getLangOpts ().CUDAIsDevice &&
235- funcDecl->hasAttr <CUDAGlobalAttr>())
236- getCIRGenModule ().errorNYI (bodyRange, " CUDA kernel" );
237- else if (isa<CXXMethodDecl>(funcDecl) &&
238- cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker ())
239- getCIRGenModule ().errorNYI (bodyRange, " Lambda static invoker" );
240- else if (funcDecl->isDefaulted () && isa<CXXMethodDecl>(funcDecl) &&
241- (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator () ||
242- cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator ()))
243- getCIRGenModule ().errorNYI (bodyRange, " Default assignment operator" );
244- else if (body) {
245- if (mlir::failed (emitFunctionBody (body))) {
246- fn.erase ();
247- return nullptr ;
248- }
249- } else
250- llvm_unreachable (" no definition for normal function" );
251-
252- // This code to insert a cir.return or cir.trap at the end of the function is
253- // temporary until the function return code, including
254- // CIRGenFunction::LexicalScope::emitImplicitReturn(), is upstreamed.
255- mlir::Block &lastBlock = fn.getRegion ().back ();
256- if (lastBlock.empty () || !lastBlock.mightHaveTerminator () ||
257- !lastBlock.getTerminator ()->hasTrait <mlir::OpTrait::IsTerminator>()) {
258- builder.setInsertionPointToEnd (&lastBlock);
259- if (mlir::isa<cir::VoidType>(funcType.getReturnType ())) {
260- builder.create <cir::ReturnOp>(getLoc (bodyRange.getEnd ()));
372+ {
373+ LexicalScope lexScope (*this , fusedLoc, entryBB);
374+
375+ startFunction (gd, retTy, fn, funcType, args, loc, bodyRange.getBegin ());
376+
377+ if (isa<CXXDestructorDecl>(funcDecl))
378+ getCIRGenModule ().errorNYI (bodyRange, " C++ destructor definition" );
379+ else if (isa<CXXConstructorDecl>(funcDecl))
380+ getCIRGenModule ().errorNYI (bodyRange, " C++ constructor definition" );
381+ else if (getLangOpts ().CUDA && !getLangOpts ().CUDAIsDevice &&
382+ funcDecl->hasAttr <CUDAGlobalAttr>())
383+ getCIRGenModule ().errorNYI (bodyRange, " CUDA kernel" );
384+ else if (isa<CXXMethodDecl>(funcDecl) &&
385+ cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker ())
386+ getCIRGenModule ().errorNYI (bodyRange, " Lambda static invoker" );
387+ else if (funcDecl->isDefaulted () && isa<CXXMethodDecl>(funcDecl) &&
388+ (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator () ||
389+ cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator ()))
390+ getCIRGenModule ().errorNYI (bodyRange, " Default assignment operator" );
391+ else if (body) {
392+ if (mlir::failed (emitFunctionBody (body))) {
393+ fn.erase ();
394+ return nullptr ;
395+ }
261396 } else {
262- builder.create <cir::TrapOp>(getLoc (bodyRange.getEnd ()));
397+ // Anything without a body should have been handled above.
398+ llvm_unreachable (" no definition for normal function" );
263399 }
264- }
265400
266- if (mlir::failed (fn.verifyBody ()))
267- return nullptr ;
401+ if (mlir::failed (fn.verifyBody ()))
402+ return nullptr ;
268403
269- finishFunction (bodyRange.getEnd ());
404+ finishFunction (bodyRange.getEnd ());
405+ }
270406
407+ eraseEmptyAndUnusedBlocks (fn);
271408 return fn;
272409}
273410
0 commit comments