[CIR] Implement codegen for inline assembly with output operands #154014
Conversation
@llvm/pr-subscribers-clang Author: Iris Shi (el-ev) Changes
https://github.com/llvm/clangir/blob/main/clang/lib/CIR/CodeGen/CIRAsm.cpp

Patch is 23.52 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/154014.diff

6 Files Affected:
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index ebf57246ba0b9..042190201b8f2 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -179,9 +179,10 @@ struct MissingFeatures {
static bool asmGoto() { return false; }
static bool asmInputOperands() { return false; }
static bool asmLabelAttr() { return false; }
+ static bool asmLLVMAssume() { return false; }
static bool asmMemoryEffects() { return false; }
- static bool asmOutputOperands() { return false; }
static bool asmUnwindClobber() { return false; }
+ static bool asmVectorType() { return false; }
static bool assignMemcpyizer() { return false; }
static bool astVarDeclInterface() { return false; }
static bool attributeBuiltin() { return false; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenAsm.cpp b/clang/lib/CIR/CodeGen/CIRGenAsm.cpp
index 17dffb3515d2a..34e45ff64bea9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAsm.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAsm.cpp
@@ -10,6 +10,8 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/DiagnosticSema.h"
+
#include "CIRGenFunction.h"
#include "clang/CIR/MissingFeatures.h"
@@ -26,6 +28,98 @@ static AsmFlavor inferFlavor(const CIRGenModule &cgm, const AsmStmt &s) {
return isa<MSAsmStmt>(&s) ? AsmFlavor::x86_intel : gnuAsmFlavor;
}
+// FIXME(cir): This should be a common helper between CIRGen
+// and traditional CodeGen
+static std::string simplifyConstraint(
+ const char *constraint, const TargetInfo &target,
+ SmallVectorImpl<TargetInfo::ConstraintInfo> *outCons = nullptr) {
+ std::string result;
+
+ while (*constraint) {
+ switch (*constraint) {
+ default:
+ result += target.convertConstraint(constraint);
+ break;
+ // Ignore these
+ case '*':
+ case '?':
+ case '!':
+ case '=': // Will see this and the following in mult-alt constraints.
+ case '+':
+ break;
+ case '#': // Ignore the rest of the constraint alternative.
+ while (constraint[1] && constraint[1] != ',')
+ constraint++;
+ break;
+ case '&':
+ case '%':
+ result += *constraint;
+ while (constraint[1] && constraint[1] == *constraint)
+ constraint++;
+ break;
+ case ',':
+ result += "|";
+ break;
+ case 'g':
+ result += "imr";
+ break;
+ case '[': {
+ assert(outCons &&
+ "Must pass output names to constraints with a symbolic name");
+ unsigned index;
+ bool resolveResult =
+ target.resolveSymbolicName(constraint, *outCons, index);
+ assert(resolveResult && "Could not resolve symbolic name");
+ (void)resolveResult;
+ result += llvm::utostr(index);
+ break;
+ }
+ }
+
+ constraint++;
+ }
+
+ return result;
+}
+
+// FIXME(cir): This should be a common helper between CIRGen
+// and traditional CodeGen
+/// Look at AsmExpr and if it is a variable declared
+/// as using a particular register add that as a constraint that will be used
+/// in this asm stmt.
+static std::string
+addVariableConstraints(const std::string &constraint, const Expr &asmExpr,
+ const TargetInfo &target, CIRGenModule &cgm,
+ const AsmStmt &stmt, const bool earlyClobber,
+ std::string *gccReg = nullptr) {
+ const DeclRefExpr *asmDeclRef = dyn_cast<DeclRefExpr>(&asmExpr);
+ if (!asmDeclRef)
+ return constraint;
+ const ValueDecl &value = *asmDeclRef->getDecl();
+ const VarDecl *variable = dyn_cast<VarDecl>(&value);
+ if (!variable)
+ return constraint;
+ if (variable->getStorageClass() != SC_Register)
+ return constraint;
+ AsmLabelAttr *attr = variable->getAttr<AsmLabelAttr>();
+ if (!attr)
+ return constraint;
+ StringRef registerName = attr->getLabel();
+ assert(target.isValidGCCRegisterName(registerName));
+ // We're using validateOutputConstraint here because we only care if
+ // this is a register constraint.
+ TargetInfo::ConstraintInfo info(constraint, "");
+ if (target.validateOutputConstraint(info) && !info.allowsRegister()) {
+ cgm.errorUnsupported(&stmt, "__asm__");
+ return constraint;
+ }
+ // Canonicalize the register here before returning it.
+ registerName = target.getNormalizedGCCRegisterName(registerName);
+ if (gccReg != nullptr)
+ *gccReg = registerName.str();
+ return (earlyClobber ? "&{" : "{") + registerName.str() + "}";
+}
+
static void collectClobbers(const CIRGenFunction &cgf, const AsmStmt &s,
std::string &constraints, bool &hasUnwindClobber,
bool &readOnly, bool readNone) {
@@ -83,16 +177,150 @@ static void collectClobbers(const CIRGenFunction &cgf, const AsmStmt &s,
}
}
+using ConstraintInfos = SmallVector<TargetInfo::ConstraintInfo, 4>;
+
+static void collectInOutConstraintInfos(const CIRGenFunction &cgf,
+ const AsmStmt &s, ConstraintInfos &out,
+ ConstraintInfos &in) {
+
+ for (unsigned i = 0, e = s.getNumOutputs(); i != e; i++) {
+ StringRef name;
+ if (const GCCAsmStmt *gas = dyn_cast<GCCAsmStmt>(&s))
+ name = gas->getOutputName(i);
+ TargetInfo::ConstraintInfo info(s.getOutputConstraint(i), name);
+ bool isValid = cgf.getTarget().validateOutputConstraint(info);
+ (void)isValid;
+ assert(isValid && "Failed to parse output constraint");
+ out.push_back(info);
+ }
+
+ for (unsigned i = 0, e = s.getNumInputs(); i != e; i++) {
+ StringRef name;
+ if (const GCCAsmStmt *gas = dyn_cast<GCCAsmStmt>(&s))
+ name = gas->getInputName(i);
+ TargetInfo::ConstraintInfo info(s.getInputConstraint(i), name);
+ bool isValid = cgf.getTarget().validateInputConstraint(out, info);
+ assert(isValid && "Failed to parse input constraint");
+ (void)isValid;
+ in.push_back(info);
+ }
+}
+
+static void emitAsmStores(CIRGenFunction &cgf, const AsmStmt &s,
+ const llvm::ArrayRef<mlir::Value> regResults,
+ const llvm::ArrayRef<mlir::Type> resultRegTypes,
+ const llvm::ArrayRef<mlir::Type> resultTruncRegTypes,
+ const llvm::ArrayRef<LValue> resultRegDests,
+ const llvm::ArrayRef<QualType> resultRegQualTys,
+ const llvm::BitVector &resultTypeRequiresCast,
+ const llvm::BitVector &resultRegIsFlagReg) {
+ CIRGenBuilderTy &builder = cgf.getBuilder();
+ CIRGenModule &cgm = cgf.cgm;
+ mlir::MLIRContext *ctx = builder.getContext();
+
+ assert(regResults.size() == resultRegTypes.size());
+ assert(regResults.size() == resultTruncRegTypes.size());
+ assert(regResults.size() == resultRegDests.size());
+
+ // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
+ // in which case its size may grow.
+ assert(resultTypeRequiresCast.size() <= resultRegDests.size());
+ assert(resultRegIsFlagReg.size() <= resultRegDests.size());
+
+ for (unsigned i = 0, e = regResults.size(); i != e; ++i) {
+ mlir::Value tmp = regResults[i];
+ mlir::Type truncTy = resultTruncRegTypes[i];
+
+ if (i < resultRegIsFlagReg.size() && resultRegIsFlagReg[i])
+ assert(!cir::MissingFeatures::asmLLVMAssume());
+
+ // If the result type of the LLVM IR asm doesn't match the result type of
+ // the expression, do the conversion.
+ if (resultRegTypes[i] != truncTy) {
+
+ // Truncate the integer result to the right size, note that TruncTy can be
+ // a pointer.
+ if (mlir::isa<mlir::FloatType>(truncTy)) {
+ tmp = builder.createFloatingCast(tmp, truncTy);
+ } else if (isa<cir::PointerType>(truncTy) &&
+ isa<cir::IntType>(tmp.getType())) {
+ uint64_t resSize = cgm.getDataLayout().getTypeSizeInBits(truncTy);
+ tmp = builder.createIntCast(
+ tmp, cir::IntType::get(ctx, (unsigned)resSize, false));
+ tmp = builder.createIntToPtr(tmp, truncTy);
+ } else if (isa<cir::PointerType>(tmp.getType()) &&
+ isa<cir::IntType>(truncTy)) {
+ uint64_t tmpSize = cgm.getDataLayout().getTypeSizeInBits(tmp.getType());
+ tmp = builder.createPtrToInt(
+ tmp, cir::IntType::get(ctx, (unsigned)tmpSize, false));
+ tmp = builder.createIntCast(tmp, truncTy);
+ } else if (isa<cir::IntType>(truncTy)) {
+ tmp = builder.createIntCast(tmp, truncTy);
+ } else if (isa<cir::VectorType>(truncTy)) {
+ assert(!cir::MissingFeatures::asmVectorType());
+ }
+ }
+
+ LValue dest = resultRegDests[i];
+ // ResultTypeRequiresCast elements correspond to the first
+ // ResultTypeRequiresCast.size() elements of RegResults.
+ if ((i < resultTypeRequiresCast.size()) && resultTypeRequiresCast[i]) {
+ unsigned size = cgf.getContext().getTypeSize(resultRegQualTys[i]);
+ Address addr =
+ dest.getAddress().withElementType(builder, resultRegTypes[i]);
+ if (cgm.getTargetCIRGenInfo().isScalarizableAsmOperand(cgf, truncTy)) {
+ builder.createStore(cgf.getLoc(s.getAsmLoc()), tmp, addr);
+ continue;
+ }
+
+ QualType ty =
+ cgf.getContext().getIntTypeForBitwidth(size, /*Signed=*/false);
+ if (ty.isNull()) {
+ const Expr *outExpr = s.getOutputExpr(i);
+ cgm.getDiags().Report(outExpr->getExprLoc(),
+ diag::err_store_value_to_reg);
+ return;
+ }
+ dest = cgf.makeAddrLValue(addr, ty);
+ }
+
+ cgf.emitStoreThroughLValue(RValue::get(tmp), dest);
+ }
+}
+
mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &s) {
// Assemble the final asm string.
std::string asmString = s.generateAsmString(getContext());
+ SourceLocation srcLoc = s.getAsmLoc();
+ mlir::Location loc = getLoc(srcLoc);
+
+ // Get all the output and input constraints together.
+ ConstraintInfos outputConstraintInfos;
+ ConstraintInfos inputConstraintInfos;
+ collectInOutConstraintInfos(*this, s, outputConstraintInfos,
+ inputConstraintInfos);
bool isGCCAsmGoto = false;
std::string constraints;
+ std::vector<LValue> resultRegDests;
+ std::vector<QualType> resultRegQualTys;
+ std::vector<mlir::Type> resultRegTypes;
+ std::vector<mlir::Type> resultTruncRegTypes;
+ std::vector<mlir::Type> argTypes;
+ std::vector<mlir::Type> argElemTypes;
+ std::vector<mlir::Value> args;
std::vector<mlir::Value> outArgs;
std::vector<mlir::Value> inArgs;
std::vector<mlir::Value> inOutArgs;
+ llvm::BitVector resultTypeRequiresCast;
+ llvm::BitVector resultRegIsFlagReg;
+
+ // Keep track of out constraints for tied input operand.
+ std::vector<std::string> outputConstraints;
+
+ // Keep track of defined physregs.
+ llvm::SmallSet<std::string, 8> physRegOutputs;
// An inline asm can be marked readonly if it meets the following conditions:
// - it doesn't have any sideeffects
@@ -102,12 +330,104 @@ mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &s) {
// in addition to meeting the conditions listed above.
bool readOnly = true, readNone = true;
- if (s.getNumInputs() != 0 || s.getNumOutputs() != 0) {
+ if (s.getNumInputs() != 0) {
assert(!cir::MissingFeatures::asmInputOperands());
- assert(!cir::MissingFeatures::asmOutputOperands());
- cgm.errorNYI(s.getAsmLoc(), "asm with operands");
+ cgm.errorNYI(srcLoc, "asm with input operands");
}
+ for (unsigned i = 0, e = s.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo &info = outputConstraintInfos[i];
+
+ // Simplify the output constraint.
+ std::string outputConstraint(s.getOutputConstraint(i));
+ outputConstraint =
+ simplifyConstraint(outputConstraint.c_str() + 1, getTarget());
+
+ const Expr *outExpr = s.getOutputExpr(i);
+ outExpr = outExpr->IgnoreParenNoopCasts(getContext());
+
+ std::string gccReg;
+ outputConstraint =
+ addVariableConstraints(outputConstraint, *outExpr, getTarget(), cgm, s,
+ info.earlyClobber(), &gccReg);
+
+ // Give an error on multiple outputs to same physreg.
+ if (!gccReg.empty() && !physRegOutputs.insert(gccReg).second)
+ cgm.error(srcLoc, "multiple outputs to hard register: " + gccReg);
+
+ outputConstraints.push_back(outputConstraint);
+ LValue dest = emitLValue(outExpr);
+
+ if (!constraints.empty())
+ constraints += ',';
+
+ // If this is a register output, then make the inline asm return it
+ // by-value. If this is a memory result, return the value by-reference.
+ QualType qty = outExpr->getType();
+ const bool isScalarOrAggregate =
+ hasScalarEvaluationKind(qty) || hasAggregateEvaluationKind(qty);
+ if (!info.allowsMemory() && isScalarOrAggregate) {
+ constraints += "=" + outputConstraint;
+ resultRegQualTys.push_back(qty);
+ resultRegDests.push_back(dest);
+
+ bool isFlagReg = llvm::StringRef(outputConstraint).starts_with("{@cc");
+ resultRegIsFlagReg.push_back(isFlagReg);
+
+ mlir::Type ty = convertTypeForMem(qty);
+ const bool requiresCast =
+ info.allowsRegister() &&
+ (cgm.getTargetCIRGenInfo().isScalarizableAsmOperand(*this, ty) ||
+ isa<cir::RecordType, cir::ArrayType>(ty));
+
+ resultTruncRegTypes.push_back(ty);
+ resultTypeRequiresCast.push_back(requiresCast);
+
+ if (requiresCast) {
+ unsigned size = getContext().getTypeSize(qty);
+ ty = cir::IntType::get(&getMLIRContext(), size, false);
+ }
+
+ resultRegTypes.push_back(ty);
+
+ if (info.hasMatchingInput())
+ assert(!cir::MissingFeatures::asmInputOperands());
+
+ if (mlir::Type adjTy = cgm.getTargetCIRGenInfo().adjustInlineAsmType(
+ *this, outputConstraint, resultRegTypes.back()))
+ resultRegTypes.back() = adjTy;
+ else
+ cgm.getDiags().Report(srcLoc, diag::err_asm_invalid_type_in_input)
+ << outExpr->getType() << outputConstraint;
+
+ // Update largest vector width for any vector types.
+ assert(!cir::MissingFeatures::asmVectorType());
+ } else {
+ Address destAddr = dest.getAddress();
+
+ // Matrix types in memory are represented by arrays, but accessed through
+ // vector pointers, with the alignment specified on the access operation.
+ // For inline assembly, update pointer arguments to use vector pointers.
+ // Otherwise there will be a mis-match if the matrix is also an
+ // input-argument which is represented as vector.
+ if (isa<MatrixType>(outExpr->getType().getCanonicalType()))
+ destAddr =
+ destAddr.withElementType(builder, convertType(outExpr->getType()));
+
+ argTypes.push_back(destAddr.getType());
+ argElemTypes.push_back(destAddr.getElementType());
+ outArgs.push_back(destAddr.getPointer());
+ args.push_back(destAddr.getPointer());
+ constraints += "=*";
+ constraints += outputConstraint;
+ readOnly = readNone = false;
+ }
+
+ if (info.isReadWrite())
+ assert(!cir::MissingFeatures::asmInputOperands());
+
+ } // iterate over output operands
+
bool hasUnwindClobber = false;
collectClobbers(*this, s, constraints, hasUnwindClobber, readOnly, readNone);
@@ -115,11 +435,20 @@ mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &s) {
mlir::Type resultType;
+ if (resultRegTypes.size() == 1) {
+ resultType = resultRegTypes[0];
+ } else if (resultRegTypes.size() > 1) {
+ std::string sname = builder.getUniqueAnonRecordName();
+ resultType =
+ builder.getCompleteRecordTy(resultRegTypes, sname, false, false);
+ }
bool hasSideEffect = s.isVolatile() || s.getNumOutputs() == 0;
+ std::vector<mlir::Value> regResults;
+
cir::InlineAsmOp ia = builder.create<cir::InlineAsmOp>(
- getLoc(s.getAsmLoc()), resultType, operands, asmString, constraints,
- hasSideEffect, inferFlavor(cgm, s), mlir::ArrayAttr());
+ loc, resultType, operands, asmString, constraints, hasSideEffect,
+ inferFlavor(cgm, s), mlir::ArrayAttr());
if (isGCCAsmGoto) {
assert(!cir::MissingFeatures::asmGoto());
@@ -127,10 +456,56 @@ mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &s) {
assert(!cir::MissingFeatures::asmUnwindClobber());
} else {
assert(!cir::MissingFeatures::asmMemoryEffects());
+
+ mlir::Value result;
+ if (ia.getNumResults())
+ result = ia.getResult(0);
+
+ llvm::SmallVector<mlir::Attribute> operandAttrs;
+
+ int i = 0;
+ for (auto typ : argElemTypes) {
+ if (typ) {
+ auto op = args[i++];
+ assert(mlir::isa<cir::PointerType>(op.getType()) &&
+ "pointer type expected");
+ assert(cast<cir::PointerType>(op.getType()).getPointee() == typ &&
+ "element type differs from pointee type!");
+
+ operandAttrs.push_back(mlir::UnitAttr::get(&getMLIRContext()));
+ } else {
+ // We need to add an attribute for every arg since later, during
+ // the lowering to LLVM IR the attributes will be assigned to the
+ // CallInsn argument by index, i.e. we can't skip null type here
+ operandAttrs.push_back(mlir::Attribute());
+ }
+ }
+ assert(args.size() == operandAttrs.size() &&
+ "The number of attributes is not even with the number of operands");
+
+ ia.setOperandAttrsAttr(builder.getArrayAttr(operandAttrs));
+
+ if (resultRegTypes.size() == 1) {
+ regResults.push_back(result);
+ } else if (resultRegTypes.size() > 1) {
+ CharUnits alignment = CharUnits::One();
+ mlir::StringAttr sname = cast<cir::RecordType>(resultType).getName();
+ mlir::Value dest = emitAlloca(sname, resultType, loc, alignment, false);
+ Address addr = Address(dest, alignment);
+ builder.createStore(loc, result, addr);
+
+ for (unsigned i = 0, e = resultRegTypes.size(); i != e; ++i) {
+ cir::PointerType typ = builder.getPointerTo(resultRegTypes[i]);
+ cir::GetMemberOp ptr = builder.createGetMember(loc, typ, dest, "", i);
+ cir::LoadOp tmp = builder.createLoad(loc, Address(ptr, alignment));
+ regResults.push_back(tmp);
+ }
+ }
}
- llvm::SmallVector<mlir::Attribute> operandAttrs;
- ia.setOperandAttrsAttr(builder.getArrayAttr(operandAttrs));
+ emitAsmStores(*this, s, regResults, resultRegTypes, resultTruncRegTypes,
+ resultRegDests, resultRegQualTys, resultTypeRequiresCast,
+ resultRegIsFlagReg);
return mlir::success();
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index d5296881540aa..8e1579810e147 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -2158,3 +2158,23 @@ DiagnosticBuilder CIRGenModule::errorNYI(SourceRange loc,
llvm::StringRef feature) {
return errorNYI(loc.getBegin(), feature) << loc;
}
+
+void CIRGenModule::error(SourceLocation loc, StringRef error) {
+ unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
+ getDiags().Report(astContext.getFullLoc(loc), diagID) << error;
+}
+
+/// Print out an error that codegen doesn't support the specified stmt yet.
+void CIRGenModule::errorUnsupported(const Stmt *s, llvm::StringRef type) {
+ unsigned diagId = diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot compile this %0 yet");
+ diags.Report(astContext.getFullLoc(s->getBeginLoc()), diagId)
+ << type << s->getSourceRange();
+}
+
+/// Print out an error that codegen doesn't support the specified decl yet.
+void CIRGenModule::errorUnsupported(const Decl *d, llvm::StringRef type) {
+ unsigned diagId = diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot compile this %0 yet");
+ diags.Report(astContext.getFullLoc(d->getLocation()), diagId) << type;
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 06cc3e09e416b..e8fd8fcb59689 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -437,6 +437,15 @@ class CIRGenModule : public CIRGenTypeCache...
[truncated]
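For context, a minimal x86 example of the kind of C source whose codegen this patch enables (an illustrative sketch, not taken from the PR's tests): a register output such as "=r" is returned by-value from the CIR inline asm operation, while a memory-only output such as "=m" is passed by-reference and emitted with an "=*" constraint.

int reg_output(void) {
  int x;
  // "=r": register output, returned by-value from the inline asm.
  __asm__("movl $42, %0" : "=r"(x));
  return x;
}

void mem_output(int *p) {
  // "=m": memory output, passed indirectly ("=*m" in the final constraint string).
  __asm__("movl $7, %0" : "=m"(*p));
}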
clang/lib/CIR/CodeGen/CIRGenAsm.cpp
Outdated
I didn't find a suitable header to place those functions.
Maybe put the declaration in clang/include/clang/Basic/TargetInfo.h and the implementation in ./lib/Basic/TargetInfo.cpp?
I would suggest a separate change that just moves the code and is presented as an NFC change in clang with an explanation as to why we want to move it. This code doesn't change often, but it's so specialized that I wouldn't want it to be duplicated.
Works for me!
What about addVariableConstraints? It doesn't belong in TargetInfo.h because of the dependency on clang/AST/Expr.h and clang/AST/Stmt.h, and it would require a callback for error reporting because the module type differs (CodeGenModule vs CIRGenModule). I recommend keeping it as is.
@el-ev this is probably a subject for your next NFC PR (as pointed out by @andykaylor); let's try to wrap up this one first.
Regarding "it would require a callback for error reporting": there are other ways, e.g. you can change the function to return an llvm::Expected<std::string> and emit the error at the call site instead.
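A minimal sketch of that pattern (illustrative only; normalizeConstraint is a placeholder name, not the helper from this PR):

#include "llvm/Support/Error.h"
#include <string>

// Return the result or an error; the caller (CodeGenModule or CIRGenModule)
// decides how to report it, so the helper needs no module reference.
static llvm::Expected<std::string> normalizeConstraint(const std::string &c) {
  if (c.empty())
    return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                   "empty constraint");
  return c;
}

// Call site, e.g. in CIRGen:
//   if (llvm::Expected<std::string> r = normalizeConstraint(constraint)) {
//     outputConstraint = *r;
//   } else {
//     cgm.errorUnsupported(&s, "__asm__");
//     llvm::consumeError(r.takeError());
//   }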
Force-pushed from d200c89 to 775b7a5.
Nice, thanks for adding inline asm support.
Are the tests exercising all the different handling of these characters? The PR is a bit big, so I'm curious. I'm not sure how to split this functionality into smaller PRs, though; it does require a lot of helpers to get done. Perhaps emitAsmStmt can be further divided so it's easier to read (that's probably the biggest)?
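For reference, the constraint characters that simplifyConstraint rewrites can be driven from C with output-only asm along these lines (an x86 sketch for illustration; it makes no claim about what the PR's test files actually contain, and the expected CIR output is omitted):

int early_clobber(void) {
  int r;
  __asm__("movl $1, %0" : "=&r"(r)); // '&' (early-clobber) is kept as-is
  return r;
}

int multi_alternative(void) {
  int r;
  __asm__("movl $2, %0" : "=r,m"(r)); // ',' between alternatives becomes '|'
  return r;
}

int general_operand(void) {
  int r;
  __asm__("movl $3, %0" : "=g"(r)); // 'g' is rewritten to "imr"
  return r;
}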
Force-pushed from 775b7a5 to 53257ac.
// FIXME(cir): This should be a common helper between CIRGen
// and traditional CodeGen
static std::string simplifyConstraint(
    const char *constraint, const TargetInfo &target,
This code relies on the assumption that constraint is zero-terminated. That's guaranteed by the fact that the call site is using std::string::c_str() to get the pointer, but in general it's not a safe assumption, so it might be a good idea to change the interface to take a std::string reference argument when you move the code.
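One possible shape for that change (a sketch of the reviewer's suggestion, not the code that was eventually committed):

// Sketch: pass the constraint as a std::string so the helper no longer
// depends on the caller keeping a NUL-terminated buffer alive.
static std::string
simplifyConstraint(const std::string &constraint, const TargetInfo &target,
                   SmallVectorImpl<TargetInfo::ConstraintInfo> *outCons = nullptr);

// The call site then drops the c_str() + 1 arithmetic:
//   outputConstraint = simplifyConstraint(outputConstraint.substr(1), getTarget());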
}

// FIXME(cir): This should be a common helper between CIRGen
// and traditional CodeGen
Please pick one comment style for the whole block of comments, either // or ///.
@el-ev Do you intend to continue working on the inline assembly upstreaming? This PR got stalled waiting for the change to share the constraint handling, but I think if you can update it, this should be able to make progress now.