Merged

Commits (23)
6542e7e  Squashed 'external/llvm-project/' changes from 3332d18c2507..e8215edf… (umangyadav, Jun 24, 2025)
ea8c92c  Merge commit '6542e7e6b4744737fae5e3abbfe720bb1fc4786b' into upstream… (umangyadav, Jun 24, 2025)
3cfa54d  Fixes for external (umangyadav, Jun 24, 2025)
8de7cfc  [External] Remove files that no longer exists in external (umangyadav, Jun 24, 2025)
97602db  [External] Remove files that no longer exists in external (umangyadav, Jun 24, 2025)
d7237d5  [External] Some more fixes (umangyadav, Jun 24, 2025)
1ae19b9  Apply rocMLIR patches for external (umangyadav, Jun 24, 2025)
7d04ae0  Remove disabled verification tests (umangyadav, Jun 24, 2025)
ddd649a  Fixes for https://github.com/llvm/llvm-project/pull/144636 (umangyadav, Jun 24, 2025)
15e6427  Fixes for rocMLIR (umangyadav, Jun 24, 2025)
289459b  Fixes required due to https://github.com/llvm/llvm-project/pull/144636 (umangyadav, Jun 24, 2025)
f905655  Changes in rocMLIR due to https://github.com/llvm/llvm-project/pull/1… (umangyadav, Jun 24, 2025)
23cbcfb  Fix runner-pipelines.mlir test (umangyadav, Jun 24, 2025)
31e77d3  Update librockcompiler_deps (umangyadav, Jun 24, 2025)
2ccc1bb  Formating (umangyadav, Jun 24, 2025)
3138a41  Enable Navi4x in CI (umangyadav, Jun 24, 2025)
7f30da1  Fix parameterSweeps (umangyadav, Jun 24, 2025)
d7f9b54  Fix parameterSweeps scripts (umangyadav, Jun 25, 2025)
e1b3a47  Address review comments, Revert changes for Navi4x in Jenkinsfile and… (umangyadav, Jun 25, 2025)
4a8c3ab  Merge branch 'develop' into upstream_merge_55 (umangyadav, Jun 25, 2025)
966a9b3  Merge branch 'develop' into upstream_merge_55 (umangyadav, Jun 26, 2025)
74eb560  Merge branch 'develop' into upstream_merge_55 (dorde-antic, Jul 2, 2025)
7a50d7f  Merge branch 'develop' into upstream_merge_55 (dorde-antic, Jul 3, 2025)
@@ -10,11 +10,13 @@

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.h"
#include "mlir/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MHAL/IR/MHAL.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
@@ -124,11 +126,13 @@ struct LaunchOpInterface
     Type returnType = returnVal.getType();
     if (isa<TensorType>(returnType)) {
       assert(returnType == callResultTypes[funcResultIdx++]);
-      FailureOr<BaseMemRefType> memrefType =
+      FailureOr<BufferLikeType> bufferType =
           bufferization::getBufferType(returnVal, options, state);
-      if (failed(memrefType))
+      if (failed(bufferType))
         return failure();
-      resultTypes.push_back(*memrefType);
+      assert(isa<BaseMemRefType>(*bufferType) && "expected memref type");
+      BaseMemRefType memrefType = cast<BaseMemRefType>(*bufferType);
+      resultTypes.push_back(memrefType);
     } else {
       // Non-tensor values are returned.
       resultTypes.push_back(returnType);
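Context for this hunk: it tracks the upstream API change the commit messages point at (llvm/llvm-project#144636), where bufferization::getBufferType now returns FailureOr<BufferLikeType> instead of FailureOr<BaseMemRefType>, so callers that need a memref type must narrow the result explicitly. A minimal sketch of the new pattern, with returnVal, options, state, and resultTypes standing in for the values already in scope above:

```cpp
// Sketch, assuming the same in-scope values as the hunk above
// (returnVal, options, state, resultTypes).
FailureOr<bufferization::BufferLikeType> bufferType =
    bufferization::getBufferType(returnVal, options, state);
if (failed(bufferType))
  return failure();
// BufferLikeType is a type interface; one-shot bufferization produces
// memrefs here, so narrowing to BaseMemRefType is expected to hold.
assert(isa<BaseMemRefType>(*bufferType) && "expected memref type");
resultTypes.push_back(cast<BaseMemRefType>(*bufferType));
```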
mlir/lib/Conversion/EmulateFp8ExtTrunc/EmulateFp8ExtTrunc.cpp (6 additions, 6 deletions)

@@ -311,14 +311,14 @@ static FlatSymbolRefAttr makeFp8TruncFunction(Location loc, FloatType outType,
   Value cmp = b.create<CmpIOp>(CmpIPredicate::eq, and4, infNanConst);

   Block *notInfNan = func.addBlock();
-  Value outNan = b.create<ConstantFloatOp>(APFloat::getQNaN(outSem), outType);
+  Value outNan = b.create<ConstantFloatOp>(outType, APFloat::getQNaN(outSem));
   b.create<cf::CondBranchOp>(cmp, ret, ValueRange{outNan}, notInfNan,
                              ValueRange{});
   b.setInsertionPointToStart(notInfNan);

   // A deviation from the MIGraphX: denormals are zero here
   Value cmp5 = b.create<CmpIOp>(CmpIPredicate::eq, and2, i32Const(0));
-  Value outZero = b.create<ConstantFloatOp>(APFloat::getZero(outSem), outType);
+  Value outZero = b.create<ConstantFloatOp>(outType, APFloat::getZero(outSem));
   Block *notZero = func.addBlock();
   b.create<cf::CondBranchOp>(cmp5, ret, ValueRange{outZero}, notZero,
                              ValueRange{});
@@ -366,7 +366,7 @@ static FlatSymbolRefAttr makeFp8TruncFunction(Location loc, FloatType outType,
   Value cmp57 = b.create<CmpIOp>(CmpIPredicate::ne, sub43, i32Const(0));
   Value and58 = b.create<AndIOp>(add56, i32Const(1 << 23));
   Value tobool59Not = b.create<CmpIOp>(CmpIPredicate::eq, and58, i32Const(0));
-  Value trueConst = b.create<ConstantIntOp>(true, 1);
+  Value trueConst = b.create<ConstantIntOp>(true, /*width=*/1);
   Value brCond133 = b.create<SelectOp>(cmp57, trueConst, tobool59Not);

   Block *ifElse61 = func.addBlock();
@@ -392,7 +392,7 @@ static FlatSymbolRefAttr makeFp8TruncFunction(Location loc, FloatType outType,
   b.setInsertionPointToStart(ifThen70);
   Value ir5 = b.create<TruncIOp>(i8, ir1);
   Value conv =
-      b.create<OrIOp>(ir5, b.create<ConstantIntOp>(127, b.getI8Type()));
+      b.create<OrIOp>(ir5, b.create<ConstantIntOp>(b.getI8Type(), 127));
   Value convOut = b.create<BitcastOp>(outType, conv);
   b.create<cf::BranchOp>(ret, convOut);

@@ -402,7 +402,7 @@ static FlatSymbolRefAttr makeFp8TruncFunction(Location loc, FloatType outType,
   Value cmp72 = b.create<CmpIOp>(CmpIPredicate::eq, f8Exponent0, i32Const(0));
   Value cmp74 = b.create<CmpIOp>(CmpIPredicate::ult, mantissa1,
                                  i32Const(1 << (16 + eBits)));
-  Value falseConst = b.create<ConstantIntOp>(false, 1);
+  Value falseConst = b.create<ConstantIntOp>(false, /*width=*/1);
   Value brCond = b.create<SelectOp>(cmp72, cmp74, falseConst);
   b.create<cf::CondBranchOp>(brCond, ret, ValueRange{outZero}, ifEnd76,
                              ValueRange{f8Exponent0, mantissa1});
@@ -735,7 +735,7 @@ void Fp8TruncToCallPattern::rewrite(TruncFOp op, OpAdaptor adaptor,
   Value rets = rewriter.createOrFold<vector::SplatOp>(
       loc,
       rewriter.createOrFold<ConstantFloatOp>(
-          loc, APFloat::getZero(outElemType.getFloatSemantics()), outElemType),
+          loc, outElemType, APFloat::getZero(outElemType.getFloatSemantics())),
       retVecType);
   SmallVector<int64_t> strides = computeStrides(inVecType.getShape());
   for (int64_t i = 0, e = inVecType.getNumElements(); i < e; ++i) {
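Every hunk in this file is the same mechanical migration: the arith constant-op builders now take the result type before the value. A hedged before/after sketch, reusing b, outType, and outSem from the hunks above (the calls omit a Location because the file's builder appears to carry one implicitly):

```cpp
// Old order (pre-change), shown for contrast:
//   b.create<ConstantFloatOp>(APFloat::getQNaN(outSem), outType);
// New order: result type first, then the value.
Value outNan = b.create<ConstantFloatOp>(outType, APFloat::getQNaN(outSem));
// Integer constants follow the same type-first convention:
Value conv127 = b.create<ConstantIntOp>(b.getI8Type(), 127);
// The bool/width form keeps value-first; the diff only adds a
// /*width=*/ annotation there for readability.
Value trueConst = b.create<ConstantIntOp>(true, /*width=*/1);
```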
mlir/lib/Dialect/Rock/Transforms/SugarToLoops.cpp (1 addition, 1 deletion)

@@ -916,7 +916,7 @@ static Value getConstIntOrIndexValue(OpBuilder &b, Location loc, int64_t value,
   if (isa<IndexType>(type)) {
     return b.create<ConstantIndexOp>(loc, value);
   }
-  return b.create<ConstantIntOp>(loc, value, type);
+  return b.create<ConstantIntOp>(loc, type, value);
 }

 // Manually flatten a set of coordinates into a single address
mlir/lib/Dialect/Rock/utility/builderUtils.cpp (1 addition, 1 deletion)

@@ -209,7 +209,7 @@ Value getAsTensor(OpBuilder &builder, Location loc, mlir::Value value,
                   bool isWritable) {
   constexpr bool isRestrict{true};
   Value origTensor = builder.create<bufferization::ToTensorOp>(
-      loc, value, isRestrict, isWritable);
+      loc, value.getType(), value, isRestrict, isWritable);
   return origTensor;
 }

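The bufferization::ToTensorOp builder change follows the same theme: a type argument now precedes the buffer value, and the three rocmlir-gen.cpp hunks below make the identical edit. A minimal sketch mirroring getAsTensor above; per the diff, the buffer's own type is what gets passed, with the tensor result type presumably derived by the op's builder:

```cpp
// Assumed context: OpBuilder builder, Location loc, and a memref-typed
// Value value, as in getAsTensor above.
constexpr bool isRestrict = true;
constexpr bool isWritable = false;
Value asTensor = builder.create<bufferization::ToTensorOp>(
    loc, value.getType(), value, isRestrict, isWritable);
```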
mlir/tools/rocmlir-gen/rocmlir-gen.cpp (7 additions, 4 deletions)

@@ -3280,7 +3280,8 @@ createCpuConvElementwiseGemmKernelWithMlir(ModuleOp module,
                     bool isWritable = false) {
     constexpr bool isRestrict{true};
     Value flatTensor = builder.create<bufferization::ToTensorOp>(
-        loc, block->getArgument(blockArgIndex), isRestrict, isWritable);
+        loc, block->getArgument(blockArgIndex).getType(),
+        block->getArgument(blockArgIndex), isRestrict, isWritable);
     ArrayRef<int64_t> origShape =
         cast<ShapedType>(argTypes[blockArgIndex]).getShape();

@@ -3459,7 +3460,8 @@ createCpuGemmElementwiseGemmKernelWithMlir(ModuleOp module,
                     bool isWritable = false) {
     constexpr bool isRestrict{true};
     Value flatTensor = builder.create<bufferization::ToTensorOp>(
-        loc, block->getArgument(blockArgIndex), isRestrict, isWritable);
+        loc, block->getArgument(blockArgIndex).getType(),
+        block->getArgument(blockArgIndex), isRestrict, isWritable);
     ArrayRef<int64_t> origShape =
         cast<ShapedType>(argTypes[blockArgIndex]).getShape();

@@ -3574,7 +3576,8 @@ static func::FuncOp createCpuAttentionKernelWithMlir(ModuleOp module,
                     bool isWritable = false) {
     constexpr bool isRestrict{true};
     Value flatTensor = builder.create<bufferization::ToTensorOp>(
-        loc, block->getArgument(blockArgIndex), isRestrict, isWritable);
+        loc, block->getArgument(blockArgIndex).getType(),
+        block->getArgument(blockArgIndex), isRestrict, isWritable);
     ArrayRef<int64_t> origShape =
         cast<ShapedType>(argTypes[blockArgIndex]).getShape();

@@ -3935,7 +3938,7 @@ static func::FuncOp createVerifierFunc(ModuleOp module, const KernelIF &kernel,
   char printDebug = static_cast<char>(printVerifyResults.getValue());

   auto printDebugVal =
-      b.create<arith::ConstantIntOp>(loc, printDebug, charType);
+      b.create<arith::ConstantIntOp>(loc, charType, printDebug);

   // obtain function name of the verifier wrapper
   std::string verifyFuncName = "mcpuVerify";