
Commit 1ab3771

catch up to f8 llvm change
1 parent 53ce20b commit 1ab3771

11 files changed, +65 -47 lines changed
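
This commit tracks an upstream LLVM/MLIR change to the 8-bit float type API: the Type convenience predicates such as type.isFloat8E5M2() are replaced throughout with llvm::isa<> queries against the dedicated FP8 type classes (Float8E5M2Type, Float8E4M3FNType, Float8E4M3FNUZType, Float8E5M2FNUZType, Float8E4M3B11FNUZType). A minimal before/after sketch of the pattern, assuming MLIR's builtin FP8 type classes are in scope; isFp8E5M2 is a hypothetical helper used only for illustration:

    // Migration pattern applied throughout this commit (sketch).
    #include "mlir/IR/BuiltinTypes.h"
    #include "llvm/Support/Casting.h"

    // Hypothetical helper for illustration; not part of the commit.
    bool isFp8E5M2(mlir::Type type) {
      // Before: type.isFloat8E5M2()  (member predicate, per the old API)
      // After:  an llvm::isa<> query on the dedicated type class.
      return llvm::isa<mlir::Float8E5M2Type>(type);
    }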

include/triton/Conversion/MLIRTypes.h

Lines changed: 8 additions & 6 deletions
@@ -28,15 +28,17 @@ inline Type bf16Ty(MLIRContext *ctx) { return BFloat16Type::get(ctx); }
 
 inline bool isFloat(Type type) {
   return type.isF32() || type.isF64() || type.isF16() || type.isF128() ||
-         type.isBF16() || type.isFloat8E4M3B11FNUZ() || type.isFloat8E4M3FN() ||
-         type.isFloat8E4M3FNUZ() || type.isFloat8E5M2() ||
-         type.isFloat8E5M2FNUZ();
+         type.isBF16() || llvm::isa<Float8E4M3B11FNUZType>(type) ||
+         llvm::isa<Float8E4M3FNType>(type) ||
+         llvm::isa<Float8E4M3FNUZType>(type) ||
+         llvm::isa<Float8E5M2Type>(type) || llvm::isa<Float8E5M2FNUZType>(type);
 }
 
 inline bool isFloat8(Type type) {
-  return type.isFloat8E4M3B11FNUZ() || type.isFloat8E4M3FN() ||
-         type.isFloat8E4M3FNUZ() || type.isFloat8E5M2() ||
-         type.isFloat8E5M2FNUZ();
+  return llvm::isa<Float8E4M3B11FNUZType>(type) ||
+         llvm::isa<Float8E4M3FNType>(type) ||
+         llvm::isa<Float8E4M3FNUZType>(type) ||
+         llvm::isa<Float8E5M2Type>(type) || llvm::isa<Float8E5M2FNUZType>(type);
 }
 
 inline bool isInt(Type type) { return type.isIntOrFloat() && !isFloat(type); }
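
For orientation, a hypothetical caller (not part of the commit) showing how these predicates compose. Since isFloat deliberately includes the FP8 variants, an FP8-specific branch must test isFloat8 first; the sketch assumes the helpers from this header are in scope:

    // Illustrative only; assumes isFloat/isFloat8/isInt from
    // include/triton/Conversion/MLIRTypes.h are visible here.
    void classifyElementType(mlir::Type elemTy) {
      if (isFloat8(elemTy)) {
        // 8-bit float: may need promotion (e.g. to f16) before MMA lowering.
      } else if (isFloat(elemTy)) {
        // Wider floats: f16, bf16, f32, f64, f128.
      } else if (isInt(elemTy)) {
        // Integer element type (isIntOrFloat but not a float).
      }
    }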

lib/Analysis/Utility.cpp

Lines changed: 9 additions & 6 deletions
@@ -732,14 +732,15 @@ bool supportMMA(triton::DotOp op, int version) {
     return false;
   if (!(numWarps % 4 == 0 && retShapePerCTA[rank - 2] % 64 == 0 &&
         retShapePerCTA[rank - 1] % 8 == 0 &&
-        (aElemTy.isFloat8E5M2() || aElemTy.isFloat8E4M3FN() ||
-         aElemTy.isInteger(8) || aElemTy.isF16() || aElemTy.isBF16() ||
-         aElemTy.isF32()))) {
+        (llvm::isa<Float8E5M2Type>(aElemTy) ||
+         llvm::isa<Float8E4M3FNType>(aElemTy) || aElemTy.isInteger(8) ||
+         aElemTy.isF16() || aElemTy.isBF16() || aElemTy.isF32()))) {
     return false;
   }
   // We cannot use MMA_V3 if we need to accumulate in F32 within the MMA op.
   if (op.getMaxNumImpreciseAcc() < 32 &&
-      (aElemTy.isFloat8E5M2() || aElemTy.isFloat8E4M3FN()) &&
+      (llvm::isa<Float8E5M2Type>(aElemTy) ||
+       llvm::isa<Float8E4M3FNType>(aElemTy)) &&
       cast<RankedTensorType>(op.getType()).getElementType().isF32()) {
     return false;
   }
@@ -760,8 +761,10 @@ bool supportMMA(Value value, int version) {
       cast<triton::gpu::TensorOrMemDesc>(value.getType()).getElementType();
   // FP8 is not natively supported on all mma versions but it can always be
   // promoted to fp16 therefore we can always support it.
-  bool isFP8 = elemTy.isFloat8E5M2() || elemTy.isFloat8E4M3FN() ||
-               elemTy.isFloat8E5M2FNUZ() || elemTy.isFloat8E4M3FNUZ();
+  bool isFP8 = llvm::isa<Float8E5M2Type>(elemTy) ||
+               llvm::isa<Float8E4M3FNType>(elemTy) ||
+               llvm::isa<Float8E5M2FNUZType>(elemTy) ||
+               llvm::isa<Float8E4M3FNUZType>(elemTy);
   return isFP8 || elemTy.isF16() || elemTy.isBF16() ||
          (elemTy.isF32() && version >= 2) ||
          (elemTy.isInteger(8) && version >= 2);

lib/Dialect/TritonGPU/Transforms/AccelerateMatmul.cpp

Lines changed: 1 addition & 1 deletion
@@ -344,7 +344,7 @@ static void decomposeMixedModeDotOp(ModuleOp mod, int computeCapability) {
     NvidiaMmaEncodingAttr mmaLayout =
         dyn_cast<NvidiaMmaEncodingAttr>(D.getType().getEncoding());
     if (mmaLayout) {
-      bool isNativeFP8 = AElType.isFloat8E5M2() || AElType.isFloat8E4M3FN();
+      bool isNativeFP8 = llvm::isa<Float8E5M2Type>(AElType) || llvm::isa<Float8E4M3FNType>(AElType);
       // promote operands for sm < 89 since fp8 mma is not natively supported
       // promote operands for sm >= 90 when mma is not v3
       if (!isNativeFP8 ||

lib/Dialect/TritonGPU/Transforms/Utility.cpp

Lines changed: 4 additions & 3 deletions
@@ -44,9 +44,10 @@ SmallVector<unsigned, 3> mmaVersionToInstrShape(int version,
   SmallVector<unsigned> validN;
 
   // MMAv3 with larger instruction shape is preferred.
-  if (eltType.isFloat8E5M2() || eltType.isFloat8E4M3FN() ||
-      eltType.isFloat8E4M3FNUZ() || eltType.isF16() || eltType.isBF16() ||
-      eltType.isF32()) {
+  if (llvm::isa<Float8E5M2Type>(eltType) ||
+      llvm::isa<Float8E4M3FNType>(eltType) ||
+      llvm::isa<Float8E4M3FNUZType>(eltType) || eltType.isF16() ||
+      eltType.isBF16() || eltType.isF32()) {
     validN.assign({256, 248, 240, 232, 224, 216, 208, 200, 192, 184, 176,
                    168, 160, 152, 144, 136, 128, 120, 112, 104, 96, 88,
                    80, 72, 64, 56, 48, 40, 32, 24, 16, 8});

lib/Dialect/TritonNvidiaGPU/IR/Ops.cpp

Lines changed: 4 additions & 2 deletions
@@ -77,8 +77,10 @@ bool WarpGroupDotOp::needsPartialAccumulator() {
   const auto &d = getD();
   auto aTensorTy = cast<triton::gpu::TensorOrMemDesc>(a.getType());
   auto aElTy = cast<triton::gpu::TensorOrMemDesc>(a.getType()).getElementType();
-  bool isFP8 = aElTy.isFloat8E5M2() || aElTy.isFloat8E4M3FN() ||
-               aElTy.isFloat8E5M2FNUZ() || aElTy.isFloat8E4M3FNUZ();
+  bool isFP8 = llvm::isa<Float8E5M2Type>(aElTy) ||
+               llvm::isa<Float8E4M3FNType>(aElTy) ||
+               llvm::isa<Float8E5M2FNUZType>(aElTy) ||
+               llvm::isa<Float8E4M3FNUZType>(aElTy);
   bool accFP32 =
       cast<triton::gpu::TensorOrMemDesc>(d.getType()).getElementType().isF32();
   uint32_t maxNumImpreciseAcc = getMaxNumImpreciseAcc();

third_party/amd/lib/TritonAMDGPUToLLVM/ElementwiseOpToLLVM.cpp

Lines changed: 10 additions & 8 deletions
@@ -1106,17 +1106,19 @@ struct FpToFpOpConversion
       return outVals;
     }
     size_t numElements = 4;
-    if (srcElementType.isFloat8E4M3FN() || dstElementType.isFloat8E4M3FN() ||
-        srcElementType.isFloat8E4M3FNUZ() ||
-        dstElementType.isFloat8E4M3FNUZ() ||
-        srcElementType.isFloat8E5M2FNUZ() ||
-        dstElementType.isFloat8E5M2FNUZ()) {
+    if (llvm::isa<Float8E4M3FNType>(srcElementType) ||
+        llvm::isa<Float8E4M3FNType>(dstElementType) ||
+        llvm::isa<Float8E4M3FNUZType>(srcElementType) ||
+        llvm::isa<Float8E4M3FNUZType>(dstElementType) ||
+        llvm::isa<Float8E5M2FNUZType>(srcElementType) ||
+        llvm::isa<Float8E5M2FNUZType>(dstElementType)) {
       numElements = 2;
     }
     bool useFP16IntermediateSrc =
-        srcElementType.isF32() && !(isaFamily == AMD::ISAFamily::CDNA3 &&
-                                    (dstElementType.isFloat8E4M3FNUZ() ||
-                                     dstElementType.isFloat8E5M2FNUZ()));
+        srcElementType.isF32() &&
+        !(isaFamily == AMD::ISAFamily::CDNA3 &&
+          (llvm::isa<Float8E4M3FNUZType>(dstElementType) ||
+           llvm::isa<Float8E5M2FNUZType>(dstElementType)));
     bool isDstFP32 = dstElementType.isF32();
     Type srcType = useFP16IntermediateSrc ? f16_ty : srcElementType;
     Type dstType = isDstFP32 ? f16_ty : dstElementType;

third_party/amd/lib/TritonAMDGPUTransforms/AccelerateAMDMatmul.cpp

Lines changed: 2 additions & 1 deletion
@@ -416,7 +416,8 @@ class BlockedToMFMA : public OpRewritePattern<tt::DotOp> {
     // store instructions, except for fp8 matmul kernels due to regression
     // TODO (lixun): investigate the regression and enable this feature again
     auto aElemTy = mfmaInstr.getElementTypeA();
-    bool isFP8 = aElemTy.isFloat8E5M2FNUZ() || aElemTy.isFloat8E4M3FNUZ();
+    bool isFP8 = llvm::isa<Float8E5M2FNUZType>(aElemTy) ||
+                 llvm::isa<Float8E4M3FNUZType>(aElemTy);
     bool isTransposed = isChainDot(dotOp) || !isFP8;
     mfmaEnc = ttg::AMDMfmaEncodingAttr::get(
         oldRetType.getContext(),

third_party/amd/lib/TritonAMDGPUTransforms/MfmaGroup.cpp

Lines changed: 10 additions & 5 deletions
@@ -20,19 +20,24 @@ static MfmaTypeId chooseAppropriateMfmaId(mlir::Type dataTypeA,
   if (dataTypeA.isInteger(8) && dataTypeB.isInteger(8)) {
     return MfmaTypeId::I8TyId;
   }
-  if (dataTypeA.isFloat8E4M3FNUZ() && dataTypeB.isFloat8E4M3FNUZ()) {
+  if (llvm::isa<Float8E4M3FNUZType>(dataTypeA) &&
+      llvm::isa<Float8E4M3FNUZType>(dataTypeB)) {
     return MfmaTypeId::Fp8Fp8TyId;
   }
-  if (dataTypeA.isFloat8E4M3FNUZ() && dataTypeB.isFloat8E5M2FNUZ()) {
+  if (llvm::isa<Float8E4M3FNUZType>(dataTypeA) &&
+      llvm::isa<Float8E5M2FNUZType>(dataTypeB)) {
     return MfmaTypeId::Fp8Bf8TyId;
   }
-  if (dataTypeA.isFloat8E5M2FNUZ() && dataTypeB.isFloat8E4M3FNUZ()) {
+  if (llvm::isa<Float8E5M2FNUZType>(dataTypeA) &&
+      llvm::isa<Float8E4M3FNUZType>(dataTypeB)) {
     return MfmaTypeId::Bf8Fp8TyId;
   }
-  if (dataTypeA.isFloat8E5M2FNUZ() && dataTypeB.isFloat8E5M2FNUZ()) {
+  if (llvm::isa<Float8E5M2FNUZType>(dataTypeA) &&
+      llvm::isa<Float8E5M2FNUZType>(dataTypeB)) {
     return MfmaTypeId::Bf8Bf8TyId;
   }
-  if (dataTypeA.isFloat8E5M2() && dataTypeB.isFloat8E5M2()) {
+  if (llvm::isa<Float8E5M2Type>(dataTypeA) &&
+      llvm::isa<Float8E5M2Type>(dataTypeB)) {
     return MfmaTypeId::Fp16TyId;
   }
   llvm_unreachable("Unsupported input argument type.");

third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/MMAv2.cpp

Lines changed: 8 additions & 8 deletions
@@ -299,17 +299,17 @@ TensorCoreType getMmaType(triton::DotOp op) {
     return TensorCoreType::FP32_FP16_FP16_FP32;
   if (aTy.getElementType().isBF16() && bTy.getElementType().isBF16())
     return TensorCoreType::FP32_BF16_BF16_FP32;
-  if (aTy.getElementType().isFloat8E5M2() &&
-      bTy.getElementType().isFloat8E5M2())
+  if (llvm::isa<Float8E5M2Type>(aTy.getElementType()) &&
+      llvm::isa<Float8E5M2Type>(bTy.getElementType()))
     return TensorCoreType::FP32_FP8E5M2_FP8E5M2_FP32;
-  if (aTy.getElementType().isFloat8E5M2() &&
-      bTy.getElementType().isFloat8E4M3FN())
+  if (llvm::isa<Float8E5M2Type>(aTy.getElementType()) &&
+      llvm::isa<Float8E4M3FNType>(bTy.getElementType()))
     return TensorCoreType::FP32_FP8E5M2_FP8E4M3FN_FP32;
-  if (aTy.getElementType().isFloat8E4M3FN() &&
-      bTy.getElementType().isFloat8E5M2())
+  if (llvm::isa<Float8E4M3FNType>(aTy.getElementType()) &&
+      llvm::isa<Float8E5M2Type>(bTy.getElementType()))
     return TensorCoreType::FP32_FP8E4M3FN_FP8E5M2_FP32;
-  if (aTy.getElementType().isFloat8E4M3FN() &&
-      bTy.getElementType().isFloat8E4M3FN())
+  if (llvm::isa<Float8E4M3FNType>(aTy.getElementType()) &&
+      llvm::isa<Float8E4M3FNType>(bTy.getElementType()))
     return TensorCoreType::FP32_FP8E4M3FN_FP8E4M3FN_FP32;
   if (aTy.getElementType().isF32() && bTy.getElementType().isF32() &&
       op.getInputPrecision() == InputPrecision::TF32)

third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp

Lines changed: 2 additions & 2 deletions
@@ -57,9 +57,9 @@ triton::nvgpu::WGMMAEltType getMmaOperandType(Value a, bool allowTF32) {
     return triton::nvgpu::WGMMAEltType::tf32;
   } else if (aTy.isInteger(8)) {
     return triton::nvgpu::WGMMAEltType::s8;
-  } else if (aTy.isFloat8E5M2()) {
+  } else if (llvm::isa<Float8E5M2Type>(aTy)) {
    return triton::nvgpu::WGMMAEltType::e5m2;
-  } else if (aTy.isFloat8E4M3FN()) {
+  } else if (llvm::isa<Float8E4M3FNType>(aTy)) {
     return triton::nvgpu::WGMMAEltType::e4m3;
   } else {
     llvm::report_fatal_error("Unsupported mma operand type found");
