Commit 7676051

Fix lit failures after 96e53bb
Signed-off-by: Whitney Tsang <[email protected]>
1 parent edfbc64 commit 7676051


2 files changed: +3, -6 lines changed


lib/Dialect/TritonGPU/IR/Dialect.cpp

Lines changed: 2 additions & 1 deletion
@@ -2143,7 +2143,8 @@ LogicalResult DotOperandEncodingAttr::verify(
  if (kWidth != 16 && parentAttr.getVersion() == 1 ||
      kWidth != 8 && kWidth != 16 && parentAttr.getVersion() == 2)
    return emitError() << "ttg.dot_op kWidth parameter must be 16 for "
-                         "gfx11 and 8/16 for gfx12";
+                         "gfx11 and 4/8/16 for gfx12 (including packed "
+                         "cases for `scaled_dot`)";
  return success();
}
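Note (not part of the diff): only the error message changes in this hunk; the condition shown in the context lines still rejects kWidth == 4 for a version 2 (gfx12) parent, so the packed `scaled_dot` cases mentioned in the new message are presumably accepted by the changes made in 96e53bb rather than here. A minimal standalone C++ sketch of the rule exactly as written in the context lines above, assuming the version/architecture mapping implied by the message (version 1 = gfx11, version 2 = gfx12); illustrative only, not code from the repository:

// Standalone sketch of the kWidth constraint expressed by the verifier hunk
// above: WMMA version 1 (gfx11) requires kWidth == 16, WMMA version 2 (gfx12)
// accepts 8 or 16 in the condition as shown.
#include <cstdio>

static bool isAllowedKWidth(int wmmaVersion, int kWidth) {
  if (wmmaVersion == 1)
    return kWidth == 16;
  if (wmmaVersion == 2)
    return kWidth == 8 || kWidth == 16;
  return false; // other versions are not covered by this check
}

int main() {
  std::printf("gfx11, kWidth=16 -> %d\n", isAllowedKWidth(1, 16)); // 1
  std::printf("gfx12, kWidth=8  -> %d\n", isAllowedKWidth(2, 8));  // 1
  std::printf("gfx12, kWidth=4  -> %d\n", isAllowedKWidth(2, 4));  // 0, per the condition shown
  return 0;
}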

lib/Dialect/TritonNvidiaGPU/IR/Ops.cpp

Lines changed: 1 addition & 5 deletions
@@ -502,11 +502,6 @@ static LogicalResult verifyTMEMOperand(Operation *op, RankedTensorType type,
                                        MemDescType memdesc, StringRef regName) {
  if (type.getRank() != 2)
    return op->emitOpError(regName) << " must be a 2D tensor";
-  if (isa<TensorMemoryScalesEncodingAttr>(memdesc.getEncoding()) &&
-      !type.getElementType().isInteger(8)) {
-    return op->emitOpError(regName)
-           << " expected to be a tensor of i8 for MMA scales encoding";
-  }
  if (type.getEncoding()) {
    auto enc = dyn_cast<DistributedEncodingTrait>(type.getEncoding());
    if (!enc) {
@@ -526,6 +521,7 @@ static LogicalResult verifyTMEMOperand(Operation *op, RankedTensorType type,
                 << " layout is not TMEM compatible";
      for (Attribute layout : layouts)
        diag.attachNote() << "potential TMEM layout: " << layout;
+      return diag;
    }
  }
  return success();
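Note (not part of the diff): the added `return diag;` matters because an MLIR InFlightDiagnostic converts to a failing LogicalResult when returned; without it, control would fall through to `return success();` and the op would verify despite the reported incompatibility. A hedged sketch of that pattern, where `compatible` is a hypothetical stand-in for the real TMEM layout check rather than code from this repository:

// Sketch of the MLIR diagnostic-return pattern behind the `+ return diag;`
// line above; `compatible` is a made-up predicate used only for illustration.
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Operation.h"
using namespace mlir;

static LogicalResult verifyLayout(Operation *op, bool compatible) {
  if (!compatible) {
    InFlightDiagnostic diag = op->emitOpError("layout is not TMEM compatible");
    diag.attachNote() << "potential TMEM layout: ...";
    return diag; // converts to failure(); omitting this falls through to success()
  }
  return success(); // reached only when the layout is compatible
}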
