Skip to content

Commit 4c556e7

Browse files
tapspateljameszianxuTT
authored and committed
changes
1 parent c6eda50 commit 4c556e7

File tree

10 files changed

+22
-3
lines changed

10 files changed

+22
-3
lines changed

include/ttmlir/Conversion/TTNNToEmitC/EmitCConversion.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -659,6 +659,9 @@ struct EmitCTypeConverter<::ttnn::TensorMemoryLayout> {
659659
case ttnn::TensorMemoryLayout::WidthSharded:
660660
rso << "WIDTH_SHARDED";
661661
return buf;
662+
case ttnn::TensorMemoryLayout::NDSharded:
663+
rso << "ND_SHARDED";
664+
return buf;
662665
}
663666

664667
llvm_unreachable("Unknown ttnn::TensorMemoryLayout");

include/ttmlir/Conversion/TTNNToEmitPy/EmitPyConversion.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -696,6 +696,9 @@ struct EmitPyTypeConverter<::ttnn::TensorMemoryLayout> {
696696
case ttnn::TensorMemoryLayout::WidthSharded:
697697
rso << "WIDTH_SHARDED";
698698
break;
699+
case ttnn::TensorMemoryLayout::NDSharded:
700+
rso << "ND_SHARDED";
701+
break;
699702
}
700703

701704
return buf;

include/ttmlir/Dialect/TTNN/IR/TTNNOpsEnums.td

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,13 +25,15 @@ def TTNN_TensorMemoryLayout_Interleaved : I32EnumAttrCase<"Interleaved", 0, "int
2525
def TTNN_TensorMemoryLayout_HeightSharded : I32EnumAttrCase<"HeightSharded", 2, "height_sharded">;
2626
def TTNN_TensorMemoryLayout_WidthSharded : I32EnumAttrCase<"WidthSharded", 3, "width_sharded">;
2727
def TTNN_TensorMemoryLayout_BlockSharded : I32EnumAttrCase<"BlockSharded", 4, "block_sharded">;
28+
def TTNN_TensorMemoryLayout_NDSharded : I32EnumAttrCase<"NDSharded", 5, "nd_sharded">;
2829

2930
def TTNN_TensorMemoryLayout : I32EnumAttr<"TensorMemoryLayout", "TTNN Tensor Memory Layout",
3031
[
3132
TTNN_TensorMemoryLayout_Interleaved,
3233
TTNN_TensorMemoryLayout_HeightSharded,
3334
TTNN_TensorMemoryLayout_WidthSharded,
3435
TTNN_TensorMemoryLayout_BlockSharded,
36+
TTNN_TensorMemoryLayout_NDSharded,
3537
]> {
3638
let genSpecializedAttr = 0;
3739
let cppNamespace = "::mlir::tt::ttnn";

include/ttmlir/OpModel/TTNN/TTNNOpModel.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ template <>
128128
struct OpModel<SiluOp> : UnaryEltwiseOpModel<SiluOp> {};
129129

130130
template <>
131-
struct OpModel<MishOp> : UnaryEltwiseOpModel<MishOp> {};
131+
struct OpModel<MishOp> : UnaryEltwiseWithFastApproxModeOpModel<MishOp> {};
132132

133133
template <>
134134
struct OpModel<RsqrtOp> : UnaryEltwiseWithFastApproxModeOpModel<RsqrtOp> {};

include/ttmlir/Target/TTNN/types.fbs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ enum TensorMemoryLayout: ushort {
77
HeightSharded,
88
WidthSharded,
99
BlockSharded,
10+
NDSharded,
1011
}
1112

1213
enum StorageType: ushort {

include/ttmlir/Target/Utils/MLIRToFlatbuffer.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -220,6 +220,8 @@ toFlatbuffer(FlatbufferObjectCache &, ttnn::TensorMemoryLayout memLayout) {
220220
return ::tt::target::ttnn::TensorMemoryLayout::WidthSharded;
221221
case ttnn::TensorMemoryLayout::BlockSharded:
222222
return ::tt::target::ttnn::TensorMemoryLayout::BlockSharded;
223+
case ttnn::TensorMemoryLayout::NDSharded:
224+
return ::tt::target::ttnn::TensorMemoryLayout::NDSharded;
223225
}
224226
}
225227

lib/OpModel/TTNN/Conversion.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -331,6 +331,8 @@ getTensorMemoryLayout(const TensorMemoryLayout tensorMemoryLayout) {
331331
return ::tt::tt_metal::TensorMemoryLayout::WIDTH_SHARDED;
332332
case TensorMemoryLayout::BlockSharded:
333333
return ::tt::tt_metal::TensorMemoryLayout::BLOCK_SHARDED;
334+
case TensorMemoryLayout::NDSharded:
335+
return ::tt::tt_metal::TensorMemoryLayout::ND_SHARDED;
334336
}
335337
}
336338
TensorMemoryLayout
@@ -344,6 +346,8 @@ getTensorMemoryLayout(const ::tt::tt_metal::TensorMemoryLayout memLayout) {
344346
return TensorMemoryLayout::WidthSharded;
345347
case ::tt::tt_metal::TensorMemoryLayout::BLOCK_SHARDED:
346348
return TensorMemoryLayout::BlockSharded;
349+
case ::tt::tt_metal::TensorMemoryLayout::ND_SHARDED:
350+
return TensorMemoryLayout::NDSharded;
347351
}
348352
}
349353

lib/OpModel/TTNN/TTNNOpModel.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -948,7 +948,7 @@ template struct UnaryEltwiseOpModel<ReciprocalOp>;
948948
template struct UnaryEltwiseOpModel<CbrtOp>;
949949
template struct UnaryEltwiseOpModel<BitwiseNotOp>;
950950
template struct UnaryEltwiseOpModel<SiluOp>;
951-
template struct UnaryEltwiseOpModel<MishOp>;
951+
template struct UnaryEltwiseWithFastApproxModeOpModel<MishOp>;
952952
template struct UnaryEltwiseWithFastApproxModeOpModel<Log1pOp>;
953953
template struct UnaryEltwiseOpModel<Expm1Op>;
954954
template struct UnaryEltwiseWithFastApproxModeOpModel<RsqrtOp>;

runtime/lib/ttnn/operations/eltwise/unary/unary.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -182,7 +182,7 @@ void run(const ::tt::target::ttnn::EltwiseUnaryOp *op,
182182
break;
183183
}
184184
case ::tt::target::ttnn::EltwiseUnaryOpType::Mish: {
185-
runEltwiseUnaryOp(op, tensorPool, ::ttnn::mish);
185+
runEltwiseUnaryWithFastAndApproximateModeOp(op, tensorPool, ::ttnn::mish);
186186
break;
187187
}
188188
case ::tt::target::ttnn::EltwiseUnaryOpType::Sin: {

runtime/lib/ttnn/utils/utils.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -257,6 +257,8 @@ ::ttnn::TensorMemoryLayout toTTNNTensorMemoryLayout(
257257
return ::ttnn::TensorMemoryLayout::WIDTH_SHARDED;
258258
case ::tt::target::ttnn::TensorMemoryLayout::BlockSharded:
259259
return ::ttnn::TensorMemoryLayout::BLOCK_SHARDED;
260+
case ::tt::target::ttnn::TensorMemoryLayout::NDSharded:
261+
return ::ttnn::TensorMemoryLayout::ND_SHARDED;
260262
}
261263
}
262264

@@ -271,6 +273,8 @@ fromTTNNTensorMemoryLayout(::ttnn::TensorMemoryLayout tensorMemoryLayout) {
271273
return ::tt::target::ttnn::TensorMemoryLayout::WidthSharded;
272274
case ::ttnn::TensorMemoryLayout::BLOCK_SHARDED:
273275
return ::tt::target::ttnn::TensorMemoryLayout::BlockSharded;
276+
case ::ttnn::TensorMemoryLayout::ND_SHARDED:
277+
return ::tt::target::ttnn::TensorMemoryLayout::NDSharded;
274278
}
275279
}
276280

0 commit comments

Comments
 (0)