@@ -242,7 +242,7 @@ def MapOp : LinalgStructuredBase_Op<"map", [
 on the corresponding elements.

 Example:
-```
+```mlir
 %add = linalg.map
 ins(%lhs, %rhs : tensor<64xf32>, tensor<64xf32>)
 outs(%init: tensor<64xf32>)
@@ -256,7 +256,7 @@ def MapOp : LinalgStructuredBase_Op<"map", [
 non-yield operation inside the body.

 The example above will be printed as:
-```
+```mlir
 %add = linalg.map { arith.addf }
 ins(%lhs, %rhs : tensor<64xf32>, tensor<64xf32>)
 outs(%init: tensor<64xf32>)
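For reference (not part of the hunks above), a sketch of how the full long form of `linalg.map` reads once the region body cut off by the diff context is included; the block-argument names are illustrative:

```mlir
%add = linalg.map
    ins(%lhs, %rhs : tensor<64xf32>, tensor<64xf32>)
    outs(%init : tensor<64xf32>)
    // Elementwise payload, applied to each pair of input elements.
    (%lhs_elem: f32, %rhs_elem: f32) {
      %0 = arith.addf %lhs_elem, %rhs_elem : f32
      linalg.yield %0 : f32
    }
```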
@@ -327,7 +327,7 @@ def ReduceOp : LinalgStructuredBase_Op<"reduce", [
 dimensions in increasing order.

 Example:
-```
+```mlir
 %reduce = linalg.reduce
 ins(%input:tensor<16x32x64xf32>)
 outs(%init:tensor<16x64xf32>)
@@ -343,7 +343,7 @@ def ReduceOp : LinalgStructuredBase_Op<"reduce", [
 takes `%out` as the first argument.

 The example above will be printed as:
-```
+```mlir
 %reduce = linalg.reduce { arith.addf }
 ins(%input:tensor<16x32x64xf32>)
 outs(%init:tensor<16x64xf32>)
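A sketch of the long form of `linalg.reduce` that these hunks cut off, assuming a sum reduction over dimension 1 as implied by the shapes; block-argument names are illustrative:

```mlir
%reduce = linalg.reduce
    ins(%input : tensor<16x32x64xf32>)
    outs(%init : tensor<16x64xf32>)
    // Reduce over the middle dimension (size 32).
    dimensions = [1]
    (%in: f32, %out: f32) {
      %0 = arith.addf %in, %out : f32
      linalg.yield %0 : f32
    }
```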
@@ -408,7 +408,7 @@ def TransposeOp : LinalgStructuredBase_Op<"transpose", [
 operation only that produces a transposed "view".

 Example:
-```
+```mlir
 %transpose = linalg.transpose
 ins(%input:tensor<16x64xf32>)
 outs(%init:tensor<64x16xf32>)
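A sketch of the complete `linalg.transpose` example; the `permutation` attribute is the part not visible in the diff:

```mlir
%transpose = linalg.transpose
    ins(%input : tensor<16x64xf32>)
    outs(%init : tensor<64x16xf32>)
    // Result dimension i is taken from input dimension permutation[i].
    permutation = [1, 0]
```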
@@ -480,7 +480,7 @@ def BroadcastOp : LinalgStructuredBase_Op<"broadcast", [
 Broadcast the input into the given shape by adding `dimensions`.

 Example:
-```
+```mlir
 %bcast = linalg.broadcast
 ins(%input:tensor<16xf32>)
 outs(%init:tensor<16x64xf32>)
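A sketch of the full `linalg.broadcast` form, assuming the missing attribute line adds dimension 1 (the size-64 axis) as the shapes above suggest:

```mlir
%bcast = linalg.broadcast
    ins(%input : tensor<16xf32>)
    outs(%init : tensor<16x64xf32>)
    // The added output dimension: dim 1 of size 64 is broadcast.
    dimensions = [1]
```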
@@ -689,7 +689,7 @@ def MatmulOp : LinalgStructuredBase_Op<"matmul", [
 the maps if specified.

 Example Transpose:
-```
+```mlir
 linalg.matmul indexing_maps = [
 affine_map<(d0, d1, d2) -> (d2, d0)>, // transpose
 affine_map<(d0, d1, d2) -> (d2, d1)>,
@@ -700,7 +700,7 @@ def MatmulOp : LinalgStructuredBase_Op<"matmul", [
 ```

 Example Broadcast:
-```
+```mlir
 linalg.matmul indexing_maps = [
 affine_map<(d0, d1, d2) -> (d2)>, // broadcast
 affine_map<(d0, d1, d2) -> (d2, d1)>,
@@ -711,7 +711,7 @@ def MatmulOp : LinalgStructuredBase_Op<"matmul", [
 ```

 Example Broadcast and transpose:
-```
+```mlir
 linalg.matmul indexing_maps = [
 affine_map<(d0, d1, d2) -> (d2, d0)>, // transpose
 affine_map<(d0, d1, d2) -> (d2)>, // broadcast
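The three matmul hunks above each stop before the `ins`/`outs` operands. For reference, a sketch of one complete example (the transpose variant); operand names and shapes are illustrative, chosen to match the maps with d0 = M, d1 = N, d2 = K:

```mlir
linalg.matmul indexing_maps = [
      affine_map<(d0, d1, d2) -> (d2, d0)>, // A indexed (K, M), i.e. transposed
      affine_map<(d0, d1, d2) -> (d2, d1)>, // B indexed (K, N)
      affine_map<(d0, d1, d2) -> (d0, d1)>  // C indexed (M, N)
    ]
    ins(%A, %B : memref<5x3xf32>, memref<5x7xf32>)
    outs(%C : memref<3x7xf32>)
```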
@@ -839,7 +839,7 @@ def ContractOp : LinalgStructuredBase_Op<"contract", [
 `H = ⟨ b, m, n ⟩` (with `k` as a contracting reduction-dimension while `m`,
 `n` and `b` have parallel iteration-type) and gets represented as:

-```
+```mlir
 %D = linalg.contract
 indexing_maps = [affine_map<(batch, m, n, k) -> (batch, m, k)>,
 affine_map<(batch, m, n, k) -> (batch, k, n)>,
@@ -854,7 +854,7 @@ def ContractOp : LinalgStructuredBase_Op<"contract", [
 For example, the following is a variant of batch-matmul with a transposition
 applied to `A` while `B`'s 2D-matrix gets broadcasted along the batch dim:

-```
+```mlir
 linalg.contract
 indexing_maps = [affine_map<(batch, m, n, k) -> (batch, k, m)>,
 affine_map<(batch, m, n, k) -> (k, n)>,
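A sketch of how the first `linalg.contract` example reads once the operand lines cut off by the diff are included; the dynamic tensor shapes and SSA names are illustrative:

```mlir
%D = linalg.contract
    indexing_maps = [affine_map<(batch, m, n, k) -> (batch, m, k)>,
                     affine_map<(batch, m, n, k) -> (batch, k, n)>,
                     affine_map<(batch, m, n, k) -> (batch, m, n)>]
    ins(%A, %B : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
    outs(%C : tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
```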
@@ -953,7 +953,7 @@ def BatchMatmulOp : LinalgStructuredBase_Op<"batch_matmul", !listconcat([AttrSiz
 arguments if specified.

 Example Transpose:
-```
+```mlir
 linalg.batch_matmul indexing_maps = [
 affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>, // transpose
 affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
@@ -964,7 +964,7 @@ def BatchMatmulOp : LinalgStructuredBase_Op<"batch_matmul", !listconcat([AttrSiz
 ```

 Example Broadcast:
-```
+```mlir
 linalg.batch_matmul indexing_maps = [
 affine_map<(d0, d1, d2, d3) -> (d3)>, // broadcast
 affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
@@ -975,7 +975,7 @@ def BatchMatmulOp : LinalgStructuredBase_Op<"batch_matmul", !listconcat([AttrSiz
 ```

 Example Broadcast and Transpose:
-```
+```mlir
 linalg.batch_matmul indexing_maps = [
 affine_map<(d0, d1, d2, d3) -> (d1, d3)>, // broadcast
 affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, // transpose
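As with the matmul hunks, the batch_matmul examples are cut off before their operands. A sketch of the transpose variant, with illustrative shapes chosen to match the maps (d0 = batch, d1 = M, d2 = N, d3 = K):

```mlir
linalg.batch_matmul indexing_maps = [
      affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>, // A indexed (batch, K, M): transposed
      affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>, // B indexed (batch, K, N)
      affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>  // C indexed (batch, M, N)
    ]
    ins(%A, %B : memref<2x5x3xf32>, memref<2x5x7xf32>)
    outs(%C : memref<2x3x7xf32>)
```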