Commit fe06c46

1. Improved indentation in op definition description and test cases.
1 parent 9d2d247

File tree: 3 files changed, +159 −176 lines
mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
55 additions, 63 deletions
@@ -690,34 +690,32 @@ def MatmulOp : LinalgStructuredBase_Op<"matmul", [

     Example Transpose:
     ```
-    linalg.matmul indexing_maps = [
-                   affine_map<(d0, d1, d2) -> (d2, d0)>, // transpose
-                   affine_map<(d0, d1, d2) -> (d2, d1)>,
-                   affine_map<(d0, d1, d2) -> (d0, d1)>
-                   ]
-                   ins(%arg0, %arg1 : memref<5x3xf32>,memref<5x7xf32>)
-                   outs(%arg2: memref<3x7xf32>)
+    linalg.matmul
+        indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d0)>, // transpose
+                         affine_map<(d0, d1, d2) -> (d2, d1)>,
+                         affine_map<(d0, d1, d2) -> (d0, d1)>]
+        ins(%arg0, %arg1 : memref<5x3xf32>,memref<5x7xf32>)
+        outs(%arg2: memref<3x7xf32>)
     ```

     Example Broadcast:
     ```
-    linalg.matmul indexing_maps = [
-                   affine_map<(d0, d1, d2) -> (d2)>, // broadcast
-                   affine_map<(d0, d1, d2) -> (d2, d1)>,
-                   affine_map<(d0, d1, d2) -> (d0, d1)>
-                   ]
-                   ins(%arg0, %arg1 : memref<3xf32>, memref<5x7xf32>)
-                   outs(%arg2: memref<3x7xf32>)
+    linalg.matmul
+        indexing_maps = [affine_map<(d0, d1, d2) -> (d2)>, // broadcast
+                         affine_map<(d0, d1, d2) -> (d2, d1)>,
+                         affine_map<(d0, d1, d2) -> (d0, d1)>]
+        ins(%arg0, %arg1 : memref<3xf32>, memref<5x7xf32>)
+        outs(%arg2: memref<3x7xf32>)
     ```

     Example Broadcast and transpose:
     ```
-    linalg.matmul indexing_maps = [
-                   affine_map<(d0, d1, d2) -> (d2, d0)>, // transpose
-                   affine_map<(d0, d1, d2) -> (d2)>, // broadcast
-                   affine_map<(d0, d1, d2) -> (d0, d1)>
-                   ]
-                   ins(%arg0, %arg1 : memref<5x3xf32>, memref<7xf32>) outs(%arg2: memref<3x7xf32>)
+    linalg.matmul
+        indexing_maps = [affine_map<(d0, d1, d2) -> (d2, d0)>, // transpose
+                         affine_map<(d0, d1, d2) -> (d2)>, // broadcast
+                         affine_map<(d0, d1, d2) -> (d0, d1)>]
+        ins(%arg0, %arg1 : memref<5x3xf32>, memref<7xf32>)
+        outs(%arg2: memref<3x7xf32>)
     ```
   }];

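For orientation, the transpose/broadcast labels in the examples above are relative to linalg.matmul's default access patterns. Below is a minimal sketch of the plain op with those default indexing maps spelled out explicitly; the maps and shapes are illustrative and not taken from the patch:

```mlir
// Plain matmul with its default access patterns made explicit:
// A is read as (d0, d2), B as (d2, d1), and C as (d0, d1).
linalg.matmul
    indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>,  // A
                     affine_map<(d0, d1, d2) -> (d2, d1)>,  // B
                     affine_map<(d0, d1, d2) -> (d0, d1)>]  // C
    ins(%arg0, %arg1 : memref<3x5xf32>, memref<5x7xf32>)
    outs(%arg2 : memref<3x7xf32>)
```
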
@@ -954,35 +952,32 @@ def BatchMatmulOp : LinalgStructuredBase_Op<"batch_matmul", !listconcat([AttrSiz

     Example Transpose:
     ```
-    linalg.batch_matmul indexing_maps = [
-                   affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>, // transpose
-                   affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
-                   affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-                   ]
-                   ins(%arg0, %arg1 : memref<2x5x3xf32>,memref<2x5x7xf32>)
-                   outs(%arg2: memref<2x3x7xf32>)
+    linalg.batch_matmul
+        indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>, // transpose
+                         affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
+                         affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]
+        ins(%arg0, %arg1 : memref<2x5x3xf32>,memref<2x5x7xf32>)
+        outs(%arg2: memref<2x3x7xf32>)
     ```

     Example Broadcast:
     ```
-    linalg.batch_matmul indexing_maps = [
-                   affine_map<(d0, d1, d2, d3) -> (d3)>, // broadcast
-                   affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
-                   affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-                   ]
-                   ins(%arg0, %arg1 : memref<5xf32>, memref<2x5x7xf32>)
-                   outs(%arg2: memref<2x3x7xf32>)
+    linalg.batch_matmul
+        indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3)>, // broadcast
+                         affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
+                         affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]
+        ins(%arg0, %arg1 : memref<5xf32>, memref<2x5x7xf32>)
+        outs(%arg2: memref<2x3x7xf32>)
     ```

     Example Broadcast and Transpose:
     ```
-    linalg.batch_matmul indexing_maps = [
-                   affine_map<(d0, d1, d2, d3) -> (d1, d3)>, // broadcast
-                   affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, // transpose
-                   affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-                   ]
-                   ins(%arg0, %arg1 : memref<3x5xf32>, memref<2x7x5xf32>)
-                   outs(%arg2: memref<2x3x7xf32>)
+    linalg.batch_matmul
+        indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d1, d3)>, // broadcast
+                         affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, // transpose
+                         affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>]
+        ins(%arg0, %arg1 : memref<3x5xf32>, memref<2x7x5xf32>)
+        outs(%arg2: memref<2x3x7xf32>)
     ```
   }];

@@ -1074,7 +1069,7 @@ def BatchReduceMatmulOp : LinalgStructuredBase_Op<"batch_reduce_matmul", [
     LinalgContractionOpInterface]> {

   let summary = [{Performs a batch-reduce matrix multiplication of two 3D inputs.
-The partial multiplication results are reduced into a 2D output.}];
+    The partial multiplication results are reduced into a 2D output.}];
   let description = [{
     Numeric casting is performed on the operands to the inner multiply, promoting
     them to the same data type as the accumulator/output.
@@ -1085,35 +1080,32 @@ The partial multiplication results are reduced into a 2D output.}];

     Example Transpose:
     ```
-    linalg.batch_reduce_matmul indexing_maps = [
-                   affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>, // transpose
-                   affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
-                   affine_map<(d0, d1, d2, d3) -> (d1, d2)>
-                   ]
-                   ins(%arg0, %arg1 : memref<2x5x3xf32>,memref<2x5x7xf32>)
-                   outs(%arg2: memref<3x7xf32>)
+    linalg.batch_reduce_matmul
+        indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>, // transpose
+                         affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
+                         affine_map<(d0, d1, d2, d3) -> (d1, d2)>]
+        ins(%arg0, %arg1 : memref<2x5x3xf32>,memref<2x5x7xf32>)
+        outs(%arg2: memref<3x7xf32>)
     ```

     Example Broadcast:
     ```
-    linalg.batch_reduce_matmul indexing_maps = [
-                   affine_map<(d0, d1, d2, d3) -> (d3)>, // broadcast
-                   affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
-                   affine_map<(d0, d1, d2, d3) -> (d1, d2)>
-                   ]
-                   ins(%arg0, %arg1 : memref<5xf32>, memref<2x5x7xf32>)
-                   outs(%arg2: memref<3x7xf32>)
+    linalg.batch_reduce_matmul
+        indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d3)>, // broadcast
+                         affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>,
+                         affine_map<(d0, d1, d2, d3) -> (d1, d2)>]
+        ins(%arg0, %arg1 : memref<5xf32>, memref<2x5x7xf32>)
+        outs(%arg2: memref<3x7xf32>)
     ```

     Example Broadcast and Transpose:
     ```
-    linalg.batch_reduce_matmul indexing_maps = [
-                   affine_map<(d0, d1, d2, d3) -> (d1, d3)>, // broadcast
-                   affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, // transpose
-                   affine_map<(d0, d1, d2, d3) -> (d1, d2)>
-                   ]
-                   ins(%arg0, %arg1 : memref<3x5xf32>, memref<2x7x5xf32>)
-                   outs(%arg2: memref<3x7xf32>)
+    linalg.batch_reduce_matmul
+        indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d1, d3)>, // broadcast
+                         affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, // transpose
+                         affine_map<(d0, d1, d2, d3) -> (d1, d2)>]
+        ins(%arg0, %arg1 : memref<3x5xf32>, memref<2x7x5xf32>)
+        outs(%arg2: memref<3x7xf32>)
     ```
   }];

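The op description above states that numeric casting promotes the multiply operands to the accumulator/output type. Below is a minimal sketch of what that permits, assuming mixed-precision operands are accepted here as in other Linalg named ops; the f16/f32 types are illustrative, not part of the patch:

```mlir
// f16 operands are promoted to f32, the element type of the output
// (accumulator), before the inner multiply-accumulate.
linalg.batch_reduce_matmul
    ins(%arg0, %arg1 : memref<2x3x5xf16>, memref<2x5x7xf16>)
    outs(%arg2 : memref<3x7xf32>)
```
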
mlir/test/Dialect/Linalg/generalize-named-ops.mlir
12 additions, 14 deletions
@@ -1024,22 +1024,20 @@ func.func @batch_matmul(%arg0: tensor<2x3x5xf32>, %arg1: tensor<2x5x7xf32>, %arg

 // -----

-// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
-// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
-// CHECK: #[[$ATTR_2:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+// CHECK: #[[$ACCESS_A:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
+// CHECK: #[[$ACCESS_B:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
+// CHECK: #[[$ACCESS_C:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>

 // CHECK-LABEL: func.func @batch_reduce_matmul(
-// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: tensor<2x3x5xf32>,
-// CHECK-SAME: %[[VAL_1:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: tensor<2x5x7xf32>,
-// CHECK-SAME: %[[VAL_2:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: tensor<3x7xf32>) -> tensor<3x7xf32> {
-// CHECK: %[[VAL_3:.*]] = linalg.generic {indexing_maps = [#[[$ATTR_0]], #[[$ATTR_1]], #[[$ATTR_2]]], iterator_types = ["reduction", "parallel", "parallel", "reduction"]} ins(%[[VAL_0]], %[[VAL_1]] : tensor<2x3x5xf32>, tensor<2x5x7xf32>) outs(%[[VAL_2]] : tensor<3x7xf32>) {
-// CHECK: ^bb0(%[[VAL_4:.*]]: f32, %[[VAL_5:.*]]: f32, %[[VAL_6:.*]]: f32):
-// CHECK: %[[VAL_7:.*]] = arith.mulf %[[VAL_4]], %[[VAL_5]] : f32
-// CHECK: %[[VAL_8:.*]] = arith.addf %[[VAL_6]], %[[VAL_7]] : f32
-// CHECK: linalg.yield %[[VAL_8]] : f32
-// CHECK: } -> tensor<3x7xf32>
-// CHECK: return %[[VAL_3]] : tensor<3x7xf32>
-// CHECK: }
+// CHECK-SAME: %[[ARG_A:.*]]: tensor<2x3x5xf32>,
+// CHECK-SAME: %[[ARG_B:.*]]: tensor<2x5x7xf32>,
+// CHECK-SAME: %[[ARG_C:.*]]: tensor<3x7xf32>) -> tensor<3x7xf32> {
+// CHECK: linalg.generic
+// CHECK-SAME: indexing_maps = [#[[$ACCESS_A]], #[[$ACCESS_B]], #[[$ACCESS_C]]],
+// CHECK-SAME: iterator_types = ["reduction", "parallel", "parallel", "reduction"]}
+// CHECK: arith.mulf
+// CHECK: arith.addf
+// CHECK: linalg.yield

 func.func @batch_reduce_matmul(%arg0: tensor<2x3x5xf32>, %arg1: tensor<2x5x7xf32>, %arg2: tensor<3x7xf32>) -> tensor<3x7xf32> {
   %0 = linalg.batch_reduce_matmul indexing_maps = [

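For context, the trimmed CHECK lines above still describe the same generalized form that the deleted, fully spelled-out CHECK lines captured; roughly the linalg.generic sketched below, where #map_a, #map_b, and #map_c are placeholder names for the three affine maps bound by the CHECK captures:

```mlir
// Generalized batch_reduce_matmul: d0/d3 are reduction dims, d1/d2 parallel.
%0 = linalg.generic {indexing_maps = [#map_a, #map_b, #map_c],
                     iterator_types = ["reduction", "parallel", "parallel", "reduction"]}
    ins(%arg0, %arg1 : tensor<2x3x5xf32>, tensor<2x5x7xf32>)
    outs(%arg2 : tensor<3x7xf32>) {
^bb0(%a: f32, %b: f32, %c: f32):
  %1 = arith.mulf %a, %b : f32
  %2 = arith.addf %c, %1 : f32
  linalg.yield %2 : f32
} -> tensor<3x7xf32>
```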