@@ -323,8 +323,8 @@ def MemRef_ReallocOp : MemRef_Op<"realloc"> {

     ```mlir
     %new = memref.realloc %old : memref<64xf32> to memref<124xf32>
-    %4 = memref.load %new[%index] // ok
-    %5 = memref.load %old[%index] // undefined behavior
+    %4 = memref.load %new[%index] : memref<124xf32> // ok
+    %5 = memref.load %old[%index] : memref<64xf32> // undefined behavior
     ```
   }];

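Note on the realloc fix: `memref.load` always spells the memref type after the colon, and once reallocated, the original SSA value must not be touched. A minimal self-contained sketch (function and value names are hypothetical):

```mlir
func.func @realloc_use(%old: memref<64xf32>, %index: index) -> f32 {
  // After realloc, %old may no longer be accessed; all further
  // loads go through %new, with the new type spelled out.
  %new = memref.realloc %old : memref<64xf32> to memref<124xf32>
  %v = memref.load %new[%index] : memref<124xf32>
  return %v : f32
}
```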
@@ -445,9 +445,10 @@ def MemRef_AllocaScopeOp : MemRef_Op<"alloca_scope",
     operation:

     ```mlir
-    %result = memref.alloca_scope {
+    %result = memref.alloca_scope -> f32 {
+      %value = arith.constant 1.0 : f32
       ...
-      memref.alloca_scope.return %value
+      memref.alloca_scope.return %value : f32
     }
     ```

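A fuller sketch of the corrected `alloca_scope` form, assuming a hypothetical scratch-buffer computation in place of the elided `...`; the alloca is freed when the scope exits and only the yielded `f32` escapes:

```mlir
%result = memref.alloca_scope -> f32 {
  %buf = memref.alloca() : memref<4xf32>  // freed when the scope exits
  %c0 = arith.constant 0 : index
  %one = arith.constant 1.0 : f32
  memref.store %one, %buf[%c0] : memref<4xf32>
  %value = memref.load %buf[%c0] : memref<4xf32>
  memref.alloca_scope.return %value : f32
}
```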
@@ -478,7 +479,7 @@ def MemRef_AllocaScopeReturnOp : MemRef_Op<"alloca_scope.return",
     to indicate which values are going to be returned. For example:

     ```mlir
-    memref.alloca_scope.return %value
+    memref.alloca_scope.return %value : f32
     ```
   }];

@@ -543,11 +544,11 @@ def MemRef_CastOp : MemRef_Op<"cast", [
     Example:

     ```mlir
-    Cast to concrete shape.
-    %4 = memref.cast %1 : memref<*xf32> to memref<4x?xf32>
+    // Cast to concrete shape.
+    %4 = memref.cast %1 : memref<*xf32> to memref<4x?xf32>

-    Erase rank information.
-    %5 = memref.cast %1 : memref<4x?xf32> to memref<*xf32>
+    // Erase rank information.
+    %5 = memref.cast %1 : memref<4x?xf32> to memref<*xf32>
     ```
   }];

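The two fixed examples compose: one direction erases static shape information, the other reasserts it. A round-trip sketch (the `%m` operand is hypothetical); casting back to a concrete shape acts as a runtime assertion on the actual shape:

```mlir
// %m : memref<4x?xf32>
%u = memref.cast %m : memref<4x?xf32> to memref<*xf32>  // erase rank
%c = memref.cast %u : memref<*xf32> to memref<4x?xf32>  // reassert shape
```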
@@ -613,8 +614,8 @@ def MemRef_DeallocOp : MemRef_Op<"dealloc", [MemRefsNormalizable]> {
     Example:

     ```mlir
-    %0 = memref.alloc() : memref<8x64xf32, affine_map<(d0, d1) -> (d0, d1), 1>>
-    memref.dealloc %0 : memref<8x64xf32, affine_map<(d0, d1) -> (d0, d1), 1>>
+    %0 = memref.alloc() : memref<8x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+    memref.dealloc %0 : memref<8x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
     ```
   }];

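The fix moves the memory space out of the `affine_map` and into the memref's trailing parameter, where it belongs. Since the layout shown is the identity map, it can be omitted entirely, leaving an equivalent shorter spelling (memory space 1 kept from the example):

```mlir
%0 = memref.alloc() : memref<8x64xf32, 1>
memref.dealloc %0 : memref<8x64xf32, 1>
```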
@@ -728,22 +729,22 @@ def MemRef_DmaStartOp : MemRef_Op<"dma_start"> {
     space 1 at indices [%k, %l], would be specified as follows:

     ```mlir
-    %num_elements = arith.constant 256
+    %num_elements = arith.constant 256 : index
     %idx = arith.constant 0 : index
-    %tag = memref.alloc() : memref<1 x i32, affine_map<(d0) -> (d0)>, 4>
-    dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%idx] :
-      memref<40 x 128 x f32>, affine_map<(d0) -> (d0)>, 0>,
-      memref<2 x 1024 x f32>, affine_map<(d0) -> (d0)>, 1>,
-      memref<1 x i32>, affine_map<(d0) -> (d0)>, 2>
+    %tag = memref.alloc() : memref<1 x i32, affine_map<(d0) -> (d0)>, 2>
+    memref.dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%idx] :
+      memref<40 x 128 x f32, affine_map<(d0, d1) -> (d0, d1)>, 0>,
+      memref<2 x 1024 x f32, affine_map<(d0, d1) -> (d0, d1)>, 1>,
+      memref<1 x i32, affine_map<(d0) -> (d0)>, 2>
     ```

     If %stride and %num_elt_per_stride are specified, the DMA is expected to
     transfer %num_elt_per_stride elements every %stride elements apart from
     memory space 0 until %num_elements are transferred.

     ```mlir
-    dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%idx], %stride,
-              %num_elt_per_stride :
+    memref.dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%idx], %stride,
+                     %num_elt_per_stride :
     ```

     * TODO: add additional operands to allow source and destination striding, and
@@ -891,10 +892,10 @@ def MemRef_DmaWaitOp : MemRef_Op<"dma_wait"> {
     Example:

     ```mlir
-    dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%index] :
-      memref<2048 x f32>, affine_map<(d0) -> (d0)>, 0>,
-      memref<256 x f32>, affine_map<(d0) -> (d0)>, 1>
-      memref<1 x i32>, affine_map<(d0) -> (d0)>, 2>
+    memref.dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%index] :
+      memref<2048 x f32, affine_map<(d0) -> (d0)>, 0>,
+      memref<256 x f32, affine_map<(d0) -> (d0)>, 1>,
+      memref<1 x i32, affine_map<(d0) -> (d0)>, 2>
     ...
     ...
     dma_wait %tag[%index], %num_elements : memref<1 x i32, affine_map<(d0) -> (d0)>, 2>
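Taken together, the two DMA examples pair up: each `dma_start` is completed by a `dma_wait` on the same tag element. A condensed sketch using the shapes and memory spaces from the `dma_start` example above, with identity layouts omitted (an allowed shorthand):

```mlir
memref.dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%idx]
  : memref<40x128xf32>, memref<2x1024xf32, 1>, memref<1xi32, 2>
// Blocks until the transfer tagged through %tag[%idx] has completed.
memref.dma_wait %tag[%idx], %num_elements : memref<1xi32, 2>
```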
@@ -1004,16 +1005,16 @@ def MemRef_ExtractStridedMetadataOp : MemRef_Op<"extract_strided_metadata", [

     ```mlir
     %base, %offset, %sizes:2, %strides:2 =
-      memref.extract_strided_metadata %memref :
-        memref<10x?xf32>, index, index, index, index, index
+      memref.extract_strided_metadata %memref : memref<10x?xf32>
+        -> memref<f32>, index, index, index, index, index

     // After folding, the type of %m2 can be memref<10x?xf32> and further
     // folded to %memref.
     %m2 = memref.reinterpret_cast %base to
         offset: [%offset],
         sizes: [%sizes#0, %sizes#1],
         strides: [%strides#0, %strides#1]
-        : memref<f32> to memref<?x?xf32, offset: ?, strides: [?, ?]>
+        : memref<f32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
     ```
   }];

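To illustrate what the corrected signature returns, a sketch with a fully static operand; the commented result values assume a contiguous identity layout and are shown for illustration only:

```mlir
%base, %offset, %sizes:2, %strides:2 =
  memref.extract_strided_metadata %m : memref<10x4xf32>
    -> memref<f32>, index, index, index, index, index
// For a contiguous memref<10x4xf32> this folds to: %offset = 0,
// %sizes = [10, 4], %strides = [4, 1]; %base is the rank-0 buffer view.
```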
@@ -1182,10 +1183,10 @@ def MemRef_GlobalOp : MemRef_Op<"global", [Symbol]> {

     ```mlir
     // Private variable with an initial value.
-    memref.global "private" @x : memref<2xf32> = dense<0.0,2.0>
+    memref.global "private" @x : memref<2xf32> = dense<[0.0, 2.0]>

     // Private variable with an initial value and an alignment (power of 2).
-    memref.global "private" @x : memref<2xf32> = dense<0.0,2.0> {alignment = 64}
+    memref.global "private" @x : memref<2xf32> = dense<[0.0, 2.0]> {alignment = 64}

     // Declaration of an external variable.
     memref.global "private" @y : memref<4xi32>
@@ -1194,7 +1195,7 @@ def MemRef_GlobalOp : MemRef_Op<"global", [Symbol]> {
     memref.global @z : memref<3xf16> = uninitialized

     // Externally visible constant variable.
-    memref.global constant @c : memref<2xi32> = dense<1, 4>
+    memref.global constant @c : memref<2xi32> = dense<[1, 4]>
     ```
   }];

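Globals declared this way are read back through `memref.get_global`, which is a quick way to sanity-check the corrected `dense<[...]>` initializers. A brief usage sketch against the `@x` global above:

```mlir
%x = memref.get_global @x : memref<2xf32>
%c0 = arith.constant 0 : index
%v = memref.load %x[%c0] : memref<2xf32>  // reads 0.0, the first element
```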
@@ -1555,7 +1556,8 @@ def MemRef_ReinterpretCastOp
     %dst = memref.reinterpret_cast %src to
       offset: [%offset],
       sizes: [%sizes],
-      strides: [%strides]
+      strides: [%strides] :
+      memref<*xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
     ```
     means that `%dst`'s descriptor will be:
     ```mlir
@@ -1695,12 +1697,12 @@ def MemRef_ReshapeOp: MemRef_Op<"reshape", [
     ```mlir
     // Reshape statically-shaped memref.
     %dst = memref.reshape %src(%shape)
-             : (memref<4x1xf32>, memref<1xi32>) to memref<4xf32>
+             : (memref<4x1xf32>, memref<1xi32>) -> memref<4xf32>
     %dst0 = memref.reshape %src(%shape0)
-             : (memref<4x1xf32>, memref<2xi32>) to memref<2x2xf32>
+             : (memref<4x1xf32>, memref<2xi32>) -> memref<2x2xf32>
     // Flatten unranked memref.
     %dst = memref.reshape %src(%shape)
-             : (memref<*xf32>, memref<1xi32>) to memref<?xf32>
+             : (memref<*xf32>, memref<1xi32>) -> memref<?xf32>
     ```

     b. Source type is ranked or unranked. Shape argument has dynamic size.
@@ -1709,10 +1711,10 @@ def MemRef_ReshapeOp: MemRef_Op<"reshape", [
     ```mlir
     // Reshape dynamically-shaped 1D memref.
     %dst = memref.reshape %src(%shape)
-             : (memref<?xf32>, memref<?xi32>) to memref<*xf32>
+             : (memref<?xf32>, memref<?xi32>) -> memref<*xf32>
     // Reshape unranked memref.
     %dst = memref.reshape %src(%shape)
-             : (memref<*xf32>, memref<?xi32>) to memref<*xf32>
+             : (memref<*xf32>, memref<?xi32>) -> memref<*xf32>
     ```
   }];

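Since the shape operand of `memref.reshape` is itself a memref, running the first corrected example requires materializing that operand; one way to do so (the allocation strategy is an assumption for illustration):

```mlir
%shape = memref.alloc() : memref<1xi32>
%c0 = arith.constant 0 : index
%c4 = arith.constant 4 : i32
memref.store %c4, %shape[%c0] : memref<1xi32>  // target shape: [4]
%dst = memref.reshape %src(%shape)
  : (memref<4x1xf32>, memref<1xi32>) -> memref<4xf32>
```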