Commit eb17780

[mlir][sparse] Change sparse_tensor.print format (#91528)
1. Remove the trailing comma after the last element of the printed memref and add a closing parenthesis. 2. Update the integration tests to use the new format.
1 parent: df21ee4 · commit: eb17780
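
For illustration, taken from the updated FileCheck expectations below, a printed level now ends with a closing parenthesis instead of a dangling trailing comma:

  before: pos[1] : ( 0, 2, 3,
  after:  pos[1] : ( 0, 2, 3 )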

73 files changed: +1068, -1059 lines


mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp

Lines changed: 9 additions & 3 deletions
@@ -830,11 +830,17 @@ struct PrintRewriter : public OpRewritePattern<PrintOp> {
                                          vector::PrintPunctuation::Comma);
         rewriter.create<vector::PrintOp>(loc, imag,
                                          vector::PrintPunctuation::Close);
-        rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Comma);
       } else {
-        rewriter.create<vector::PrintOp>(loc, val,
-                                         vector::PrintPunctuation::Comma);
+        rewriter.create<vector::PrintOp>(
+            loc, val, vector::PrintPunctuation::NoPunctuation);
       }
+      // Terminating comma (except at end).
+      auto bound = rewriter.create<arith::AddIOp>(loc, idxs.back(), step);
+      Value cond = rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ne,
+                                                  bound, size);
+      scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, cond, /*else*/ false);
+      rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
+      rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Comma);
     }
     idxs.pop_back();
     rewriter.setInsertionPointAfter(forOp);
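
The rewritten pattern above prints each value with NoPunctuation (complex values keep their closing parenthesis) and emits the separating comma inside an scf.if guarded by idxs.back() + step != size, so the final element is followed only by the closing parenthesis. Below is a minimal standalone C++ sketch of the same "comma except at end" logic (a hypothetical printValues helper over a plain std::vector, not the MLIR rewriter itself):

  #include <cstddef>
  #include <cstdio>
  #include <vector>

  // Hypothetical helper for illustration: prints "( v0, v1, ..., vn )" with
  // no comma after the last element, mirroring the new sparse_tensor.print
  // output format.
  static void printValues(const std::vector<double> &values) {
    std::printf("( ");
    for (std::size_t i = 0, e = values.size(); i < e; ++i) {
      std::printf("%g", values[i]); // the value itself, no punctuation
      if (i + 1 != e)               // terminating comma (except at end)
        std::printf(", ");
    }
    std::printf(" )\n");
  }

  int main() {
    // Prints: ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
    printValues({1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0});
    return 0;
  }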

mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir

Lines changed: 9 additions & 9 deletions
@@ -93,19 +93,19 @@ module {
     // CHECK-NEXT: nse = 12
     // CHECK-NEXT: dim = ( 4, 6 )
     // CHECK-NEXT: lvl = ( 2, 3, 2, 2 )
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3,
-    // CHECK-NEXT: crd[1] : ( 0, 2, 1,
-    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1 )
+    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
     // CHECK-NEXT: ----
     sparse_tensor.print %A : tensor<?x?xf64, #BSR>

     // CHECK-NEXT: ---- Sparse Tensor ----
     // CHECK-NEXT: nse = 12
     // CHECK-NEXT: dim = ( 2, 3, 2, 2 )
     // CHECK-NEXT: lvl = ( 2, 3, 2, 2 )
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3,
-    // CHECK-NEXT: crd[1] : ( 0, 2, 1
-    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1 )
+    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
     // CHECK-NEXT: ----
     %t1 = sparse_tensor.reinterpret_map %A : tensor<?x?xf64, #BSR>
            to tensor<?x?x2x2xf64, #DSDD>
@@ -115,9 +115,9 @@ module {
     // CHECK-NEXT: nse = 12
     // CHECK-NEXT: dim = ( 4, 6 )
     // CHECK-NEXT: lvl = ( 2, 3, 2, 2 )
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3,
-    // CHECK-NEXT: crd[1] : ( 0, 2, 1,
-    // CHECK-NEXT: values : ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1 )
+    // CHECK-NEXT: values : ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0 )
     // CHECK-NEXT: ----
     %As = call @scale(%A) : (tensor<?x?xf64, #BSR>) -> (tensor<?x?xf64, #BSR>)
     sparse_tensor.print %As : tensor<?x?xf64, #BSR>

mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir

Lines changed: 12 additions & 12 deletions
@@ -108,9 +108,9 @@ module {
   // CHECK-NEXT: nse = 24
   // CHECK-NEXT: dim = ( 6, 16 )
   // CHECK-NEXT: lvl = ( 2, 4, 3, 4 )
-  // CHECK-NEXT: pos[1] : ( 0, 1, 2,
-  // CHECK-NEXT: crd[1] : ( 0, 2,
-  // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
+  // CHECK-NEXT: pos[1] : ( 0, 1, 2 )
+  // CHECK-NEXT: crd[1] : ( 0, 2 )
+  // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
   // CHECK-NEXT: ----
   //
   func.func @foo1() {
@@ -134,9 +134,9 @@ module {
   // CHECK-NEXT: nse = 24
   // CHECK-NEXT: dim = ( 6, 16 )
   // CHECK-NEXT: lvl = ( 2, 4, 4, 3 )
-  // CHECK-NEXT: pos[1] : ( 0, 1, 2,
-  // CHECK-NEXT: crd[1] : ( 0, 2,
-  // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
+  // CHECK-NEXT: pos[1] : ( 0, 1, 2 )
+  // CHECK-NEXT: crd[1] : ( 0, 2 )
+  // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
   // CHECK-NEXT: ----
   //
   func.func @foo2() {
@@ -160,9 +160,9 @@ module {
   // CHECK-NEXT: nse = 24
   // CHECK-NEXT: dim = ( 6, 16 )
   // CHECK-NEXT: lvl = ( 4, 2, 3, 4 )
-  // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
-  // CHECK-NEXT: crd[1] : ( 0, 1,
-  // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
+  // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2 )
+  // CHECK-NEXT: crd[1] : ( 0, 1 )
+  // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
   // CHECK-NEXT: ----
   //
   func.func @foo3() {
@@ -186,9 +186,9 @@ module {
   // CHECK-NEXT: nse = 24
   // CHECK-NEXT: dim = ( 6, 16 )
   // CHECK-NEXT: lvl = ( 4, 2, 4, 3 )
-  // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
-  // CHECK-NEXT: crd[1] : ( 0, 1,
-  // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
+  // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2 )
+  // CHECK-NEXT: crd[1] : ( 0, 1 )
+  // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
   // CHECK-NEXT: ----
   //
   func.func @foo4() {

mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir

Lines changed: 10 additions & 10 deletions
@@ -111,11 +111,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 9, 4 )
     // CHECK-NEXT: lvl = ( 9, 4 )
-    // CHECK-NEXT: pos[0] : ( 0, 9,
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8,
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18,
-    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1,
-    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5,
+    // CHECK-NEXT: pos[0] : ( 0, 9 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1 )
+    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5 )
     // CHECK-NEXT: ----
     //
     %0 = call @concat_sparse_sparse(%sm24cc, %sm34cd, %sm44dc)
@@ -142,11 +142,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 9, 4 )
     // CHECK-NEXT: lvl = ( 9, 4 )
-    // CHECK-NEXT: pos[0] : ( 0, 9,
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8,
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18,
-    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1,
-    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5,
+    // CHECK-NEXT: pos[0] : ( 0, 9 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1 )
+    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5 )
     // CHECK-NEXT: ----
     //
     %2 = call @concat_mix_sparse(%m24, %sm34cd, %sm44dc)

mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir

Lines changed: 10 additions & 10 deletions
@@ -144,11 +144,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 9, 4 )
     // CHECK-NEXT: lvl = ( 4, 9 )
-    // CHECK-NEXT: pos[0] : ( 0, 4
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
-    // CHECK-NEXT: pos[1] : ( 0, 5, 11, 16, 18
-    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 1, 3, 4, 6, 7, 8, 0, 2, 4, 5, 7, 2, 5
-    // CHECK-NEXT: values : ( 1, 1, 1, 1, 1, 2, 0.5, 5, 3.5, 5, 0.5, 3, 1, 2, 1.5, 2, 1, 1
+    // CHECK-NEXT: pos[0] : ( 0, 4 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
+    // CHECK-NEXT: pos[1] : ( 0, 5, 11, 16, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 1, 3, 4, 6, 7, 8, 0, 2, 4, 5, 7, 2, 5 )
+    // CHECK-NEXT: values : ( 1, 1, 1, 1, 1, 2, 0.5, 5, 3.5, 5, 0.5, 3, 1, 2, 1.5, 2, 1, 1 )
     // CHECK-NEXT: ----
     //
     %4 = call @concat_sparse_sparse_perm(%sm24ccp, %sm34cd, %sm44dc)
@@ -173,11 +173,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 9, 4 )
     // CHECK-NEXT: lvl = ( 9, 4 )
-    // CHECK-NEXT: pos[0] : ( 0, 9
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18
-    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1
-    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
+    // CHECK-NEXT: pos[0] : ( 0, 9 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1 )
+    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5 )
     // CHECK-NEXT: ----
     //
     %6 = call @concat_mix_sparse_perm(%m24, %sm34cdp, %sm44dc)

mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir

Lines changed: 10 additions & 10 deletions
@@ -116,11 +116,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 4, 9 )
     // CHECK-NEXT: lvl = ( 4, 9 )
-    // CHECK-NEXT: pos[0] : ( 0, 4
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
-    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
-    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
-    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    // CHECK-NEXT: pos[0] : ( 0, 4 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
+    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6 )
+    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5 )
     // CHECK-NEXT: ----
     //
     %8 = call @concat_sparse_sparse_dim1(%sm42cc, %sm43cd, %sm44dc)
@@ -140,11 +140,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 4, 9 )
     // CHECK-NEXT: lvl = ( 4, 9 )
-    // CHECK-NEXT: pos[0] : ( 0, 4
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
-    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
-    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
-    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    // CHECK-NEXT: pos[0] : ( 0, 4 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
+    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6 )
+    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5 )
     // CHECK-NEXT: ----
     //
     %10 = call @concat_mix_sparse_dim1(%m42, %sm43cd, %sm44dc)

mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir

Lines changed: 10 additions & 10 deletions
@@ -130,11 +130,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 4, 9 )
     // CHECK-NEXT: lvl = ( 9, 4 )
-    // CHECK-NEXT: pos[0] : ( 0, 9
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8
-    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 15, 17, 18
-    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 3, 3, 0, 1, 2, 2, 3, 1, 2, 3, 0, 2, 0
-    // CHECK-NEXT: values : ( 1, 3.1, 2, 1, 1, 5, 2, 1, 0.5, 1, 1, 1, 3.5, 5, 0.5, 1.5, 2, 1
+    // CHECK-NEXT: pos[0] : ( 0, 9 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 15, 17, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 3, 3, 0, 1, 2, 2, 3, 1, 2, 3, 0, 2, 0 )
+    // CHECK-NEXT: values : ( 1, 3.1, 2, 1, 1, 5, 2, 1, 0.5, 1, 1, 1, 3.5, 5, 0.5, 1.5, 2, 1 )
     // CHECK-NEXT: ----
     //
     %12 = call @concat_sparse_sparse_perm_dim1(%sm42ccp, %sm43cd, %sm44dc)
@@ -154,11 +154,11 @@ module {
     // CHECK-NEXT: nse = 18
     // CHECK-NEXT: dim = ( 4, 9 )
     // CHECK-NEXT: lvl = ( 4, 9 )
-    // CHECK-NEXT: pos[0] : ( 0, 4
-    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
-    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
-    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
-    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    // CHECK-NEXT: pos[0] : ( 0, 4 )
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
+    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18 )
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6 )
+    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5 )
     // CHECK-NEXT: ----
     //
     %14 = call @concat_mix_sparse_perm_dim1(%m42, %sm43cdp, %sm44dc)

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir

Lines changed: 1 addition & 1 deletion
@@ -108,7 +108,7 @@ module {
     // CHECK-NEXT: nse = 25
     // CHECK-NEXT: dim = ( 5, 5 )
     // CHECK-NEXT: lvl = ( 5, 5 )
-    // CHECK-NEXT: values : ( 2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10,
+    // CHECK-NEXT: values : ( 2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10 )
     // CHECK-NEXT: ----
     //
     sparse_tensor.print %0 : tensor<?x?xf64, #DenseMatrix>

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir

Lines changed: 1 addition & 1 deletion
@@ -95,7 +95,7 @@ module {
     // CHECK-NEXT: nse = 32
     // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
-    // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
+    // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
     // CHECK-NEXT: ----
     //
     sparse_tensor.print %0 : tensor<?xbf16, #DenseVector>

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir

Lines changed: 1 addition & 1 deletion
@@ -96,7 +96,7 @@ module {
     // CHECK-NEXT: nse = 32
     // CHECK-NEXT: dim = ( 32 )
     // CHECK-NEXT: lvl = ( 32 )
-    // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
+    // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
     // CHECK-NEXT: ----
     //
     sparse_tensor.print %0 : tensor<?xf16, #DenseVector>
