
Commit 3b9bf22

newling authored and keshavvinayak01 committed
Numerical tests: softmax with dynamic reduction size (iree-org#21594)
Additional dynamic-K softmax tests in preparation for a pipeline switch.

Signed-off-by: James Newling <[email protected]>
Signed-off-by: keshavvinayak01 <[email protected]>
1 parent: a06a068

1 file changed (+64, −0):

tests/e2e/regression/linalg_ops_dynamic.mlir

```diff
@@ -66,3 +66,67 @@ func.func @dynamic_matmul_dynamic_reduction_size_B2_M1_N3_K5() {
   check.expect_almost_eq(%observed, %expected, atol 1.0e-04) : tensor<2x1x3xf16>
   return
 }
```
```mlir
// Softmax operation with dynamic reduction size.
// Number of samples: 2. Size of reduction dimension: 4.
func.func @softmax_dynamic_reduction_N2_K4() {

  // Computed with numpy, values of ln([[3, 2, 4, 1.], [1, 7, 1, 1]]).
  %input = flow.tensor.dynamic_constant dense<
      [[1.09861229, 0.69314718, 1.38629436, 0.],
       [0.,         1.94591015, 0.,         0.]]> : tensor<2x4xf32> -> tensor<2x?xf32>

  %expected = flow.tensor.dynamic_constant dense<
      [[0.3, 0.2, 0.4, 0.1],
       [0.1, 0.7, 0.1, 0.1]]> : tensor<2x4xf32> -> tensor<2x?xf32>

  %c_1_index = arith.constant 1 : index
  %dim_0 = tensor.dim %input, %c_1_index : tensor<2x?xf32>
  %output = tensor.empty(%dim_0) : tensor<2x?xf32>

  %sm = linalg.softmax dimension(1) ins(%input : tensor<2x?xf32>)
                       outs(%output : tensor<2x?xf32>) -> tensor<2x?xf32>

  check.expect_almost_eq(%sm, %expected, atol 1.0e-04) : tensor<2x?xf32>
  return
}
```
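The expected values follow from the identity softmax(ln(x)) = x / sum(x) applied row-wise: the pre-log rows [3, 2, 4, 1] and [1, 7, 1, 1] each sum to 10. A minimal numpy sketch (not part of the commit) that reproduces the test's constants:

```python
import numpy as np

# Pre-log rows from the test comment; each row sums to 10.
x = np.array([[3.0, 2.0, 4.0, 1.0],
              [1.0, 7.0, 1.0, 1.0]])

inp = np.log(x)  # matches %input: ln(3) = 1.09861229, ln(2) = 0.69314718, ...

# Row-wise, numerically stable softmax: softmax(ln(x)) = x / x.sum() per row.
e = np.exp(inp - inp.max(axis=1, keepdims=True))
print(e / e.sum(axis=1, keepdims=True))
# [[0.3 0.2 0.4 0.1]
#  [0.1 0.7 0.1 0.1]]  -> matches %expected
```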
```mlir
// Softmax operation with dynamic reduction size.
// Number of samples: 1. Size of reduction dimension: 1.
func.func @softmax_dynamic_reduction_N1_K1() {

  %input = flow.tensor.dynamic_constant dense<[[-77.7]]> : tensor<1x1xf32> -> tensor<1x?xf32>
  %expected = flow.tensor.dynamic_constant dense<[[1.0]]> : tensor<1x1xf32> -> tensor<1x?xf32>

  %c_1_index = arith.constant 1 : index
  %dim_0 = tensor.dim %input, %c_1_index : tensor<1x?xf32>
  %output = tensor.empty(%dim_0) : tensor<1x?xf32>

  %sm = linalg.softmax dimension(1) ins(%input : tensor<1x?xf32>)
                       outs(%output : tensor<1x?xf32>) -> tensor<1x?xf32>

  check.expect_almost_eq(%sm, %expected, atol 1.0e-04) : tensor<1x?xf32>
  return
}
```
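With a reduction dimension of size 1, softmax reduces to exp(v) / exp(v) = 1.0 for any input value, which is why the arbitrary input -77.7 must map to exactly 1.0.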
```mlir
// Softmax operation with dynamic reduction size.
// Number of samples: 65. Size of reduction dimension: 1531.
func.func @softmax_dynamic_reduction_N65_K1531() {

  %input = flow.tensor.dynamic_constant dense<-3.1415> :
      tensor<65x1531xf32> -> tensor<65x?xf32>

  // 1/1531 is 0.0006531678641410843
  %expected = flow.tensor.dynamic_constant dense<0.0006531678641410843> :
      tensor<65x1531xf32> -> tensor<65x?xf32>

  %c_1_index = arith.constant 1 : index
  %dim_0 = tensor.dim %input, %c_1_index : tensor<65x?xf32>
  %output = tensor.empty(%dim_0) : tensor<65x?xf32>

  %sm = linalg.softmax dimension(1) ins(%input : tensor<65x?xf32>)
                       outs(%output : tensor<65x?xf32>) -> tensor<65x?xf32>

  check.expect_almost_eq(%sm, %expected, atol 1.0e-04) : tensor<65x?xf32>
  return
}
```
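In the last test every row of the input is constant, and softmax of a constant row is the uniform distribution 1/K regardless of the constant's value. A quick numpy check (again, not part of the commit) of the expected constant for K = 1531:

```python
import numpy as np

# Softmax of a constant row is uniform: exp(c) / (K * exp(c)) = 1/K,
# independent of c (here c = -3.1415, K = 1531).
row = np.full(1531, -3.1415)
e = np.exp(row - row.max())
sm = e / e.sum()
print(sm[0], 1.0 / 1531)  # both print 0.0006531678641410843
```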
