-// RUN: onnx-mlir --EmitONNXBasic --printIR %s | FileCheck %s
+// RUN: onnx-mlir --EmitONNXIR --printIR %s | FileCheck %s
 <
    ir_version: 8,
    opset_import: ["" : 6]
@@ -11,9 +11,9 @@ test_instancenorm_e2e (float[2,3,4,5] input, float[3] scale, float[3] bias) => (
 // CHECK-LABEL: func.func @main_graph
 // CHECK-SAME: ([[INPUT:%.+]]: tensor<2x3x4x5xf32> {onnx.name = "input"}, [[SCALE:%.+]]: tensor<3xf32> {onnx.name = "scale"}, [[BIAS:%.+]]: tensor<3xf32> {onnx.name = "bias"}) -> (tensor<2x3x4x5xf32> {onnx.name = "output"}) {
 // CHECK-DAG: [[AXES:%.+]] = onnx.Constant dense<[1, 2]> : tensor<2xi64>
-// CHECK-DAG: [[UNSQUEEZE_SCALE:%.+]] = "onnx.Unsqueeze"([[SCALE]], [[AXES]]) : (tensor<3xf32>, tensor<2xi64>) -> tensor<3x1x1xf32>
-// CHECK-DAG: [[UNSQUEEZE_BIAS:%.+]] = "onnx.Unsqueeze"([[BIAS]], [[AXES]]) : (tensor<3xf32>, tensor<2xi64>) -> tensor<3x1x1xf32>
-// CHECK: [[OUTPUT:%.+]], [[MEAN:%.+]], [[INV_STD_DEV:%.+]] = "onnx.LayerNormalization"([[INPUT]], [[UNSQUEEZE_SCALE]], [[UNSQUEEZE_BIAS]]) {axis = 2 : si64, epsilon = 1.000000e-02 : f32, stash_type = 1 : si64 } : (tensor<2x3x4x5xf32>, tensor<3x1x1xf32>, tensor<3x1x1xf32>) -> (tensor<2x3x4x5xf32>, none, none)
-// CHECK: onnx.Return [[OUTPUT]] : tensor<2x3x4x5xf32>
+// CHECK-DAG: [[UNSQUEEZE_SCALE:%.+]] = "onnx.Unsqueeze"([[SCALE]], [[AXES]]) {{.*}} : (tensor<3xf32>, tensor<2xi64>) -> tensor<3x1x1xf32>
+// CHECK-DAG: [[UNSQUEEZE_BIAS:%.+]] = "onnx.Unsqueeze"([[BIAS]], [[AXES]]) {{.*}} : (tensor<3xf32>, tensor<2xi64>) -> tensor<3x1x1xf32>
+// CHECK: [[OUTPUT:%.+]], {{.*}}, {{.*}} = "onnx.LayerNormalization"([[INPUT]], [[UNSQUEEZE_SCALE]], [[UNSQUEEZE_BIAS]]) {axis = 2 : si64, epsilon = {{.*}} : f32{{.*}} } : (tensor<2x3x4x5xf32>, tensor<3x1x1xf32>, tensor<3x1x1xf32>) -> (tensor<2x3x4x5xf32>, none, none)
+// CHECK: return [[OUTPUT]] : tensor<2x3x4x5xf32>
 // CHECK: }
 