diff --git a/backends/arm/test/ops/test_ge.py b/backends/arm/test/ops/test_ge.py index 4090d04dc89..c66f6d164b9 100644 --- a/backends/arm/test/ops/test_ge.py +++ b/backends/arm/test/ops/test_ge.py @@ -13,6 +13,7 @@ OpNotSupportedPipeline, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) input_t = Tuple[torch.Tensor] @@ -181,3 +182,55 @@ def test_ge_scalar_u85_INT(test_module): run_on_fvp=True, ) pipeline.run() + + +@common.parametrize("test_module", test_data_tensor) +@common.SkipIfNoModelConverter +def test_ge_tensor_vgf_FP(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + GreaterEqual.aten_op_tensor, + GreaterEqual.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_tensor) +@common.SkipIfNoModelConverter +def test_ge_tensor_vgf_INT(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + GreaterEqual.aten_op_tensor, + GreaterEqual.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_scalar) +@common.SkipIfNoModelConverter +def test_ge_scalar_vgf_FP(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + GreaterEqual.aten_op_scalar, + GreaterEqual.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_scalar) +@common.SkipIfNoModelConverter +def test_ge_scalar_vgf_INT(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + GreaterEqual.aten_op_tensor, + GreaterEqual.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_gelu.py b/backends/arm/test/ops/test_gelu.py index 8187ec69dc6..264f6b95e71 100644 --- a/backends/arm/test/ops/test_gelu.py +++ b/backends/arm/test/ops/test_gelu.py @@ -12,6 +12,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) input_t1 = Tuple[torch.Tensor] @@ -125,3 +126,31 @@ def test_gelu_u85_INT(test_data: input_t1): Gelu.aten_op, Gelu.exir_op, ).run() + + +@common.parametrize("test_data", Gelu.test_data) +@common.SkipIfNoModelConverter +def test_gelu_vgf_FP(test_data: input_t1): + approximate, data = test_data() + pipeline = VgfPipeline[input_t1]( + Gelu(approximate), + (data,), + Gelu.aten_op, + Gelu.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", Gelu.test_data) +@common.SkipIfNoModelConverter +def test_gelu_vgf_INT(test_data: input_t1): + approximate, data = test_data() + pipeline = VgfPipeline[input_t1]( + Gelu(approximate), + (data,), + Gelu.aten_op, + Gelu.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_group_norm.py b/backends/arm/test/ops/test_group_norm.py index 248a13e51f8..5fa4cd328de 100644 --- a/backends/arm/test/ops/test_group_norm.py +++ b/backends/arm/test/ops/test_group_norm.py @@ -10,6 +10,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) @@ -143,3 +144,56 @@ def test_native_group_norm_u85_INT(test_data): ) pipeline.change_args("run_method_and_compare_outputs", atol=1, qtol=1) pipeline.run() + + +@common.parametrize( + "test_data", + test_data_suite, + xfails={ + "randn_1_12_8_6_groups_12": "MLETORCH-925: Fix numerical issue", + "rand_6_8_10_12_groups_1": "MLETORCH-925: Fix numerical issue", + "rand_6_8_10_12_groups_4_no_affine": "MLETORCH-925: Fix numerical issue", + "rand_6_8_10_12_groups_8": "MLETORCH-925: Fix 
numerical issue", + }, + strict=False, +) +@common.SkipIfNoModelConverter +def test_native_group_norm_vgf_FP(test_data): + aten_op = "torch.ops.aten.group_norm.default" + exir_op = "executorch_exir_dialects_edge__ops_aten_native_group_norm_default" + model, inp = test_data + pipeline = VgfPipeline[input_t]( + inp, + model, + aten_op=aten_op, + exir_op=exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize( + "test_data", + test_data_suite, + xfails={ + "randn_1_12_8_6_groups_12": "MLETORCH-925: Fix numerical issue", + "rand_6_8_10_12_groups_1": "MLETORCH-925: Fix numerical issue", + "rand_6_8_10_12_groups_4_no_affine": "MLETORCH-925: Fix numerical issue", + "rand_6_8_10_12_groups_8": "MLETORCH-925: Fix numerical issue", + }, + strict=False, +) +@common.SkipIfNoModelConverter +def test_native_group_norm_vgf_INT(test_data): + aten_op = "torch.ops.aten.sub.Tensor" + exir_op = "executorch_exir_dialects_edge__ops_aten_native_group_norm_default" + model, inp = test_data + pipeline = VgfPipeline[input_t]( + inp, + model, + aten_op=aten_op, + exir_op=exir_op, + tosa_version="TOSA-1.0+INT", + atol=0.1, # TODO: "MLETORCH-925: Fix numerical issue for aten.native_group_norm" + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_gt.py b/backends/arm/test/ops/test_gt.py index 76e18444185..83c85e5f9fc 100644 --- a/backends/arm/test/ops/test_gt.py +++ b/backends/arm/test/ops/test_gt.py @@ -13,6 +13,7 @@ OpNotSupportedPipeline, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) @@ -186,3 +187,55 @@ def test_gt_scalar_u85_INT(test_module): run_on_fvp=True, ) pipeline.run() + + +@common.parametrize("test_module", test_data_tensor) +@common.SkipIfNoModelConverter +def test_gt_tensor_vgf_FP(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + Greater.aten_op_tensor, + Greater.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_scalar) +@common.SkipIfNoModelConverter +def test_gt_scalar_vgf_FP(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + Greater.aten_op_scalar, + Greater.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_tensor) +@common.SkipIfNoModelConverter +def test_gt_tensor_vgf_INT(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + Greater.aten_op_tensor, + Greater.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_scalar) +@common.SkipIfNoModelConverter +def test_gt_scalar_vgf_INT(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + Greater.aten_op_tensor, + Greater.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_hardsigmoid.py b/backends/arm/test/ops/test_hardsigmoid.py index 6c928b4a37e..5f591c15617 100644 --- a/backends/arm/test/ops/test_hardsigmoid.py +++ b/backends/arm/test/ops/test_hardsigmoid.py @@ -14,6 +14,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) aten_op = "torch.ops.aten.hardsigmoid.default" @@ -87,3 +88,25 @@ def test_hardsigmoid_u85_INT(test_data: torch.Tensor): use_to_edge_transform_and_lower=True, ) pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_hardsigmoid_vgf_FP(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Hardsigmoid(), 
(test_data(),), aten_op, exir_op=[], tosa_version="TOSA-1.0+FP" + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_hardsigmoid_vgf_INT(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Hardsigmoid(), + (test_data(),), + aten_op, + exir_op=[], + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_hardswish.py b/backends/arm/test/ops/test_hardswish.py index bfd559fc1d7..00db0cb296b 100644 --- a/backends/arm/test/ops/test_hardswish.py +++ b/backends/arm/test/ops/test_hardswish.py @@ -14,6 +14,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) aten_op = "torch.ops.aten.hardswish.default" @@ -77,3 +78,25 @@ def test_hardswish_u85_INT(test_data): run_on_fvp=True, use_to_edge_transform_and_lower=True, ).run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_hardswish_vgf_FP(test_data): + pipeline = VgfPipeline[input_t1]( + Hardswish(), (test_data(),), aten_op, exir_op, tosa_version="TOSA-1.0+FP" + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_hardswish_vgf_INT(test_data): + pipeline = VgfPipeline[input_t1]( + Hardswish(), + (test_data(),), + aten_op, + exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_hardtanh.py b/backends/arm/test/ops/test_hardtanh.py index 28f44c58a74..28f7e717351 100644 --- a/backends/arm/test/ops/test_hardtanh.py +++ b/backends/arm/test/ops/test_hardtanh.py @@ -16,6 +16,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) test_data_suite = { @@ -86,3 +87,25 @@ def test_hardtanh_u85_INT(test_data: torch.Tensor): run_on_fvp=True, ) pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_hardtanh_vgf_FP(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t]( + HardTanh(), (test_data(),), aten_op, exir_op, tosa_version="TOSA-1.0+FP" + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_hardtanh_vgf_INT(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t]( + HardTanh(), + (test_data(),), + aten_op, + exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_index_select.py b/backends/arm/test/ops/test_index_select.py index a3e655db0ce..bb28d66f7cf 100644 --- a/backends/arm/test/ops/test_index_select.py +++ b/backends/arm/test/ops/test_index_select.py @@ -9,9 +9,12 @@ import pytest import torch + +from executorch.backends.arm.test import common from executorch.backends.arm.test.tester.test_pipeline import ( TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) @@ -115,3 +118,49 @@ def test_index_select_tosa_INT_rand(test_data: input_params): "run_method_and_compare_outputs", inputs=test_input, atol=0.9, rtol=0.2, qtol=1 ) pipeline.run() + + +@pytest.mark.parametrize("test_data", list(test_data.values())) +@common.SkipIfNoModelConverter +def test_index_select_vgf_FP(test_data: input_params): + op, inp = test_data + pipeline = VgfPipeline[input_params]( + op, + inp, + op.aten_op, + op.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@pytest.mark.parametrize("test_data", list(test_data.values())[:-1]) +@common.SkipIfNoModelConverter +def test_index_select_vgf_INT(test_data: input_params): + op, inp = test_data + pipeline = VgfPipeline[input_params]( + op, + inp, + 
op.aten_op, + op.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() + + +@pytest.mark.parametrize("test_data", list(test_data.values())[-1:]) +@common.SkipIfNoModelConverter +def test_index_select_vgf_INT_rand(test_data: input_params): + op, inp = test_data + pipeline = VgfPipeline[input_params]( + op, + inp, + op.aten_op, + op.exir_op, + tosa_version="TOSA-1.0+INT", + ) + # TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests + # pipeline.change_args( + # "run_method_and_compare_outputs", inputs=test_input, atol=0.9, rtol=0.2, qtol=1 + # ) + pipeline.run() diff --git a/backends/arm/test/ops/test_layer_norm.py b/backends/arm/test/ops/test_layer_norm.py index fddfd6af2ee..2c9b83dc7e7 100644 --- a/backends/arm/test/ops/test_layer_norm.py +++ b/backends/arm/test/ops/test_layer_norm.py @@ -12,6 +12,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) @@ -112,3 +113,29 @@ def test_native_layer_norm_u85_INT(test_data): symmetric_io_quantization=True, ) pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_native_layer_norm_vgf_FP(test_data): + test_input, model = test_data() + pipeline = VgfPipeline[input_t]( + model, + test_input, + "torch.ops.aten.layer_norm.default", + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_native_layer_norm_vgf_INT(test_data): + test_input, model = test_data() + pipeline = VgfPipeline[input_t]( + model, + test_input, + "torch.ops.aten.sub.Tensor", + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_le.py b/backends/arm/test/ops/test_le.py index f5773713d9c..6cb185ecb92 100644 --- a/backends/arm/test/ops/test_le.py +++ b/backends/arm/test/ops/test_le.py @@ -13,6 +13,7 @@ OpNotSupportedPipeline, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) @@ -189,3 +190,55 @@ def test_le_scalar_u85_INT(test_module): use_to_edge_transform_and_lower=True, ) pipeline.run() + + +@common.parametrize("test_module", test_data_tensor) +@common.SkipIfNoModelConverter +def test_le_tensor_vgf_FP(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + LessEqual.aten_op_tensor, + LessEqual.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_tensor) +@common.SkipIfNoModelConverter +def test_le_tensor_vgf_INT(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + LessEqual.aten_op_tensor, + LessEqual.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_scalar) +@common.SkipIfNoModelConverter +def test_le_scalar_vgf_FP(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + LessEqual.aten_op_scalar, + LessEqual.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_scalar) +@common.SkipIfNoModelConverter +def test_le_scalar_vgf_INT(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + LessEqual.aten_op_tensor, + LessEqual.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_leaky_relu.py b/backends/arm/test/ops/test_leaky_relu.py index 5be1a600150..c18255a73c0 100644 --- a/backends/arm/test/ops/test_leaky_relu.py +++ 
b/backends/arm/test/ops/test_leaky_relu.py @@ -12,6 +12,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) aten_op = "torch.ops.aten.leaky_relu.default" @@ -92,3 +93,35 @@ def test_leaky_relu_u85_INT(test_data): ) pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op]) pipeline.run() + + +@common.parametrize("test_data", LeakyReLU.test_data) +@common.SkipIfNoModelConverter +def test_leaky_relu_vgf_FP(test_data): + data, slope = test_data() + pipeline = VgfPipeline[input_t1]( + LeakyReLU(slope), + data, + [], + use_to_edge_transform_and_lower=True, + tosa_version="TOSA-1.0+FP", + ) + pipeline.add_stage_after( + "to_edge_transform_and_lower", pipeline.tester.check_not, [aten_op] + ) + pipeline.run() + + +@common.parametrize("test_data", LeakyReLU.test_data) +@common.SkipIfNoModelConverter +def test_leaky_relu_vgf_INT(test_data): + data, slope = test_data() + pipeline = VgfPipeline[input_t1]( + LeakyReLU(slope), + data, + [], + use_to_edge_transform_and_lower=True, + tosa_version="TOSA-1.0+INT", + ) + pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op]) + pipeline.run() diff --git a/backends/arm/test/ops/test_linalg_vector_norm.py b/backends/arm/test/ops/test_linalg_vector_norm.py index 8cd6c44ecab..1777cffb0a7 100644 --- a/backends/arm/test/ops/test_linalg_vector_norm.py +++ b/backends/arm/test/ops/test_linalg_vector_norm.py @@ -13,6 +13,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) input_t = Tuple[torch.Tensor] @@ -125,3 +126,37 @@ def test_vector_norm_u85_INT_fvp(test_module): ) pipeline.pop_stage("check_not.exir") pipeline.run() + + +@common.parametrize("test_module", test_modules) +@common.SkipIfNoModelConverter +def test_vector_norm_vgf_FP(test_module): + model, input_tensor = test_module + # FP VGF + aten_op = "torch.ops.aten.linalg_vector_norm.default" + exir_op = "executorch_exir_dialects_edge__ops_aten_linalg_vector_norm_default" + pipeline = VgfPipeline[input_t]( + model, + input_tensor, + aten_op, + exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_module", test_modules) +@common.SkipIfNoModelConverter +def test_vector_norm_vgf_INT(test_module): + model, input_tensor = test_module + # Should not found this op + exir_op = "executorch_exir_dialects_edge__ops_aten_linalg_vector_norm_default" + + pipeline = VgfPipeline[input_t]( + model, + input_tensor, + aten_op_q_decomposed_q, + exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_linear.py b/backends/arm/test/ops/test_linear.py index b35d108a8a3..57ce490dae8 100644 --- a/backends/arm/test/ops/test_linear.py +++ b/backends/arm/test/ops/test_linear.py @@ -18,6 +18,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) aten_op = "torch.ops.aten.linear.default" @@ -218,3 +219,42 @@ def test_linear_u85_INT(test_data: torch.Tensor): per_channel_quantization=per_channel_quantization, use_to_edge_transform_and_lower=True, ).run() + + +@common.parametrize("test_data", test_data_rank1_FP | test_data_rank4_FP) +@common.SkipIfNoModelConverter +def test_linear_vgf_FP(test_data: torch.Tensor): + test_data, out_features, has_bias = test_data() + in_features = test_data.shape[-1] + pipeline = VgfPipeline[input_t1]( + Linear( + in_features=in_features, + out_features=out_features, + bias=has_bias, + ), + (test_data,), + aten_op=aten_op, + exir_op=[], + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", 
test_data_rank1_INT | test_data_rank4_INT) +@common.SkipIfNoModelConverter +def test_linear_vgf_INT(test_data: torch.Tensor): + test_data, out_features, has_bias, per_channel_quantization = test_data() + in_features = test_data.shape[-1] + pipeline = VgfPipeline[input_t1]( + Linear( + in_features=in_features, + out_features=out_features, + bias=has_bias, + ), + (test_data,), + aten_op=aten_op, + exir_op=[], + tosa_version="TOSA-1.0+INT", + per_channel_quantization=per_channel_quantization, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_log.py b/backends/arm/test/ops/test_log.py index d24052c8793..1ed5c57f1ab 100644 --- a/backends/arm/test/ops/test_log.py +++ b/backends/arm/test/ops/test_log.py @@ -16,6 +16,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) aten_op = "torch.ops.aten.log.default" @@ -73,3 +74,29 @@ def test_log_u85_INT(test_data: input_t1): exir_op, run_on_fvp=True, ).run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_log_vgf_FP(test_data: input_t1): + pipeline = VgfPipeline[input_t1]( + Log(), + (test_data(),), + aten_op, + exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_log_vgf_INT(test_data: input_t1): + pipeline = VgfPipeline[input_t1]( + Log(), + (test_data(),), + aten_op, + exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_logical.py b/backends/arm/test/ops/test_logical.py index de90077d71f..2b160ce7b50 100644 --- a/backends/arm/test/ops/test_logical.py +++ b/backends/arm/test/ops/test_logical.py @@ -13,6 +13,7 @@ OpNotSupportedPipeline, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) @@ -80,6 +81,11 @@ def forward(self, tensor: torch.Tensor): return torch.logical_not(tensor) +################# +## logical_and ## +################# + + @common.parametrize("test_data", And().test_data) def test_logical_and_tosa_FP(test_data: input_t2): pipeline = TosaPipelineFP[input_t2]( @@ -141,6 +147,39 @@ def test_logical_and_u85_INT(test_data: input_t2): pipeline.run() +@common.parametrize("test_data", And().test_data) +@common.SkipIfNoModelConverter +def test_logical_and_vgf_FP(test_data: input_t2): + pipeline = VgfPipeline[input_t2]( + And(), + test_data(), + And().aten_op, + And().exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", And().test_data) +@common.SkipIfNoModelConverter +def test_logical_and_vgf_INT(test_data: input_t2): + pipeline = VgfPipeline[input_t2]( + And(), + test_data(), + And().aten_op, + And().exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.pop_stage("quantize") + pipeline.pop_stage("check.quant_nodes") + pipeline.run() + + +################# +## logical_xor ## +################# + + @common.parametrize("test_data", Xor().test_data) def test_logical_xor_tosa_FP(test_data: input_t2): pipeline = TosaPipelineFP[input_t2]( @@ -202,6 +241,39 @@ def test_logical_xor_u85_INT(test_data: input_t2): pipeline.run() +@common.parametrize("test_data", Xor().test_data) +@common.SkipIfNoModelConverter +def test_logical_xor_vgf_FP(test_data: input_t2): + pipeline = VgfPipeline[input_t2]( + Xor(), + test_data(), + Xor().aten_op, + Xor().exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", Xor().test_data) +@common.SkipIfNoModelConverter +def test_logical_xor_vgf_INT(test_data: input_t2): + pipeline = VgfPipeline[input_t2]( + Xor(), 
+ test_data(), + Xor().aten_op, + Xor().exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.pop_stage("quantize") + pipeline.pop_stage("check.quant_nodes") + pipeline.run() + + +################ +## logical_or ## +################ + + @common.parametrize("test_data", Or().test_data) def test_logical_or_tosa_FP(test_data: input_t2): pipeline = TosaPipelineFP[input_t2]( @@ -263,6 +335,39 @@ def test_logical_or_u85_INT(test_data: input_t2): pipeline.run() +@common.parametrize("test_data", Or().test_data) +@common.SkipIfNoModelConverter +def test_logical_or_vgf_FP(test_data: input_t2): + pipeline = VgfPipeline[input_t2]( + Or(), + test_data(), + Or().aten_op, + Or().exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", Or().test_data) +@common.SkipIfNoModelConverter +def test_logical_or_vgf_INT(test_data: input_t2): + pipeline = VgfPipeline[input_t2]( + Or(), + test_data(), + Or().aten_op, + Or().exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.pop_stage("quantize") + pipeline.pop_stage("check.quant_nodes") + pipeline.run() + + +################# +## logical_not ## +################# + + @common.parametrize("test_data", Not().test_data) def test_logical_not_tosa_FP(test_data: input_t2): pipeline = TosaPipelineFP[input_t2]( @@ -322,3 +427,31 @@ def test_logical_not_u85_INT(test_data: input_t2): pipeline.pop_stage("quantize") pipeline.pop_stage("check.quant_nodes") pipeline.run() + + +@common.parametrize("test_data", Not().test_data) +@common.SkipIfNoModelConverter +def test_logical_not_vgf_FP(test_data: input_t2): + pipeline = VgfPipeline[input_t2]( + Not(), + test_data(), + Not().aten_op, + Not().exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", Not().test_data) +@common.SkipIfNoModelConverter +def test_logical_not_vgf_INT(test_data: input_t2): + pipeline = VgfPipeline[input_t2]( + Not(), + test_data(), + Not().aten_op, + Not().exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.pop_stage("quantize") + pipeline.pop_stage("check.quant_nodes") + pipeline.run() diff --git a/backends/arm/test/ops/test_logsoftmax.py b/backends/arm/test/ops/test_logsoftmax.py index 27106bc40cc..b1b934fbcc8 100644 --- a/backends/arm/test/ops/test_logsoftmax.py +++ b/backends/arm/test/ops/test_logsoftmax.py @@ -14,6 +14,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) aten_op = "torch.ops.aten.log_softmax.default" # Used for checking that we do not have log_softmax in the graph @@ -103,3 +104,33 @@ def test_log_softmax_u85_INT(test_data): pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op]) pipeline.change_args("run_method_and_compare_outputs", qtol=1) pipeline.run() + + +@common.parametrize("test_data", LogSoftmax.test_data) +@common.SkipIfNoModelConverter +def test_log_softmax_vgf_FP(test_data): + data, dim = test_data() + pipeline = VgfPipeline[input_t1]( + LogSoftmax(dim), data, [], [], tosa_version="TOSA-1.0+FP" + ) + pipeline.add_stage_after( + "to_edge_transform_and_lower", pipeline.tester.check_not, [aten_op] + ) + pipeline.run() + + +@common.parametrize("test_data", LogSoftmax.test_data) +@common.SkipIfNoModelConverter +def test_log_softmax_vgf_INT(test_data): + data, dim = test_data() + pipeline = VgfPipeline[input_t1]( + LogSoftmax(dim), + data, + [], + [], + tosa_version="TOSA-1.0+INT", + ) + pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op]) + # TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests + # 
pipeline.change_args("run_method_and_compare_outputs", qtol=1) + pipeline.run() diff --git a/backends/arm/test/ops/test_lshift.py b/backends/arm/test/ops/test_lshift.py index 6bd2a9202cd..bab364a4528 100644 --- a/backends/arm/test/ops/test_lshift.py +++ b/backends/arm/test/ops/test_lshift.py @@ -14,6 +14,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) scalar_input_t = tuple[torch.Tensor, int] @@ -67,8 +68,13 @@ def forward(self, x: torch.Tensor, shift: torch.Tensor): return x.bitwise_left_shift(shift) +################## +## LshiftScalar ## +################## + + @common.parametrize("test_data", LshiftScalar.test_data) -def test_lshift_scalar_tosa_FP_scalar(test_data): +def test_bitwise_left_shift_scalar_tosa_FP_scalar(test_data): TosaPipelineFP[scalar_input_t]( LshiftScalar(), test_data, @@ -117,8 +123,40 @@ def test_bitwise_left_shift_tensor_u85_INT_scalar(test_data): pipeline.run() +@common.parametrize("test_data", LshiftScalar.test_data) +@common.SkipIfNoModelConverter +def test_bitwise_left_shift_scalar_vgf_FP_scalar(test_data: scalar_input_t): + pipeline = VgfPipeline[scalar_input_t]( + LshiftScalar(), + test_data, + LshiftScalar.torch_op_FP, + LshiftScalar.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", LshiftScalar.test_data) +@common.SkipIfNoModelConverter +def test_bitwise_left_shift_tensor_vgf_INT_scalar(test_data: scalar_input_t): + pipeline = VgfPipeline[scalar_input_t]( + LshiftScalar(), + test_data, + LshiftScalar.torch_op_INT, + LshiftScalar.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.pop_stage("check.quant_nodes") + pipeline.run() + + +################## +## LshiftTensor ## +################## + + @common.parametrize("test_data", LshiftTensor.test_data) -def test_lshift_scalar_tosa_FP(test_data): +def test_bitwise_left_shift_tensor_tosa_FP(test_data): TosaPipelineFP[scalar_input_t]( LshiftTensor(), test_data, @@ -165,3 +203,30 @@ def test_bitwise_left_shift_tensor_u85_INT(test_data): ) pipeline.pop_stage("check.quant_nodes") pipeline.run() + + +@common.parametrize("test_data", LshiftTensor.test_data) +@common.SkipIfNoModelConverter +def test_bitwise_left_shift_tensor_vgf_FP(test_data: tensor_input_t): + pipeline = VgfPipeline[tensor_input_t]( + LshiftTensor(), + test_data, + LshiftTensor.torch_op, + LshiftTensor.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", LshiftTensor.test_data) +@common.SkipIfNoModelConverter +def test_bitwise_left_shift_tensor_vgf_INT(test_data: tensor_input_t): + pipeline = VgfPipeline[tensor_input_t]( + LshiftTensor(), + test_data, + LshiftTensor.torch_op, + LshiftTensor.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.pop_stage("check.quant_nodes") + pipeline.run() diff --git a/backends/arm/test/ops/test_lt.py b/backends/arm/test/ops/test_lt.py index 3193ef83e65..86d903e3f88 100644 --- a/backends/arm/test/ops/test_lt.py +++ b/backends/arm/test/ops/test_lt.py @@ -13,6 +13,7 @@ OpNotSupportedPipeline, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) @@ -186,3 +187,55 @@ def test_lt_scalar_u85_INT(test_module): run_on_fvp=True, ) pipeline.run() + + +@common.parametrize("test_module", test_data_tensor) +@common.SkipIfNoModelConverter +def test_lt_tensor_vgf_FP(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + LessThan.aten_op_tensor, + LessThan.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_scalar) 
+@common.SkipIfNoModelConverter +def test_lt_scalar_vgf_FP(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + LessThan.aten_op_scalar, + LessThan.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_tensor) +@common.SkipIfNoModelConverter +def test_lt_tensor_vgf_INT(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + LessThan.aten_op_tensor, + LessThan.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_scalar) +@common.SkipIfNoModelConverter +def test_lt_scalar_vgf_INT(test_module): + pipeline = VgfPipeline[input_t]( + test_module(), + test_module().get_inputs(), + LessThan.aten_op_tensor, + LessThan.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_masked_fill.py b/backends/arm/test/ops/test_masked_fill.py index 80c0c4b0d8e..3aab19925ec 100644 --- a/backends/arm/test/ops/test_masked_fill.py +++ b/backends/arm/test/ops/test_masked_fill.py @@ -14,6 +14,7 @@ OpNotSupportedPipeline, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) @@ -142,3 +143,23 @@ def test_masked_fill_scalar_u85_INT(test_module): exir_ops=exir_op, ) pipeline.run() + + +@common.parametrize("test_module", test_modules) +@common.SkipIfNoModelConverter +def test_masked_fill_scalar_vgf_FP(test_module): + module, inputs = test_module() + pipeline = VgfPipeline[input_t]( + module, inputs, aten_op=[], tosa_version="TOSA-1.0+FP" + ) + pipeline.run() + + +@common.parametrize("test_module", test_modules) +@common.SkipIfNoModelConverter +def test_masked_fill_scalar_vgf_INT(test_module): + module, inputs = test_module() + pipeline = VgfPipeline[input_t]( + module, inputs, aten_op=[], tosa_version="TOSA-1.0+INT" + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_matmul.py b/backends/arm/test/ops/test_matmul.py index 17356f98420..d1a21684325 100644 --- a/backends/arm/test/ops/test_matmul.py +++ b/backends/arm/test/ops/test_matmul.py @@ -12,6 +12,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) aten_op_mm = "torch.ops.aten.matmul.default" @@ -195,3 +196,73 @@ def test_matmul_combo_u85_INT(test_data: input_t1): use_to_edge_transform_and_lower=True, ) pipeline.run() + + +@common.parametrize("test_data", MatMul.test_data_generators) +@common.SkipIfNoModelConverter +def test_matmul_vgf_FP(test_data: input_t1): + pipeline = VgfPipeline[input_t1]( + MatMul(), test_data(), aten_op_mm, exir_op_mm, tosa_version="TOSA-1.0+FP" + ) + pipeline.run() + + +@common.parametrize("test_data", MatMulSingleInput.test_data_generators) +@common.SkipIfNoModelConverter +def test_matmul_single_input_vgf_FP(test_data: input_t1): + pipeline = VgfPipeline[input_t1]( + MatMulSingleInput(), + test_data(), + aten_op_mm, + exir_op_mm, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", MatMulCombo.test_data_generators) +@common.SkipIfNoModelConverter +def test_matmul_combo_vgf_FP(test_data: input_t1): + pipeline = VgfPipeline[input_t1]( + MatMulCombo(), test_data(), aten_op_mm, exir_op_mm, tosa_version="TOSA-1.0+FP" + ) + pipeline.run() + + +@common.parametrize("test_data", MatMul.test_data_generators) +@common.SkipIfNoModelConverter +def test_matmul_vgf_INT(test_data: input_t1): + pipeline = VgfPipeline[input_t1]( + MatMul(), + test_data(), + aten_op_mm, + exir_op_mm, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() + + 
+@common.parametrize("test_data", MatMulSingleInput.test_data_generators) +@common.SkipIfNoModelConverter +def test_matmul_single_input_vgf_INT(test_data: input_t1): + pipeline = VgfPipeline[input_t1]( + MatMulSingleInput(), + test_data(), + aten_op_mm, + exir_op_mm, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() + + +@common.parametrize("test_data", MatMulCombo.test_data_generators) +@common.SkipIfNoModelConverter +def test_matmul_combo_vgf_INT(test_data: input_t1): + pipeline = VgfPipeline[input_t1]( + MatMulCombo(), + test_data(), + aten_op_mm, + exir_op_mm, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_max_pool.py b/backends/arm/test/ops/test_max_pool.py index 488dda145d0..6b75c2b7d0a 100644 --- a/backends/arm/test/ops/test_max_pool.py +++ b/backends/arm/test/ops/test_max_pool.py @@ -17,6 +17,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) test_data_suite = { @@ -274,3 +275,94 @@ def test_max_pool2d_tosa_INT_dilation(test_data): symmetric_io_quantization=True, ) pipeline.run() + + +# VGF tests +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_max_pool2d_vgf_FP(test_data: torch.Tensor): + test_data, model_params = test_data() + pipeline = VgfPipeline[input_t1]( + MaxPool2d(*model_params), + (test_data,), + aten_op, + exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_max_pool2d_vgf_INT(test_data: torch.Tensor): + test_data, model_params = test_data() + pipeline = VgfPipeline[input_t1]( + MaxPool2d(*model_params), + (test_data,), + aten_op, + exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite_mult_batches) +@common.SkipIfNoModelConverter +def test_max_pool2d_vgf_FP_mult_batches(test_data: torch.Tensor): + test_data, model_params = test_data() + pipeline = VgfPipeline[input_t1]( + MaxPool2d(*model_params), + (test_data,), + aten_op, + exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite_mult_batches) +@common.SkipIfNoModelConverter +def test_max_pool2d_vgf_INT_mult_batches(test_data: torch.Tensor): + test_data, model_params = test_data() + pipeline = VgfPipeline[input_t1]( + MaxPool2d(*model_params), + (test_data,), + aten_op, + exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() + + +@common.parametrize("test_data", dilation_test_data) +@common.SkipIfNoModelConverter +def test_max_pool2d_vgf_FP_dilation(test_data: torch.Tensor): + """ + VGF FP pipeline with dilation > 1 (and dilation=1 sanity cases). + """ + test_data, model_params = test_data() + pipeline = VgfPipeline[input_t1]( + MaxPool2d(*model_params), + (test_data,), + aten_op, + exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", dilation_test_data) +@common.SkipIfNoModelConverter +def test_max_pool2d_vgf_INT_dilation(test_data: torch.Tensor): + """ + VGF INT pipeline with dilation > 1 (and dilation=1 sanity cases). 
+ """ + test_data, model_params = test_data() + pipeline = VgfPipeline[input_t1]( + MaxPool2d(*model_params), + (test_data,), + aten_op, + exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_maximum.py b/backends/arm/test/ops/test_maximum.py index 5b7dd7fb520..eb0d4b86efc 100644 --- a/backends/arm/test/ops/test_maximum.py +++ b/backends/arm/test/ops/test_maximum.py @@ -15,6 +15,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) test_t = tuple[torch.Tensor, torch.Tensor] @@ -73,3 +74,27 @@ def test_maximum_u85_INT(test_data: Tuple): aten_op, run_on_fvp=True, ).run() + + +@common.parametrize("test_data", Maximum.test_parameters) +@common.SkipIfNoModelConverter +def test_maximum_vgf_FP(test_data: Tuple): + pipeline = VgfPipeline[test_t]( + Maximum(), + test_data(), + aten_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", Maximum.test_parameters) +@common.SkipIfNoModelConverter +def test_maximum_vgf_INT(test_data: Tuple): + pipeline = VgfPipeline[test_t]( + Maximum(), + test_data(), + aten_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_mean_dim.py b/backends/arm/test/ops/test_mean_dim.py index 2685c047222..1483b5d82b6 100644 --- a/backends/arm/test/ops/test_mean_dim.py +++ b/backends/arm/test/ops/test_mean_dim.py @@ -12,6 +12,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) input_t = tuple[torch.Tensor] @@ -83,6 +84,33 @@ def test_adaptive_avg_pool2d_u85_INT(test_data): ).run() +@common.parametrize("test_data", AdaptiveAveragePool2d.test_data_suite) +@common.SkipIfNoModelConverter +def test_adaptive_avg_pool2d_vgf_FP(test_data): + pipeline = VgfPipeline[input_t]( + AdaptiveAveragePool2d(), + test_data(), + AdaptiveAveragePool2d.aten_op, + AdaptiveAveragePool2d.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", AdaptiveAveragePool2d.test_data_suite) +@common.SkipIfNoModelConverter +def test_adaptive_avg_pool2d_vgf_INT(test_data): + pipeline = VgfPipeline[input_t]( + AdaptiveAveragePool2d(), + test_data(), + AdaptiveAveragePool2d.aten_op, + AdaptiveAveragePool2d.exir_op, + symmetric_io_quantization=True, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() + + class MeanDim(torch.nn.Module): test_data_suite: dict[str, tuple] = { "rank_1_keepdim": lambda: ( @@ -296,3 +324,31 @@ def test_mean_dim_u85_INT(test_data): symmetric_io_quantization=True, ) pipeline.run() + + +@common.parametrize("test_data", MeanDim.test_data_suite) +@common.SkipIfNoModelConverter +def test_mean_dim_vgf_FP(test_data): + test_data_val, dim, keep_dim = test_data() + pipeline = VgfPipeline[input_t]( + MeanDim(dim, keep_dim), + (test_data_val,), + MeanDim.torch_op, + MeanDim.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", MeanDim.test_data_suite) +@common.SkipIfNoModelConverter +def test_mean_dim_vgf_INT(test_data): + test_data_val, dim, keep_dim = test_data() + pipeline = VgfPipeline[input_t]( + MeanDim(dim, keep_dim), + (test_data_val,), + [], # Might be sum, avgpool, or both + symmetric_io_quantization=True, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_minimum.py b/backends/arm/test/ops/test_minimum.py index 273dee31adc..88ae2c2b8da 100644 --- a/backends/arm/test/ops/test_minimum.py +++ b/backends/arm/test/ops/test_minimum.py @@ -15,6 +15,7 @@ EthosU85PipelineINT, TosaPipelineFP, 
TosaPipelineINT, + VgfPipeline, ) test_t = tuple[torch.Tensor, torch.Tensor] @@ -73,3 +74,22 @@ def test_minimum_u85_INT(test_data: Tuple): aten_op, run_on_fvp=True, ).run() + + +@common.parametrize("test_data", Minimum.test_parameters) +@common.SkipIfNoModelConverter +def test_minimum_vgf_FP(test_data: test_t): + pipeline = VgfPipeline[test_t](Minimum(), test_data(), aten_op) + pipeline.run() + + +@common.parametrize("test_data", Minimum.test_parameters) +@common.SkipIfNoModelConverter +def test_minimum_vgf_INT(test_data: test_t): + pipeline = VgfPipeline[test_t]( + Minimum(), + test_data(), + aten_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_mm.py b/backends/arm/test/ops/test_mm.py index 6a73ca3db59..1b76baaeff0 100644 --- a/backends/arm/test/ops/test_mm.py +++ b/backends/arm/test/ops/test_mm.py @@ -14,6 +14,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) test_t = tuple[torch.Tensor, torch.Tensor] @@ -66,3 +67,25 @@ def test_mm_u85_INT(test_data: Tuple): MM.exir_op, run_on_fvp=True, ).run() + + +@common.parametrize("test_data", MM.test_data_generators) +@common.SkipIfNoModelConverter +def test_mm_vgf_FP(test_data: Tuple): + pipeline = VgfPipeline[test_t]( + MM(), test_data(), MM.aten_op, MM.exir_op, tosa_version="TOSA-1.0+FP" + ) + pipeline.run() + + +@common.parametrize("test_data", MM.test_data_generators) +@common.SkipIfNoModelConverter +def test_mm_vgf_INT(test_data: Tuple): + pipeline = VgfPipeline[test_t]( + MM(), + test_data(), + MM.aten_op, + MM.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_mul.py b/backends/arm/test/ops/test_mul.py index 122b44cf154..b0b7f5f4b7d 100644 --- a/backends/arm/test/ops/test_mul.py +++ b/backends/arm/test/ops/test_mul.py @@ -16,6 +16,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) input_t1 = Tuple[torch.Tensor, torch.Tensor] # Input x @@ -241,3 +242,45 @@ def test_mul_tensor_u85_INT_int32(test_data: torch.Tensor): ) pipeline.pop_stage("check.quant_nodes") pipeline.run() + + +@common.parametrize( + "test_data", test_data_suite | test_data_suite_2 | test_data_suite_int32 +) +@common.SkipIfNoModelConverter +def test_mul_tensor_vgf_FP(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Mul(), + test_data(), + aten_op, + exir_op=[], + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite | test_data_suite_2) +@common.SkipIfNoModelConverter +def test_mul_tensor_vgf_INT(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Mul(), + test_data(), + aten_op, + exir_op=[], + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite_int32) +@common.SkipIfNoModelConverter +def test_mul_tensor_vgf_INT_int32(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Mul(), + test_data(), + aten_op, + exir_op=[], + tosa_version="TOSA-1.0+INT", + ) + pipeline.pop_stage("check.quant_nodes") + pipeline.run() diff --git a/backends/arm/test/ops/test_ne.py b/backends/arm/test/ops/test_ne.py index 356886837e2..60f07ad9fdd 100644 --- a/backends/arm/test/ops/test_ne.py +++ b/backends/arm/test/ops/test_ne.py @@ -13,6 +13,7 @@ OpNotSupportedPipeline, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) @@ -194,3 +195,55 @@ def test_ne_scalar_u85_INT(test_module): run_on_fvp=True, ) pipeline.run() + + +@common.parametrize("test_module", test_data_tensor) +@common.SkipIfNoModelConverter +def 
test_ne_tensor_vgf_FP(test_module): + pipeline = VgfPipeline[input_t]( + test_module, + test_module.get_inputs(), + NotEqual.aten_op_Tensor, + NotEqual.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_tensor) +@common.SkipIfNoModelConverter +def test_ne_tensor_vgf_INT(test_module): + pipeline = VgfPipeline[input_t]( + test_module, + test_module.get_inputs(), + NotEqual.decomposed_ops, + NotEqual.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_scalar) +@common.SkipIfNoModelConverter +def test_ne_scalar_vgf_FP(test_module): + pipeline = VgfPipeline[input_t]( + test_module, + test_module.get_inputs(), + NotEqual.aten_op_Scalar, + NotEqual.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_scalar) +@common.SkipIfNoModelConverter +def test_ne_scalar_vgf_INT(test_module): + pipeline = VgfPipeline[input_t]( + test_module, + test_module.get_inputs(), + NotEqual.decomposed_ops, + NotEqual.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_neg.py b/backends/arm/test/ops/test_neg.py index 272e79e6403..395a4815b62 100644 --- a/backends/arm/test/ops/test_neg.py +++ b/backends/arm/test/ops/test_neg.py @@ -13,6 +13,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) input_t1 = Tuple[torch.Tensor] @@ -64,3 +65,25 @@ def test_neg_u85_INT(test_data: input_t1): Neg(), test_data, Neg.aten_op, Neg.exir_op, run_on_fvp=True ) pipeline.run() + + +@common.parametrize("test_data", Neg.test_data) +@common.SkipIfNoModelConverter +def test_neg_vgf_FP(test_data: input_t1): + pipeline = VgfPipeline[input_t1]( + Neg(), test_data, Neg.aten_op, Neg.exir_op, tosa_version="TOSA-1.0+FP" + ) + pipeline.run() + + +@common.parametrize("test_data", Neg.test_data) +@common.SkipIfNoModelConverter +def test_neg_vgf_INT(test_data: input_t1): + pipeline = VgfPipeline[input_t1]( + Neg(), + test_data, + Neg.aten_op, + Neg.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_ones.py b/backends/arm/test/ops/test_ones.py index c115e34d595..18204a8eaaa 100644 --- a/backends/arm/test/ops/test_ones.py +++ b/backends/arm/test/ops/test_ones.py @@ -12,6 +12,7 @@ OpNotSupportedPipeline, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) input_t = tuple[torch.Tensor] @@ -114,3 +115,27 @@ def test_ones_tosa_INT_not_delegated(test_data: test_data_t): OnesAdd(*init_data), input_data(), non_delegated_ops={}, quantize=True ) pipeline.run() + + +@common.parametrize("test_data", OnesAdd.test_data) +@common.SkipIfNoModelConverter +def test_ones_vgf_FP(test_data: test_data_t): + input_data, init_data = test_data + pipeline = VgfPipeline[input_t]( + OnesAdd(*init_data), input_data(), OnesAdd.aten_op, tosa_version="TOSA-1.0+FP" + ) + pipeline.run() + + +@common.parametrize("test_data", OnesAdd.test_data) +@common.SkipIfNoModelConverter +def test_ones_vgf_INT(test_data: test_data_t): + input_data, init_data = test_data + pipeline = VgfPipeline[input_t]( + OnesAdd(*init_data), + input_data(), + OnesAdd.aten_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.pop_stage("check.quant_nodes") + pipeline.run() diff --git a/backends/arm/test/ops/test_permute.py b/backends/arm/test/ops/test_permute.py index 1e043db550f..57f7f9603a1 100644 --- a/backends/arm/test/ops/test_permute.py +++ b/backends/arm/test/ops/test_permute.py @@ -17,6 +17,7 @@ EthosU85PipelineINT, 
TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) from torchvision.ops import Permute @@ -104,3 +105,31 @@ def test_permute_u85_INT(test_data: torch.Tensor): run_on_fvp=True, ) pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_permute_vgf_FP(test_data): + test_data, dims = test_data() + pipeline = VgfPipeline[input_t1]( + SimplePermute(dims=dims), + (test_data,), + aten_op, + exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_permute_vgf_INT(test_data): + test_data, dims = test_data() + pipeline = VgfPipeline[input_t1]( + SimplePermute(dims=dims), + (test_data,), + aten_op, + exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_pow.py b/backends/arm/test/ops/test_pow.py index 74c37195733..016c3e97265 100644 --- a/backends/arm/test/ops/test_pow.py +++ b/backends/arm/test/ops/test_pow.py @@ -13,6 +13,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) @@ -102,6 +103,19 @@ def test_pow_tensor_tensor_tosa_FP(test_data: Pow_TensorTensor.input_t): pipeline.run() +@common.parametrize("test_data", Pow_TensorTensor.test_data, x_fail, strict=False) +@common.SkipIfNoModelConverter +def test_pow_tensor_tensor_vgf_FP(test_data: Pow_TensorTensor.input_t): + pipeline = VgfPipeline[Pow_TensorTensor.input_t]( + Pow_TensorTensor(), + test_data(), + Pow_TensorTensor.aten_op, + Pow_TensorTensor.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + x_fail = { "exp_minus_three": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", "exp_minus_one": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", @@ -162,3 +176,31 @@ def test_pow_tensor_scalar_u85_INT(test_data: Pow_TensorScalar.input_t): run_on_fvp=True, ) pipeline.run() + + +@common.parametrize("test_data", Pow_TensorScalar.test_data, x_fail, strict=False) +@common.SkipIfNoModelConverter +def test_pow_tensor_scalar_vgf_FP(test_data: Pow_TensorScalar.input_t): + base, exp = test_data() + pipeline = VgfPipeline[Pow_TensorScalar.input_t]( + Pow_TensorScalar(exp), + (base,), + Pow_TensorScalar.aten_op, + Pow_TensorScalar.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", Pow_TensorScalar.test_data, x_fail, strict=False) +@common.SkipIfNoModelConverter +def test_pow_tensor_scalar_vgf_INT(test_data: Pow_TensorScalar.input_t): + base, exp = test_data() + pipeline = VgfPipeline[Pow_TensorScalar.input_t]( + Pow_TensorScalar(exp), + (base,), + Pow_TensorScalar.aten_op, + Pow_TensorScalar.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_reciprocal.py b/backends/arm/test/ops/test_reciprocal.py index dbc489aef2e..78edbb980e8 100644 --- a/backends/arm/test/ops/test_reciprocal.py +++ b/backends/arm/test/ops/test_reciprocal.py @@ -15,6 +15,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) input_t1 = Tuple[torch.Tensor] # Input x, Input y @@ -87,3 +88,27 @@ def test_reciprocal_u85_INT(test_data: torch.Tensor): symmetric_io_quantization=True, ) pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_reciprocal_vgf_FP(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Reciprocal(), + (test_data(),), + aten_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", 
test_data_suite) +@common.SkipIfNoModelConverter +def test_reciprocal_vgf_INT(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Reciprocal(), + (test_data(),), + aten_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_relu.py b/backends/arm/test/ops/test_relu.py index 2babf8963f7..0b29bc24e75 100644 --- a/backends/arm/test/ops/test_relu.py +++ b/backends/arm/test/ops/test_relu.py @@ -15,6 +15,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) input_t1 = Tuple[torch.Tensor] # Input x @@ -86,3 +87,29 @@ def test_relu_u85_INT(test_data: torch.Tensor): run_on_fvp=False, ) pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_relu_vgf_FP(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Relu(), + (test_data(),), + aten_op, + exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_relu_vgf_INT(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Relu(), + (test_data(),), + aten_op, + exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_repeat.py b/backends/arm/test/ops/test_repeat.py index e80f381786e..3236515b661 100644 --- a/backends/arm/test/ops/test_repeat.py +++ b/backends/arm/test/ops/test_repeat.py @@ -18,6 +18,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) input_t1 = Tuple[torch.Tensor, torch.Tensor] # Input x, Input y @@ -110,3 +111,29 @@ def test_repeat_u85_INT(test_data: Tuple): run_on_fvp=False, ) pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_repeat_vgf_FP(test_data: Tuple): + module, args = test_data() + pipeline = VgfPipeline[input_t1]( + module, + args, + module.aten_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_repeat_vgf_INT(test_data: Tuple): + module, args = test_data() + pipeline = VgfPipeline[input_t1]( + module, + args, + module.aten_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_round.py b/backends/arm/test/ops/test_round.py index 391c05a0962..a4fea455e4f 100644 --- a/backends/arm/test/ops/test_round.py +++ b/backends/arm/test/ops/test_round.py @@ -14,6 +14,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) input_t1 = Tuple[torch.Tensor] # Input x @@ -82,3 +83,29 @@ def test_round_u85_INT(test_data: torch.Tensor): exir_op, ) pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_round_vgf_FP(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Round(), + (test_data(),), + aten_op, + exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", test_data_suite) +@common.SkipIfNoModelConverter +def test_round_vgf_INT(test_data: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Round(), + (test_data(),), + [], + exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_rshift.py b/backends/arm/test/ops/test_rshift.py index ac4c3337980..e97bfb840ae 100644 --- a/backends/arm/test/ops/test_rshift.py +++ b/backends/arm/test/ops/test_rshift.py @@ -14,6 +14,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) scalar_input_t = 
tuple[torch.Tensor, int] @@ -67,8 +68,13 @@ def forward(self, x: torch.Tensor, shift: torch.Tensor): return x.bitwise_right_shift(shift) +################## +## RshiftScalar ## +################## + + @common.parametrize("test_data", RshiftScalar.test_data) -def test_rshift_scalar_tosa_FP_scalar(test_data): +def test_bitwise_right_shift_scalar_tosa_FP_scalar(test_data): TosaPipelineFP[scalar_input_t]( RshiftScalar(), test_data(), @@ -120,8 +126,40 @@ def test_bitwise_right_shift_tensor_u85_INT_scalar(test_data): pipeline.run() +@common.parametrize("test_data", RshiftScalar.test_data) +@common.SkipIfNoModelConverter +def test_bitwise_right_shift_scalar_vgf_FP_scalar(test_data): + pipeline = VgfPipeline[scalar_input_t]( + RshiftScalar(), + test_data(), + RshiftScalar.torch_op_FP, + RshiftScalar.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", RshiftScalar.test_data) +@common.SkipIfNoModelConverter +def test_bitwise_right_shift_tensor_vgf_INT_scalar(test_data): + pipeline = VgfPipeline[scalar_input_t]( + RshiftScalar(), + test_data(), + RshiftScalar.torch_op_INT, + RshiftScalar.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.pop_stage("check.quant_nodes") + pipeline.run() + + +################## +## RshiftTensor ## +################## + + @common.parametrize("test_data", RshiftTensor.test_data) -def test_rshift_scalar_tosa_FP(test_data): +def test_bitwise_right_shift_tensor_tosa_FP(test_data): TosaPipelineFP[scalar_input_t]( RshiftTensor(), test_data(), @@ -171,3 +209,30 @@ def test_bitwise_right_shift_tensor_u85_INT(test_data): ) pipeline.pop_stage("check.quant_nodes") pipeline.run() + + +@common.parametrize("test_data", RshiftTensor.test_data) +@common.SkipIfNoModelConverter +def test_bitwise_right_shift_tensor_vgf_FP(test_data): + pipeline = VgfPipeline[tensor_input_t]( + RshiftTensor(), + test_data(), + RshiftTensor.torch_op, + RshiftTensor.exir_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_data", RshiftTensor.test_data) +@common.SkipIfNoModelConverter +def test_bitwise_right_shift_tensor_vgf_INT(test_data): + pipeline = VgfPipeline[tensor_input_t]( + RshiftTensor(), + test_data(), + RshiftTensor.torch_op, + RshiftTensor.exir_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.pop_stage("check.quant_nodes") + pipeline.run() diff --git a/backends/arm/test/ops/test_rsqrt.py b/backends/arm/test/ops/test_rsqrt.py index 65ea46f247c..d146a83287e 100644 --- a/backends/arm/test/ops/test_rsqrt.py +++ b/backends/arm/test/ops/test_rsqrt.py @@ -16,6 +16,7 @@ EthosU85PipelineINT, TosaPipelineFP, TosaPipelineINT, + VgfPipeline, ) @@ -81,3 +82,27 @@ def test_rsqrt_u85_INT(test_tensor: torch.Tensor): run_on_fvp=True, ) pipeline.run() + + +@common.parametrize("test_tensor", Rsqrt.test_parameters) +@common.SkipIfNoModelConverter +def test_rsqrt_vgf_FP(test_tensor: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Rsqrt(), + test_tensor(), + aten_op, + tosa_version="TOSA-1.0+FP", + ) + pipeline.run() + + +@common.parametrize("test_tensor", Rsqrt.test_parameters) +@common.SkipIfNoModelConverter +def test_rsqrt_vgf_INT(test_tensor: torch.Tensor): + pipeline = VgfPipeline[input_t1]( + Rsqrt(), + test_tensor(), + aten_op, + tosa_version="TOSA-1.0+INT", + ) + pipeline.run()
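

Note on the pattern above: every file touched by this patch gains the same pair of tests, an FP variant compiled for "TOSA-1.0+FP" and an INT variant compiled for "TOSA-1.0+INT", both gated behind common.SkipIfNoModelConverter. The standalone sketch below distills that pattern for reference only; VgfPipeline, common.parametrize and common.SkipIfNoModelConverter are the names the diff itself imports, while DemoOp, its operator strings and its test data are hypothetical placeholders and not part of the patch.

# Hedged sketch of the FP/INT VgfPipeline test pattern repeated throughout this patch.
# DemoOp and its aten/exir operator strings are placeholders for illustration only.
from typing import Tuple

import torch

from executorch.backends.arm.test import common
from executorch.backends.arm.test.tester.test_pipeline import VgfPipeline

input_t = Tuple[torch.Tensor]


class DemoOp(torch.nn.Module):
    # Placeholder operator identifiers; real test files define their own.
    aten_op = "torch.ops.aten.relu.default"
    exir_op = "executorch_exir_dialects_edge__ops_aten_relu_default"
    test_data = {"rand": lambda: (torch.rand(1, 3, 8, 8),)}

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(x)


@common.parametrize("test_data", DemoOp.test_data)
@common.SkipIfNoModelConverter
def test_demo_op_vgf_FP(test_data: input_t):
    # FP flavour: lower for the TOSA-1.0+FP profile and run through the VGF pipeline.
    pipeline = VgfPipeline[input_t](
        DemoOp(),
        test_data(),
        DemoOp.aten_op,
        DemoOp.exir_op,
        tosa_version="TOSA-1.0+FP",
    )
    pipeline.run()


@common.parametrize("test_data", DemoOp.test_data)
@common.SkipIfNoModelConverter
def test_demo_op_vgf_INT(test_data: input_t):
    # INT flavour: quantized path, lowered for the TOSA-1.0+INT profile.
    pipeline = VgfPipeline[input_t](
        DemoOp(),
        test_data(),
        DemoOp.aten_op,
        DemoOp.exir_op,
        tosa_version="TOSA-1.0+INT",
    )
    pipeline.run()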