@@ -17,10 +17,11 @@
 import torch
 from executorch.backends.arm._passes import InsertCastForOpsWithInt64InputPass

-from executorch.backends.arm.test import conftest
+from executorch.backends.arm.test import common, conftest
 from executorch.backends.arm.test.tester.test_pipeline import (
     TosaPipelineFP,
     TosaPipelineINT,
+    VgfPipeline,
 )
 from executorch.examples.models.llama.export_llama_lib import (
     build_args_parser,
@@ -131,3 +132,42 @@ def test_llama_tosa_INT():
             use_to_edge_transform_and_lower=True,
         )
         pipeline.run()
+
+
+@common.SkipIfNoModelConverter
+def test_llama_vgf_FP():
+    llama_model, llama_inputs, llama_meta = TestLlama().prepare_model()
+
+    if llama_model is None or llama_inputs is None:
+        pytest.skip("Missing model and/or input files")
+
+    with torch.no_grad():
+        pipeline = VgfPipeline[input_t](
+            llama_model,
+            llama_inputs,
+            aten_op=[],
+            exir_op=[],
+            tosa_version="TOSA-1.0+FP",
+            use_to_edge_transform_and_lower=True,
+        )
+        pipeline.run()
+
+
+@common.SkipIfNoModelConverter
+def test_llama_vgf_INT():
+    llama_model, llama_inputs, llama_meta = TestLlama().prepare_model()
+
+    if llama_model is None or llama_inputs is None:
+        pytest.skip("Missing model and/or input files")
+
+    with torch.no_grad():
+        pipeline = VgfPipeline[input_t](
+            llama_model,
+            llama_inputs,
+            aten_op=[],
+            exir_op=[],
+            tosa_version="TOSA-1.0+INT",
+            use_to_edge_transform_and_lower=True,
+            transform_passes=[InsertCastForOpsWithInt64InputPass()],
+        )
+        pipeline.run()