
Commit 66aaf82

Fix test_llm_config (pytorch#11977)
Summary: Pull Request resolved: pytorch#11977

Reviewed By: GregoryComer

Differential Revision: D77321057
1 parent: b677429


examples/models/llama/config/test_llm_config.py

Lines changed: 4 additions & 4 deletions
@@ -41,7 +41,7 @@ def test_local_global_attention_without_kv(self):
 
     def test_invalid_export_config_context_length(self):
         with self.assertRaises(ValueError):
-            ExportConfig(max_seq_length=128, max_context_length=256)
+            ExportConfig(max_seq_length=256, max_context_length=128)
 
     def test_invalid_qmode(self):
         with self.assertRaises(ValueError):
@@ -84,8 +84,8 @@ def test_valid_llm_config(self):
                 local_global_attention="[16, 32]",
             ),
             export=ExportConfig(
-                max_seq_length=256,
-                max_context_length=128,
+                max_seq_length=128,
+                max_context_length=256,
                 output_dir="/tmp/export",
                 output_name="model.pte",
             ),
@@ -94,7 +94,7 @@ def test_valid_llm_config(self):
             backend=BackendConfig(
                 xnnpack=XNNPackConfig(enabled=False),
                 coreml=CoreMLConfig(
-                    enabled=True, ios=17, compute_units=CoreMLComputeUnit.ALL
+                    enabled=True, ios=17, compute_units=CoreMLComputeUnit.cpu_only
                 ),
            ),
        )
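
The swapped length arguments read naturally if ExportConfig enforces that max_seq_length may not exceed max_context_length: the invalid-config test now passes a sequence length larger than the context length, while the valid-config test uses the opposite ordering. The following is a minimal sketch of that assumed check, not the actual executorch implementation (the real ExportConfig has more fields and may validate differently):

# Hypothetical sketch of the constraint the test exercises; the real
# executorch ExportConfig may implement this check differently.
from dataclasses import dataclass

@dataclass
class ExportConfig:
    max_seq_length: int = 128
    max_context_length: int = 128
    output_dir: str = "."
    output_name: str = "model.pte"

    def __post_init__(self):
        # A sequence cannot be longer than the context window it runs in.
        if self.max_seq_length > self.max_context_length:
            raise ValueError("max_seq_length must be <= max_context_length")

# With that rule:
#   ExportConfig(max_seq_length=256, max_context_length=128)  # raises ValueError
#   ExportConfig(max_seq_length=128, max_context_length=256)  # valid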
