Commit 54edeca

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 37051e5 commit 54edeca

File tree: 1 file changed (+4, −4)

tests/test_model.py

Lines changed: 4 additions & 4 deletions
@@ -33,8 +33,6 @@
 from transformers.models.qwen3 import Qwen3Config, Qwen3ForCausalLM
 from transformers.models.qwen3_moe import Qwen3MoeConfig, Qwen3MoeForCausalLM
 
-import litgpt.attention
-import litgpt.attention_utils
 import litgpt.config as config_module
 from litgpt import GPT, Config
 from litgpt.model import CausalSelfAttention
@@ -1470,7 +1468,8 @@ def assert_sdpa_backend(original_fn, query, k_and_v, mask, return_scores):
         pytest.xfail()
 
     model.mha.scaled_dot_product_attention = partial(
-        assert_sdpa_backend, model.mha.scaled_dot_product_attention,
+        assert_sdpa_backend,
+        model.mha.scaled_dot_product_attention,
     )
 
     if SUPPORTS_FLASH_ATTENTION:
@@ -1522,7 +1521,8 @@ def assert_sdpa_backend(original_fn, query, k_and_v, mask, return_scores):
         pytest.xfail()
 
     model.mha.scaled_dot_product_attention = partial(
-        assert_sdpa_backend, model.mha.scaled_dot_product_attention,
+        assert_sdpa_backend,
+        model.mha.scaled_dot_product_attention,
     )
 
     if SUPPORTS_FLASH_ATTENTION:
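Both reformatted hunks touch the same test pattern: model.mha.scaled_dot_product_attention is rebound to a functools.partial that freezes the original method as the wrapper's original_fn argument, so every attention call is intercepted for backend assertions and then delegated. Below is a minimal, self-contained sketch of that wrap-and-delegate pattern; the ToyAttention class and the simplified signatures are illustrative stand-ins, not litgpt code.

from functools import partial


class ToyAttention:
    def scaled_dot_product_attention(self, query, k_and_v):
        # Stand-in for the real attention computation.
        return query + k_and_v


def assert_sdpa_backend(original_fn, query, k_and_v):
    # Checks run on every call before delegating to the frozen original.
    assert query is not None
    return original_fn(query, k_and_v)


mha = ToyAttention()
# partial() binds the original bound method as original_fn, so the rebound
# attribute keeps the same call signature as the method it replaces.
mha.scaled_dot_product_attention = partial(
    assert_sdpa_backend,
    mha.scaled_dot_product_attention,
)

assert mha.scaled_dot_product_attention(1, 2) == 3  # wrapper ran, then delegated

Because the assignment creates an instance attribute, the class method stays intact: the partial still holds the original bound method, so delegation works and other instances are unaffected.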
