
Commit 91d2f46

Bump version to 0.4.2
1 parent 3a904f0 commit 91d2f46

3 files changed (+83, -32 lines)

.pre-commit-config.yaml

Lines changed: 1 addition & 0 deletions
@@ -16,3 +16,4 @@ repos:
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix]
+      - id: ruff-format
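
The added hook runs ruff's formatter in addition to the existing lint hook. As a minimal, hypothetical Python sketch (not output from this repository), the formatter's default style prefers double quotes and space indentation, which accounts for most of the quote churn in fla/__init__.py below:

# Before `ruff format` (hypothetical input):
#     __all__ = ['ABCAttention', 'ABCModel']
# After `ruff format` (double quotes are the formatter's default quote style):
__all__ = ["ABCAttention", "ABCModel"]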

fla/__init__.py

Lines changed: 76 additions & 27 deletions
@@ -1,3 +1,4 @@
+# Copyright (c) 2023-2025, Songlin Yang, Yu Zhang

 from fla.layers import (
     ABCAttention,
@@ -79,32 +80,80 @@
 )

 __all__ = [
-    'ABCAttention', 'ABCForCausalLM', 'ABCModel',
-    'Attention', 'TransformerForCausalLM', 'TransformerModel',
-    'BasedLinearAttention',
-    'BitAttention', 'BitNetForCausalLM', 'BitNetModel',
-    'Comba', 'CombaForCausalLM', 'CombaModel',
-    'DeltaNet', 'DeltaNetForCausalLM', 'DeltaNetModel',
-    'DeltaFormerAttention', 'DeltaFormerForCausalLM', 'DeltaFormerModel',
-    'GatedDeltaNet', 'GatedDeltaNetForCausalLM', 'GatedDeltaNetModel',
-    'GatedDeltaProduct', 'GatedDeltaProductForCausalLM', 'GatedDeltaProductModel',
-    'GatedLinearAttention', 'GLAForCausalLM', 'GLAModel',
-    'GatedSlotAttention', 'GSAForCausalLM', 'GSAModel',
-    'HGRNAttention', 'HGRNForCausalLM', 'HGRNModel',
-    'HGRN2Attention', 'HGRN2ForCausalLM', 'HGRN2Model',
-    'LightNetAttention', 'LightNetForCausalLM', 'LightNetModel',
-    'LinearAttention', 'LinearAttentionForCausalLM', 'LinearAttentionModel',
-    'LogLinearMamba2', 'LogLinearMamba2ForCausalLM', 'LogLinearMamba2Model',
-    'MesaNet', 'MesaNetForCausalLM', 'MesaNetModel',
-    'MomAttention', 'MomForCausalLM', 'MomModel',
-    'MultiheadLatentAttention', 'MLAForCausalLM', 'MLAModel',
-    'MultiScaleRetention', 'RetNetForCausalLM', 'RetNetModel',
-    'NativeSparseAttention', 'NSAForCausalLM', 'NSAModel',
-    'PaTHAttention', 'PaTHAttentionForCausalLM', 'PaTHAttentionModel',
-    'ReBasedLinearAttention',
-    'RodimusAttention', 'RodimusForCausalLM', 'RodimusModel',
-    'RWKV6Attention', 'RWKV6ForCausalLM', 'RWKV6Model',
-    'RWKV7Attention', 'RWKV7ForCausalLM', 'RWKV7Model',
+    "ABCAttention",
+    "ABCForCausalLM",
+    "ABCModel",
+    "Attention",
+    "BasedLinearAttention",
+    "BitAttention",
+    "BitNetForCausalLM",
+    "BitNetModel",
+    "Comba",
+    "CombaForCausalLM",
+    "CombaModel",
+    "DeltaFormerAttention",
+    "DeltaFormerForCausalLM",
+    "DeltaFormerModel",
+    "DeltaNet",
+    "DeltaNetForCausalLM",
+    "DeltaNetModel",
+    "GLAForCausalLM",
+    "GLAModel",
+    "GSAForCausalLM",
+    "GSAModel",
+    "GatedDeltaNet",
+    "GatedDeltaNetForCausalLM",
+    "GatedDeltaNetModel",
+    "GatedDeltaProduct",
+    "GatedDeltaProductForCausalLM",
+    "GatedDeltaProductModel",
+    "GatedLinearAttention",
+    "GatedSlotAttention",
+    "HGRN2Attention",
+    "HGRN2ForCausalLM",
+    "HGRN2Model",
+    "HGRNAttention",
+    "HGRNForCausalLM",
+    "HGRNModel",
+    "LightNetAttention",
+    "LightNetForCausalLM",
+    "LightNetModel",
+    "LinearAttention",
+    "LinearAttentionForCausalLM",
+    "LinearAttentionModel",
+    "LogLinearMamba2",
+    "LogLinearMamba2ForCausalLM",
+    "LogLinearMamba2Model",
+    "MLAForCausalLM",
+    "MLAModel",
+    "MesaNet",
+    "MesaNetForCausalLM",
+    "MesaNetModel",
+    "MomAttention",
+    "MomForCausalLM",
+    "MomModel",
+    "MultiScaleRetention",
+    "MultiheadLatentAttention",
+    "NSAForCausalLM",
+    "NSAModel",
+    "NativeSparseAttention",
+    "PaTHAttention",
+    "PaTHAttentionForCausalLM",
+    "PaTHAttentionModel",
+    "RWKV6Attention",
+    "RWKV6ForCausalLM",
+    "RWKV6Model",
+    "RWKV7Attention",
+    "RWKV7ForCausalLM",
+    "RWKV7Model",
+    "ReBasedLinearAttention",
+    "RetNetForCausalLM",
+    "RetNetModel",
+    "RodimusAttention",
+    "RodimusForCausalLM",
+    "RodimusModel",
+    "TransformerForCausalLM",
+    "TransformerModel",
 ]

-__version__ = '0.4.1'
+__version__ = "0.4.2"

pyproject.toml

Lines changed: 6 additions & 5 deletions
@@ -33,10 +33,6 @@ Repository = "https://github.com/fla-org/flash-linear-attention"
 [build-system]
 requires = ["setuptools>=45", "wheel"]

-[tool.ruff.lint.isort]
-known-first-party = ["fla"]
-force-sort-within-sections = false
-
 [tool.pytest.ini_options]
 log_cli = true
 log_cli_level = "INFO"
@@ -50,6 +46,7 @@ line-length = 127

 [tool.ruff.format]
 docstring-code-format = true
+indent-style = "space"

 [tool.ruff.lint]
 select = [
@@ -87,10 +84,14 @@ ignore = [
 ]
 extend-select = ["RUF022"]

+[tool.ruff.lint.isort]
+known-first-party = ["fla"]
+force-sort-within-sections = false
+
 [tool.ruff.lint.per-file-ignores]
 "__init__.py" = ["F401"]
 "fla/utils.py" = ["TCH004"]
 "evals/harness.py" = ["I", "TCH"]
 "tests/*/*.py" = ["UP030"]
 "scripts/*.py" = ["C414"]
-"egacy/training/flame/*.py" = ["C408"]
+"legacy/training/flame/*.py" = ["C408"]

0 commit comments
