Skip to content

Commit e4a2c30

Browse files
committed
typo
1 parent 13e33da commit e4a2c30

File tree

2 files changed

+11
-6
lines changed

2 files changed

+11
-6
lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "boltz"
-version = "2.1.0"
+version = "2.1.1"
 requires-python = ">=3.10,<3.13"
 description = "Boltz"
 readme = "README.md"

src/boltz/model/layers/triangular_attention/attention.py

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
 # limitations under the License.

 from functools import partial, partialmethod
-from typing import List, Optional
+from typing import Optional

 import torch
 import torch.nn as nn
@@ -61,7 +61,9 @@ def __init__(
     def _chunk(
         self,
         x: torch.Tensor,
-        biases: list[torch.Tensor],
+        tri_bias: torch.Tensor,
+        mask_bias: torch.Tensor,
+        mask: torch.Tensor,
         chunk_size: int,
         use_kernels: bool = False,
     ) -> torch.Tensor:
@@ -87,7 +89,9 @@ def _chunk(
         mha_inputs = {
             "q_x": x,
             "kv_x": x,
-            "biases": biases,
+            "tri_bias": tri_bias,
+            "mask_bias": mask_bias,
+            "mask": mask,
         }

         return chunk_layer(
@@ -151,10 +155,11 @@ def forward(
             triangle_bias = triangle_bias.unsqueeze(-4)

         if chunk_size is not None and not use_kernels:
-            biases = [triangle_bias, mask_bias]
             x = self._chunk(
                 x,
-                biases,
+                triangle_bias,
+                mask_bias,
+                mask,
                 chunk_size,
                 use_kernels=use_kernels,
             )

0 commit comments

Comments
 (0)