We read every piece of feedback and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent ddc8cd5 · commit f696fbd. Copy full SHA for f696fbd
flash_dmattn/__init__.py
@@ -7,11 +7,11 @@
7
8
# Import CUDA functions when available
9
try:
10
- from flash_dmattn.flash_dmattn_interface import flash_dmattn_func
+ from flash_dmattn.flash_dmattn_interface import flash_dmattn_func, flash_dmattn_varlen_func
11
CUDA_AVAILABLE = True
12
except ImportError:
13
CUDA_AVAILABLE = False
14
- flash_dmattn_func = None
+ flash_dmattn_func, flash_dmattn_varlen_func = None, None
15
16
# Import Triton functions when available
17
@@ -89,6 +89,7 @@ def flash_dmattn_func_auto(backend: Optional[str] = None, **kwargs):
89
"TRITON_AVAILABLE",
90
"FLEX_AVAILABLE",
91
"flash_dmattn_func",
92
+ "flash_dmattn_varlen_func",
93
"triton_dmattn_func",
94
"flex_dmattn_func",
95
"get_available_backends",
0 commit comments