22 changes: 22 additions & 0 deletions tritonbench/operators/flash_attention/operator.py
@@ -136,6 +136,18 @@
except (ImportError, IOError, AttributeError, TypeError):
    HAS_XFORMERS = False

# [Optional] TLX backend
try:
    import triton.language.extra.tlx as tlx

    from .tlx_attn_ws_pipelined_pingpong_hopper import (
        attention as tlx_attn_ws_pipelined_pingpong_hopper,
    )

    HAS_TLX = True
except (ImportError, IOError, AttributeError):
    HAS_TLX = False

from typing import Any, Generator, List

from tritonbench.utils.input import input_filter
@@ -299,6 +311,16 @@ def triton_tutorial_flash_v2_tma(
            q, k, v, self.causal, self.sm_scale, "tma"
        )

    @register_benchmark(enabled=HAS_TLX)
Contributor:
Is the intention to run this on CI? If you just want to trigger the test directly, you can do so with enabled=False, without any risk of it running on hardware you don't expect (e.g. MI300), which it will by default right now.
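A minimal sketch of that alternative, assuming the module-level HAS_TLX flag set in the hunk above; the _is_hopper helper is hypothetical and not part of this diff:

import torch

# Hypothetical guard (not in this PR): only auto-enable the TLX benchmark on Hopper,
# so a TLX import that happens to succeed elsewhere (e.g. on MI300) does not schedule it.
def _is_hopper() -> bool:
    # (9, 0) is sm_90, i.e. Hopper; CPU-only builds and other devices fail the check.
    return torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)

HAS_TLX = HAS_TLX and _is_hopper()

# Or, as suggested above, keep the benchmark opt-in entirely:
# @register_benchmark(enabled=False)

Either way the kernel stays available, but it no longer runs by default on unexpected hardware.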

    def tlx_attn_ws_pipelined_pingpong_hopper(
        self,
        q: torch.Tensor,
        k: torch.Tensor,
        v: torch.Tensor,
    ) -> Callable:
        # TLX flash attention with Hopper optimizations
        return lambda: tlx_attn_ws_pipelined_pingpong_hopper(q, k, v, self.sm_scale)
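If the benchmark is left opt-in, it can still be exercised directly. The exact flags below are an assumption about the tritonbench CLI rather than something shown in this diff, but an invocation would look roughly like:

python run.py --op flash_attention --only tlx_attn_ws_pipelined_pingpong_hopper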

    def xformers_preprocess(
        self,
        q: torch.Tensor,