
Commit e852082

data copy ops (#15164)

Andrew Grebenisan authored and facebook-github-bot committed

Summary: Reference implementations for data copies.

Reviewed By: skrtskrtfb
Differential Revision: D84674296

1 parent ed94c95

2 files changed: +21 −4 lines changed


backends/cadence/aot/ops_registrations.py

Lines changed: 1 addition & 4 deletions
@@ -55,8 +55,6 @@ def _validate_ref_impl_exists() -> None:
     _WARN_ONLY = {
         "cadence::quantized_w8a32_linear",
         "cadence::quantized_add",  # We should only support per_tensor variant, should remove
-        "cadence::idma_store",
-        "cadence::idma_load",
         "cadence::_softmax_f32_f32",
         "cadence::requantize",  # We should only support per_tensor variant, should remove
         "cadence::quantized_softmax.per_tensor",
@@ -70,13 +68,11 @@ def _validate_ref_impl_exists() -> None:
         "cadence::quantized_relu",  # We should only support per_tensor variant, should remove
         "cadence::linalg_svd",
         "cadence::quantized_conv2d_nhwc",  # We should only support per_tensor variant, should remove
-        "cadence::idma_copy",
         "cadence::quantize_per_tensor_asym16u",
         "cadence::dequantize_per_tensor_asym8s",
         "cadence::quantize_per_tensor_asym16s",
         "cadence::dequantize_per_tensor_asym16s",
         "cadence::quantized_softmax",
-        "cadence::idma_wait",
         "cadence::quantized_w8a32_gru",
         "cadence::quantized_layer_norm",  # We should only support per_tensor variant, should remove
     }
@@ -2003,6 +1999,7 @@ def im2row_per_tensor_meta(
     )
     return input.new_empty(output_size, dtype=input.dtype)
 
+
 @register_fake("cadence::linalg_svd")
 def linalg_svd_meta(
     A: torch.Tensor,
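For context, `_WARN_ONLY` reads as an escape hatch in `_validate_ref_impl_exists`: ops listed there only warn when no reference implementation is registered, while unlisted ops fail validation outright. Removing the four `idma_*` entries is therefore what forces the reference implementations added below to exist. A minimal, self-contained sketch of that warn-vs-fail pattern (the two registry sets here are illustrative stand-ins, not the real op registry):

import logging

# Hypothetical sketch of the check implied by _WARN_ONLY; only the set name
# and the cadence op names come from the diff, the rest is illustrative.
REGISTERED_OPS = {"cadence::idma_copy", "cadence::quantized_add"}
REF_IMPLS = {"cadence::idma_copy"}
_WARN_ONLY = {"cadence::quantized_add"}

def _validate_ref_impl_exists() -> None:
    for op in sorted(REGISTERED_OPS):
        if op in REF_IMPLS:
            continue
        if op in _WARN_ONLY:
            logging.warning("No reference implementation for %s", op)
        else:
            raise RuntimeError(f"Missing reference implementation for {op}")

_validate_ref_impl_exists()  # warns for cadence::quantized_add, raises for nothing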

backends/cadence/aot/ref_implementations.py

Lines changed: 20 additions & 0 deletions
@@ -1636,3 +1636,23 @@ def quantized_embedding_byte(
     )
 
     return weight[indices]
+
+
+@impl_tracked(m, "idma_copy")
+def idma_copy(src: torch.Tensor, task_num: int = 0, channel: int = 0) -> torch.Tensor:
+    return src.clone()
+
+
+@impl_tracked(m, "idma_store")
+def idma_store(src: torch.Tensor, task_num: int = 0, channel: int = 0) -> torch.Tensor:
+    return src.clone()
+
+
+@impl_tracked(m, "idma_load")
+def idma_load(src: torch.Tensor, task_num: int = 0, channel: int = 0) -> torch.Tensor:
+    return src.clone()
+
+
+@impl_tracked(m, "idma_wait")
+def idma_wait(src: torch.Tensor, task_num: int = 0, channel: int = 0) -> torch.Tensor:
+    return src.clone()
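All four reference implementations model an iDMA transfer as a plain data copy: the output matches the input element for element but owns separate storage (`task_num` and `channel` are accepted but unused at the reference level). A standalone sanity check of that contract, using plain torch rather than the `impl_tracked` registration from the diff:

import torch

def idma_copy(src: torch.Tensor, task_num: int = 0, channel: int = 0) -> torch.Tensor:
    # Same body as the reference implementation above: a pure copy.
    return src.clone()

src = torch.randn(4, 8)
out = idma_copy(src)
assert torch.equal(out, src)             # identical values...
assert out.data_ptr() != src.data_ptr()  # ...in independent storage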
