1 parent c1bfdfc commit 53af800
onnxscript/function_libs/torch_lib/ops/core.py
@@ -8753,6 +8753,14 @@ def aten_sym_size(self: TensorType, dim: int = 0) -> INT64:
     return op.Squeeze(op.Shape(self, end=dim + 1, start=dim))


+@torch_op("aten::sym_storage_offset", trace_only=True)
+def aten_sym_storage_offset(self: TensorType, dim: int = 0) -> INT64:
+    """sym_storage_offset(Tensor self, int dim) -> SymInt"""
+    # Storage offset has no counterpart in the ONNX world.
+    # The output of this function is not used.
+    return op.Constant(value_int=0)
+
+
 def aten_symeig(
     self: TensorType, eigenvectors: bool = False, upper: bool = True
 ) -> tuple[TensorType, TensorType]:
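
For context, a minimal standalone sketch of the behavior this op encodes: whatever the input tensor is, the graph simply emits a constant 0, because ONNX has no notion of a storage offset. This assumes onnxscript with opset 18 and eager-mode evaluation; `demo_sym_storage_offset` is an illustrative name used only here, not part of the commit or of torch_lib.

# Hypothetical sketch, not part of the commit: mirrors the new op by
# ignoring its input and always returning a constant 0.
import numpy as np

from onnxscript import FLOAT, INT64, script
from onnxscript import opset18 as op


@script()
def demo_sym_storage_offset(x: FLOAT[...]) -> INT64:
    # The input is ignored, matching aten_sym_storage_offset above.
    return op.Constant(value_int=0)


# Eager-mode check: prints 0 for any input tensor.
print(demo_sym_storage_offset(np.zeros((2, 3), dtype=np.float32)))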