We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 1dd9d04 commit d36a280Copy full SHA for d36a280
onnxscript/function_libs/torch_lib/ops/core.py
@@ -8750,6 +8750,14 @@ def aten_sym_size(self: TensorType, dim: int = 0) -> INT64:
8750
return op.Squeeze(op.Shape(self, end=dim + 1, start=dim))
8751
8752
8753
@torch_op("aten::sym_storage_offset", trace_only=True)
def aten_sym_storage_offset(self: TensorType, dim: int = 0) -> INT64:
    """sym_storage_offset(Tensor self) -> SymInt"""
    # ONNX has no concept of a storage offset: tensor values are always
    # addressed densely from element 0, so the symbolic offset is a
    # constant zero and the output of this function is effectively unused.
    #
    # NOTE(review): `dim` is not part of the aten schema for
    # sym_storage_offset (it appears to be copy-paste residue from
    # aten_sym_size); it is ignored here and kept only so the signature
    # stays backward-compatible for any existing callers.
    return op.Constant(value_int=0)
8760
8761
def aten_symeig(
8762
self: TensorType, eigenvectors: bool = False, upper: bool = True
8763
) -> tuple[TensorType, TensorType]:
0 commit comments