
Commit a60ef90

Fix functionalize failures (#911)

We just needed to update the expecttests, because the graph now represents operator overloads explicitly (e.g. aten.view_copy.default instead of aten.view_copy).

1 parent 46c9b89 commit a60ef90
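For context, the rename these expecttests pick up is the overload-packet vs. overload distinction in torch.ops: aten.view_copy is the packet, aten.view_copy.default is one concrete overload, and the traced graphs now print the latter. A minimal sketch of the distinction (illustration only, not part of this commit):

    import torch

    packet = torch.ops.aten.view_copy            # OpOverloadPacket, prints as aten.view_copy
    overload = torch.ops.aten.view_copy.default  # OpOverload, prints as aten.view_copy.default

    x = torch.arange(8.0)
    # Both are callable and compute the same result; only the name recorded
    # in a traced graph (and hence the expected test output) differs.
    assert torch.equal(packet(x, [4, 2]), overload(x, [4, 2]))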

File tree

1 file changed (+24, -24 lines)

test/test_eager_transforms.py

Lines changed: 24 additions & 24 deletions
@@ -3002,12 +3002,12 @@ def f(x: torch.Tensor) -> torch.Tensor:
 
 
 def forward(self, x_1) -> torch.Tensor:
-    view_copy = torch.ops.aten.view_copy(x_1, [4, 2])
+    view_copy_default = torch.ops.aten.view_copy.default(x_1, [4, 2])
     _tensor_constant0 = self._tensor_constant0
-    add = torch.ops.aten.add(view_copy, _tensor_constant0); view_copy = _tensor_constant0 = None
-    view_copy_1 = torch.ops.aten.view_copy(add, [4, 2]); add = None
-    copy_ = torch.ops.aten.copy_(x_1, view_copy_1); x_1 = None
-    return view_copy_1
+    add_tensor = torch.ops.aten.add.Tensor(view_copy_default, _tensor_constant0); view_copy_default = _tensor_constant0 = None
+    view_copy_default_1 = torch.ops.aten.view_copy.default(add_tensor, [4, 2]); add_tensor = None
+    copy__default = torch.ops.aten.copy_.default(x_1, view_copy_default_1); x_1 = None
+    return view_copy_default_1
     """)
 
     def test_functionalize_fx_transpose_simple(self, device):
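The sketch below is an illustration, not copied from the test file: a plausible way a graph like the first hunk's gets produced and checked. The body of f, the shapes, and the remove= setting are assumptions inferred from the expected output, and import paths vary by version.

    import torch
    # In the functorch-repo era these roughly lived here; newer PyTorch exposes
    # them under torch.func and torch.fx.experimental.proxy_tensor.
    from functorch import make_fx
    from functorch.experimental import functionalize

    def f(x: torch.Tensor) -> torch.Tensor:
        tmp = torch.ones(2)   # real tensor created during tracing; lifted as _tensor_constant0
        y = x.view(4, 2)      # with remove='mutations_and_views' this traces as aten.view_copy.default
        y.add_(tmp)           # the in-place add traces as aten.add.Tensor plus a trailing aten.copy_.default
        return x

    gm = make_fx(functionalize(f, remove='mutations_and_views'))(torch.zeros(4, 2))
    print(gm.code)  # ops are printed with explicit overloads, matching the updated expected string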
@@ -3021,8 +3021,8 @@ def f(x: torch.Tensor) -> torch.Tensor:
 
 
 def forward(self, x_1) -> torch.Tensor:
-    transpose_copy = torch.ops.aten.transpose_copy(x_1, 1, 0); x_1 = None
-    return transpose_copy
+    transpose_copy_int = torch.ops.aten.transpose_copy.int(x_1, 1, 0); x_1 = None
+    return transpose_copy_int
     """)
 
     def test_functionalize_fx_out_op(self, device):
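Same caveats for the transpose hunk: a transpose-only function is presumably all this test traces, and the only change is that the graph now names the int-dims overload of the copy variant. Reusing the imports from the sketch above:

    def g(x: torch.Tensor) -> torch.Tensor:
        return x.transpose(1, 0)

    gm = make_fx(functionalize(g, remove='mutations_and_views'))(torch.zeros(3, 3))
    # gm.code now contains torch.ops.aten.transpose_copy.int(x_1, 1, 0)
    # instead of the bare packet name torch.ops.aten.transpose_copy(x_1, 1, 0).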
@@ -3041,12 +3041,12 @@ def f(inpt: torch.Tensor) -> torch.Tensor:
 
 
 def forward(self, inpt_1) -> torch.Tensor:
-    add = torch.ops.aten.add(inpt_1, inpt_1); inpt_1 = None
-    view_copy = torch.ops.aten.view_copy(add, [4])
-    view_copy_1 = torch.ops.aten.view_copy(add, [4]); add = None
-    add_1 = torch.ops.aten.add(view_copy_1, 1); view_copy_1 = None
-    view_copy_2 = torch.ops.aten.view_copy(add_1, [4]); add_1 = None
-    return view_copy_2
+    add_tensor = torch.ops.aten.add.Tensor(inpt_1, inpt_1); inpt_1 = None
+    view_copy_default = torch.ops.aten.view_copy.default(add_tensor, [4])
+    view_copy_default_1 = torch.ops.aten.view_copy.default(add_tensor, [4]); add_tensor = None
+    add_tensor_1 = torch.ops.aten.add.Tensor(view_copy_default_1, 1); view_copy_default_1 = None
+    view_copy_default_2 = torch.ops.aten.view_copy.default(add_tensor_1, [4]); add_tensor_1 = None
+    return view_copy_default_2
     """)
 
     def test_functionalize_fx_multi_out_op(self, device):
@@ -3066,12 +3066,12 @@ def f(inpt: torch.Tensor) -> torch.Tensor:
 
 
 def forward(self, inpt_1) -> torch.Tensor:
-    view_copy = torch.ops.aten.view_copy(inpt_1, [2, 4]); inpt_1 = None
-    aminmax = torch.ops.aten.aminmax(view_copy, dim = 0); view_copy = None
-    getitem = aminmax[0]
-    getitem_1 = aminmax[1]; aminmax = None
-    view_copy_1 = torch.ops.aten.view_copy(getitem_1, [2, 2]); getitem_1 = None
-    return (view_copy_1, getitem)
+    view_copy_default = torch.ops.aten.view_copy.default(inpt_1, [2, 4]); inpt_1 = None
+    aminmax_default = torch.ops.aten.aminmax.default(view_copy_default, dim = 0); view_copy_default = None
+    getitem = aminmax_default[0]
+    getitem_1 = aminmax_default[1]; aminmax_default = None
+    view_copy_default_1 = torch.ops.aten.view_copy.default(getitem_1, [2, 2]); getitem_1 = None
+    return (view_copy_default_1, getitem)
     """)
 
     def test_functionalize_fx_reapply_views_simple(self, device):
@@ -3088,12 +3088,12 @@ def f(x: torch.Tensor) -> torch.Tensor:
 
 
 def forward(self, x_1) -> torch.Tensor:
-    view = torch.ops.aten.view(x_1, [4, 2])
+    view_default = torch.ops.aten.view.default(x_1, [4, 2])
     _tensor_constant0 = self._tensor_constant0
-    add = torch.ops.aten.add(view, _tensor_constant0); view = _tensor_constant0 = None
-    view_1 = torch.ops.aten.view(add, [4, 2]); add = None
-    copy_ = torch.ops.aten.copy_(x_1, view_1); x_1 = None
-    return view_1
+    add_tensor = torch.ops.aten.add.Tensor(view_default, _tensor_constant0); view_default = _tensor_constant0 = None
+    view_default_1 = torch.ops.aten.view.default(add_tensor, [4, 2]); add_tensor = None
+    copy__default = torch.ops.aten.copy_.default(x_1, view_default_1); x_1 = None
+    return view_default_1
     """)
 
     def test_functionalize_nonfunctional_output(self, device):
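One last illustration, same assumptions as above: the reapply-views test presumably passes remove='mutations', which is why its graph keeps plain aten.view.default instead of aten.view_copy.default. Reusing f and the imports from the earlier sketch:

    gm_copies = make_fx(functionalize(f, remove='mutations_and_views'))(torch.zeros(4, 2))
    gm_views = make_fx(functionalize(f, remove='mutations'))(torch.zeros(4, 2))
    # gm_copies.code names torch.ops.aten.view_copy.default(...)
    # gm_views.code  names torch.ops.aten.view.default(...), as in the last hunk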
