
Commit 9968c85

shink authored and williamwen42 committed

[Dynamo] Replace unimplemented with unimplemented_v2 in torch/_dynamo/variables/tensor.py (pytorch#153146)

Part of pytorch#147913. Replaces `unimplemented` with `unimplemented_v2` in `torch/_dynamo/variables/tensor.py`.

Pull Request resolved: pytorch#153146
Approved by: https://github.com/williamwen42
Co-authored-by: William Wen <[email protected]>

1 parent 9b4a748 commit 9968c85
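
Note on the API change: `unimplemented` raised a graph break with a free-form string, while `unimplemented_v2` takes structured fields (`gb_type`, `context`, `explanation`, `hints`, and optionally `from_exc`) that Dynamo renders into the multi-line messages asserted in the tests below. A minimal sketch of the new call pattern, with field values copied from the `method_backward` change in this commit (calling it raises a graph-break exception):

from torch._dynamo import graph_break_hints
from torch._dynamo.exc import unimplemented_v2

# Old style: a bare, hard-to-search string.
#     unimplemented("Tensor.backward")
# New style: structured fields, rendered roughly as
#     "<gb_type>\nExplanation: ...\nHint: ...\n\nDeveloper debug context: <context>"
unimplemented_v2(
    gb_type="Unsupported Tensor.backward() call",  # stable, searchable identifier
    context="call_method TensorVariable() backward () {}",
    explanation="Dynamo currently does not support tracing `Tensor.backward()`.",
    hints=[*graph_break_hints.FUNDAMENTAL],  # canned hint list used by this commit
)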

File tree

4 files changed: +188 −29 lines changed


test/distributed/_composable/fsdp/test_fully_shard_compile.py

Lines changed: 10 additions & 1 deletion

@@ -543,7 +543,16 @@ def test_compiled():
             )
         if fwd_fullgraph:
             self.assertEqual(len(counters["graph_break"]), 1)
-            self.assertIn("Tensor.backward", counters["graph_break"])
+            self.assertExpectedInline(
+                next(iter(counters["graph_break"].keys())),
+                """\
+Unsupported Tensor.backward() call
+Explanation: Dynamo currently does not support tracing `Tensor.backward()`.
+Hint: This graph break is fundamental - it is unlikely that Dynamo will ever be able to trace through your code. Consider finding a workaround.
+
+Developer debug context: call_method TensorVariable() backward () {}
+""",  # noqa: B950
+            )
         else:
             self.assertGreater(len(counters["graph_break"]), 1)
         return res
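
The expected string above is the full structured graph-break message. A minimal sketch (not part of this commit) that reproduces it outside the test harness, assuming a build that includes this change:

import torch
from torch._dynamo.utils import counters

@torch.compile(backend="eager")
def f(x):
    s = x.sum()
    s.backward()  # Dynamo graph-breaks here: Tensor.backward() is not traceable
    return x.grad

f(torch.randn(4, requires_grad=True))
# The counter key is now the full multi-line message asserted in the test above.
print(next(iter(counters["graph_break"].keys())))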

test/dynamo/test_error_messages.py

Lines changed: 6 additions & 1 deletion

@@ -96,7 +96,12 @@ def fn(x):
                 torch.Tensor([1])
             ),
             """\
-Tensor.item
+Unsupported Tensor.item() call with capture_scalar_outputs=False
+Explanation: Dynamo does not support tracing `Tensor.item()` with config.capture_scalar_outputs=False.
+Hint: Set `torch._dynamo.config.capture_scalar_outputs = True` or `export TORCHDYNAMO_CAPTURE_SCALAR_OUTPUTS=1` to include these operations in the captured graph.
+
+Developer debug context: call_method TensorVariable() item () {}
 
 from user code:
   File "test_error_messages.py", line N, in fn

test/dynamo/test_reorder_logs.py

Lines changed: 10 additions & 1 deletion

@@ -202,7 +202,16 @@ def f(x):
 
         graph_break_key = counters["graph_break"].keys()
         self.assertEqual(len(graph_break_key), 1)
-        self.assertEqual(next(iter(graph_break_key)), "Tensor.item")
+        self.assertExpectedInline(
+            next(iter(graph_break_key)),
+            """\
+Unsupported Tensor.item() call with capture_scalar_outputs=False
+Explanation: Dynamo does not support tracing `Tensor.item()` with config.capture_scalar_outputs=False.
+Hint: Set `torch._dynamo.config.capture_scalar_outputs = True` or `export TORCHDYNAMO_CAPTURE_SCALAR_OUTPUTS=1` to include these operations in the captured graph.
+
+Developer debug context: call_method TensorVariable() item () {}
+""",  # noqa: B950
+        )
 
 
 if __name__ == "__main__":

torch/_dynamo/variables/tensor.py

Lines changed: 162 additions & 26 deletions
@@ -45,7 +45,6 @@
 from .. import config, graph_break_hints, variables
 from .._trace_wrapped_higher_order_op import trace_wrapped
 from ..exc import (
-    unimplemented,
     unimplemented_v2,
     UnknownPropertiesDuringBackwardTrace,
     UserError,
@@ -376,7 +375,12 @@ def method_attr_is_nested(self, tx):
         return ConstantVariable.create(self.is_nested)
 
     def method_attr_retain_grad(self, tx):
-        unimplemented("retain_grad does not work with AOTDispatcher")
+        unimplemented_v2(
+            gb_type="Tensor.retain_grad() with AOTDispatcher",
+            context=f"var_getattr {self} retain_grad",
+            explanation="`Tensor.retain_grad()` does not work with AOTDispatcher.",
+            hints=[],
+        )
 
     def method_attr_data(self, tx):
         return variables.TorchInGraphFunctionVariable(
@@ -385,7 +389,12 @@ def method_attr_data(self, tx):
 
     def method_attr_grad_fn(self, tx):
         if self.has_grad_fn:
-            unimplemented("TensorVariable has a grad_fn")
+            unimplemented_v2(
+                gb_type="Tensor with grad_fn()",
+                context=f"var_getattr {self} grad_fn",
+                explanation="Dynamo does not support tracing tensors with a grad_fn directly.",
+                hints=[],
+            )
         else:
             return variables.ConstantVariable(None)
 
@@ -427,8 +436,14 @@ def call_obj_hasattr(self, tx: "InstructionTranslator", name):
     def var_getattr(self, tx: "InstructionTranslator", name):
         if self.is_strict_mode(tx):
             if name in self._strict_mode_banned_ops():
-                unimplemented(
-                    f"Getattr invocation {name} in strict mode is not supported"
+                unimplemented_v2(
+                    gb_type="Strict mode banned op",
+                    context=f"var_getattr {self} {name}",
+                    explanation=f"Getattr invocation '{name}' in strict mode is not supported.",
+                    hints=[
+                        f"Remove `{name}` from the list of banned ops by "
+                        "setting `torch._dynamo.config._autograd_backward_strict_mode_banned_ops`.",
+                    ],
                 )
             elif name in self._strict_mode_conditional_banned_ops():
                 raise UnknownPropertiesDuringBackwardTrace(
@@ -511,17 +526,34 @@ def try_generic_attr_handling():
 
     def call_id(self, tx):
         if not self.source:
-            unimplemented("call_id not supported for sourceless TensorVariable")
+            unimplemented_v2(
+                gb_type="Unsupported call_id() without source",
+                context=f"call_id {self}",
+                explanation="call_id() not supported for sourceless TensorVariable.",
+                hints=[],
+            )
 
         # For local source, we associate the real value. We use this real value
         scope = {"L": tx.output.local_scope, "G": tx.output.global_scope}
         try:
             _input_associated_real_value = eval(self.source.name(), scope)
         except Exception as exc:
-            unimplemented(f"error getting associated real value: {exc}")
+            unimplemented_v2(
+                gb_type="Error getting associated real value",
+                context=f"call_id {self}",
+                explanation="Dynamo encountered an error while trying to "
+                "get the associated real value.",
+                hints=[],
+                from_exc=exc,
+            )
 
         if _input_associated_real_value is None:
-            unimplemented("call_id without associated real value")
+            unimplemented_v2(
+                gb_type="call_id() without associated real value",
+                context=f"call_id {self}",
+                explanation="Dynamo could not find an associated real value for the tensor.",
+                hints=[],
+            )
 
         install_guard(self.source.make_guard(GuardBuilder.ID_MATCH))
         id_value = id(_input_associated_real_value)
@@ -592,7 +624,13 @@ def call_method(
         from .torch_function import can_dispatch_torch_function, dispatch_torch_function
 
         if self.is_strict_mode(tx) and name in self._strict_mode_banned_ops():
-            unimplemented(f"Illegal method invocation {name} in strict mode")
+            unimplemented_v2(
+                gb_type="Illegal method invocation in strict mode",
+                context=f"call_method {self} {name} {args} {kwargs}",
+                explanation="Dynamo currently does not support this method "
+                f"({name}) invocation in strict mode.",
+                hints=[],
+            )
 
         # Only override builtin tensor methods
         # The user can manually add override handling
@@ -660,7 +698,14 @@ def call_method(
                 if result:
                     return result
             except TypeError as e:
-                unimplemented(f"unhandled args for {name}: {e}")
+                unimplemented_v2(
+                    gb_type="Unhandled args for method",
+                    context=f"call_method {self} {name} {args} {kwargs}",
+                    explanation="Dynamo encountered an error while calling "
+                    f"the method `{name}`.",
+                    hints=[],
+                    from_exc=e,
+                )
 
         from .builder import wrap_fx_proxy
 
@@ -850,9 +895,26 @@ def method_element_size(self):
 
     def method_numpy(self, *, force=False):
         if not config.trace_numpy:
-            unimplemented("Tensor.numpy(). config.trace_numpy is False")
+            unimplemented_v2(
+                gb_type="Tensor.numpy() with trace_numpy=False",
+                context=f"call_method {self} numpy",
+                explanation="`Tensor.numpy()` was called, but the `trace_numpy` "
+                "configuration was manually disabled.",
+                hints=[
+                    "Set `torch._dynamo.config.trace_numpy = True` to allow "
+                    "Dynamo to trace through NumPy.",
+                ],
+            )
         if not np:
-            unimplemented("Tensor.numpy(). NumPy is not available")
+            unimplemented_v2(
+                gb_type="Tensor.numpy() without NumPy installed",
+                context=f"call_method {self} numpy",
+                explanation="`Tensor.numpy()` was called, but the NumPy library "
+                "is not available in the current environment.",
+                hints=[
+                    "Ensure NumPy is installed in your Python environment.",
+                ],
+            )
         if self.layout != torch.strided:
             raise TypeError(
                 f"can't convert {self.layout} layout tensor to numpy. Use Tensor.to_dense() first"
@@ -898,7 +960,16 @@ def wrap(i, sub_proxy):
             torch.int32,
             torch.int64,
         ]:
-            unimplemented("Input tensor for tolist must be an integer tensor")
+            unimplemented_v2(
+                gb_type="Tensor.tolist() with non-integer tensor",
+                context=f"call_method {self} to_list",
+                explanation="Dynamo currently does not support tracing "
+                "`tolist()` on non-integer tensors.",
+                hints=[
+                    "Ensure the input tensor to `tolist()` is an integer "
+                    "type (e.g., int8, int16, int32, int64)."
+                ],
+            )
 
         if tensor.dim() == 0:
             return wrap(tensor, sub_proxy)
@@ -916,15 +987,30 @@ def wrap(i, sub_proxy):
         return VariableTracker.build(tx, out)
 
     def method_backward(self, *args, **kwargs):
-        unimplemented("Tensor.backward")
+        unimplemented_v2(
+            gb_type="Unsupported Tensor.backward() call",
+            context=f"call_method {self} backward {args} {kwargs}",
+            explanation="Dynamo currently does not support tracing `Tensor.backward()`.",
+            hints=[*graph_break_hints.FUNDAMENTAL],
+        )
 
     def method_data_ptr(self, *args, **kwargs):
         return DataPtrVariable(self)
 
     def method_item(self, *args, **kwargs):
         if not config.capture_scalar_outputs:
             self._warn_capture_scalar_outputs()
-            unimplemented("Tensor.item")
+            unimplemented_v2(
+                gb_type="Unsupported Tensor.item() call with capture_scalar_outputs=False",
+                context=f"call_method {self} item {args} {kwargs}",
+                explanation="Dynamo does not support tracing `Tensor.item()` "
+                "with config.capture_scalar_outputs=False.",
+                hints=[
+                    "Set `torch._dynamo.config.capture_scalar_outputs = True` "
+                    "or `export TORCHDYNAMO_CAPTURE_SCALAR_OUTPUTS=1` "
+                    "to include these operations in the captured graph.",
+                ],
+            )
 
     def method___getitem__(self, *args, **kwargs):
         from ..symbolic_convert import InstructionTranslator
@@ -1013,16 +1099,36 @@ def has_bool_key(v):
         return ConstantVariable.create(None)
 
     def method_resize_(self, *args, **kwargs):
-        unimplemented("Tensor.resize_")
+        unimplemented_v2(
+            gb_type="Unsupported Tensor.resize_() call",
+            context=f"call_method {self} resize_ {args} {kwargs}",
+            explanation="Dynamo currently does not support tracing `Tensor.resize_()`.",
+            hints=[],
+        )
 
     def method_resize_as_(self, *args, **kwargs):
-        unimplemented("Tensor.resize_as_")
+        unimplemented_v2(
+            gb_type="Unsupported Tensor.resize_as_() call",
+            context=f"call_method {self} resize_as_ {args} {kwargs}",
+            explanation="Dynamo currently does not support tracing `Tensor.resize_as_()`.",
+            hints=[],
+        )
 
     def method_sparse_resize_(self, *args, **kwargs):
-        unimplemented("Tensor.sparse_resize_")
+        unimplemented_v2(
+            gb_type="Unsupported Tensor.sparse_resize_() call",
+            context=f"call_method {self} sparse_resize_ {args} {kwargs}",
+            explanation="Dynamo currently does not support tracing `Tensor.sparse_resize_()`.",
+            hints=[],
+        )
 
     def method_sparse_resize_and_clear_(self, *args, **kwargs):
-        unimplemented("Tensor.sparse_resize_and_clear_")
+        unimplemented_v2(
+            gb_type="Unsupported Tensor.sparse_resize_and_clear_() call",
+            context=f"call_method {self} sparse_resize_and_clear_ {args} {kwargs}",
+            explanation="Dynamo currently does not support tracing `Tensor.sparse_resize_and_clear_()`.",
+            hints=[],
+        )
 
     def method_set_(self, *args, **kwargs):
         if len(args) > 1:
@@ -1032,7 +1138,13 @@ def method_set_(self, *args, **kwargs):
             # overload and is used by FSDP.
             # graph-breaking on aten::set_source_Tensor_storage_offset for now,
             # unless we find that we need to make it work.
-            unimplemented("Tensor.set_.source_Tensor_storage_offset")
+            unimplemented_v2(
+                gb_type="Unsupported Tensor.set_() call",
+                context=f"call_method {self} set_ {args} {kwargs}",
+                explanation="Dynamo currently does not support tracing `Tensor.set_()` "
+                "overloads that include more than one argument.",
+                hints=[*graph_break_hints.SUPPORTABLE],
+            )
 
     def method_add_(self, other, *, alpha=None):
         if alpha is not None:
@@ -1158,8 +1270,11 @@ def _method_register_hook(self, name: str, hook: VariableTracker):
             # would have no recourse - their forward traces just fine, but will fail at backwards unless
             # compiled_autograd is enabled. If compiled_autograd fails (there are a lot of failures today)
             # then they have nothing they can do except disable compile.
-            unimplemented(
-                "Compilation of intermediate hooks requires compiled autograd"
+            unimplemented_v2(
+                gb_type="Compilation of intermediate hooks requires compiled autograd",
+                context=f"var_getattr {self} {name}",
+                explanation="Dynamo must be in compiled_autograd to register hooks.",
+                hints=[],
             )
 
         hook_name, bw_state_proxy = tx.output.add_backward_state_hook(hook)
@@ -1205,7 +1320,13 @@ def method_requires_grad_(self, requires_grad=True):
             requires_grad = requires_grad.as_python_constant()
 
         if self.as_proxy().node.meta["example_value"].requires_grad != requires_grad:
-            unimplemented("Tensor.requires_grad_")
+            unimplemented_v2(
+                gb_type="Unsupported Tensor.requires_grad_() call",
+                context=f"call_method {self} requires_grad_",
+                explanation="Dynamo does not support changes to a Tensor's "
+                "`requires_grad` through calling `requires_grad_()`.",
+                hints=[],
+            )
         else:
             return self
 
@@ -1391,9 +1512,19 @@ def insert_into_graph():
                 return ConstantVariable.create(int(r))
             return insert_into_graph()
         elif name in ["base", "flags", "dtype"]:
-            unimplemented(f"TODO: add support for ndarray.{name}")
+            unimplemented_v2(
+                gb_type="Unsupported ndarray attribute access",
+                context=f"var_getattr {self} {name}",
+                explanation=f"Dynamo currently does not support tracing `ndarray.{name}`.",
+                hints=[],
+            )
         elif name in ["__version__"]:
-            unimplemented("delegate np.__version__ to NumPy")
+            unimplemented_v2(
+                gb_type="Unsupported ndarray.__version__ access",
+                context=f"var_getattr {self} {name}",
+                explanation=f"Dynamo currently does not support tracing `ndarray.{name}`.",
+                hints=[],
+            )
         if result is None:
             raise NotImplementedError
         return result
@@ -1420,7 +1551,12 @@ def call_method(
             # delegate back to TensorVariable
             return super().call_method(tx, name, args, kwargs)
         if name in ("tostring", "tobytes", "__delattr__"):
-            unimplemented(f"{name} is not modelled in torch._numpy")
+            unimplemented_v2(
+                gb_type="Unsupported ndarray method call",
+                context=f"call_method {self} {name} {args} {kwargs}",
+                explanation=f"`ndarray.{name}()` is not modelled in `torch._numpy`.",
+                hints=[],
+            )
         proxy = tx.output.create_proxy(
             "call_function",
             numpy_method_wrapper(name),
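
Several of the new hints point at config toggles. As one example (a sketch, not part of this commit), the `Tensor.numpy()` break in `method_numpy` only fires when `trace_numpy` has been turned off; with the default setting the call is traced through `torch._numpy`:

import torch

# trace_numpy defaults to True; the Tensor.numpy() graph break above only
# fires when it has been manually set to False.
torch._dynamo.config.trace_numpy = True

@torch.compile
def f(x):
    return x.numpy() * 2  # modelled in-graph via torch._numpy

print(f(torch.ones(3)))  # a NumPy array: [2. 2. 2.]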
