diff --git a/.lintrunner.toml b/.lintrunner.toml index e7ed0a638d8..254e287f980 100644 --- a/.lintrunner.toml +++ b/.lintrunner.toml @@ -292,9 +292,9 @@ include_patterns = [ # TODO(https://github.com/pytorch/executorch/issues/7441): Gradually start enabling all folders. # 'backends/**/*.py', 'build/**/*.py', - # 'codegen/**/*.py', + 'codegen/**/*.py', # 'devtools/**/*.py', - # 'docs/**/*.py', + 'docs/**/*.py', # 'examples/**/*.py', # 'exir/**/*.py', # 'extension/**/*.py', diff --git a/.mypy.ini b/.mypy.ini index 137cb5f589e..171f5947163 100644 --- a/.mypy.ini +++ b/.mypy.ini @@ -10,9 +10,6 @@ local_partial_types = True enable_error_code = possibly-undefined warn_unused_ignores = False -# TODO(https://github.com/pytorch/executorch/issues/7441): Remove this -# disable_error_code = import-untyped - files = backends, codegen, @@ -31,35 +28,53 @@ mypy_path = executorch [mypy-executorch.codegen.*] follow_untyped_imports = True -[mypy-executorch.extension.*] +[mypy-executorch.devtools.*] follow_untyped_imports = True [mypy-executorch.exir.*] follow_untyped_imports = True +[mypy-executorch.extension.*] +follow_untyped_imports = True + [mypy-executorch.kernels.*] follow_untyped_imports = True [mypy-executorch.runtime.*] follow_untyped_imports = True +[mypy-requests.*] +follow_untyped_imports = True + [mypy-torchgen.*] follow_untyped_imports = True -[mypy-setuptools.*] +[mypy-buck_util] ignore_missing_imports = True -[mypy-buck_util] +[mypy-docutils.*] ignore_missing_imports = True -[mypy-tomllib] +[mypy-pandas] ignore_missing_imports = True -[mypy-zstd] +[mypy-pytorch_sphinx_theme] +ignore_missing_imports = True + +[mypy-ruamel] +ignore_missing_imports = True + +[mypy-setuptools.*] +ignore_missing_imports = True + +[mypy-sphinx.*] +ignore_missing_imports = True + +[mypy-tomllib] ignore_missing_imports = True [mypy-yaml] ignore_missing_imports = True -[mypy-ruamel] +[mypy-zstd] ignore_missing_imports = True \ No newline at end of file diff --git a/docs/source/conf.py 
b/docs/source/conf.py index 423f0618f68..12453dbfd25 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -22,6 +22,7 @@ import glob import os import sys +from typing import Any import pytorch_sphinx_theme @@ -103,7 +104,7 @@ myst_heading_anchors = 4 -sphinx_gallery_conf = { +sphinx_gallery_conf: dict[str, Any] = { "examples_dirs": ["tutorials_source"], "ignore_pattern": "template_tutorial.py", "gallery_dirs": ["tutorials"], @@ -197,7 +198,7 @@ SupportedDevices, SupportedProperties, ) -from docutils.parsers import rst +from docutils.parsers import rst # type: ignore[import-untyped] # Register custom directives diff --git a/docs/source/custom_directives.py b/docs/source/custom_directives.py index ee2c8adf968..8486027f3e4 100644 --- a/docs/source/custom_directives.py +++ b/docs/source/custom_directives.py @@ -102,7 +102,7 @@ class SupportedDevices(BaseShield): required_arguments = 1 final_argument_whitespace = True - def run(self) -> List[nodes.Node]: + def run(self) -> List[nodes.Node]: devices = _parse_devices(self.arguments[0]) alt = f"This feature supports the following devices: {devices}" params = { @@ -121,7 +121,7 @@ class SupportedProperties(BaseShield): required_arguments = 1 final_argument_whitespace = True - def run(self) -> List[nodes.Node]: + def run(self) -> List[nodes.Node]: properties = _parse_properties(self.arguments[0]) alt = f"This API supports the following properties: {properties}" params = { @@ -121,7 +121,7 @@ class SupportedProperties(BaseShield): required_arguments = 1 final_argument_whitespace = True - def run(self) -> List[nodes.Node]: + def run(self) -> List[nodes.Node]: properties = _parse_properties(self.arguments[0]) alt = f"This API supports the following properties: {properties}" params = { diff --git a/docs/source/executorch_custom_versions.py b/docs/source/executorch_custom_versions.py index 64a176c0e52..29c48a337ea 100644 --- a/docs/source/executorch_custom_versions.py +++ b/docs/source/executorch_custom_versions.py @@ -24,7 +24,7 @@ "pytorch.txt", ] -variables = {} +variables: dict[str, str] = {} def read_version_files(): diff --git a/docs/source/tutorials_source/devtools-integration-tutorial.py b/docs/source/tutorials_source/devtools-integration-tutorial.py index b9028dc91f5..45779063a6a 100644 ---
a/docs/source/tutorials_source/devtools-integration-tutorial.py +++ b/docs/source/tutorials_source/devtools-integration-tutorial.py @@ -232,7 +232,7 @@ def forward(self, x): # Via EventBlocks for event in event_block.events: if event.name == "native_call_addmm.out": - print(event.name, event.perf_data.raw) + print(event.name, event.perf_data.raw if event.perf_data else "") # Via Dataframe df = event_block.to_dataframe() @@ -264,11 +264,12 @@ def forward(self, x): df = df[df.event_name == "native_call_convolution.out"] if len(df) > 0: slowest = df.loc[df["p50"].idxmax()] - print(slowest.event_name) + assert slowest is not None + print(slowest.event_name) print() - pp.pprint(slowest.stack_traces) + pp.pprint(slowest.stack_traces if slowest.stack_traces else "") print() - pp.pprint(slowest.module_hierarchy) + pp.pprint(slowest.module_hierarchy if slowest.module_hierarchy else "") ###################################################################### # If a user wants the total runtime of a module, they can use diff --git a/docs/source/tutorials_source/export-to-executorch-tutorial.py b/docs/source/tutorials_source/export-to-executorch-tutorial.py index 87ae6d8ca60..34839a9c1fb 100644 --- a/docs/source/tutorials_source/export-to-executorch-tutorial.py +++ b/docs/source/tutorials_source/export-to-executorch-tutorial.py @@ -65,7 +65,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return self.relu(a) -example_args = (torch.randn(1, 3, 256, 256),) +example_args: tuple[torch.Tensor] = (torch.randn(1, 3, 256, 256),) aten_dialect: ExportedProgram = export(SimpleConv(), example_args, strict=True) print(aten_dialect) @@ -100,8 +100,11 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return x + y -example_args = (torch.randn(3, 3), torch.randn(3, 3)) -aten_dialect: ExportedProgram = export(Basic(), example_args, strict=True) +example_args_2: tuple[torch.Tensor, torch.Tensor] = ( + torch.randn(3, 3), + torch.randn(3, 3), +) +aten_dialect = export(Basic(),
example_args_2, strict=True) # Works correctly print(aten_dialect.module()(torch.ones(3, 3), torch.ones(3, 3))) @@ -118,20 +121,11 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: from torch.export import Dim - -class Basic(torch.nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: - return x + y - - -example_args = (torch.randn(3, 3), torch.randn(3, 3)) +example_args_2 = (torch.randn(3, 3), torch.randn(3, 3)) dim1_x = Dim("dim1_x", min=1, max=10) dynamic_shapes = {"x": {1: dim1_x}, "y": {1: dim1_x}} -aten_dialect: ExportedProgram = export( - Basic(), example_args, dynamic_shapes=dynamic_shapes, strict=True +aten_dialect = export( + Basic(), example_args_2, dynamic_shapes=dynamic_shapes, strict=True ) print(aten_dialect) @@ -207,13 +201,13 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: ) quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config()) -prepared_graph = prepare_pt2e(pre_autograd_aten_dialect, quantizer) +prepared_graph = prepare_pt2e(pre_autograd_aten_dialect, quantizer) # type: ignore[arg-type] # calibrate with a sample dataset converted_graph = convert_pt2e(prepared_graph) print("Quantized Graph") print(converted_graph) -aten_dialect: ExportedProgram = export(converted_graph, example_args, strict=True) +aten_dialect = export(converted_graph, example_args, strict=True) print("ATen Dialect Graph") print(aten_dialect) @@ -243,7 +237,7 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: from executorch.exir import EdgeProgramManager, to_edge example_args = (torch.randn(1, 3, 256, 256),) -aten_dialect: ExportedProgram = export(SimpleConv(), example_args, strict=True) +aten_dialect = export(SimpleConv(), example_args, strict=True) edge_program: EdgeProgramManager = to_edge(aten_dialect) print("Edge Dialect Graph") @@ -272,9 +266,7 @@ def forward(self, x): decode_args = (torch.randn(1, 5),) 
aten_decode: ExportedProgram = export(Decode(), decode_args, strict=True) -edge_program: EdgeProgramManager = to_edge( - {"encode": aten_encode, "decode": aten_decode} -) +edge_program = to_edge({"encode": aten_encode, "decode": aten_decode}) for method in edge_program.methods: print(f"Edge Dialect graph of {method}") print(edge_program.exported_program(method)) @@ -291,8 +283,8 @@ def forward(self, x): # rather than the ``torch.ops.aten`` namespace. example_args = (torch.randn(1, 3, 256, 256),) -aten_dialect: ExportedProgram = export(SimpleConv(), example_args, strict=True) -edge_program: EdgeProgramManager = to_edge(aten_dialect) +aten_dialect = export(SimpleConv(), example_args, strict=True) +edge_program = to_edge(aten_dialect) print("Edge Dialect Graph") print(edge_program.exported_program()) @@ -357,8 +349,8 @@ def forward(self, x): # Export and lower the module to Edge Dialect example_args = (torch.ones(1),) -aten_dialect: ExportedProgram = export(LowerableModule(), example_args, strict=True) -edge_program: EdgeProgramManager = to_edge(aten_dialect) +aten_dialect = export(LowerableModule(), example_args, strict=True) +edge_program = to_edge(aten_dialect) to_be_lowered_module = edge_program.exported_program() from executorch.exir.backend.backend_api import LoweredBackendModule, to_backend @@ -369,7 +361,7 @@ def forward(self, x): ) # Lower the module -lowered_module: LoweredBackendModule = to_backend( +lowered_module: LoweredBackendModule = to_backend( # type: ignore[call-arg] "BackendWithCompilerDemo", to_be_lowered_module, [] ) print(lowered_module) @@ -423,8 +415,8 @@ def forward(self, x): example_args = (torch.ones(1),) -aten_dialect: ExportedProgram = export(ComposedModule(), example_args, strict=True) -edge_program: EdgeProgramManager = to_edge(aten_dialect) +aten_dialect = export(ComposedModule(), example_args, strict=True) +edge_program = to_edge(aten_dialect) exported_program = edge_program.exported_program() print("Edge Dialect graph") 
print(exported_program) @@ -460,16 +452,16 @@ def forward(self, a, x, b): return z -example_args = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2)) -aten_dialect: ExportedProgram = export(Foo(), example_args, strict=True) -edge_program: EdgeProgramManager = to_edge(aten_dialect) +example_args_3 = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2)) +aten_dialect = export(Foo(), example_args_3, strict=True) +edge_program = to_edge(aten_dialect) exported_program = edge_program.exported_program() print("Edge Dialect graph") print(exported_program) from executorch.exir.backend.test.op_partitioner_demo import AddMulPartitionerDemo -delegated_program = to_backend(exported_program, AddMulPartitionerDemo()) +delegated_program = to_backend(exported_program, AddMulPartitionerDemo()) # type: ignore[call-arg] print("Delegated program") print(delegated_program) print(delegated_program.graph_module.lowered_module_0.original_module) @@ -484,19 +476,9 @@ def forward(self, a, x, b): # call ``to_backend`` on it: -class Foo(torch.nn.Module): - def forward(self, a, x, b): - y = torch.mm(a, x) - z = y + b - a = z - a - y = torch.mm(a, x) - z = y + b - return z - - -example_args = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2)) -aten_dialect: ExportedProgram = export(Foo(), example_args, strict=True) -edge_program: EdgeProgramManager = to_edge(aten_dialect) +example_args_3 = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2)) +aten_dialect = export(Foo(), example_args_3, strict=True) +edge_program = to_edge(aten_dialect) exported_program = edge_program.exported_program() delegated_program = edge_program.to_backend(AddMulPartitionerDemo()) @@ -530,7 +512,6 @@ def forward(self, a, x, b): print("ExecuTorch Dialect") print(executorch_program.exported_program()) -import executorch.exir as exir ###################################################################### # Notice that in the graph we now see operators like ``torch.ops.aten.sub.out`` @@ -577,13 +558,11 
@@ def forward(self, x): pre_autograd_aten_dialect = export_for_training(M(), example_args).module() # Optionally do quantization: # pre_autograd_aten_dialect = convert_pt2e(prepare_pt2e(pre_autograd_aten_dialect, CustomBackendQuantizer)) -aten_dialect: ExportedProgram = export( - pre_autograd_aten_dialect, example_args, strict=True -) -edge_program: exir.EdgeProgramManager = exir.to_edge(aten_dialect) +aten_dialect = export(pre_autograd_aten_dialect, example_args, strict=True) +edge_program = to_edge(aten_dialect) # Optionally do delegation: # edge_program = edge_program.to_backend(CustomBackendPartitioner) -executorch_program: exir.ExecutorchProgramManager = edge_program.to_executorch( +executorch_program = edge_program.to_executorch( ExecutorchBackendConfig( passes=[], # User-defined passes ) diff --git a/setup.py b/setup.py index 8cbe45f7874..5e8f155353d 100644 --- a/setup.py +++ b/setup.py @@ -710,6 +710,7 @@ def get_ext_modules() -> List[Extension]: # include. See also setuptools/discovery.py for custom finders. package_dir={ "executorch/backends": "backends", + "executorch/codegen": "codegen", # TODO(mnachin T180504136): Do not put examples/models # into core pip packages. Refactor out the necessary utils # or core models files into a separate package.