diff --git a/export/TARGETS b/export/TARGETS
new file mode 100644
index 00000000000..ae8be8a5e98
--- /dev/null
+++ b/export/TARGETS
@@ -0,0 +1,38 @@
+load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
+
+oncall("executorch")
+
+python_library(
+    name = "recipe",
+    srcs = [
+        "recipe.py",
+    ],
+    deps = [
+        "//caffe2:torch",
+        "//executorch/exir/backend:backend_api",
+        "//executorch/exir:pass_manager",
+        "//executorch/devtools/backend_debug:delegation_info",
+    ]
+)
+
+python_library(
+    name = "export",
+    srcs = [
+        "export.py",
+    ],
+    deps = [
+        ":recipe",
+        "//executorch/runtime:runtime",
+    ]
+)
+
+python_library(
+    name = "lib",
+    srcs = [
+        "__init__.py",
+    ],
+    deps = [
+        ":export",
+        ":recipe",
+    ],
+)
diff --git a/export/__init__.py b/export/__init__.py
new file mode 100644
index 00000000000..5eaf2add02e
--- /dev/null
+++ b/export/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+ExecuTorch export module.
+
+This module provides the tools and utilities for exporting PyTorch models
+to the ExecuTorch format, including configuration, quantization, and
+export management.
+"""
+
+# pyre-strict
+
+from .export import export, ExportSession
+from .recipe import ExportRecipe
+
+__all__ = [
+    "ExportRecipe",
+    "ExportSession",
+    "export",
+]
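The three names re-exported above are the entire user-facing surface of the new module. A minimal end-to-end sketch of how they compose (the toy module and output file name are illustrative, not part of this diff):

```python
import torch

from executorch.export import export, ExportRecipe


class MulModel(torch.nn.Module):
    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return x * y


# example_inputs is a list of input tuples; the first tuple drives torch.export.
session = export(
    model=MulModel(),
    example_inputs=[(torch.randn(4), torch.randn(4))],
    export_recipe=ExportRecipe(name="mul"),
)
session.save_pte_file("mul.pte")  # serialize the lowered program to disk
```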
diff --git a/export/export.py b/export/export.py
new file mode 100644
index 00000000000..7ea4de20a9a
--- /dev/null
+++ b/export/export.py
@@ -0,0 +1,746 @@
+from abc import ABC, abstractmethod
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
+
+import torch
+from executorch.devtools.backend_debug import get_delegation_info
+from executorch.exir._warnings import experimental
+from executorch.exir.program import (
+    EdgeProgramManager,
+    ExecutorchProgramManager,
+    to_edge_transform_and_lower,
+)
+from executorch.exir.schema import Program
+from executorch.runtime import Runtime, Verification
+from tabulate import tabulate
+from torch import nn
+from torch.ao.quantization import allow_exported_model_train_eval
+from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
+from torch.export import ExportedProgram
+from torchao.quantization import quantize_
+from torchao.utils import unwrap_tensor_subclass
+
+from .recipe import ExportRecipe
+
+
+class Stage(ABC):
+    """
+    Interface for a Stage in the ExecuTorch export pipeline.
+
+    Each stage can be connected to other stages to form a pipeline.
+    Stages have explicit run and get_artifacts methods to make the data flow clear.
+    Each stage implements its own run method with specific parameter names.
+    """
+
+    def __init__(self) -> None:
+        """
+        Initialize the stage.
+        """
+        self._next_stage: Optional["Stage"] = None
+
+    @property
+    @abstractmethod
+    def name(self) -> str:
+        """
+        Returns the name of this stage.
+        """
+        pass
+
+    @abstractmethod
+    def run(self, **kwargs) -> None:
+        """
+        Executes this stage with the given inputs.
+
+        Each concrete stage class implements this method with specific parameter names.
+        """
+        pass
+
+    @abstractmethod
+    def get_artifacts(self) -> Any:
+        """
+        Returns the artifacts generated by this stage.
+
+        Returns:
+            The artifacts of this stage, to be used as inputs for the next stage
+        """
+        pass
+
+    def set_next_stage(self, next_stage: "Stage") -> None:
+        """
+        Set the next stage in the pipeline.
+
+        Args:
+            next_stage: The next stage to execute after this one
+        """
+        self._next_stage = next_stage
+
+    @property
+    def next_stage(self) -> Optional["Stage"]:
+        """
+        Get the next stage in the pipeline.
+
+        Returns:
+            The next stage, or None if this is the last stage
+        """
+        return self._next_stage
+
+
+class ExportStage(Stage):
+    """
+    First stage: Export PyTorch model to ExportedProgram.
+    """
+
+    def __init__(
+        self,
+        pre_edge_transform_passes: Optional[
+            Callable[[ExportedProgram], ExportedProgram]
+        ] = None,
+    ) -> None:
+        self._exported_program: Dict[str, ExportedProgram] = {}
+        self._pre_edge_transform_passes = pre_edge_transform_passes
+        self._model_dict: Dict[str, nn.Module] = {}
+        self._example_inputs_dict: Dict[str, List[Tuple[torch.Tensor, ...]]] = {}
+        self._dynamic_shapes_dict: Dict[str, Any] = {}
+
+    @property
+    def name(self) -> str:
+        return "export"
+
+    def run(
+        self,
+        models: Dict[str, Any],
+        export_config: Optional[Dict[str, Any]] = None,
+        **kwargs,
+    ) -> None:
+        """
+        Export PyTorch model to ExportedProgram.
+
+        Args:
+            models: Dictionary with a "model" key that maps to a dictionary of
+                method names to PyTorch models
+            export_config: Configuration containing example inputs and dynamic shapes
+            **kwargs: Additional keyword arguments (not used)
+        """
+        # Store inputs
+        self._model_dict = models.get("model", {})
+
+        if export_config is not None:
+            self._example_inputs_dict = export_config.get("example_inputs", {})
+            self._dynamic_shapes_dict = export_config.get("dynamic_shapes", {})
+
+        # Process inputs
+        with torch.no_grad():
+            for method_name, model in self._model_dict.items():
+                # Check if method_name exists in example_inputs
+                if method_name not in self._example_inputs_dict:
+                    raise ValueError(
+                        f"Example inputs for method {method_name} not found."
+                    )
+
+                # Get dynamic shapes if available
+                dynamic_shapes = None
+                if method_name in self._dynamic_shapes_dict:
+                    dynamic_shapes = self._dynamic_shapes_dict[method_name]
+
+                # Export the model
+                self._exported_program[method_name] = torch.export.export(
+                    model,
+                    self._example_inputs_dict[method_name][0],
+                    dynamic_shapes=dynamic_shapes,
+                )
+
+                # Apply pre-edge transform passes if available
+                if self._pre_edge_transform_passes is not None:
+                    self._exported_program[method_name] = (
+                        self._pre_edge_transform_passes(
+                            self._exported_program[method_name]
+                        )
+                    )
+
+    def get_artifacts(self) -> Dict[str, ExportedProgram]:
+        """
+        Returns the exported program dictionary.
+
+        Returns:
+            Dictionary mapping method names to exported programs
+        """
+        return self._exported_program
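`ExportStage` forwards each method's first example-input tuple and optional dynamic-shape spec straight to `torch.export.export`. A sketch of exercising that path through the top-level `export` helper defined later in this file, using the standard `torch.export.Dim` API (the `Echo` module is illustrative):

```python
import torch
from torch.export import Dim

from executorch.export import export, ExportRecipe


class Echo(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + 1


# Mark dim 0 of the input "x" as dynamic; the spec is keyed exactly as
# torch.export.export expects and is passed through unchanged.
session = export(
    model=Echo(),
    example_inputs=[(torch.randn(2, 8),)],
    export_recipe=ExportRecipe(),
    dynamic_shapes={"x": {0: Dim("batch", min=1, max=32)}},
)
```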
+
+
+class EdgeTransformAndLowerStage(Stage):
+    """
+    Second stage: Transform and lower to EdgeProgramManager.
+    """
+
+    def __init__(
+        self,
+        partitioners: Optional[List[Any]] = None,
+        transform_passes: Optional[Sequence[Callable[[Any], Optional[Any]]]] = None,
+        compile_config: Optional[Any] = None,
+    ) -> None:
+        self._partitioners = partitioners
+        self._transform_passes = transform_passes
+        self._compile_config = compile_config
+        self._edge_program_manager: Optional[EdgeProgramManager] = None
+        self._delegation_info = None
+        self._exported_program: Dict[str, ExportedProgram] = {}
+        self._constant_methods = None
+
+    @property
+    def name(self) -> str:
+        return "edge_transform_and_lower"
+
+    def run(
+        self,
+        exported_programs: Dict[str, ExportedProgram],
+        transform_config: Optional[Dict[str, Any]] = None,
+        **kwargs,
+    ) -> None:
+        """
+        Transform and lower to EdgeProgramManager.
+
+        Args:
+            exported_programs: Dictionary mapping method names to exported programs
+            transform_config: Configuration containing constant methods
+            **kwargs: Additional keyword arguments (not used)
+        """
+        # Store inputs
+        self._exported_program = exported_programs
+
+        self._constant_methods = None
+        if transform_config is not None:
+            self._constant_methods = transform_config.get("constant_methods", None)
+
+        # Process inputs
+        self._edge_program_manager = to_edge_transform_and_lower(
+            self._exported_program,
+            partitioner=self._partitioners,
+            transform_passes=self._transform_passes,
+            constant_methods=self._constant_methods,
+            compile_config=self._compile_config,
+        )
+        self._delegation_info = get_delegation_info(
+            self._edge_program_manager.exported_program().graph_module
+        )
+
+    def get_artifacts(self) -> EdgeProgramManager:
+        """
+        Returns the edge program manager.
+
+        Returns:
+            The edge program manager
+
+        Raises:
+            RuntimeError: If the edge program manager is not initialized
+        """
+        if self._edge_program_manager is None:
+            raise RuntimeError("Edge program manager is not initialized.")
+        return self._edge_program_manager
+
+    @property
+    def delegation_info(self) -> Any:
+        """
+        Returns the delegation info.
+        """
+        return self._delegation_info
+
+
+class ExecutorchStage(Stage):
+    """
+    Third stage: Convert to ExecutorchProgramManager.
+    """
+
+    def __init__(self, backend_config: Any) -> None:
+        self._backend_config = backend_config
+        self._executorch_program_manager: Optional[ExecutorchProgramManager] = None
+        self._edge_program_manager: Optional[EdgeProgramManager] = None
+
+    @property
+    def name(self) -> str:
+        return "executorch"
+
+    def run(
+        self,
+        edge_program: EdgeProgramManager,
+        backend_options: Optional[Dict[str, Any]] = None,
+        **kwargs,
+    ) -> None:
+        """
+        Convert to ExecutorchProgramManager.
+
+        Args:
+            edge_program: Edge program manager containing the lowered program
+            backend_options: Additional backend-specific options (not used in this stage)
+            **kwargs: Additional keyword arguments (not used)
+        """
+        # Store inputs
+        self._edge_program_manager = edge_program
+
+        # Process inputs
+        if self._edge_program_manager is None:
+            raise RuntimeError("Edge program manager is not set.")
+
+        self._executorch_program_manager = self._edge_program_manager.to_executorch(
+            self._backend_config
+        )
+
+    def get_artifacts(self) -> ExecutorchProgramManager:
+        """
+        Returns the executorch program manager.
+
+        Returns:
+            The executorch program manager
+
+        Raises:
+            RuntimeError: If the executorch program manager is not initialized
+        """
+        if self._executorch_program_manager is None:
+            raise RuntimeError("Executorch program manager is not initialized.")
+        return self._executorch_program_manager
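After lowering, the stage records per-operator delegation statistics via `get_delegation_info`; `ExportSession.print_delegation_info` (later in this diff) renders them with `tabulate`. A short continuation of the `Echo` sketch above:

```python
# Summarize how much of the graph was delegated to backends; with no
# partitioners in the recipe, everything stays on the portable ops.
session.print_delegation_info()

# The serialized program is also available in memory.
pte_bytes = session.get_pte_buffer()
assert len(pte_bytes) > 0
```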
+
+
+class SourceTransformStage(Stage):
+    """
+    Source transform stage: Apply source transformations to the model.
+    """
+
+    def __init__(self, quantization_recipe: Any) -> None:
+        self._quantization_recipe = quantization_recipe
+        self._transformed_models: Dict[str, nn.Module] = {}
+
+    @property
+    def name(self) -> str:
+        return "source_transform"
+
+    def run(self, models: Dict[str, nn.Module], *args, **kwargs) -> None:
+        """
+        Apply source transformations to the model.
+
+        Args:
+            models: Dictionary mapping method names to PyTorch models
+            *args: Additional positional arguments (not used)
+            **kwargs: Additional keyword arguments (not used)
+        """
+        # Store the original models
+        self._transformed_models = models
+
+        # Check if there's a quantization recipe with ao_base_config
+        if self._quantization_recipe and self._quantization_recipe.ao_base_config:
+            # Apply torchao quantize_ to each model
+            for method_name, model in models.items():
+                for config in self._quantization_recipe.ao_base_config:
+                    quantize_(model, config)
+                unwrap_tensor_subclass(model)
+                self._transformed_models[method_name] = model
+
+    def get_artifacts(self) -> Dict[str, nn.Module]:
+        """
+        Returns the transformed models.
+
+        Returns:
+            Dictionary mapping method names to transformed models
+        """
+        return self._transformed_models
+
+
+class QuantizeStage(Stage):
+    """
+    Optional stage: Perform post-training quantization on the model.
+    """
+
+    def __init__(self, quantizer: Any) -> None:
+        self._quantizer = quantizer
+        self._quantized_models: Dict[str, nn.Module] = {}
+        self._model_dict: Dict[str, nn.Module] = {}
+        self._exported_program_dict: Dict[str, ExportedProgram] = {}
+        self._example_inputs_dict: Dict[str, List[Tuple[torch.Tensor, ...]]] = {}
+
+    @property
+    def name(self) -> str:
+        return "quantize"
+
+    def run(
+        self,
+        exported_program_data: Dict[str, Any],
+        calibration_config: Optional[Dict[str, Any]] = None,
+        **kwargs,
+    ) -> None:
+        """
+        Perform post-training quantization on the exported program.
+
+        Args:
+            exported_program_data: Dictionary with an "exported_program" key that
+                maps to a dictionary of method names to exported programs
+            calibration_config: Configuration containing example inputs for calibration
+            **kwargs: Additional keyword arguments (not used)
+        """
+        # Store inputs
+        self._exported_program_dict = exported_program_data["exported_program"]
+
+        # Initialize with empty dictionaries
+        self._example_inputs_dict = {}
+
+        if calibration_config is not None:
+            self._example_inputs_dict = calibration_config.get("example_inputs", {})
+
+        # Process inputs
+        for method_name, exported_program in self._exported_program_dict.items():
+            # Check if method_name exists in example_inputs and has at least one element
+            if (
+                method_name not in self._example_inputs_dict
+                or not self._example_inputs_dict[method_name]
+            ):
+                raise ValueError(
+                    f"Example inputs for method {method_name} not found or empty."
+                )
+
+            # Get the module from the exported program
+            model = exported_program.module()
+
+            # Prepare the model for quantization
+            prepared_model = prepare_pt2e(model, self._quantizer)  # type: ignore
+
+            # Allow the model to switch between train and eval modes
+            allow_exported_model_train_eval(prepared_model)
+
+            # Calibrate the model with the provided calibration data
+            for calibration_input in self._example_inputs_dict[method_name]:  # type: ignore
+                prepared_model(*calibration_input)
+
+            # Convert the prepared model to a quantized model
+            quantized_model = convert_pt2e(prepared_model)
+            self._quantized_models[method_name] = quantized_model  # type: ignore
+
+    def get_artifacts(self) -> Dict[str, nn.Module]:
+        """
+        Returns the quantized models.
+
+        Returns:
+            Dictionary mapping method names to quantized models
+        """
+        return self._quantized_models
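`QuantizeStage` is the standard pt2e flow: `prepare_pt2e`, calibrate with every example-input tuple, then `convert_pt2e`. Enabling it only takes a recipe that carries a `Quantizer`. A sketch assuming the XNNPACK quantizer bundled with PyTorch (in some releases it lives under `executorch.backends.xnnpack.quantizer` instead); `QuantizationRecipe` comes from `export/recipe.py` later in this diff, and `Echo` is the toy module from the earlier sketch:

```python
import torch
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)

from executorch.export import export, ExportRecipe
from executorch.export.recipe import QuantizationRecipe

quantizer = XNNPACKQuantizer()
quantizer.set_global(get_symmetric_quantization_config())

recipe = ExportRecipe(
    quantization_recipe=QuantizationRecipe(quantizer=quantizer),
)

# Every tuple in example_inputs doubles as calibration data for the
# prepared model before convert_pt2e runs.
session = export(
    model=Echo(),
    example_inputs=[(torch.randn(2, 8),) for _ in range(4)],
    export_recipe=recipe,
)
```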
+
+
+@experimental(
+    "This API and all of its related functionality such as ExportSession and ExportRecipe are experimental."
+)
+def export(
+    model: Union[nn.Module, Dict[str, nn.Module]],
+    example_inputs: Union[
+        List[Tuple[torch.Tensor, ...]], Dict[str, List[Tuple[torch.Tensor, ...]]]
+    ],
+    export_recipe: ExportRecipe,
+    name: Optional[str] = None,
+    dynamic_shapes: Optional[Union[Any, Dict[str, Any]]] = None,
+    constant_methods: Optional[Dict[str, Callable]] = None,
+    artifact_dir: Optional[str] = None,
+) -> "ExportSession":
+    """
+    Create and configure an ExportSession with the given parameters.
+
+    This function provides a convenient way to create an ExportSession and
+    run the export process in one step.
+
+    Args:
+        model: The PyTorch model(s) to export, either a single model or a dictionary
+            mapping method names to models
+        example_inputs: Example inputs for the model(s), either a list of input tuples
+            or a dictionary mapping method names to lists of input tuples
+        export_recipe: Contains the configuration for the export process
+        name: Optional name for the export
+        dynamic_shapes: Optional dynamic shape specifications
+        constant_methods: Optional dictionary of constant methods
+        artifact_dir: Optional directory to store artifacts
+
+    Returns:
+        A configured ExportSession instance with the export process completed
+    """
+    session = ExportSession(
+        model=model,
+        example_inputs=example_inputs,
+        export_recipe=export_recipe,
+        name=name,
+        dynamic_shapes=dynamic_shapes,
+        constant_methods=constant_methods,
+        artifact_dir=artifact_dir,
+    )
+    session.export()
+
+    return session
+
+
+@experimental(
+    "This API and all of its related functionality such as ExportSession and ExportRecipe are experimental."
+)
+class ExportSession:
+    """
+    Manages the export process for ExecuTorch models.
+
+    This class handles the export process through a pipeline of stages:
+    1. (Optional) SourceTransform - Apply source transformations to the model
+    2. Export - Export PyTorch model to ExportedProgram
+    3. (Optional) Quantize - Apply post-training quantization to the model
+    4. EdgeTransformAndLower - Transform and lower to EdgeProgramManager
+    5. Executorch - Convert to ExecutorchProgramManager for final execution
+    """
+
+    def __init__(
+        self,
+        model: Union[nn.Module, Dict[str, nn.Module]],
+        example_inputs: Union[
+            List[Tuple[torch.Tensor, ...]], Dict[str, List[Tuple[torch.Tensor, ...]]]
+        ],
+        export_recipe: ExportRecipe,
+        name: Optional[str] = None,
+        dynamic_shapes: Optional[Union[Any, Dict[str, Any]]] = None,
+        constant_methods: Optional[Dict[str, Callable]] = None,
+        artifact_dir: Optional[str] = None,
+    ) -> None:
+        """
+        Initialize the ExportSession with model, inputs, and recipe.
+
+        Args:
+            model: The PyTorch model(s) to export, either a single model or a dictionary
+                mapping method names to models
+            example_inputs: Example inputs for the model(s), either a list of input tuples
+                or a dictionary mapping method names to lists of input tuples
+            export_recipe: Contains the configuration for the export process
+            name: Optional name for the export
+            dynamic_shapes: Optional dynamic shape specifications
+            constant_methods: Optional dictionary of constant methods
+            artifact_dir: Optional directory to store artifacts
+        """
+        # Standardize model to dictionary format
+        self._model = model if isinstance(model, dict) else {"forward": model}
+
+        # Standardize example_inputs to dictionary format
+        self._example_inputs = (
+            example_inputs
+            if isinstance(example_inputs, dict)
+            else {"forward": example_inputs}
+        )
+
+        # Standardize dynamic_shapes to dictionary format
+        self._dynamic_shapes = {}
+        if dynamic_shapes is not None:
+            if isinstance(dynamic_shapes, dict):
+                self._dynamic_shapes = dynamic_shapes
+            else:
+                self._dynamic_shapes = {"forward": dynamic_shapes}
+
+        self._name = name
+        self._constant_methods = constant_methods
+        self._artifact_dir = artifact_dir
+        self._export_recipe = export_recipe
+
+        # Initialize pipeline as a list of stages
+        self._pipeline: List[Stage] = []
+
+        # Create the source transform stage if a quantization recipe is provided
+        if self._export_recipe.quantization_recipe is not None:
+            source_transform_stage = SourceTransformStage(
+                quantization_recipe=self._export_recipe.quantization_recipe
+            )
+            self._pipeline.append(source_transform_stage)
+
+        # Create the export stage
+        export_stage = ExportStage(
+            pre_edge_transform_passes=self._export_recipe.pre_edge_transform_passes
+        )
+        self._pipeline.append(export_stage)
+
+        # Create the quantize stage if a quantizer is provided
+        if self._export_recipe.quantization_recipe is not None:
+            quantizer = self._export_recipe.quantization_recipe.get_quantizer()
+            if quantizer is not None:
+                quantize_stage = QuantizeStage(quantizer=quantizer)
+                self._pipeline.append(quantize_stage)
+
+        # Create the edge transform and lower stage
+        edge_transform_and_lower_stage = EdgeTransformAndLowerStage(
+            partitioners=self._export_recipe.partitioners,
+            transform_passes=self._export_recipe.edge_transform_passes,
+            compile_config=self._export_recipe.edge_compile_config,
+        )
+        self._pipeline.append(edge_transform_and_lower_stage)
+
+        # Create the executorch stage
+        executorch_stage = ExecutorchStage(
+            backend_config=self._export_recipe.executorch_backend_config
+        )
+        self._pipeline.append(executorch_stage)
+
+        # Initialize stage artifacts
+        self._exported_models: Dict[str, nn.Module] = {}
+        self._exported_program: Dict[str, ExportedProgram] = {}
+        self._edge_program_manager: Optional[EdgeProgramManager] = None
+        self._executorch_program_manager: Optional[ExecutorchProgramManager] = None
+        self._delegation_info = None
+ """ + # Process each stage in the pipeline + for stage in self._pipeline: + stage_name = stage.name + # Configure inputs for the current stage + if stage_name == "source_transform": + # Run the source transform stage + stage.run(self._model, {}) + self._model = stage.get_artifacts() + elif stage_name == "quantize": + # Run the quantize stage + exported_program_data = {"exported_program": self._exported_program} + config_params = {"example_inputs": self._example_inputs} + stage.run(exported_program_data, config_params) + self._model = stage.get_artifacts() + elif stage_name == "export": + # Run the export stage + models = {"model": self._model} + config_params = { + "example_inputs": self._example_inputs, + "dynamic_shapes": self._dynamic_shapes, + } + stage.run(models, config_params) + self._exported_program = stage.get_artifacts() + elif stage_name == "edge_transform_and_lower": + # Run the edge transform and lower stage + stage.run( + self._exported_program, {"constant_methods": self._constant_methods} + ) + self._edge_program_manager = stage.get_artifacts() + self._delegation_info = stage.delegation_info + elif stage_name == "executorch": + # Run the executorch stage + stage.run(self._edge_program_manager, {}) + self._executorch_program_manager = stage.get_artifacts() + + def export(self) -> None: + """ + Execute the full export process. + + This method orchestrates the export process with optional quantization: + 1. (Optional) Apply quantization to the model + 2. Export the PyTorch model to ExportedProgram + 3. Transform and lower to EdgeProgramManager + 4. Convert to ExecutorchProgramManager + """ + # Run the pipeline from the beginning + self._run_pipeline() + + def save_pte_file(self, path: str) -> None: + """ + Save the exported program to a PTE file. + + Args: + path: Path where the PTE file will be saved + + Raises: + RuntimeError: If the executorch program manager is not initialized + """ + if self._executorch_program_manager is None: + raise RuntimeError( + "Executorch program manager is not initialized. Run export() first." + ) + self._executorch_program_manager.save(path) + + def get_executorch_program(self) -> Program: + """ + Get the ExecutorchProgram from the ExecutorchProgramManager. + + Returns: + The ExecutorchProgram + + Raises: + RuntimeError: If the executorch program manager is not initialized + """ + if self._executorch_program_manager is None: + raise RuntimeError( + "Executorch program manager is not initialized. Run export() first." + ) + return self._executorch_program_manager.executorch_program + + def get_pte_buffer(self) -> bytes: + """ + Get the PTE buffer as bytes. + + Returns: + The PTE buffer as bytes + + Raises: + RuntimeError: If the executorch program manager is not initialized + """ + if self._executorch_program_manager is None: + raise RuntimeError( + "Executorch program manager is not initialized. Run export() first." + ) + return self._executorch_program_manager.buffer + + def get_example_input( + self, method_name: str = "forward" + ) -> Tuple[torch.Tensor, ...]: + """ + Get the example input for a specific method. 
+
+    def run_method(
+        self,
+        method_name: str = "forward",
+        example_inputs: Optional[Tuple[torch.Tensor, ...]] = None,
+    ) -> Sequence[Any]:
+        """
+        Run a specific method with the given inputs.
+
+        Args:
+            method_name: Name of the method to run, defaults to "forward"
+            example_inputs: Optional inputs to use, defaults to the example inputs
+
+        Returns:
+            The outputs of the method execution
+
+        Raises:
+            RuntimeError: If the method cannot be loaded
+        """
+        et_runtime = Runtime.get()
+        program = et_runtime.load_program(
+            self.get_pte_buffer(), verification=Verification.Minimal
+        )
+        method = program.load_method(method_name)
+
+        if method is None:
+            raise RuntimeError(
+                f"Failed to load method '{method_name}' from the program"
+            )
+        if example_inputs is None:
+            example_inputs = self.get_example_input(method_name)
+
+        return method.execute(example_inputs)
+
+    def print_delegation_info(self) -> None:
+        """
+        Print delegation information for the exported program.
+
+        Raises:
+            RuntimeError: If delegation info is not available
+        """
+        if self._delegation_info is None:
+            raise RuntimeError(
+                "Delegation info is not available. Run export() first."
+            )
+        print(self._delegation_info.get_summary())
+        df = self._delegation_info.get_operator_delegation_dataframe()
+        print(tabulate(df, headers="keys", tablefmt="fancy_grid"))
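`run_method` closes the loop by loading the freshly serialized program through the Python `Runtime` bindings and executing it, which makes eager-vs-ExecuTorch parity checks one-liners. A sketch, again continuing the `session` and `Echo` examples from above:

```python
import torch

# Round-trip the program through the ExecuTorch runtime and compare
# against eager execution (Echo is parameter-free, so a fresh instance
# computes the same function as the exported one).
et_out = session.run_method("forward")[0]
eager_out = Echo()(*session.get_example_input("forward"))
torch.testing.assert_close(et_out, eager_out)
```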
diff --git a/export/recipe.py b/export/recipe.py
new file mode 100644
index 00000000000..5a6b1330368
--- /dev/null
+++ b/export/recipe.py
@@ -0,0 +1,104 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Export recipe definitions for ExecuTorch.
+
+This module provides the data structures needed to configure the export process
+for ExecuTorch models, including export configurations and quantization recipes.
+"""
+
+from dataclasses import dataclass
+from enum import Enum
+from typing import Callable, List, Optional, Sequence
+
+from executorch.exir._warnings import experimental
+
+from executorch.exir.backend.partitioner import Partitioner
+from executorch.exir.capture import EdgeCompileConfig, ExecutorchBackendConfig
+from executorch.exir.pass_manager import PassType
+from torch.ao.quantization.quantizer import Quantizer
+from torch.export import ExportedProgram
+from torchao.core.config import AOBaseConfig
+
+
+class Mode(str, Enum):
+    """
+    Export mode enumeration.
+
+    Attributes:
+        DEBUG: Debug mode with additional checks and information
+        RELEASE: Release mode optimized for performance
+    """
+
+    DEBUG = "debug"
+    RELEASE = "release"
+
+
+@dataclass
+class QuantizationRecipe:
+    """
+    Configuration recipe for quantization.
+
+    This class holds the configuration parameters for quantizing a model.
+
+    Attributes:
+        quantizer: Optional quantizer for PT2E post-training quantization
+        ao_base_config: Optional list of torchao configs applied to the model
+            as source transformations via quantize_
+    """
+
+    quantizer: Optional[Quantizer] = None
+    ao_base_config: Optional[List[AOBaseConfig]] = None
+
+    def get_quantizer(self) -> Optional[Quantizer]:
+        """
+        Get the quantizer associated with this recipe.
+
+        Returns:
+            The quantizer if one is set, otherwise None
+        """
+        return self.quantizer
+
+
+@experimental(
+    "This API and all of its related functionality such as ExportSession and ExportRecipe are experimental."
+)
+@dataclass
+class ExportRecipe:
+    """
+    Configuration recipe for the export process.
+
+    This class holds the configuration parameters for exporting a model,
+    including compilation and transformation options.
+
+    Attributes:
+        name: Optional name for the recipe
+        quantization_recipe: Optional quantization recipe for model quantization
+        edge_compile_config: Optional edge compilation configuration
+        pre_edge_transform_passes: Optional function to apply transformation passes
+            before edge lowering
+        edge_transform_passes: Optional sequence of transformation passes to apply
+            during edge lowering
+        transform_check_ir_validity: Whether to check IR validity during transformation
+        partitioners: Optional list of partitioners for model partitioning
+        executorch_backend_config: Optional backend configuration for ExecuTorch
+        mode: Export mode (debug or release)
+    """
+
+    name: Optional[str] = None
+    quantization_recipe: Optional[QuantizationRecipe] = None
+    edge_compile_config: Optional[EdgeCompileConfig] = (
+        None  # pyre-ignore[11]: Type not defined
+    )
+    pre_edge_transform_passes: Optional[
+        Callable[[ExportedProgram], ExportedProgram]
+    ] = None
+    edge_transform_passes: Optional[Sequence[PassType]] = None
+    transform_check_ir_validity: bool = True
+    partitioners: Optional[List[Partitioner]] = None
+    executorch_backend_config: Optional[ExecutorchBackendConfig] = (
+        None  # pyre-ignore[11]: Type not defined
+    )
+    mode: Mode = Mode.RELEASE
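Most `ExportRecipe` fields pass straight through to the corresponding `exir` APIs (`to_edge_transform_and_lower`, `to_executorch`). A sketch of a backend-targeted recipe using XNNPACK; the import paths match the current ExecuTorch tree, but treat them as assumptions if your checkout differs:

```python
from executorch.backends.xnnpack.partition.xnnpack_partitioner import (
    XnnpackPartitioner,
)
from executorch.exir import EdgeCompileConfig
from executorch.export import ExportRecipe

recipe = ExportRecipe(
    name="xnnpack_fp32",
    # Partitioners decide which subgraphs get delegated to the backend.
    partitioners=[XnnpackPartitioner()],
    edge_compile_config=EdgeCompileConfig(_check_ir_validity=False),
)
```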
diff --git a/export/tests/TARGETS b/export/tests/TARGETS
new file mode 100644
index 00000000000..93556cb03dd
--- /dev/null
+++ b/export/tests/TARGETS
@@ -0,0 +1,16 @@
+load("@fbcode_macros//build_defs:python_unittest.bzl", "python_unittest")
+
+oncall("executorch")
+
+python_unittest(
+    name = "executorch_export",
+    srcs = [
+        "test_executorch_export.py",
+    ],
+    deps = [
+        "//executorch/exir:lib",
+        "//executorch/export:lib",
+        "//executorch/devtools/backend_debug:delegation_info",
+        "//executorch/runtime:runtime",
+    ]
+)
diff --git a/export/tests/test_executorch_export.py b/export/tests/test_executorch_export.py
new file mode 100644
index 00000000000..6d9909ed791
--- /dev/null
+++ b/export/tests/test_executorch_export.py
@@ -0,0 +1,34 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-strict
+
+import unittest
+
+import torch
+from executorch.export import export, ExportRecipe
+
+
+class TestExecutorchExport(unittest.TestCase):
+    def test_basic_recipe(self) -> None:
+        class SimpleModel(torch.nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.linear = torch.nn.Linear(10, 10)
+
+            def forward(self, x):
+                return self.linear(x)
+
+        model = SimpleModel()
+        example_inputs = [(torch.rand(1, 10),)]
+        export_recipe = ExportRecipe()
+
+        # Use the export API instead of creating ExportSession directly
+        export_session = export(
+            model=model, example_inputs=example_inputs, export_recipe=export_recipe
+        )
+
+        self.assertGreater(len(export_session.get_pte_buffer()), 0)
diff --git a/runtime/TARGETS b/runtime/TARGETS
index c341c042d03..7448523f5ff 100644
--- a/runtime/TARGETS
+++ b/runtime/TARGETS
@@ -9,8 +9,7 @@ runtime.python_library(
         "//executorch/extension/pybindings:portable_lib",
     ],
     visibility = [
-        "//executorch/runtime/...",
-        "//executorch/exir/emit/test/...",
+        "//executorch/...",
         "@EXECUTORCH_CLIENTS",
     ],
 )