diff --git a/backends/apple/coreml/compiler/torch_ops.py b/backends/apple/coreml/compiler/torch_ops.py
index 81306c9a2fd..e53670951e0 100644
--- a/backends/apple/coreml/compiler/torch_ops.py
+++ b/backends/apple/coreml/compiler/torch_ops.py
@@ -47,24 +47,48 @@ def split_copy(context, node):
     split(context, node)
 
 
-@register_torch_op(
-    torch_alias=[
-        "dim_order_ops::_to_dim_order_copy",
-        "dim_order_ops._to_dim_order_copy",
-    ],
-    override=False,
-)
-def _to_dim_order_copy(context, node):
-    dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
-    node.kwinputs.pop("dim_order")
+def is_fbcode():
+    return not hasattr(_torch.version, "git_version")
 
-    # In CoreML, dim_order.val will be an ndarray, so we convert it to a list
-    dim_order = [int(d) for d in dim_order.val]
-    memory_format = get_memory_format(dim_order)
-    assert (
-        memory_format == _torch.contiguous_format
-    ), "Only contiguous memory format is supported in CoreML"
-    to(context, node)
+
+if not is_fbcode():
+    from coremltools.converters.mil.frontend.torch.dim_order_ops import (
+        _empty_dim_order,
+        _to_dim_order_copy,
+    )
+
+    # This is a temporary hack to register the alias "dim_order_ops._to_dim_order_copy",
+    # which was missed by coremltools
+    @register_torch_op(torch_alias=["dim_order_ops._to_dim_order_copy"], override=False)
+    def _to_dim_order_copy_TMP_EXECUTORCH_ALIAS_HACK(context, node):
+        _to_dim_order_copy(context, node)
+
+    # This is a temporary hack to register the alias "dim_order_ops._empty_dim_order",
+    # which was missed by coremltools
+    @register_torch_op(torch_alias=["dim_order_ops._empty_dim_order"], override=False)
+    def _empty_dim_order_TMP_EXECUTORCH_ALIAS_HACK(context, node):
+        _empty_dim_order(context, node)
+
+else:
+    # TODO: remove this case when fbcode updates to coremltools 9.0
+    @register_torch_op(
+        torch_alias=[
+            "dim_order_ops::_to_dim_order_copy",
+            "dim_order_ops._to_dim_order_copy",
+        ],
+        override=False,
+    )
+    def _to_dim_order_copy(context, node):
+        dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
+        node.kwinputs.pop("dim_order")
+
+        # In CoreML, dim_order.val will be an ndarray, so we convert it to a list
+        dim_order = [int(d) for d in dim_order.val]
+        memory_format = get_memory_format(dim_order)
+        assert (
+            memory_format == _torch.contiguous_format
+        ), "Only contiguous memory format is supported in CoreML"
+        to(context, node)
 
 
 # https://github.com/apple/coremltools/pull/2558
diff --git a/backends/apple/coreml/scripts/install_requirements.sh b/backends/apple/coreml/scripts/install_requirements.sh
index e9f73105bcd..5ec1ea6a1de 100755
--- a/backends/apple/coreml/scripts/install_requirements.sh
+++ b/backends/apple/coreml/scripts/install_requirements.sh
@@ -12,7 +12,7 @@ SCRIPT_DIR_PATH="$(
 
 # TODO(jathu): remove the need to fetch coremltools to build deps for coreml_executor_runner.
 # Keep this version in sync with: pyproject.toml
-COREMLTOOLS_VERSION="8.3"
+COREMLTOOLS_VERSION="9.0b1"
 
 red=`tput setaf 1`
 green=`tput setaf 2`
diff --git a/pyproject.toml b/pyproject.toml
index 61448a849cf..1ab7ddcc150 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -70,7 +70,7 @@ dependencies=[
     # See also third-party/TARGETS for buck's typing-extensions version.
     "typing-extensions>=4.10.0",
     # Keep this version in sync with: ./backends/apple/coreml/scripts/install_requirements.sh
-    "coremltools==8.3; platform_system == 'Darwin' or platform_system == 'Linux'",
+    "coremltools==9.0b1; platform_system == 'Darwin' or platform_system == 'Linux'",
     # scikit-learn is used to support palettization in the coreml backend
     "scikit-learn==1.7.1",
     "hydra-core>=1.3.0",