Skip to content

Commit 326d046

Browse files
committed
Finishing up changes
1 parent a20fc5f commit 326d046

File tree

3 files changed

+130
-66
lines changed

3 files changed

+130
-66
lines changed

codegen/tools/gen_oplist.py

Lines changed: 44 additions & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
import os
1010
import sys
1111
from enum import IntEnum
12-
from typing import Any, Dict, List, Optional, Set
12+
from typing import Any, Dict, List, Optional
1313

1414
import yaml
1515

@@ -21,6 +21,14 @@
2121
from ..parse import strip_et_fields
2222

2323

24+
from executorch.exir._serialize import _deserialize_pte_binary
25+
from executorch.exir.schema import (
26+
EValue,
27+
KernelCall,
28+
OptionalTensorList,
29+
Tensor,
30+
TensorList,
31+
)
2432
from torchgen.gen import LineLoader, parse_native_yaml_struct
2533
from torchgen.selective_build.operator import SelectiveBuildOperator
2634
from torchgen.selective_build.selector import merge_et_kernel_metadata
@@ -89,83 +97,53 @@ def _get_operators(model_file: str) -> List[str]:
8997
print("Processing model file: ", model_file)
9098
with open(model_file, "rb") as f:
9199
buf = f.read()
92-
try:
93-
from executorch.codegen.tools.selective_build import ( # type: ignore[import-not-found]
94-
_get_program_from_buffer,
95-
_get_program_operators,
96-
)
97100

98-
program = _get_program_from_buffer(buf)
99-
operators = _get_program_operators(program)
100-
print(f"Model file loaded, operators are: {operators}")
101-
return operators
102-
except ModuleNotFoundError:
103-
from executorch.exir._serialize import _deserialize_pte_binary
104-
model = _deserialize_pte_binary(buf)
105-
operators = set()
106-
for execPlan in model.execution_plan:
107-
for op in execPlan.operators:
108-
operators.add(op.name)
109-
print(f"Model file loaded, operators are: {operators}")
110-
return operators
101+
model = _deserialize_pte_binary(buf)
102+
operators = []
103+
for execution_plan in model.execution_plan:
104+
for op in execution_plan.operators:
105+
operators.append(op.name)
106+
print(f"Model file loaded, operators are: {operators}")
107+
return operators
108+
109+
110+
def _get_dtypes_from_non_list(evalue: EValue):
111+
kernel_key = ""
112+
if isinstance(evalue, Tensor):
113+
dim_order = ",".join(map(str, evalue.dim_order))
114+
kernel_key += f"{evalue.scalar_type};{dim_order}|"
115+
return kernel_key
111116

112117

113118
def _get_kernel_metadata_for_model(model_file: str) -> Dict[str, List[str]]:
114119
with open(model_file, "rb") as f:
115120
buf = f.read()
116121
op_kernel_key_list: Dict[str, List[str]] = {}
117122

118-
try:
119-
from executorch.codegen.tools.selective_build import ( # type: ignore[import-not-found]
120-
_get_io_metadata_for_program_operators,
121-
_get_program_from_buffer,
122-
_IOMetaData,
123-
)
124-
125-
program = _get_program_from_buffer(buf)
126-
operators_with_io_metadata = _get_io_metadata_for_program_operators(program)
127-
128-
specialized_kernels: Set[List[_IOMetaData]]
129-
for op_name, specialized_kernels in operators_with_io_metadata.items():
130-
print(op_name)
131-
if op_name not in op_kernel_key_list:
132-
op_kernel_key_list[op_name] = []
133-
134-
for specialized_kernel in specialized_kernels:
123+
model = _deserialize_pte_binary(buf)
124+
for execution_plan in model.execution_plan:
125+
for chain in execution_plan.chains:
126+
for instr in chain.instructions:
127+
if not isinstance(instr.instr_args, KernelCall):
128+
continue
129+
op_name = execution_plan.operators[instr.instr_args.op_index].name
130+
if op_name not in op_kernel_key_list:
131+
op_kernel_key_list[op_name] = []
135132
version = "v1"
136133
kernel_key = version + "/"
137-
for io_metadata in specialized_kernel:
138-
if io_metadata.kernel_type in [
139-
KernelType.TENSOR,
140-
KernelType.TENSOR_LIST,
141-
KernelType.OPTIONAL_TENSOR_LIST,
142-
]:
143-
dim_order = ",".join(map(str, io_metadata.dim_order))
144-
kernel_key += f"{io_metadata.dtype};{dim_order}|"
145-
op_kernel_key_list[op_name].append(kernel_key[:-1])
134+
for tensor_arg in instr.instr_args.args:
135+
val = execution_plan.values[tensor_arg].val
146136

147-
except ModuleNotFoundError:
148-
from executorch.exir._serialize import _deserialize_pte_binary
149-
model = _deserialize_pte_binary(buf)
150-
for execPlan in model.execution_plan:
151-
for chain in execPlan.chains:
152-
for instr in chain.instructions:
153-
op_name = execPlan.operators[instr.instr_args.op_index].name
154-
if op_name not in op_kernel_key_list:
155-
op_kernel_key_list[op_name] = []
156-
version = "v1"
157-
kernel_key = version + "/"
158-
# TODO what happens when tensors have different types within a single kernel / is that even allowed?
159-
for tensor_arg in instr.instr_args.args:
160-
val = execPlan.values[tensor_arg].val
161-
162-
# TODO is there a better way to do this?
163-
if type(val).__name__ == "Tensor":
164-
dim_order = ",".join(map(str,val.dim_order))
165-
kernel_key += f"{val.scalar_type};{dim_order}|"
166-
op_kernel_key_list[op_name].append(kernel_key[:-1])
167-
return op_kernel_key_list
137+
if isinstance(val, TensorList) or isinstance(
138+
val, OptionalTensorList
139+
):
140+
for tensor in val.items:
141+
tval = execution_plan.values[tensor].val
142+
kernel_key += _get_dtypes_from_non_list(tval) # type: ignore[arg-type]
168143

144+
kernel_key += _get_dtypes_from_non_list(val) # type: ignore[arg-type]
145+
op_kernel_key_list[op_name].append(kernel_key[:-1])
146+
return op_kernel_key_list
169147

170148

171149
def _get_et_kernel_metadata_from_ops_yaml(ops_yaml_path: str) -> Dict[str, List[str]]:

pytest.ini

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,7 @@ addopts =
5858
runtime
5959
# Tools
6060
codegen/test
61+
codegen/tools/test
6162
tools/cmake
6263
# test TODO: fix these tests
6364
# test/end2end/test_end2end.py

tools/cmake/Codegen.cmake

Lines changed: 85 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,14 +12,22 @@
1212
include(${EXECUTORCH_ROOT}/tools/cmake/Utils.cmake)
1313

1414
function(gen_selected_ops)
15+
<<<<<<< Updated upstream
1516
set(arg_names LIB_NAME OPS_SCHEMA_YAML ROOT_OPS INCLUDE_ALL_OPS)
17+
=======
18+
set(arg_names LIB_NAME OPS_SCHEMA_YAML ROOT_OPS INCLUDE_ALL_OPS DTYPE_SELECTIVE_BUILD)
19+
>>>>>>> Stashed changes
1620
cmake_parse_arguments(GEN "" "" "${arg_names}" ${ARGN})
1721

1822
message(STATUS "Generating operator lib:")
1923
message(STATUS " LIB_NAME: ${GEN_LIB_NAME}")
2024
message(STATUS " OPS_SCHEMA_YAML: ${GEN_OPS_SCHEMA_YAML}")
2125
message(STATUS " ROOT_OPS: ${GEN_ROOT_OPS}")
2226
message(STATUS " INCLUDE_ALL_OPS: ${GEN_INCLUDE_ALL_OPS}")
27+
<<<<<<< Updated upstream
28+
=======
29+
message(STATUS " DTYPE_SELECTIVE_BUILD: ${GEN_DTYPE_SELECTIVE_BUILD}")
30+
>>>>>>> Stashed changes
2331

2432
set(_oplist_yaml
2533
${CMAKE_CURRENT_BINARY_DIR}/${GEN_LIB_NAME}/selected_operators.yaml
@@ -53,6 +61,26 @@ function(gen_selected_ops)
5361
WORKING_DIRECTORY ${EXECUTORCH_ROOT}
5462
)
5563

64+
<<<<<<< Updated upstream
65+
=======
66+
if(GEN_DTYPE_SELECTIVE_BUILD)
67+
set(_opvariant_h
68+
${CMAKE_CURRENT_BINARY_DIR}/${GEN_LIB_NAME}/selected_op_variants.h
69+
)
70+
set(_gen_opvariant_command "${PYTHON_EXECUTABLE}" -m codegen.tools.gen_selected_op_variants
71+
--yaml-file=${_oplist_yaml}
72+
--output-dir=${CMAKE_CURRENT_BINARY_DIR}/${GEN_LIB_NAME}/
73+
)
74+
message("Command - ${_gen_opvariant_command}")
75+
add_custom_command(
76+
COMMENT "Generating selected_op_variants.h for ${GEN_LIB_NAME}"
77+
OUTPUT ${_opvariant_h}
78+
COMMAND ${_gen_opvariant_command}
79+
DEPENDS ${_oplist_yaml} ${_codegen_tools_srcs}
80+
WORKING_DIRECTORY ${EXECUTORCH_ROOT}
81+
)
82+
endif()
83+
>>>>>>> Stashed changes
5684
endfunction()
5785

5886
# Codegen for registering kernels. Kernels are defined in functions_yaml and
@@ -62,14 +90,22 @@ endfunction()
6290
# functions_yaml CUSTOM_OPS_YAML custom_ops_yaml )
6391
function(generate_bindings_for_kernels)
6492
set(options ADD_EXCEPTION_BOUNDARY)
93+
<<<<<<< Updated upstream
6594
set(arg_names LIB_NAME FUNCTIONS_YAML CUSTOM_OPS_YAML)
95+
=======
96+
set(arg_names LIB_NAME FUNCTIONS_YAML CUSTOM_OPS_YAML DTYPE_SELECTIVE_BUILD)
97+
>>>>>>> Stashed changes
6698
cmake_parse_arguments(GEN "${options}" "${arg_names}" "" ${ARGN})
6799

68100
message(STATUS "Generating kernel bindings:")
69101
message(STATUS " LIB_NAME: ${GEN_LIB_NAME}")
70102
message(STATUS " FUNCTIONS_YAML: ${GEN_FUNCTIONS_YAML}")
71103
message(STATUS " CUSTOM_OPS_YAML: ${GEN_CUSTOM_OPS_YAML}")
72104
message(STATUS " ADD_EXCEPTION_BOUNDARY: ${GEN_ADD_EXCEPTION_BOUNDARY}")
105+
<<<<<<< Updated upstream
106+
=======
107+
message(STATUS " DTYPE_SELECTIVE_BUILD: ${GEN_DTYPE_SELECTIVE_BUILD}")
108+
>>>>>>> Stashed changes
73109

74110
# Command to generate selected_operators.yaml from custom_ops.yaml.
75111
file(GLOB_RECURSE _codegen_templates "${EXECUTORCH_ROOT}/codegen/templates/*")
@@ -78,6 +114,16 @@ function(generate_bindings_for_kernels)
78114
# By default selective build output is selected_operators.yaml
79115
set(_oplist_yaml ${_out_dir}/selected_operators.yaml)
80116

117+
<<<<<<< Updated upstream
118+
=======
119+
# If dtype selective build is enabled, force the header file to be preserved
120+
if(GEN_DTYPE_SELECTIVE_BUILD)
121+
set(_opvariant_h ${_out_dir}/selected_op_variants.h)
122+
else()
123+
set(_opvariant_h "")
124+
endif()
125+
126+
>>>>>>> Stashed changes
81127
# Command to codegen C++ wrappers to register custom ops to both PyTorch and
82128
# Executorch runtime.
83129
execute_process(
@@ -108,6 +154,13 @@ function(generate_bindings_for_kernels)
108154
${_out_dir}/Functions.h ${_out_dir}/NativeFunctions.h
109155
)
110156

157+
<<<<<<< Updated upstream
158+
=======
159+
if(GEN_DTYPE_SELECTIVE_BUILD)
160+
list(APPEND _gen_command_sources ${_out_dir}/selected_op_variants.h)
161+
endif()
162+
163+
>>>>>>> Stashed changes
111164
if(GEN_FUNCTIONS_YAML)
112165
list(APPEND _gen_command --functions-yaml-path=${GEN_FUNCTIONS_YAML})
113166
endif()
@@ -165,29 +218,61 @@ endfunction()
165218

166219
# Generate a runtime lib for registering operators in Executorch
167220
function(gen_operators_lib)
221+
<<<<<<< Updated upstream
168222
set(multi_arg_names LIB_NAME KERNEL_LIBS DEPS)
223+
=======
224+
set(multi_arg_names LIB_NAME KERNEL_LIBS DEPS DTYPE_SELECTIVE_BUILD)
225+
>>>>>>> Stashed changes
169226
cmake_parse_arguments(GEN "" "" "${multi_arg_names}" ${ARGN})
170227

171228
message(STATUS "Generating operator lib:")
172229
message(STATUS " LIB_NAME: ${GEN_LIB_NAME}")
173230
message(STATUS " KERNEL_LIBS: ${GEN_KERNEL_LIBS}")
174231
message(STATUS " DEPS: ${GEN_DEPS}")
232+
<<<<<<< Updated upstream
233+
=======
234+
message(STATUS " DTYPE_SELECTIVE_BUILD: ${GEN_DTYPE_SELECTIVE_BUILD}")
235+
>>>>>>> Stashed changes
175236

176237
set(_out_dir ${CMAKE_CURRENT_BINARY_DIR}/${GEN_LIB_NAME})
177238

178239
add_library(${GEN_LIB_NAME})
240+
<<<<<<< Updated upstream
179241
target_sources(
180242
${GEN_LIB_NAME}
181243
PRIVATE ${_out_dir}/RegisterCodegenUnboxedKernelsEverything.cpp
182244
${_out_dir}/Functions.h ${_out_dir}/NativeFunctions.h
183245
)
246+
=======
247+
if(GEN_DTYPE_SELECTIVE_BUILD)
248+
target_sources(
249+
${GEN_LIB_NAME}
250+
PRIVATE ${_out_dir}/RegisterCodegenUnboxedKernelsEverything.cpp
251+
${_out_dir}/Functions.h ${_out_dir}/NativeFunctions.h
252+
${_out_dir}/selected_op_variants.h
253+
)
254+
else()
255+
target_sources(
256+
${GEN_LIB_NAME}
257+
PRIVATE ${_out_dir}/RegisterCodegenUnboxedKernelsEverything.cpp
258+
${_out_dir}/Functions.h ${_out_dir}/NativeFunctions.h
259+
)
260+
endif()
261+
262+
>>>>>>> Stashed changes
184263
target_link_libraries(${GEN_LIB_NAME} PRIVATE ${GEN_DEPS})
185264
if(GEN_KERNEL_LIBS)
186265
target_link_libraries(${GEN_LIB_NAME} PUBLIC ${GEN_KERNEL_LIBS})
187266
endif()
188267

189268
target_link_options_shared_lib(${GEN_LIB_NAME})
190269
set(_generated_headers ${_out_dir}/Functions.h ${_out_dir}/NativeFunctions.h)
270+
<<<<<<< Updated upstream
271+
=======
272+
if(GEN_DTYPE_SELECTIVE_BUILD)
273+
list(APPEND _generated_headers ${_out_dir}/selected_op_variants.h)
274+
endif()
275+
>>>>>>> Stashed changes
191276
set_target_properties(
192277
${GEN_LIB_NAME} PROPERTIES PUBLIC_HEADER "${_generated_headers}"
193278
)

0 commit comments

Comments
 (0)