 load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "get_default_executorch_platforms", "is_xplat", "runtime", "struct_to_json")
 load("@fbsource//xplat/executorch/build:selects.bzl", "selects")
-load("@fbsource//xplat/executorch/kernels/portable:op_registration_util.bzl", "portable_header_list", "portable_source_list")
-load("@fbsource//xplat/executorch/kernels/optimized:op_registration_util.bzl", "optimized_header_list", "optimized_source_list")
+load("@fbsource//xplat/executorch/kernels/portable:op_registration_util.bzl", "portable_source_list")
+load("@fbsource//xplat/executorch/kernels/optimized:op_registration_util.bzl", "optimized_source_list")
 load(
     "@fbsource//xplat/executorch/kernels/optimized:lib_defs.bzl",
     "get_vec_deps",
@@ -407,29 +407,40 @@ def copy_files(genrule_name, target, file_list):
         default_outs = ["."],
     )

+def get_portable_lib_deps():
+    return [
+        "//executorch/kernels/portable/cpu:math_constants",
+        "//executorch/kernels/portable/cpu:scalar_utils",
+        "//executorch/kernels/portable/cpu:vec_ops",
+        "//executorch/kernels/portable/cpu/pattern:all_deps",
+        "//executorch/kernels/portable/cpu/util:all_deps",
+    ]
+
+def get_optimized_lib_deps():
+    return [
+        "//executorch/kernels/optimized/cpu:add_sub_impl",
+        "//executorch/kernels/optimized/cpu:binary_ops",
+        "//executorch/kernels/optimized/cpu:fft_utils",
+        "//executorch/kernels/optimized/cpu:moments_utils",
+        "//executorch/kernels/optimized:libblas",
+        "//executorch/kernels/optimized:libutils",
+        "//executorch/kernels/optimized:libvec",
+        "//executorch/runtime/core/portable_type/c10/c10:aten_headers_for_executorch",
+        "//executorch/runtime/kernel:kernel_includes",
+    ] + get_vec_deps()
+
 def build_portable_header_lib(name, oplist_header_name, feature = None):
     """Build the portable headers into a header-only library.
     Ensures that includes work across portable and optimized libs.
-    #include "executorch/kernels/portable/cpu/<header.h>"
     """
-    # Copy portable header files.
-    portable_header_files = {}
-    genrule_name = name + "_copy_portable_header"
-    copy_files(genrule_name, "//executorch/kernels/portable/cpu:portable_header_files", portable_header_list())
-    for header in portable_header_list():
-        portable_header_files[header] = ":{}[{}]".format(genrule_name, header)
-
-    # Include dtype header.
-    portable_header_files["selected_op_variants.h"] = ":{}[selected_op_variants]".format(oplist_header_name)
-
-    # Build portable headers lib.
     runtime.cxx_library(
         name = name,
         srcs = [],
-        exported_headers = portable_header_files,
+        exported_headers = {
+            "selected_op_variants.h": ":{}[selected_op_variants]".format(oplist_header_name),
+        },
         exported_preprocessor_flags = ["-DEXECUTORCH_SELECTIVE_BUILD_DTYPE"],
-        # header_namespace is only available in xplat. See https://fburl.com/code/we2gvopk
-        header_namespace = "executorch/kernels/portable/cpu",
+        header_namespace = "",
        feature = feature,
     )

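For orientation, a minimal sketch of how the slimmed-down header library above might be instantiated; it mirrors the call sites later in this diff, and the library name and oplist header target are hypothetical placeholders:

```python
# Hypothetical call site; "my_lib" and the oplist header target name are placeholders.
portable_header_lib = "my_lib" + "_portable_header_lib"
build_portable_header_lib(
    name = portable_header_lib,
    # Target whose [selected_op_variants] output provides the dtype header.
    oplist_header_name = "my_lib_dtype_headers",
    feature = None,
)
# The resulting cxx_library now exports only selected_op_variants.h (with an
# empty header_namespace) instead of copies of every portable header.
```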
@@ -454,7 +465,7 @@ def build_portable_lib(name, oplist_header_name, portable_header_lib, feature =
     # library, and it blocks users like unit tests to use kernel
     # implementation directly. So we enable this for xplat only.
     compiler_flags = ["-Wno-missing-prototypes"]
-    if not expose_operator_symbols:
+    if not expose_operator_symbols and is_xplat():
         # Removing '-fvisibility=hidden' exposes operator symbols.
         # This allows operators to be called outside of the kernel registry.
         compiler_flags += ["-fvisibility=hidden"]
@@ -464,9 +475,7 @@ def build_portable_lib(name, oplist_header_name, portable_header_lib, feature =
         name = name,
         srcs = portable_source_files,
         exported_preprocessor_flags = ["-DEXECUTORCH_SELECTIVE_BUILD_DTYPE"],
-        deps = ["//executorch/kernels/portable/cpu/pattern:all_deps", "//executorch/kernels/portable/cpu/util:all_deps"] + [":" + portable_header_lib],
-        # header_namespace is only available in xplat. See https://fburl.com/code/we2gvopk
-        header_namespace = "executorch/kernels/portable/cpu",
+        deps = get_portable_lib_deps() + [":" + portable_header_lib],
         compiler_flags = compiler_flags,
         # WARNING: using a deprecated API to avoid being built into a shared
         # library. In the case of dynamically loading so library we don't want
@@ -492,13 +501,6 @@ def build_optimized_lib(name, oplist_header_name, portable_header_lib, feature =
     for op in optimized_source_list():
         optimized_source_files.append(":{}[{}]".format(source_genrule, op))

-    # Copy optimized header files.
-    optimized_header_files = {}
-    header_genrule = name + "_copy_optimized_header"
-    copy_files(header_genrule, "//executorch/kernels/optimized/cpu:optimized_header_files", optimized_header_list())
-    for header in optimized_header_list():
-        optimized_header_files[header] = ":{}[{}]".format(header_genrule, header)
-
     # For shared library build, we don't want to expose symbols of
     # kernel implementation (ex torch::executor::native::tanh_out)
     # to library users. They should use kernels through registry only.
@@ -508,35 +510,17 @@ def build_optimized_lib(name, oplist_header_name, portable_header_lib, feature =
     # library, and it blocks users like unit tests to use kernel
     # implementation directly. So we enable this for xplat only.
     compiler_flags = ["-Wno-missing-prototypes", "-Wno-pass-failed", "-Wno-global-constructors", "-Wno-shadow"]
-    if not expose_operator_symbols:
+    if not expose_operator_symbols and is_xplat():
         # Removing '-fvisibility=hidden' exposes operator symbols.
         # This allows operators to be called outside of the kernel registry.
         compiler_flags += ["-fvisibility=hidden"]

-    # Set up dependencies.
-    optimized_lib_deps = [
-        "//executorch/kernels/optimized/cpu:add_sub_impl",
-        "//executorch/kernels/optimized/cpu:binary_ops",
-        "//executorch/kernels/optimized/cpu:fft_utils",
-        "//executorch/kernels/optimized/cpu:moments_utils",
-        "//executorch/kernels/optimized:libblas",
-        "//executorch/kernels/optimized:libutils",
-        "//executorch/kernels/optimized:libvec",
-        "//executorch/kernels/portable/cpu/pattern:all_deps",
-        "//executorch/kernels/portable/cpu/util:all_deps",
-        "//executorch/runtime/core/portable_type/c10/c10:aten_headers_for_executorch",
-        "//executorch/runtime/kernel:kernel_includes",
-        ":" + portable_header_lib,
-    ] + get_vec_deps()
-
     # Build optimized lib.
     runtime.cxx_library(
         name = name,
         srcs = optimized_source_files,
         exported_preprocessor_flags = ["-DEXECUTORCH_SELECTIVE_BUILD_DTYPE"],
-        deps = optimized_lib_deps,
-        # header_namespace is only available in xplat. See https://fburl.com/code/we2gvopk
-        header_namespace = "executorch/kernels/optimized/cpu",
+        deps = get_portable_lib_deps() + get_optimized_lib_deps() + [":" + portable_header_lib],
         compiler_flags = compiler_flags,
         preprocessor_flags = get_vec_preprocessor_flags(),
         # sleef needs to be added as a direct dependency of the operator target when building for Android,
@@ -627,21 +611,49 @@ def executorch_generated_lib(
         deps: Additional deps of the main C++ library. Needs to be in either `//executorch` or `//caffe2` module.
         platforms: platforms args to runtime.cxx_library (only used when in xplat)
         manual_registration: if true, generate RegisterKernels.cpp and RegisterKernels.h.
-        use_default_aten_ops_lib: If `aten_mode` is True AND this flag is True, use `torch_mobile_all_ops_et` for ATen operator library.
+        use_default_aten_ops_lib: If `aten_mode` is True AND this flag is True,
+            use `torch_mobile_all_ops_et` for ATen operator library.
         xplat_deps: Additional xplat deps, can be used to provide custom operator library.
         fbcode_deps: Additional fbcode deps, can be used to provide custom operator library.
         compiler_flags: compiler_flags args to runtime.cxx_library
-        dtype_selective_build: In addition to operator selection, dtype selective build further selects the dtypes for each operator. Can be used with model or dict selective build APIs, where dtypes can be specified. Note: this is only available in xplat.
-        feature: Product-Feature Hierarchy (PFH). For internal use only, required for FoA in production. See: https://fburl.com/wiki/2wzjpyqy
-        support_exceptions: enable try/catch wrapper around operator implementations to make sure exceptions thrown will not bring down the process. Disable if your use case disables exceptions in the build.
+        dtype_selective_build: In addition to operator selection, dtype selective build
+            further selects the dtypes for each operator. Can be used with model or dict
+            selective build APIs, where dtypes can be specified.
+        feature: Product-Feature Hierarchy (PFH). For internal use only, required
+            for FoA in production. See: https://fburl.com/wiki/2wzjpyqy
+        expose_operator_symbols: By default, fvisibility=hidden is set for executorch kernel
+            libraries built with dtype selective build. This option removes the compiler
+            flag and allows operators to be called outside of the kernel registry.
+            NOTE: It is not recommended to set this to True, as symbols may clash (duplicate
+            symbols errors) if multiple executorch_generated_libs are included by a parent library.
+        support_exceptions: enable try/catch wrapper around operator implementations
+            to make sure exceptions thrown will not bring down the process. Disable if your
+            use case disables exceptions in the build.
     """
     if functions_yaml_target and aten_mode:
         fail("{} is providing functions_yaml_target in ATen mode, it will be ignored. `native_functions.yaml` will be the source of truth.".format(name))

     if not aten_mode and not functions_yaml_target and not custom_ops_yaml_target:
         fail("At least one of functions_yaml_target, custom_ops_yaml_target needs to be provided")

+    if expose_operator_symbols:
+        if not dtype_selective_build:
+            fail("""
+            expose_operator_symbols is only available in dtype selective build mode.
+            See: https://www.internalfb.com/wiki/PyTorch/Teams/Edge/PyTorch_Edge_Core_Team/Dtype_Selective_Build/""")
+
     if dtype_selective_build:
+        if not expose_operator_symbols and not is_xplat():
+            # TODO(T225169282): make this a fail once internal cases move to xplat.
+            warning("""
+            Dtype selective build with expose_operator_symbols=False works only in xplat -
+            there are undefined symbols otherwise. Please try to use xplat, or talk to the
+            executorch team. Setting expose_operator_symbols=True is not recommended as the
+            exposed symbols may clash (duplicate symbols errors) if multiple
+            executorch_generated_libs are included by a parent library.
+
+            Falling back to operator selective build.""")
+
         if (not "//executorch/kernels/portable:operators" in kernel_deps) and (not "//executorch/kernels/optimized:optimized_operators" in kernel_deps):
             fail("""
             !!WARNING!! Dtype selective build is available for the portable and optimized kernel libraries.
@@ -655,7 +667,7 @@ def executorch_generated_lib(
             If you have a custom kernel library, please remove `dtype_selective_build=True`
             and use regular selective build.
             """.format(kernel_deps))
-
+
         # Dtype selective build requires that the portable/optimized kernel libraries are not passed into `deps`.
         if ("//executorch/kernels/portable:operators" in kernel_deps):
             index = 0
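Putting the new docstring and checks together, a typical dtype-selective invocation looks roughly like the sketch below. Only the kernel library target and the parameter names come from this file; the library name, yaml target, and visibility are placeholders, and any selective-build op-list deps are omitted for brevity:

```python
# Hypothetical BUCK usage; app-specific targets are placeholders.
executorch_generated_lib(
    name = "my_generated_lib",
    functions_yaml_target = "//my/app:functions.yaml",  # placeholder yaml target
    kernel_deps = [
        # Dtype selective build requires the portable and/or optimized kernels
        # to arrive via kernel_deps (not deps), per the checks above.
        "//executorch/kernels/portable:operators",
    ],
    dtype_selective_build = True,
    # expose_operator_symbols is left at its default (False), keeping
    # -fvisibility=hidden on the generated kernel libraries in xplat.
    visibility = ["//my/app/..."],  # placeholder
)
```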
@@ -755,30 +767,28 @@ def executorch_generated_lib(
         platforms = platforms,
     )

-    portable_lib = []
-    optimized_lib = []
-    if dtype_selective_build and is_xplat():
+    if dtype_selective_build:
         # Build portable headers lib. Used for portable and optimized kernel libraries.
         portable_header_lib = name + "_portable_header_lib"
         build_portable_header_lib(portable_header_lib, oplist_header_name, feature)
-
+
         if "//executorch/kernels/portable:operators" in kernel_deps:
             # Remove portable from kernel_deps as we're building it from source.
             kernel_deps.remove("//executorch/kernels/portable:operators")

             # Build portable lib.
             portable_lib_name = name + "_portable_lib"
             build_portable_lib(portable_lib_name, oplist_header_name, portable_header_lib, feature, expose_operator_symbols)
-            portable_lib = [":{}".format(portable_lib_name)]
-
+            kernel_deps.append(":{}".format(portable_lib_name))
+
         if "//executorch/kernels/optimized:optimized_operators" in kernel_deps:
             # Remove optimized from kernel_deps as we're building it from source.
             kernel_deps.remove("//executorch/kernels/optimized:optimized_operators")
-
+
             # Build optimized lib.
             optimized_lib_name = name + "_optimized_lib"
             build_optimized_lib(optimized_lib_name, oplist_header_name, portable_header_lib, feature, expose_operator_symbols)
-            optimized_lib = [":{}".format(optimized_lib_name)]
+            kernel_deps.append(":{}".format(optimized_lib_name))

     # Exports headers that declare the function signatures of the C++ functions
     # that map to entries in `functions.yaml` and `custom_ops.yaml`.
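The net effect of the hunk above is that the from-source kernel libraries now ride along in kernel_deps rather than in the removed portable_lib/optimized_lib locals. A small self-contained illustration, assuming a hypothetical name and a kernel_deps list that starts with the portable operators target:

```python
# Illustration only; "my_lib" and the starting kernel_deps are assumptions.
name = "my_lib"
kernel_deps = ["//executorch/kernels/portable:operators"]

# Mirrors the rewiring above: swap the prebuilt operators target for the
# dtype-selective library built from source.
kernel_deps.remove("//executorch/kernels/portable:operators")
kernel_deps.append(":{}_portable_lib".format(name))

# kernel_deps is now [":my_lib_portable_lib"], which flows into the single
# `deps = [...] + deps + kernel_deps` list in the final hunk.
```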
@@ -832,7 +842,7 @@ def executorch_generated_lib(
832842 "//executorch/kernels/prim_ops:prim_ops_registry" + aten_suffix ,
833843 "//executorch/runtime/core:evalue" + aten_suffix ,
834844 "//executorch/codegen:macros" ,
835- ] + deps + kernel_deps + portable_lib + optimized_lib ,
845+ ] + deps + kernel_deps ,
836846 exported_deps = [
837847 "//executorch/runtime/core/exec_aten:lib" + aten_suffix ,
838848 "//executorch/runtime/kernel:kernel_runtime_context" + aten_suffix ,