diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index 9538dd4a70bae..d0f25a48f4802 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -83,6 +83,8 @@ class AArch64 : public TargetInfo {
                      uint64_t val) const override;
   RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
   void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;
+  void initICFSafeThunkBody(InputSection *thunk, Symbol *target) const override;
+  uint32_t getICFSafeThunkSize() const override;
 
 private:
   void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
@@ -926,6 +928,18 @@ static bool needsGotForMemtag(const Relocation &rel) {
   return rel.sym->isTagged() && needsGot(rel.expr);
 }
 
+static constexpr uint8_t icfSafeThunkCode[] = {0x00, 0x00, 0x00, 0x14};
+
+void AArch64::initICFSafeThunkBody(InputSection *thunk, Symbol *target) const {
+  thunk->content_ = icfSafeThunkCode;
+  thunk->size = sizeof(icfSafeThunkCode);
+  thunk->relocations.push_back({R_PC, R_AARCH64_JUMP26, 0, 0, target});
+}
+
+uint32_t AArch64::getICFSafeThunkSize() const {
+  return sizeof(icfSafeThunkCode);
+}
+
 void AArch64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
   uint64_t secAddr = sec.getOutputSection()->addr;
   if (auto *s = dyn_cast<InputSection>(&sec))
diff --git a/lld/ELF/Config.h b/lld/ELF/Config.h
index f132b11b20c63..6cb72de29caa7 100644
--- a/lld/ELF/Config.h
+++ b/lld/ELF/Config.h
@@ -97,7 +97,7 @@ enum class CGProfileSortKind { None, Hfsort, Cdsort };
 enum class DiscardPolicy { Default, All, Locals, None };
 
 // For --icf={none,safe,all}.
-enum class ICFLevel { None, Safe, All };
+enum class ICFLevel { None, Safe, SafeThunks, All };
 
 // For --strip-{all,debug}.
 enum class StripPolicy { None, All, Debug };
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index 7d14180a49926..18cc96595ddd0 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -808,11 +808,14 @@ static int getMemtagMode(Ctx &ctx, opt::InputArgList &args) {
 }
 
 static ICFLevel getICF(opt::InputArgList &args) {
-  auto *arg = args.getLastArg(OPT_icf_none, OPT_icf_safe, OPT_icf_all);
+  auto *arg = args.getLastArg(OPT_icf_none, OPT_icf_safe, OPT_icf_safe_thunks,
+                              OPT_icf_all);
   if (!arg || arg->getOption().getID() == OPT_icf_none)
     return ICFLevel::None;
   if (arg->getOption().getID() == OPT_icf_safe)
     return ICFLevel::Safe;
+  if (arg->getOption().getID() == OPT_icf_safe_thunks)
+    return ICFLevel::SafeThunks;
   return ICFLevel::All;
 }
 
@@ -2474,7 +2477,8 @@ static void findKeepUniqueSections(Ctx &ctx, opt::InputArgList &args) {
 
   // Symbols in the dynsym could be address-significant in other executables
   // or DSOs, so we conservatively mark them as address-significant.
-  bool icfSafe = ctx.arg.icf == ICFLevel::Safe;
+  bool icfSafe =
+      (ctx.arg.icf == ICFLevel::Safe || ctx.arg.icf == ICFLevel::SafeThunks);
   for (Symbol *sym : ctx.symtab->getSymbols())
     if (sym->isExported)
       markAddrsig(icfSafe, sym);
diff --git a/lld/ELF/ICF.cpp b/lld/ELF/ICF.cpp
index 1cdcf6be9d8a9..4972f7703d7ce 100644
--- a/lld/ELF/ICF.cpp
+++ b/lld/ELF/ICF.cpp
@@ -77,9 +77,12 @@
 #include "InputFiles.h"
 #include "LinkerScript.h"
 #include "OutputSections.h"
+#include "Relocations.h"
 #include "SymbolTable.h"
 #include "Symbols.h"
 #include "SyntheticSections.h"
+#include "Target.h"
+#include "llvm/ADT/ArrayRef.h"
 #include "llvm/BinaryFormat/ELF.h"
 #include "llvm/Object/ELF.h"
 #include "llvm/Support/Parallel.h"
@@ -121,6 +124,8 @@ template <class ELFT> class ICF {
 
   void forEachClass(llvm::function_ref<void(size_t, size_t)> fn);
 
+  void applySafeThunksToRange(size_t begin, size_t end);
+
   Ctx &ctx;
   SmallVector<InputSection *, 0> sections;
 
@@ -160,10 +165,14 @@ template <class ELFT> class ICF {
 }
 
 // Returns true if section S is subject of ICF.
-static bool isEligible(InputSection *s) {
-  if (!s->isLive() || s->keepUnique || !(s->flags & SHF_ALLOC))
+static bool isEligible(InputSection *s, bool safeThunksMode) {
+  if (!s->isLive() || (s->keepUnique && !safeThunksMode) ||
+      !(s->flags & SHF_ALLOC))
     return false;
 
+  if (s->keepUnique)
+    return safeThunksMode && (s->flags & ELF::SHF_EXECINSTR);
+
   // Don't merge writable sections. .data.rel.ro sections are marked as writable
   // but are semantically read-only.
   if ((s->flags & SHF_WRITE) && s->name != ".data.rel.ro" &&
@@ -459,6 +468,52 @@ static void combineRelocHashes(unsigned cnt, InputSection *isec,
   isec->eqClass[(cnt + 1) % 2] = hash | (1U << 31);
 }
 
+// Given a range of identical sections, replace all address-significant
+// functions after the first one with a thunk that is just a direct branch to
+// the first (master) function in the range. This way only one copy of the
+// function body is kept, while the address-significant functions still get
+// distinct addresses because each of them is reduced to a direct-branch thunk
+// rather than a full copy of the body.
+template <class ELFT>
+void ICF<ELFT>::applySafeThunksToRange(size_t begin, size_t end) {
+  InputSection *masterIsec = sections[begin];
+
+  uint32_t thunkSize = ctx.target->getICFSafeThunkSize();
+  // If the function bodies are no larger than the thunk itself, leave them
+  // as they are: creating thunks would be a net loss.
+  if (masterIsec->getSize() <= thunkSize)
+    return;
+
+  // Find the symbol to create the thunk for.
+  Symbol *masterSym = masterIsec->getEnclosingSymbol(0);
+  if (!masterSym)
+    return;
+
+  for (size_t i = begin + 1; i < end; ++i) {
+    InputSection *isec = sections[i];
+    if (!isec->keepUnique)
+      break;
+
+    auto *thunk = make<InputSection>(*isec);
+    ctx.target->initICFSafeThunkBody(thunk, masterSym);
+    thunk->markLive();
+    auto *osec = isec->getParent();
+    auto *isd = cast<InputSectionDescription>(osec->commands.back());
+    isd->sections.push_back(thunk);
+    osec->commitSection(thunk);
+    isec->repl = thunk;
+    isec->markDead();
+
+    for (Symbol *sym : thunk->file->getSymbols())
+      if (auto *d = dyn_cast<Defined>(sym))
+        if (d->section == isec) {
+          d->value = 0;
+          if (d->size != 0)
+            d->size = thunkSize;
+        }
+  }
+}
+
 // The main function of ICF.
 template <class ELFT> void ICF<ELFT>::run() {
   // Two text sections may have identical content and relocations but different
@@ -475,10 +530,11 @@ template <class ELFT> void ICF<ELFT>::run() {
         [&](InputSection &s) { s.eqClass[0] = s.eqClass[1] = ++uniqueId; });
 
   // Collect sections to merge.
+  bool safeThunksMode = ctx.arg.icf == ICFLevel::SafeThunks;
   for (InputSectionBase *sec : ctx.inputSections) {
     auto *s = dyn_cast<InputSection>(sec);
     if (s && s->eqClass[0] == 0) {
-      if (isEligible(s))
+      if (isEligible(s, safeThunksMode))
         sections.push_back(s);
       else
         // Ineligible sections are assigned unique IDs, i.e. each section
@@ -510,9 +566,13 @@ template <class ELFT> void ICF<ELFT>::run() {
 
   // From now on, sections in Sections vector are ordered so that sections
   // in the same equivalence class are consecutive in the vector.
-  llvm::stable_sort(sections, [](const InputSection *a, const InputSection *b) {
-    return a->eqClass[0] < b->eqClass[0];
-  });
+  llvm::stable_sort(
+      sections, [safeThunksMode](const InputSection *a, const InputSection *b) {
+        if (safeThunksMode)
+          if (a->eqClass[0] == b->eqClass[0])
+            return a->keepUnique > b->keepUnique;
+        return a->eqClass[0] < b->eqClass[0];
+      });
 
   // Compare static contents and assign unique equivalence class IDs for each
   // static content. Use a base offset for these IDs to ensure no overlap with
@@ -535,12 +595,19 @@ template <class ELFT> void ICF<ELFT>::run() {
   auto print = [&ctx = ctx]() -> ELFSyncStream {
     return {ctx, ctx.arg.printIcfSections ? DiagLevel::Msg : DiagLevel::None};
   };
 
+  if (safeThunksMode)
+    forEachClassRange(0, sections.size(), [&](size_t begin, size_t end) {
+      applySafeThunksToRange(begin, end);
+    });
+
   // Merge sections by the equivalence class.
   forEachClassRange(0, sections.size(), [&](size_t begin, size_t end) {
     if (end - begin == 1)
       return;
     print() << "selected section " << sections[begin];
     for (size_t i = begin + 1; i < end; ++i) {
+      if (safeThunksMode && sections[i]->keepUnique)
+        continue;
       print() << "  removing identical section " << sections[i];
       sections[begin]->replace(sections[i]);
diff --git a/lld/ELF/Options.td b/lld/ELF/Options.td
index b3b12a0646875..9f9eeb87656e9 100644
--- a/lld/ELF/Options.td
+++ b/lld/ELF/Options.td
@@ -293,6 +293,8 @@ def icf_all: F<"icf=all">, HelpText<"Enable identical code folding">;
 
 def icf_safe: F<"icf=safe">, HelpText<"Enable safe identical code folding">;
 
+def icf_safe_thunks: F<"icf=safe_thunks">, HelpText<"Enable safe identical code folding with thunks">;
+
 def icf_none: F<"icf=none">, HelpText<"Disable identical code folding (default)">;
 
 def ignore_function_address_equality: FF<"ignore-function-address-equality">,
diff --git a/lld/ELF/Target.h b/lld/ELF/Target.h
index fd1e5d33c438a..6c95cd2517e01 100644
--- a/lld/ELF/Target.h
+++ b/lld/ELF/Target.h
@@ -16,6 +16,7 @@
 #include "llvm/Object/ELF.h"
 #include "llvm/Object/ELFTypes.h"
 #include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
 #include <array>
 
@@ -102,6 +103,15 @@ class TargetInfo {
   virtual void applyJumpInstrMod(uint8_t *loc, JumpModType type,
                                  JumpModType val) const {}
 
+  // Initialize the body of the safe thunk in ICF for the target.
+  virtual void initICFSafeThunkBody(InputSection *thunk, Symbol *target) const {
+    llvm_unreachable("target does not support ICF safe thunks");
+  }
+  // Returns the size of the safe thunk in ICF for the target.
+  virtual uint32_t getICFSafeThunkSize() const {
+    llvm_unreachable("target does not support ICF safe thunks");
+  }
+
   virtual ~TargetInfo();
 
   // This deletes a jump insn at the end of the section if it is a fall thru to
diff --git a/lld/test/ELF/icf-safe-thunks.s b/lld/test/ELF/icf-safe-thunks.s
new file mode 100644
index 0000000000000..12edf4ae76e2a
--- /dev/null
+++ b/lld/test/ELF/icf-safe-thunks.s
@@ -0,0 +1,397 @@
+# REQUIRES: aarch64
+# RUN: rm -rf %t && split-file %s %t && cd %t
+
+# RUN: llvm-mc -filetype=obj -triple=aarch64 a.s -o a.o
+# RUN: ld.lld a.o -o a.so --icf=safe_thunks --print-icf-sections | FileCheck %s
+# RUN: llvm-objdump a.so -d | FileCheck %s --check-prefixes=CHECK-ARM64
+
+# CHECK: selected section a.o:(.text.func_3identical_v1_canmerge)
+# CHECK: selected section a.o:(.text.func_call_thunked_1_nomerge)
+# CHECK: selected section a.o:(.text.func_unique_2_canmerge)
+# CHECK: selected section a.o:(.text.func_3identical_v1)
+
+# CHECK-ARM64: func_unique_1
+# CHECK-ARM64-NEXT:   adrp x8, {{.*}}
+# CHECK-ARM64-NEXT:   mov w9, #0x1
+
+# CHECK-ARM64: func_unique_2_canmerge
+# CHECK-ARM64-NEXT:   adrp x8, {{.*}}
+# CHECK-ARM64-NEXT:   mov w9, #0x2
+
+# CHECK-ARM64: func_3identical_v1
+# CHECK-ARM64-NEXT:   adrp x8, {{.*}}
+# CHECK-ARM64-NEXT:   mov w9, #0x3
+
+# CHECK-ARM64: func_3identical_v1_canmerge
+# CHECK-ARM64-NEXT:   adrp x8, {{.*}}
+
+# CHECK-ARM64: func_call_thunked_1_nomerge
+# CHECK-ARM64-NEXT:   stp x29, x30, [sp, #-0x10]!
+
+# CHECK-ARM64: :
+# CHECK-ARM64-NEXT:   b 0x[[#%.6x,]]
+# CHECK-ARM64: :
+# CHECK-ARM64-NEXT:   b 0x[[#%.6x,]]
+# CHECK-ARM64: :
+# CHECK-ARM64-NEXT:   b 0x[[#%.6x,]]
+# CHECK-ARM64: :
+# CHECK-ARM64-NEXT:   b 0x[[#%.6x,]]
+# CHECK-ARM64: :
+# CHECK-ARM64-NEXT:   b 0x[[#%.6x,]]
+# CHECK-ARM64: :
+# CHECK-ARM64-NEXT:   b 0x[[#%.6x,]]
+# CHECK-ARM64: :
+# CHECK-ARM64-NEXT:   b 0x[[#%.6x,]]
+# CHECK-ARM64: :
+# CHECK-ARM64-NEXT:   b 0x[[#%.6x,]]
+
+;--- a.cc
+#define ATTR __attribute__((noinline,used,retain)) extern "C"
+typedef unsigned long long ULL;
+
+volatile char g_val = 0;
+void *volatile g_ptr = 0;
+
+ATTR void func_unique_1() { g_val = 1; }
+
+ATTR void func_unique_2_canmerge() { g_val = 2; }
+
+ATTR void func_2identical_v1() { g_val = 2; }
+
+ATTR void func_2identical_v2() { g_val = 2; }
+
+ATTR void func_3identical_v1() { g_val = 3; }
+
+ATTR void func_3identical_v2() { g_val = 3; }
+
+ATTR void func_3identical_v3() { g_val = 3; }
+
+ATTR void func_3identical_v1_canmerge() { g_val = 33; }
+
+ATTR void func_3identical_v2_canmerge() { g_val = 33; }
+
+ATTR void func_3identical_v3_canmerge() { g_val = 33; }
+
+ATTR void func_call_thunked_1_nomerge() {
+  func_2identical_v1();
+  g_val = 77;
+}
+
+ATTR void func_call_thunked_2_nomerge() {
+  func_2identical_v2();
+  g_val = 77;
+}
+
+ATTR void func_call_thunked_2_merge() {
+  func_2identical_v2();
+  g_val = 77;
+}
+
+ATTR void call_all_funcs() {
+  func_unique_1();
+  func_unique_2_canmerge();
+  func_2identical_v1();
+  func_2identical_v2();
+  func_3identical_v1();
+  func_3identical_v2();
+  func_3identical_v3();
+  func_3identical_v1_canmerge();
+  func_3identical_v2_canmerge();
+  func_3identical_v3_canmerge();
+}
+
+ATTR void take_func_addr() {
+  g_ptr = (void*)func_unique_1;
+  g_ptr = (void*)func_2identical_v1;
+  g_ptr = (void*)func_2identical_v2;
+  g_ptr = (void*)func_3identical_v1;
+  g_ptr = (void*)func_3identical_v2;
+  g_ptr = (void*)func_3identical_v3;
+}
+
+ATTR int _start() { return 0; }
+
+;--- gen
+clang --target=aarch64-linux-gnu -O3 -ffunction-sections -fdata-sections -fno-asynchronous-unwind-tables -S a.cc -o -
+
+;--- a.s
+        .file "a.cc"
+        .section .text.func_unique_1,"axR",@progbits
+        .globl func_unique_1                    // -- Begin function func_unique_1
+        .p2align 2
+        .type func_unique_1,@function
+func_unique_1:                                  // @func_unique_1
+// %bb.0:                                       // %entry
+        adrp x8, g_val
+        mov w9, #1                              // =0x1
+        strb w9, [x8, :lo12:g_val]
+        ret
+.Lfunc_end0:
+        .size func_unique_1, .Lfunc_end0-func_unique_1
+                                                // -- End function
+        .section .text.func_unique_2_canmerge,"axR",@progbits
+        .globl func_unique_2_canmerge           // -- Begin function func_unique_2_canmerge
+        .p2align 2
+        .type func_unique_2_canmerge,@function
+func_unique_2_canmerge:                         // @func_unique_2_canmerge
+// %bb.0:                                       // %entry
+        adrp x8, g_val
+        mov w9, #2                              // =0x2
+        strb w9, [x8, :lo12:g_val]
+        ret
+.Lfunc_end1:
+        .size func_unique_2_canmerge, .Lfunc_end1-func_unique_2_canmerge
+                                                // -- End function
+        .section .text.func_2identical_v1,"axR",@progbits
+        .globl func_2identical_v1               // -- Begin function func_2identical_v1
+        .p2align 2
+        .type func_2identical_v1,@function
+func_2identical_v1:                             // @func_2identical_v1
+// %bb.0:                                       // %entry
+        adrp x8, g_val
+        mov w9, #2                              // =0x2
+        strb w9, [x8, :lo12:g_val]
+        ret
+.Lfunc_end2:
+        .size func_2identical_v1, .Lfunc_end2-func_2identical_v1
+                                                // -- End function
+        .section .text.func_2identical_v2,"axR",@progbits
+        .globl func_2identical_v2               // -- Begin function func_2identical_v2
+        .p2align 2
+        .type func_2identical_v2,@function
+func_2identical_v2:                             // @func_2identical_v2
+// %bb.0:                                       // %entry
+        adrp x8, g_val
+        mov w9, #2                              // =0x2
+        strb w9, [x8, :lo12:g_val]
+        ret
+.Lfunc_end3:
+        .size func_2identical_v2, .Lfunc_end3-func_2identical_v2
+                                                // -- End function
+        .section .text.func_3identical_v1,"axR",@progbits
+        .globl func_3identical_v1               // -- Begin function func_3identical_v1
+        .p2align 2
+        .type func_3identical_v1,@function
+func_3identical_v1:                             // @func_3identical_v1
+// %bb.0:                                       // %entry
+        adrp x8, g_val
+        mov w9, #3                              // =0x3
+        strb w9, [x8, :lo12:g_val]
+        ret
+.Lfunc_end4:
+        .size func_3identical_v1, .Lfunc_end4-func_3identical_v1
+                                                // -- End function
+        .section .text.func_3identical_v2,"axR",@progbits
+        .globl func_3identical_v2               // -- Begin function func_3identical_v2
+        .p2align 2
+        .type func_3identical_v2,@function
+func_3identical_v2:                             // @func_3identical_v2
+// %bb.0:                                       // %entry
+        adrp x8, g_val
+        mov w9, #3                              // =0x3
+        strb w9, [x8, :lo12:g_val]
+        ret
+.Lfunc_end5:
+        .size func_3identical_v2, .Lfunc_end5-func_3identical_v2
+                                                // -- End function
+        .section .text.func_3identical_v3,"axR",@progbits
+        .globl func_3identical_v3               // -- Begin function func_3identical_v3
+        .p2align 2
+        .type func_3identical_v3,@function
+func_3identical_v3:                             // @func_3identical_v3
+// %bb.0:                                       // %entry
+        adrp x8, g_val
+        mov w9, #3                              // =0x3
+        strb w9, [x8, :lo12:g_val]
+        ret
+.Lfunc_end6:
+        .size func_3identical_v3, .Lfunc_end6-func_3identical_v3
+                                                // -- End function
+        .section .text.func_3identical_v1_canmerge,"axR",@progbits
+        .globl func_3identical_v1_canmerge      // -- Begin function func_3identical_v1_canmerge
+        .p2align 2
+        .type func_3identical_v1_canmerge,@function
+func_3identical_v1_canmerge:                    // @func_3identical_v1_canmerge
+// %bb.0:                                       // %entry
+        adrp x8, g_val
+        mov w9, #33                             // =0x21
+        strb w9, [x8, :lo12:g_val]
+        ret
+.Lfunc_end7:
+        .size func_3identical_v1_canmerge, .Lfunc_end7-func_3identical_v1_canmerge
+                                                // -- End function
+        .section .text.func_3identical_v2_canmerge,"axR",@progbits
+        .globl func_3identical_v2_canmerge      // -- Begin function func_3identical_v2_canmerge
+        .p2align 2
+        .type func_3identical_v2_canmerge,@function
+func_3identical_v2_canmerge:                    // @func_3identical_v2_canmerge
+// %bb.0:                                       // %entry
+        adrp x8, g_val
+        mov w9, #33                             // =0x21
+        strb w9, [x8, :lo12:g_val]
+        ret
+.Lfunc_end8:
+        .size func_3identical_v2_canmerge, .Lfunc_end8-func_3identical_v2_canmerge
+                                                // -- End function
+        .section .text.func_3identical_v3_canmerge,"axR",@progbits
+        .globl func_3identical_v3_canmerge      // -- Begin function func_3identical_v3_canmerge
+        .p2align 2
+        .type func_3identical_v3_canmerge,@function
+func_3identical_v3_canmerge:                    // @func_3identical_v3_canmerge
+// %bb.0:                                       // %entry
+        adrp x8, g_val
+        mov w9, #33                             // =0x21
+        strb w9, [x8, :lo12:g_val]
+        ret
+.Lfunc_end9:
+        .size func_3identical_v3_canmerge, .Lfunc_end9-func_3identical_v3_canmerge
+                                                // -- End function
+        .section .text.func_call_thunked_1_nomerge,"axR",@progbits
+        .globl func_call_thunked_1_nomerge      // -- Begin function func_call_thunked_1_nomerge
+        .p2align 2
+        .type func_call_thunked_1_nomerge,@function
+func_call_thunked_1_nomerge:                    // @func_call_thunked_1_nomerge
+// %bb.0:                                       // %entry
+        stp x29, x30, [sp, #-16]!               // 16-byte Folded Spill
+        mov x29, sp
+        bl func_2identical_v1
+        adrp x8, g_val
+        mov w9, #77                             // =0x4d
+        strb w9, [x8, :lo12:g_val]
+        ldp x29, x30, [sp], #16                 // 16-byte Folded Reload
+        ret
+.Lfunc_end10:
+        .size func_call_thunked_1_nomerge, .Lfunc_end10-func_call_thunked_1_nomerge
+                                                // -- End function
+        .section .text.func_call_thunked_2_nomerge,"axR",@progbits
+        .globl func_call_thunked_2_nomerge      // -- Begin function func_call_thunked_2_nomerge
+        .p2align 2
+        .type func_call_thunked_2_nomerge,@function
+func_call_thunked_2_nomerge:                    // @func_call_thunked_2_nomerge
+// %bb.0:                                       // %entry
+        stp x29, x30, [sp, #-16]!               // 16-byte Folded Spill
+        mov x29, sp
+        bl func_2identical_v2
+        adrp x8, g_val
+        mov w9, #77                             // =0x4d
+        strb w9, [x8, :lo12:g_val]
+        ldp x29, x30, [sp], #16                 // 16-byte Folded Reload
+        ret
+.Lfunc_end11:
+        .size func_call_thunked_2_nomerge, .Lfunc_end11-func_call_thunked_2_nomerge
+                                                // -- End function
+        .section .text.func_call_thunked_2_merge,"axR",@progbits
+        .globl func_call_thunked_2_merge        // -- Begin function func_call_thunked_2_merge
+        .p2align 2
+        .type func_call_thunked_2_merge,@function
+func_call_thunked_2_merge:                      // @func_call_thunked_2_merge
+// %bb.0:                                       // %entry
+        stp x29, x30, [sp, #-16]!               // 16-byte Folded Spill
+        mov x29, sp
+        bl func_2identical_v2
+        adrp x8, g_val
+        mov w9, #77                             // =0x4d
+        strb w9, [x8, :lo12:g_val]
+        ldp x29, x30, [sp], #16                 // 16-byte Folded Reload
+        ret
+.Lfunc_end12:
+        .size func_call_thunked_2_merge, .Lfunc_end12-func_call_thunked_2_merge
+                                                // -- End function
+        .section .text.call_all_funcs,"axR",@progbits
+        .globl call_all_funcs                   // -- Begin function call_all_funcs
+        .p2align 2
+        .type call_all_funcs,@function
+call_all_funcs:                                 // @call_all_funcs
+// %bb.0:                                       // %entry
+        stp x29, x30, [sp, #-16]!               // 16-byte Folded Spill
+        mov x29, sp
+        bl func_unique_1
+        bl func_unique_2_canmerge
+        bl func_2identical_v1
+        bl func_2identical_v2
+        bl func_3identical_v1
+        bl func_3identical_v2
+        bl func_3identical_v3
+        bl func_3identical_v1_canmerge
+        bl func_3identical_v2_canmerge
+        ldp x29, x30, [sp], #16                 // 16-byte Folded Reload
+        b func_3identical_v3_canmerge
+.Lfunc_end13:
+        .size call_all_funcs, .Lfunc_end13-call_all_funcs
+                                                // -- End function
+        .section .text.take_func_addr,"axR",@progbits
+        .globl take_func_addr                   // -- Begin function take_func_addr
+        .p2align 2
+        .type take_func_addr,@function
+take_func_addr:                                 // @take_func_addr
+// %bb.0:                                       // %entry
+        adrp x8, g_ptr
+        adrp x9, func_unique_1
+        add x9, x9, :lo12:func_unique_1
+        str x9, [x8, :lo12:g_ptr]
+        adrp x9, func_2identical_v1
+        add x9, x9, :lo12:func_2identical_v1
+        str x9, [x8, :lo12:g_ptr]
+        adrp x9, func_2identical_v2
+        add x9, x9, :lo12:func_2identical_v2
+        str x9, [x8, :lo12:g_ptr]
+        adrp x9, func_3identical_v1
+        add x9, x9, :lo12:func_3identical_v1
+        str x9, [x8, :lo12:g_ptr]
+        adrp x9, func_3identical_v2
+        add x9, x9, :lo12:func_3identical_v2
+        str x9, [x8, :lo12:g_ptr]
+        adrp x9, func_3identical_v3
+        add x9, x9, :lo12:func_3identical_v3
+        str x9, [x8, :lo12:g_ptr]
+        ret
+.Lfunc_end14:
+        .size take_func_addr, .Lfunc_end14-take_func_addr
+                                                // -- End function
+        .section .text._start,"axR",@progbits
+        .globl _start                           // -- Begin function _start
+        .p2align 2
+        .type _start,@function
+_start:                                         // @_start
+// %bb.0:                                       // %entry
+        mov w0, wzr
+        ret
+.Lfunc_end15:
+        .size _start, .Lfunc_end15-_start
+                                                // -- End function
+        .type g_val,@object                     // @g_val
+        .section .bss.g_val,"aw",@nobits
+        .globl g_val
+        .p2align 2, 0x0
+g_val:
+        .byte 0                                 // 0x0
+        .size g_val, 1
+
+        .type g_ptr,@object                     // @g_ptr
+        .section .bss.g_ptr,"aw",@nobits
+        .globl g_ptr
+        .p2align 3, 0x0
+g_ptr:
+        .xword 0
+        .size g_ptr, 8
+
+        .section ".note.GNU-stack","",@progbits
+        .addrsig
+        .addrsig_sym func_unique_1
+        .addrsig_sym func_unique_2_canmerge
+        .addrsig_sym func_2identical_v1
+        .addrsig_sym func_2identical_v2
+        .addrsig_sym func_3identical_v1
+        .addrsig_sym func_3identical_v2
+        .addrsig_sym func_3identical_v3
+        .addrsig_sym func_3identical_v1_canmerge
+        .addrsig_sym func_3identical_v2_canmerge
+        .addrsig_sym func_3identical_v3_canmerge
+        .addrsig_sym func_call_thunked_1_nomerge
+        .addrsig_sym func_call_thunked_2_nomerge
+        .addrsig_sym func_call_thunked_2_merge
+        .addrsig_sym call_all_funcs
+        .addrsig_sym take_func_addr
+        .addrsig_sym _start
+        .addrsig_sym g_val
+        .addrsig_sym g_ptr
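
For reference: the thunk body installed by initICFSafeThunkBody is the little-endian byte sequence {0x00, 0x00, 0x00, 0x14}, i.e. a single AArch64 `b` instruction, and the R_AARCH64_JUMP26 relocation at offset 0 redirects it at the surviving copy. A sketch of what the disassembly of one folded pair from the test is expected to look like (addresses and the choice of which copy keeps the body are illustrative, not output from an actual run):

    <func_2identical_v2>:                 // address-significant duplicate, reduced to a 4-byte thunk
        b   0x... <func_2identical_v1>    // direct branch to the copy that keeps the full body

Duplicates that are not address-significant are still folded outright by the existing merge loop; only keepUnique sections are rewritten into thunks by applySafeThunksToRange.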