@@ -41,14 +41,77 @@ struct read_mem_result *read_mem_result;
4141 static uintptr_t v2p_offset;
4242#elif __TARGET_ARCH_arm64
4343 static int64_t v2p_offset;
44- #ifndef CORE
45- static uintptr_t page_offset;
46- #endif
4744#endif
4845
4946/* Address of the root of the struct resource list (physical memory regions list) */
5047static uintptr_t iomem_resource;
5148
49+ #if defined(__TARGET_ARCH_arm64)
50+ /*
51+ * @brief Check if memory mapping respects the given address
52+ * @param addr: The address to check
53+ *
54+ * Attempts to mmap a 1-byte anonymous region at the specified address. If the mmap succeeds
55+ * and the kernel places the mapping at or above the requested address, the function returns
56+ * true. Otherwise, it returns false.
57+ *
58+ * @return: true if mmap honors addr (maps at or above it), false otherwise.
59+ */
60+ static bool is_mmap_respecting_address(void *addr) {
61+ const size_t size = 1;
62+ void *mapped_addr = mmap(addr, size, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
63+
64+ if (mapped_addr == MAP_FAILED) {
65+ return false;
66+ }
67+
68+ if (munmap(mapped_addr, size) == -1) {
69+ perror("Failed to munmap");
70+ return false;
71+ }
72+
73+ /* Check if the mapped address is the desired address, also greater is ok */
74+ if (mapped_addr >= addr) {
75+ return true;
76+ } else {
77+ return false;
78+ }
79+ }
80+
81+ /*
82+ * @brief Determine the actual virtual address bits for ARM64
83+ *
84+ * Determines the number of virtual address bits used by the system on ARM64
85+ * by probing mmap behavior at boundary addresses derived from the VA_BITS values in arch/arm64/Kconfig.
86+ * The function first checks the most common virtual address bit settings (48 and 52),
87+ * then falls back to testing other possible values (47, 42, 39, 36) if necessary.
88+ * @return Number of virtual address bits used (e.g., 48, 52).
89+ */
90+ static unsigned long arm64_vabits_actual() {
91+ unsigned long vabits = 0;
92+
93+ /* VA_BITS = 48 is probably the most common, check it first */
94+ if (is_mmap_respecting_address((void *)(1ul << (48 - 1)))) {
95+ if (is_mmap_respecting_address((void *)(1ul << (52 - 1)))) {
96+ vabits = 52;
97+ } else {
98+ vabits = 48;
99+ }
100+ } else {
101+ /* Remaining cases */
102+ const unsigned long va_bits[] = {47, 42, 39, 36};
103+ for (int i = 0; i < 4; ++i) {
104+ if (is_mmap_respecting_address((void *)(1ul << (va_bits[i] - 1)))) {
105+ vabits = va_bits[i];
106+ break;
107+ }
108+ }
109+ }
110+
111+ return vabits;
112+ }
113+ #endif // __TARGET_ARCH_arm64
114+
52115/*
53116 * init_mmap() - Initializes a shared memory mapping for reading memory results from eBPF
54117 *
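The VA_BITS probe added above can be exercised on its own. Below is a self-contained sketch (hypothetical, not part of this patch) that applies the same mmap-hint test to each candidate split and reports which ones the running kernel honors:

#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>

/* Same probe as the patch: hint mmap at the address and accept any placement
 * at or above the hint. */
static bool probe(void *hint) {
    void *p = mmap(hint, 1, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return false;
    munmap(p, 1);
    return p >= hint;
}

int main(void) {
    /* 1ul << (VA_BITS - 1) is the highest user-space address bit for each split. */
    const unsigned long candidates[] = {52, 48, 47, 42, 39, 36};
    for (int i = 0; i < 6; ++i)
        printf("VA_BITS >= %lu: %s\n", candidates[i],
               probe((void *)(1ul << (candidates[i] - 1))) ? "yes" : "no");
    return 0;
}

On a stock 48-bit arm64 kernel the 52-bit probe is expected to fail: a hint beyond the task's address limit is ignored and the mapping lands lower. That asymmetry is what arm64_vabits_actual() relies on.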
@@ -90,6 +153,19 @@ int load_ebpf_mem_progs() {
90153 return errno;
91154 }
92155
156+ /* The ARM64 phys-to-virt translation requires two values; one of them (CONFIG_ARM64_VA_BITS)
157+ * might not be available from eBPF, so we compute it at runtime here and pass it to the
158+ * eBPF program.
159+ */
160+ #if defined(__TARGET_ARCH_arm64)
161+ unsigned long vabits = arm64_vabits_actual();
162+ if (vabits == 0) {
163+ WARN("Failed to determine runtime virtual address bits, defaulting to 48");
164+ vabits = 48;
165+ }
166+ mem_ebpf_skel->data->runtime_va_bits = vabits;
167+ #endif
168+
93169 /* Load the BPF objects */
94170 if (mem_ebpf__load(mem_ebpf_skel)) {
95171 perror("Failed to load BPF object");
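For context, a plausible shape of the eBPF-side counterpart that receives this value is sketched below; it is assumed, since the BPF source is not part of this diff, and only the name runtime_va_bits is taken from the patch. With libbpf skeletons, a global with a non-zero initializer lands in the .data section, so it is reachable as mem_ebpf_skel->data->runtime_va_bits, and values written there before mem_ebpf__load() become the section's initial contents:

/* eBPF side (sketch): global filled in by user space before load. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

unsigned long runtime_va_bits = 48;   /* non-zero so it stays in .data rather than .bss */

char LICENSE[] SEC("license") = "GPL";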
@@ -138,12 +214,7 @@ uintptr_t phys_to_virt(const uintptr_t phy_addr) {
138214 #ifdef __TARGET_ARCH_x86
139215 return phy_addr + v2p_offset;
140216 #elif __TARGET_ARCH_arm64
141- uintptr_t vaddr = phy_addr - v2p_offset;
142- #ifndef CORE
143- /* If in CO-RE mode the translation will be finished in the eBPF program */
144- vaddr |= page_offset;
145- #endif
146- return vaddr;
217+ return phy_addr - v2p_offset;
147218 #else
148219 return phy_addr;
149220 #endif
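A worked example with made-up numbers shows how the arm64 translation is now split: user space only subtracts v2p_offset, and the eBPF program finishes the translation, presumably by OR-ing in PAGE_OFFSET the way the removed `vaddr |= page_offset` line did, using the same -1L << vabits value the removed user-space code computed. The sketch below is hypothetical and its constants are illustrative, not real offsets:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    unsigned long runtime_va_bits = 48;                 /* as detected at runtime */
    uintptr_t v2p_offset = 0x40000000ul;                /* hypothetical arm64 offset */
    uintptr_t phys = 0x40001000ul;                      /* hypothetical physical address */

    uintptr_t partial = phys - v2p_offset;              /* what phys_to_virt() now returns */
    uintptr_t page_offset = -(1ul << runtime_va_bits);  /* mirrors the removed -1L << vabits */

    printf("partial = %#lx, virt = %#lx\n", partial, partial | page_offset);
    /* prints: partial = 0x1000, virt = 0xffff000000001000 for VA_BITS = 48 */
    return 0;
}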
@@ -182,70 +253,6 @@ int __attribute__((noinline, optnone)) read_kernel_memory(const uintptr_t addr,
182253 return read_mem_result->ret_code;
183254}
184255
185- #if defined(__TARGET_ARCH_arm64) && !defined(CORE)
186- /*
187- * is_mmap_respecting_address() - Check if memory mapping respects the given address
188- * @addr: The address to check
189- *
190- * Attempts to mmap a 1-byte region at the specified address. If the mmap operation is successful
191- * and the address is valid (greater than or equal to the specified address), the function returns
192- * true. Otherwise, it returns false.
193- */
194- static bool is_mmap_respecting_address(void *addr) {
195- unsigned int size = getpagesize();
196- void *mapped_addr = mmap(addr, size, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
197-
198- if (mapped_addr == MAP_FAILED) {
199- return false;
200- }
201-
202- if (munmap(mapped_addr, size) == -1) {
203- return false;
204- }
205-
206- /* Check if the mapped address is the desired address, also greater is ok */
207- if (mapped_addr >= addr) {
208- return true;
209- } else {
210- return false;
211- }
212- }
213-
214- /*
215- * arm64_vabits_actual() - Determine the actual virtual address bits for ARM64
216- *
217- * Determines the number of virtual address bits used by the system on ARM64
218- * by checking the mmap behavior for various address values defined in arch/arm64/Kconfig.
219- * The function first checks the most common virtual address bit settings (48 and 52),
220- * then falls back to testing other possible values (47, 42, 39, 36) if necessary.
221- * Returns the number of virtual address bits used (e.g., 48, 52).
222- */
223- static unsigned long arm64_vabits_actual() {
224- unsigned long vabits = 0;
225-
226- /* VA_BITS = 48 is probably the most common check it first */
227- if (is_mmap_respecting_address((void *)(1ul << 47))) {
228- if (is_mmap_respecting_address((void *)(1ul << 51))) {
229- vabits = 52;
230- } else {
231- vabits = 48;
232- }
233- } else {
234- /* Remaining cases */
235- const unsigned long va_bits[] = {47, 42, 39, 36};
236- for (int i = 0; i < 4; ++i) {
237- if (is_mmap_respecting_address((void *)(1ul << (va_bits[i] - 1)))) {
238- vabits = va_bits[i];
239- break;
240- }
241- }
242- }
243-
244- return vabits;
245- }
246- #endif
247-
248-
249256/*
250257 * parse_kallsyms_line() - Extracts the address of a specific kernel symbol from a text line
251258 * @line: A line of text, typically from /proc/kallsyms or System.map
@@ -346,23 +353,6 @@ static int parse_kallsyms()
346353 return EIO;
347354 }
348355
349- /* We are able now to translate phys to virt addresses for X64. ARM64 instead is more complex
350- * and require two values, one of the two (CONFIG_ARM64_VA_BITS) available only in the eBPF
351- * CO-RE program or determined at runtime here for non CO-RE ones.
352- *
353- * TODO: false! it does not depends by CO-RE, but on availability of the kernel config (see libbpf)
354- */
355- #if defined(__TARGET_ARCH_arm64) && !defined(CORE)
356- /* If the kernel is not CORE we determine the CONFIG_ARM64_VA_BITS using the runtime value. */
357- unsigned long vabits = arm64_vabits_actual();
358- if (vabits == 0) {
359- fprintf(stderr, "Failed to determine virtual address bits, defaulting to 48\n");
360- vabits = 48;
361- }
362- page_offset = -1L << vabits;
363-
364- #endif
365-
366356 return 0;
367357}
368358