@@ -162,6 +162,8 @@ static void invalidate_icache(char* begin, char*end) {
 }
 #endif
 
+#define CODE_ALIGNMENT 32
+
 /* The function pointer is passed as last argument. The other three arguments
  * are passed in the same order as the function requires. This results in
  * shorter, more efficient ASM code for trampoline.
@@ -230,7 +232,6 @@ perf_map_init_state(void)
 {
     PyUnstable_PerfMapState_Init();
     trampoline_api.code_padding = 0;
-    trampoline_api.code_alignment = 32;
     perf_trampoline_type = PERF_TRAMPOLINE_TYPE_MAP;
     return NULL;
 }
@@ -293,8 +294,8 @@ new_code_arena(void)
     void *end = &_Py_trampoline_func_end;
     size_t code_size = end - start;
     size_t unaligned_size = code_size + trampoline_api.code_padding;
-    size_t chunk_size = round_up(unaligned_size, trampoline_api.code_alignment);
-    assert(chunk_size % trampoline_api.code_alignment == 0);
+    size_t chunk_size = round_up(unaligned_size, CODE_ALIGNMENT);
+    assert(chunk_size % CODE_ALIGNMENT == 0);
     // TODO: Check the effect of alignment of the code chunks. Initial investigation
     // showed that this has no effect on performance in x86-64 or aarch64 and the current
     // version has the advantage that the unwinder in GDB can unwind across JIT-ed code.
@@ -360,8 +361,8 @@ code_arena_new_code(code_arena_t *code_arena)
 {
     py_trampoline trampoline = (py_trampoline)code_arena->current_addr;
     size_t total_code_size = round_up(code_arena->code_size + trampoline_api.code_padding,
-                                      trampoline_api.code_alignment);
-    assert(total_code_size % trampoline_api.code_alignment == 0);
+                                      CODE_ALIGNMENT);
+    assert(total_code_size % CODE_ALIGNMENT == 0);
     code_arena->size_left -= total_code_size;
     code_arena->current_addr += total_code_size;
     return trampoline;
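
For context on the arithmetic this change touches: below is a minimal, self-contained sketch of how a trampoline chunk size could be rounded up to the fixed CODE_ALIGNMENT boundary. The round_up helper shown here is an assumption (its real definition lives elsewhere in perf_trampoline.c and is not part of this diff); the sketch only illustrates the power-of-two rounding that the asserts above rely on.

/* Minimal sketch, not the actual CPython helper: assumes round_up() does
 * standard power-of-two rounding, which is what the asserts in this diff expect. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define CODE_ALIGNMENT 32

/* Round `value` up to the next multiple of `align` (align must be a power of two). */
static size_t
round_up(size_t value, size_t align)
{
    return (value + align - 1) & ~(align - 1);
}

int
main(void)
{
    size_t code_size = 57;    /* hypothetical trampoline body size in bytes */
    size_t code_padding = 0;  /* mirrors trampoline_api.code_padding = 0 above */
    size_t chunk_size = round_up(code_size + code_padding, CODE_ALIGNMENT);
    assert(chunk_size % CODE_ALIGNMENT == 0);
    printf("chunk_size = %zu\n", chunk_size);  /* 57 rounds up to 64 */
    return 0;
}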