@@ -69,60 +69,12 @@ STATIC_ASSERT(pointer_tagging_scheme, USE_FLONUM);
 // The "_yjit_" part is for trying to be informative. We might want different
 // suffixes for symbols meant for Rust and symbols meant for broader CRuby.

-bool
-rb_yjit_mark_writable(void *mem_block, uint32_t mem_size)
-{
-    return mprotect(mem_block, mem_size, PROT_READ | PROT_WRITE) == 0;
-}
-
-void
-rb_yjit_mark_executable(void *mem_block, uint32_t mem_size)
-{
-    // Do not call mprotect when mem_size is zero. Some platforms may return
-    // an error for it. https://github.com/Shopify/ruby/issues/450
-    if (mem_size == 0) {
-        return;
-    }
-    if (mprotect(mem_block, mem_size, PROT_READ | PROT_EXEC)) {
-        rb_bug("Couldn't make JIT page (%p, %lu bytes) executable, errno: %s",
-            mem_block, (unsigned long)mem_size, strerror(errno));
-    }
-}
-
-// Free the specified memory block.
-bool
-rb_yjit_mark_unused(void *mem_block, uint32_t mem_size)
-{
-    // On Linux, you need to use madvise MADV_DONTNEED to free memory.
-    // We might not need to call this on macOS, but it's not really documented.
-    // We generally prefer to do the same thing on both to ease testing too.
-    madvise(mem_block, mem_size, MADV_DONTNEED);
-
-    // On macOS, mprotect PROT_NONE seems to reduce RSS.
-    // We also call this on Linux to avoid executing unused pages.
-    return mprotect(mem_block, mem_size, PROT_NONE) == 0;
-}
-
 long
 rb_yjit_array_len(VALUE a)
 {
     return rb_array_len(a);
 }

-// `start` is inclusive and `end` is exclusive.
-void
-rb_yjit_icache_invalidate(void *start, void *end)
-{
-    // Clear/invalidate the instruction cache. Compiles to nothing on x86_64
-    // but required on ARM before running freshly written code.
-    // On Darwin it's the same as calling sys_icache_invalidate().
-#ifdef __GNUC__
-    __builtin___clear_cache(start, end);
-#elif defined(__aarch64__)
-#error No instruction cache clear available with this compiler on Aarch64!
-#endif
-}
-
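
Taken together, the mprotect/madvise helpers and the cache-invalidation routine removed above form YJIT's write-then-execute cycle for code pages. A minimal sketch of how they combine; the patch_code_page, jit_page, and code_size names are illustrative, not part of this commit:

// Illustrative only: how the removed helpers fit together when patching a code page.
static void patch_code_page(uint8_t *jit_page, uint32_t page_size, size_t code_size)
{
    // Make the page writable before emitting new machine code (W^X: never writable and executable at once).
    if (!rb_yjit_mark_writable(jit_page, page_size)) {
        rb_bug("yjit: failed to make code page writable");
    }

    // ... emit generated instructions into jit_page here (elided) ...

    // Flip the page back to read+execute.
    rb_yjit_mark_executable(jit_page, page_size);

    // Flush the instruction cache over the written range; required on ARM, a no-op on x86_64.
    rb_yjit_icache_invalidate(jit_page, jit_page + code_size);
}
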
 #define PTR2NUM(x) (rb_int2inum((intptr_t)(void *)(x)))

 // For a given raw_sample (frame), set the hash with the caller's
@@ -217,131 +169,6 @@ rb_yjit_exit_locations_dict(VALUE *yjit_raw_samples, int *yjit_line_samples, int
     return result;
 }

-uint32_t
-rb_yjit_get_page_size(void)
-{
-#if defined(_SC_PAGESIZE)
-    long page_size = sysconf(_SC_PAGESIZE);
-    if (page_size <= 0) rb_bug("yjit: failed to get page size");
-
-    // 1 GiB limit. x86 CPUs with PDPE1GB can do this and anything larger is unexpected.
-    // Though our design sort of assume we have fine grained control over memory protection
-    // which require small page sizes.
-    if (page_size > 0x40000000l) rb_bug("yjit page size too large");
-
-    return (uint32_t)page_size;
-#else
-#error "YJIT supports POSIX only for now"
-#endif
-}
-
-#if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
-// Align the current write position to a multiple of bytes
-static uint8_t *
-align_ptr(uint8_t *ptr, uint32_t multiple)
-{
-    // Compute the pointer modulo the given alignment boundary
-    uint32_t rem = ((uint32_t)(uintptr_t)ptr) % multiple;
-
-    // If the pointer is already aligned, stop
-    if (rem == 0)
-        return ptr;
-
-    // Pad the pointer by the necessary amount to align it
-    uint32_t pad = multiple - rem;
-
-    return ptr + pad;
-}
-#endif
-
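
The padding arithmetic in align_ptr rounds a pointer up to the next multiple of the given alignment. A worked example with purely illustrative values:

// Illustrative values only: aligning to a 4096-byte (4 KiB) page boundary.
// rem    = 0x7f001234 % 0x1000 = 0x234
// pad    = 0x1000 - 0x234      = 0xdcc
// result = 0x7f001234 + 0xdcc  = 0x7f002000 (the next page boundary)
uint8_t *aligned = align_ptr((uint8_t *)0x7f001234, 4096);
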
-// Address space reservation. Memory pages are mapped on an as needed basis.
-// See the Rust mm module for details.
-uint8_t *
-rb_yjit_reserve_addr_space(uint32_t mem_size)
-{
-#ifndef _WIN32
-    uint8_t *mem_block;
-
-    // On Linux
-#if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
-    uint32_t const page_size = (uint32_t)sysconf(_SC_PAGESIZE);
-    uint8_t *const cfunc_sample_addr = (void *)(uintptr_t)&rb_yjit_reserve_addr_space;
-    uint8_t *const probe_region_end = cfunc_sample_addr + INT32_MAX;
-    // Align the requested address to page size
-    uint8_t *req_addr = align_ptr(cfunc_sample_addr, page_size);
-
-    // Probe for addresses close to this function using MAP_FIXED_NOREPLACE
-    // to improve odds of being in range for 32-bit relative call instructions.
-    do {
-        mem_block = mmap(
-            req_addr,
-            mem_size,
-            PROT_NONE,
-            MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
-            -1,
-            0
-        );
-
-        // If we succeeded, stop
-        if (mem_block != MAP_FAILED) {
-            ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_yjit_reserve_addr_space");
-            break;
-        }
-
-        // -4MiB. Downwards to probe away from the heap. (On x86/A64 Linux
-        // main_code_addr < heap_addr, and in case we are in a shared
-        // library mapped higher than the heap, downwards is still better
-        // since it's towards the end of the heap rather than the stack.)
-        req_addr -= 4 * 1024 * 1024;
-    } while (req_addr < probe_region_end);
-
-    // On MacOS and other platforms
-#else
-    // Try to map a chunk of memory as executable
-    mem_block = mmap(
-        (void *)rb_yjit_reserve_addr_space,
-        mem_size,
-        PROT_NONE,
-        MAP_PRIVATE | MAP_ANONYMOUS,
-        -1,
-        0
-    );
-#endif
-
-    // Fallback
-    if (mem_block == MAP_FAILED) {
-        // Try again without the address hint (e.g., valgrind)
-        mem_block = mmap(
-            NULL,
-            mem_size,
-            PROT_NONE,
-            MAP_PRIVATE | MAP_ANONYMOUS,
-            -1,
-            0
-        );
-
-        if (mem_block != MAP_FAILED) {
-            ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_yjit_reserve_addr_space:fallback");
-        }
-    }
-
-    // Check that the memory mapping was successful
-    if (mem_block == MAP_FAILED) {
-        perror("ruby: yjit: mmap:");
-        if (errno == ENOMEM) {
-            // No crash report if it's only insufficient memory
-            exit(EXIT_FAILURE);
-        }
-        rb_bug("mmap failed");
-    }
-
-    return mem_block;
-#else
-    // Windows not supported for now
-    return NULL;
-#endif
-}
-
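
The reservation above deliberately maps the whole region PROT_NONE; individual pages only become usable later, when code is about to be written into them. A hedged sketch of that commit step using the helpers removed above (the commit_next_page name and page-at-a-time granularity are assumptions; the real bookkeeping lives in the Rust mm module):

// Hypothetical illustration; the actual logic is implemented in Rust.
static uint8_t *commit_next_page(uint8_t *reserved_base, uint32_t page_size, uint32_t pages_used)
{
    uint8_t *page = reserved_base + (size_t)pages_used * page_size;

    // Transition one reserved (PROT_NONE) page to read/write so code can be emitted into it;
    // it is later flipped to read/execute with rb_yjit_mark_executable().
    if (!rb_yjit_mark_writable(page, page_size)) {
        return NULL; // mprotect failed, e.g. out of memory
    }
    return page;
}
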
 // Is anyone listening for :c_call and :c_return event currently?
 bool
 rb_c_method_tracing_currently_enabled(const rb_execution_context_t *ec)