@@ -11371,7 +11371,7 @@ void CEEJitInfo::recordRelocation(void * location,
                 // When m_fAllowRel32 == TRUE, the JIT will use REL32s for both data addresses and direct code targets.
                 // Since we cannot tell what the relocation is for, we have to defensively retry.
                 //
-                m_fRel32Overflow = TRUE;
+                m_fJumpStubOverflow = TRUE;
                 delta = 0;
             }
             else
@@ -11385,7 +11385,7 @@ void CEEJitInfo::recordRelocation(void * location,
                 {
                     // This forces the JIT to retry the method, which allows us to reserve more space for jump stubs and have a higher chance that
                     // we will find space for them.
-                    m_fRel32Overflow = TRUE;
+                    m_fJumpStubOverflow = TRUE;
                 }
 
             // Keep track of conservative estimate of how much memory may be needed by jump stubs. We will use it to reserve extra memory
@@ -11429,12 +11429,12 @@ void CEEJitInfo::recordRelocation(void * location,
         if (!FitsInRel28(delta))
         {
             // Use jump stub.
-            //
+            //
             TADDR baseAddr = (TADDR)fixupLocation;
             TADDR loAddr   = baseAddr - 0x08000000;   // -2^27
             TADDR hiAddr   = baseAddr + 0x07FFFFFF;   // +2^27-1
 
-            // Check for the wrap around cases
+            // Check for the wrap around cases
             if (loAddr > baseAddr)
                 loAddr = UINT64_MIN; // overflow
             if (hiAddr < baseAddr)
@@ -11443,7 +11443,21 @@ void CEEJitInfo::recordRelocation(void * location,
             PCODE jumpStubAddr = ExecutionManager::jumpStub(m_pMethodBeingCompiled,
                                                             (PCODE) target,
                                                             (BYTE *) loAddr,
-                                                            (BYTE *) hiAddr);
+                                                            (BYTE *) hiAddr,
+                                                            NULL,
+                                                            false);
+
+            // Keep track of conservative estimate of how much memory may be needed by jump stubs. We will use it to reserve extra memory
+            // on retry to increase chances that the retry succeeds.
+            m_reserveForJumpStubs = max(0x400, m_reserveForJumpStubs + 2*BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
+
+            if (jumpStubAddr == 0)
+            {
+                // This forces the JIT to retry the method, which allows us to reserve more space for jump stubs and have a higher chance that
+                // we will find space for them.
+                m_fJumpStubOverflow = TRUE;
+                break;
+            }
 
             delta = (INT64)(jumpStubAddr - fixupLocation);
 
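Note on the ARM64 hunks above: a B/BL instruction carries a 26-bit immediate that is scaled by 4, so a direct branch reaches a signed 28-bit byte displacement of roughly +/-128MB. That is why the code computes a [loAddr, hiAddr] window of baseAddr +/- 2^27 and clamps it when the unsigned arithmetic wraps. The following standalone sketch reproduces just that range check and clamping; FitsInRel28Sketch and ComputeJumpStubWindow are illustrative names I made up, not runtime APIs.

// Illustrative sketch only: FitsInRel28Sketch and ComputeJumpStubWindow are
// hypothetical names, not CoreCLR APIs. It mirrors the clamp-on-wrap logic in
// the IMAGE_REL_ARM64_BRANCH26 hunk above.
#include <cstdint>
#include <cstdio>
#include <limits>

// A B/BL instruction encodes a 26-bit immediate scaled by 4, i.e. a signed
// 28-bit byte displacement in [-2^27, 2^27 - 1].
static bool FitsInRel28Sketch(int64_t delta)
{
    return delta >= -0x08000000LL && delta <= 0x07FFFFFFLL;
}

// Address window in which a jump stub must land so the branch at
// 'fixupLocation' can still reach it; clamp when the unsigned math wraps.
static void ComputeJumpStubWindow(uint64_t fixupLocation, uint64_t* loAddr, uint64_t* hiAddr)
{
    uint64_t baseAddr = fixupLocation;
    *loAddr = baseAddr - 0x08000000;   // -2^27
    *hiAddr = baseAddr + 0x07FFFFFF;   // +2^27-1

    if (*loAddr > baseAddr)
        *loAddr = 0;                                      // underflowed past 0: clamp to minimum
    if (*hiAddr < baseAddr)
        *hiAddr = std::numeric_limits<uint64_t>::max();   // overflowed: clamp to maximum
}

int main()
{
    uint64_t fixup  = 0x00007FFF12340000ULL;
    uint64_t target = 0x00007FFF92340000ULL;   // ~2GB away: out of direct B/BL range

    int64_t delta = (int64_t)(target - fixup);
    if (!FitsInRel28Sketch(delta))
    {
        uint64_t lo = 0, hi = 0;
        ComputeJumpStubWindow(fixup, &lo, &hi);
        std::printf("jump stub must be placed in [0x%llx, 0x%llx]\n",
                    (unsigned long long)lo, (unsigned long long)hi);
    }
    return 0;
}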
@@ -12674,25 +12688,22 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
     }
 #endif // _DEBUG
 
-#ifdef _TARGET_AMD64_
-    BOOL fForceRel32Overflow = FALSE;
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
+    BOOL fForceJumpStubOverflow = FALSE;
 
 #ifdef _DEBUG
     // Always exercise the overflow codepath with force relocs
     if (PEDecoder::GetForceRelocs())
-        fForceRel32Overflow = TRUE;
+        fForceJumpStubOverflow = TRUE;
+#endif
+
+#if defined(_TARGET_AMD64_)
+    BOOL fAllowRel32 = (g_fAllowRel32 | fForceJumpStubOverflow);
 #endif
 
-    BOOL fAllowRel32 = g_fAllowRel32 | fForceRel32Overflow;
     size_t reserveForJumpStubs = 0;
 
-    // For determinism, never try to use the REL32 in compilation process
-    if (IsCompilationProcess())
-    {
-        fForceRel32Overflow = FALSE;
-        fAllowRel32 = FALSE;
-    }
-#endif // _TARGET_AMD64_
+#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
 
     for (;;)
     {
@@ -12706,10 +12717,15 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
         EEJitManager *jitMgr = NULL;
 #endif
 
-#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
-        if (fForceRel32Overflow)
-            jitInfo.SetRel32Overflow(fAllowRel32);
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) && !defined(CROSSGEN_COMPILE)
+#ifdef _TARGET_AMD64_
+        if (fForceJumpStubOverflow)
+            jitInfo.SetJumpStubOverflow(fAllowRel32);
         jitInfo.SetAllowRel32(fAllowRel32);
+#else
+        if (fForceJumpStubOverflow)
+            jitInfo.SetJumpStubOverflow(fForceJumpStubOverflow);
+#endif
         jitInfo.SetReserveForJumpStubs(reserveForJumpStubs);
 #endif
 
@@ -12858,21 +12874,23 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
     if (!nativeEntry)
         COMPlusThrow(kInvalidProgramException);
 
-#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
-        if (jitInfo.IsRel32Overflow())
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) && !defined(CROSSGEN_COMPILE)
+        if (jitInfo.IsJumpStubOverflow())
         {
             // Backout and try again with fAllowRel32 == FALSE.
             jitInfo.BackoutJitData(jitMgr);
 
+#ifdef _TARGET_AMD64_
             // Disallow rel32 relocs in future.
             g_fAllowRel32 = FALSE;
 
             fAllowRel32 = FALSE;
+#endif // _TARGET_AMD64_
 
             reserveForJumpStubs = jitInfo.GetReserveForJumpStubs();
             continue;
         }
-#endif // _TARGET_AMD64_ && !CROSSGEN_COMPILE
+#endif // (_TARGET_AMD64_ || _TARGET_ARM64_) && !CROSSGEN_COMPILE
 
     LOG((LF_JIT, LL_INFO10000,
         "Jitted Entry at" FMT_ADDR "method %s::%s %s\n", DBG_ADDR(nativeEntry),