This repository was archived by the owner on Jan 23, 2023. It is now read-only.

Commit 4fca952

sdmaclea authored and jkotas committed

[Arm64] reserve for jump stubs (#17244)

1 parent c0fbb14 commit 4fca952

File tree

src/vm/codeman.cpp
src/vm/jitinterface.cpp
src/vm/jitinterface.h

3 files changed: 55 additions & 31 deletions

src/vm/codeman.cpp

Lines changed: 1 addition & 1 deletion
@@ -2063,7 +2063,7 @@ static size_t GetDefaultReserveForJumpStubs(size_t codeHeapSize)
 {
     LIMITED_METHOD_CONTRACT;

-#ifdef _TARGET_AMD64_
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
     //
     // Keep a small default reserve at the end of the codeheap for jump stubs. It should reduce
     // chance that we won't be able allocate jump stub because of lack of suitable address space.
src/vm/jitinterface.cpp

Lines changed: 40 additions & 22 deletions
@@ -11371,7 +11371,7 @@ void CEEJitInfo::recordRelocation(void * location,
                 // When m_fAllowRel32 == TRUE, the JIT will use REL32s for both data addresses and direct code targets.
                 // Since we cannot tell what the relocation is for, we have to defensively retry.
                 //
-                m_fRel32Overflow = TRUE;
+                m_fJumpStubOverflow = TRUE;
                 delta = 0;
             }
             else
@@ -11385,7 +11385,7 @@ void CEEJitInfo::recordRelocation(void * location,
             {
                 // This forces the JIT to retry the method, which allows us to reserve more space for jump stubs and have a higher chance that
                 // we will find space for them.
-                m_fRel32Overflow = TRUE;
+                m_fJumpStubOverflow = TRUE;
             }

             // Keep track of conservative estimate of how much memory may be needed by jump stubs. We will use it to reserve extra memory
@@ -11429,12 +11429,12 @@ void CEEJitInfo::recordRelocation(void * location,
             if (!FitsInRel28(delta))
             {
                 // Use jump stub.
-                //
+                //
                 TADDR baseAddr = (TADDR)fixupLocation;
                 TADDR loAddr = baseAddr - 0x08000000;   // -2^27
                 TADDR hiAddr = baseAddr + 0x07FFFFFF;   // +2^27-1

-                // Check for the wrap around cases
+                // Check for the wrap around cases
                 if (loAddr > baseAddr)
                     loAddr = UINT64_MIN; // overflow
                 if (hiAddr < baseAddr)
@@ -11443,7 +11443,21 @@ void CEEJitInfo::recordRelocation(void * location,
                 PCODE jumpStubAddr = ExecutionManager::jumpStub(m_pMethodBeingCompiled,
                                                                 (PCODE) target,
                                                                 (BYTE *) loAddr,
-                                                                (BYTE *) hiAddr);
+                                                                (BYTE *) hiAddr,
+                                                                NULL,
+                                                                false);
+
+                // Keep track of conservative estimate of how much memory may be needed by jump stubs. We will use it to reserve extra memory
+                // on retry to increase chances that the retry succeeds.
+                m_reserveForJumpStubs = max(0x400, m_reserveForJumpStubs + 2*BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
+
+                if (jumpStubAddr == 0)
+                {
+                    // This forces the JIT to retry the method, which allows us to reserve more space for jump stubs and have a higher chance that
+                    // we will find space for them.
+                    m_fJumpStubOverflow = TRUE;
+                    break;
+                }

                 delta = (INT64)(jumpStubAddr - fixupLocation);
@@ -12674,25 +12688,22 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
     }
 #endif //_DEBUG

-#ifdef _TARGET_AMD64_
-    BOOL fForceRel32Overflow = FALSE;
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
+    BOOL fForceJumpStubOverflow = FALSE;

 #ifdef _DEBUG
     // Always exercise the overflow codepath with force relocs
     if (PEDecoder::GetForceRelocs())
-        fForceRel32Overflow = TRUE;
+        fForceJumpStubOverflow = TRUE;
+#endif
+
+#if defined(_TARGET_AMD64_)
+    BOOL fAllowRel32 = (g_fAllowRel32 | fForceJumpStubOverflow);
 #endif

-    BOOL fAllowRel32 = g_fAllowRel32 | fForceRel32Overflow;
     size_t reserveForJumpStubs = 0;

-    // For determinism, never try to use the REL32 in compilation process
-    if (IsCompilationProcess())
-    {
-        fForceRel32Overflow = FALSE;
-        fAllowRel32 = FALSE;
-    }
-#endif // _TARGET_AMD64_
+#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)

     for (;;)
     {
@@ -12706,10 +12717,15 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
         EEJitManager *jitMgr = NULL;
 #endif

-#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
-        if (fForceRel32Overflow)
-            jitInfo.SetRel32Overflow(fAllowRel32);
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) && !defined(CROSSGEN_COMPILE)
+#ifdef _TARGET_AMD64_
+        if (fForceJumpStubOverflow)
+            jitInfo.SetJumpStubOverflow(fAllowRel32);
         jitInfo.SetAllowRel32(fAllowRel32);
+#else
+        if (fForceJumpStubOverflow)
+            jitInfo.SetJumpStubOverflow(fForceJumpStubOverflow);
+#endif
         jitInfo.SetReserveForJumpStubs(reserveForJumpStubs);
 #endif

@@ -12858,21 +12874,23 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
         if (!nativeEntry)
             COMPlusThrow(kInvalidProgramException);

-#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
-        if (jitInfo.IsRel32Overflow())
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) && !defined(CROSSGEN_COMPILE)
+        if (jitInfo.IsJumpStubOverflow())
         {
             // Backout and try again with fAllowRel32 == FALSE.
             jitInfo.BackoutJitData(jitMgr);

+#ifdef _TARGET_AMD64_
             // Disallow rel32 relocs in future.
             g_fAllowRel32 = FALSE;

             fAllowRel32 = FALSE;
+#endif // _TARGET_AMD64_

             reserveForJumpStubs = jitInfo.GetReserveForJumpStubs();
             continue;
         }
-#endif // _TARGET_AMD64_ && !CROSSGEN_COMPILE
+#endif // (_TARGET_AMD64_ || _TARGET_ARM64_) && !CROSSGEN_COMPILE

         LOG((LF_JIT, LL_INFO10000,
             "Jitted Entry at" FMT_ADDR "method %s::%s %s\n", DBG_ADDR(nativeEntry),

src/vm/jitinterface.h

Lines changed: 14 additions & 8 deletions
@@ -1351,23 +1351,25 @@ class CEEJitInfo : public CEEInfo
         LIMITED_METHOD_CONTRACT;
         m_fAllowRel32 = fAllowRel32;
     }
+#endif

-    void SetRel32Overflow(BOOL fRel32Overflow)
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
+    void SetJumpStubOverflow(BOOL fJumpStubOverflow)
     {
         LIMITED_METHOD_CONTRACT;
-        m_fRel32Overflow = fRel32Overflow;
+        m_fJumpStubOverflow = fJumpStubOverflow;
     }

-    BOOL IsRel32Overflow()
+    BOOL IsJumpStubOverflow()
     {
         LIMITED_METHOD_CONTRACT;
-        return m_fRel32Overflow;
+        return m_fJumpStubOverflow;
     }

     BOOL JitAgain()
     {
         LIMITED_METHOD_CONTRACT;
-        return m_fRel32Overflow;
+        return m_fJumpStubOverflow;
     }

     size_t GetReserveForJumpStubs()
@@ -1411,7 +1413,9 @@ class CEEJitInfo : public CEEInfo
 #endif
 #ifdef _TARGET_AMD64_
         m_fAllowRel32(FALSE),
-        m_fRel32Overflow(FALSE),
+#endif
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
+        m_fJumpStubOverflow(FALSE),
         m_reserveForJumpStubs(0),
 #endif
         m_GCinfo_len(0),
@@ -1495,8 +1499,10 @@ protected :

 #ifdef _TARGET_AMD64_
     BOOL                    m_fAllowRel32;          // Use 32-bit PC relative address modes
-    BOOL                    m_fRel32Overflow;       // Overflow while trying to use encode 32-bit PC relative address.
-                                                    // The code will need to be regenerated with m_fRel32Allowed == FALSE.
+#endif
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
+    BOOL                    m_fJumpStubOverflow;    // Overflow while trying to alocate jump stub slot within PC relative branch region
+                                                    // The code will need to be regenerated (with m_fRel32Allowed == FALSE for AMD64).
     size_t                  m_reserveForJumpStubs;  // Space to reserve for jump stubs when allocating code
 #endif
15021508
