diff --git a/src/coreclr/System.Private.CoreLib/src/System/Threading/Interlocked.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Threading/Interlocked.CoreCLR.cs
index 4a62578672c75c..d4ab6346f83769 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Threading/Interlocked.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Threading/Interlocked.CoreCLR.cs
@@ -1,6 +1,7 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
+using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
@@ -127,6 +128,26 @@ public static int CompareExchange(ref int location1, int value, int comparand)
[MethodImpl(MethodImplOptions.InternalCall)]
private static extern int CompareExchange32(ref int location1, int value, int comparand);
+ // This is used internally in cases where having a managed
+ // ref to the location is unsafe (Ex: it is the object header of a pinned object).
+ // The intrinsic expansion for this overload is exactly the same as for the `ref int`
+ // variant and will go on the same path since expansion is triggered by the name and
+ // return type of the method.
+ // The important part is avoiding `ref *location`, which would be reported as a byref to the GC.
+ [Intrinsic]
+ internal static unsafe int CompareExchange(int* location1, int value, int comparand)
+ {
+#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_RISCV64
+ return CompareExchange(location1, value, comparand); // Must expand intrinsic
+#else
+ Debug.Assert(location1 != null);
+ return CompareExchange32Pointer(location1, value, comparand);
+#endif
+ }
+
+ [MethodImpl(MethodImplOptions.InternalCall)]
+ private static unsafe extern int CompareExchange32Pointer(int* location1, int value, int comparand);
+
/// Compares two 64-bit signed integers for equality and, if they are equal, replaces the first value.
/// The destination, whose value is compared with and possibly replaced.
/// The value that replaces the destination value if the comparison results in equality.
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Threading/ObjectHeader.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Threading/ObjectHeader.CoreCLR.cs
index 0b978c2e000765..7a91dffbc677ae 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Threading/ObjectHeader.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Threading/ObjectHeader.CoreCLR.cs
@@ -150,7 +150,7 @@ private static unsafe HeaderLockResult TryAcquireThinLockSpin(object obj)
int newBits = oldBits + SBLK_LOCK_RECLEVEL_INC;
if ((newBits & SBLK_MASK_LOCK_RECLEVEL) != 0)
{
- if (Interlocked.CompareExchange(ref *pHeader, newBits, oldBits) == oldBits)
+ if (Interlocked.CompareExchange(pHeader, newBits, oldBits) == oldBits)
{
return HeaderLockResult.Success;
}
@@ -170,7 +170,7 @@ private static unsafe HeaderLockResult TryAcquireThinLockSpin(object obj)
else if ((oldBits & SBLK_MASK_LOCK_THREADID) == 0)
{
int newBits = oldBits | currentThreadID;
- if (Interlocked.CompareExchange(ref *pHeader, newBits, oldBits) == oldBits)
+ if (Interlocked.CompareExchange(pHeader, newBits, oldBits) == oldBits)
{
return HeaderLockResult.Success;
}
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Interlocked.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Interlocked.cs
index 7596f676c901e8..5ee8576357a1aa 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Interlocked.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Threading/Interlocked.cs
@@ -24,13 +24,12 @@ public static int CompareExchange(ref int location1, int value, int comparand)
#endif
}
- // This is used internally by NativeAOT runtime in cases where having a managed
- // ref to the location is unsafe (Ex: it is the syncblock of a pinned object).
+ // This is used internally in cases where having a managed
+ // ref to the location is unsafe (Ex: it is the object header of a pinned object).
// The intrinsic expansion for this overload is exactly the same as for the `ref int`
// variant and will go on the same path since expansion is triggered by the name and
// return type of the method.
- // The important part is avoiding `ref *location` in the unexpanded scenario, like
- // in a case when compiling the Debug flavor of the app.
+ // The important part is avoiding `ref *location`, which would be reported as a byref to the GC.
[Intrinsic]
internal static unsafe int CompareExchange(int* location1, int value, int comparand)
{
diff --git a/src/coreclr/vm/ecalllist.h b/src/coreclr/vm/ecalllist.h
index 61828016c80987..e03240ef6d0e61 100644
--- a/src/coreclr/vm/ecalllist.h
+++ b/src/coreclr/vm/ecalllist.h
@@ -323,6 +323,7 @@ FCFuncStart(gInterlockedFuncs)
FCFuncElement("Exchange64", COMInterlocked::Exchange64)
FCFuncElement("ExchangeObject", COMInterlocked::ExchangeObject)
FCFuncElement("CompareExchange32", COMInterlocked::CompareExchange32)
+ FCFuncElement("CompareExchange32Pointer", COMInterlocked::CompareExchange32)
FCFuncElement("CompareExchange64", COMInterlocked::CompareExchange64)
FCFuncElement("CompareExchangeObject", COMInterlocked::CompareExchangeObject)
FCFuncElement("ExchangeAdd32", COMInterlocked::ExchangeAdd32)
diff --git a/src/coreclr/vm/syncblk.cpp b/src/coreclr/vm/syncblk.cpp
index 00021d92f87d65..c210092c685fdb 100644
--- a/src/coreclr/vm/syncblk.cpp
+++ b/src/coreclr/vm/syncblk.cpp
@@ -1737,6 +1737,25 @@ OBJECTHANDLE SyncBlock::GetOrCreateLock(OBJECTREF lockObj)
// Create the handle here.
OBJECTHANDLEHolder lockHandle = GetAppDomain()->CreateHandle(lockObj);
+ if (TryUpgradeThinLockToFullLock(lockHandle.GetValue()))
+ {
+ // Our lock instance is the one in the sync block now.
+ return lockHandle.Extract();
+ }
+
+ return VolatileLoad(&m_Lock);
+}
+
+bool SyncBlock::TryUpgradeThinLockToFullLock(OBJECTHANDLE lockHandle)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
// Switch to preemptive so we can grab the spin-lock.
// Use the NO_DTOR version so we don't do a coop->preemptive->coop transition on return.
GCX_PREEMP_NO_DTOR();
@@ -1747,10 +1766,9 @@ OBJECTHANDLE SyncBlock::GetOrCreateLock(OBJECTREF lockObj)
GCX_PREEMP_NO_DTOR_END();
// Check again now that we hold the spin-lock
- existingLock = m_Lock;
- if (existingLock != (OBJECTHANDLE)NULL)
+ if (m_Lock != (OBJECTHANDLE)NULL)
{
- return existingLock;
+ return false;
}
// We need to create a new lock
@@ -1761,32 +1779,31 @@ OBJECTHANDLE SyncBlock::GetOrCreateLock(OBJECTREF lockObj)
if (thinLock != 0)
{
-
- // We have thin-lock info that needs to be transferred to the lock object.
DWORD lockThreadId = thinLock & SBLK_MASK_LOCK_THREADID;
DWORD recursionLevel = (thinLock & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
_ASSERTE(lockThreadId != 0);
PREPARE_NONVIRTUAL_CALLSITE(METHOD__LOCK__INITIALIZE_FOR_MONITOR);
+
+ // We have thin-lock info that needs to be transferred to the lock object.
+ OBJECTREF lockObj = ObjectFromHandle(lockHandle);
+
DECLARE_ARGHOLDER_ARRAY(args, 3);
- args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(lockHandle));
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(lockObj);
args[ARGNUM_1] = DWORD_TO_ARGHOLDER(lockThreadId);
args[ARGNUM_2] = DWORD_TO_ARGHOLDER(recursionLevel);
CALL_MANAGED_METHOD_NORET(args);
}
- VolatileStore(&m_Lock, lockHandle.GetValue());
-
- // Our lock instance is in the sync block now.
- // Don't release it.
- lockHandle.SuppressRelease();
-
- // Also, clear the thin lock info.
+ VolatileStore(&m_Lock, lockHandle);
+ // Clear the thin lock info.
// It won't be used any more, but it will look out of date.
// Only clear the relevant bits, as the spin-lock bit is used to lock this method.
// That bit will be reset upon return.
m_thinLock.StoreWithoutBarrier(m_thinLock.LoadWithoutBarrier() & ~((SBLK_MASK_LOCK_THREADID) | (SBLK_MASK_LOCK_RECLEVEL)));
- return lockHandle;
+ // Our lock instance is in the sync block now.
+ // Don't release it.
+ return true;
}
#endif // !DACCESS_COMPILE
diff --git a/src/coreclr/vm/syncblk.h b/src/coreclr/vm/syncblk.h
index ec7f8567ac6c9b..19cf57f7b6f9cd 100644
--- a/src/coreclr/vm/syncblk.h
+++ b/src/coreclr/vm/syncblk.h
@@ -592,6 +592,8 @@ class SyncBlock
private:
void InitializeThinLock(DWORD recursionLevel, DWORD threadId);
+ bool TryUpgradeThinLockToFullLock(OBJECTHANDLE lockHandle);
+
friend struct ::cdac_data;
};
diff --git a/src/coreclr/vm/syncblk.inl b/src/coreclr/vm/syncblk.inl
index 7131f2f7754805..245c16fdaa66a8 100644
--- a/src/coreclr/vm/syncblk.inl
+++ b/src/coreclr/vm/syncblk.inl
@@ -74,9 +74,8 @@ FORCEINLINE ObjHeader::HeaderLockResult ObjHeader::AcquireHeaderThinLock(DWORD t
return HeaderLockResult::Success;
}
- // Use the slow path instead of spinning. The compare-exchange above would not fail often, and it's not worth forcing the
- // spin loop that typically follows the call to this function to check the recursive case, so just bail to the slow path.
- return HeaderLockResult::UseSlowPath;
+ // We failed to acquire because another thread concurrently modified other bits in the header.
+ return HeaderLockResult::Failure;
}
// Helper encapsulating the core logic for releasing monitor. Returns what kind of