 
 using namespace __tsan;
 
-static bool correctRaceDetection() {
-  return flags()->correct_race_detection;
+static bool relaxedSupport() {
+  return flags()->relaxed_support;
 }
 
 #if !SANITIZER_GO && __TSAN_HAS_INT128
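Note: the declaration of relaxed_support itself is outside these hunks; assuming it is registered in tsan_flags.inc like the runtime's other flags, it would be enabled at startup with TSAN_OPTIONS=relaxed_support=1.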
@@ -231,16 +231,16 @@ namespace {
 template <typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
   MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
-  if (!correctRaceDetection()) {
+  if (!relaxedSupport()) {
     if (LIKELY(mo == mo_relaxed))
       return F(a, v);
   }
   SlotLocker locker(thr);
   {
     auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
-    bool fullLock = correctRaceDetection() || IsReleaseOrder(mo);
+    bool fullLock = relaxedSupport() || IsReleaseOrder(mo);
     RWLock lock(&s->mtx, fullLock);
-    if (!correctRaceDetection()) {
+    if (!relaxedSupport()) {
       if (IsAcqRelOrder(mo))
         thr->clock.ReleaseAcquire(&s->clock);
       else if (IsReleaseOrder(mo))
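Note: with the flag on, relaxed RMWs no longer return through the fast path above, and fullLock forces a writer lock on s->mtx even for non-release orders; presumably the relaxed-support branches (beyond this hunk's context) mutate clock state that the read lock taken in the old non-release case would not permit.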
@@ -286,7 +286,7 @@ struct OpLoad {
     DCHECK(IsLoadOrder(mo));
     // This fast-path is critical for performance.
     // Assume the access is atomic.
-    if (!correctRaceDetection() && !IsAcquireOrder(mo)) {
+    if (!relaxedSupport() && !IsAcquireOrder(mo)) {
       MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                    kAccessRead | kAccessAtomic);
       return NoTsanAtomic(mo, a);
@@ -300,7 +300,7 @@ struct OpLoad {
     ReadLock lock(&s->mtx);
     if (IsAcquireOrder(mo)) {
       thr->clock.Acquire(s->clock);
-    } else if (correctRaceDetection()) {
+    } else if (relaxedSupport()) {
       thr->clockA.Acquire(s->clock);
     }
     // Re-read under sync mutex because we need a consistent snapshot
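From this hunk, clockA appears to be a per-thread clock that accumulates what relaxed loads have observed without folding it into thr->clock; the fence handling at the bottom of this diff then acquires clockA into the main clock on an acquire fence. clockR plays the symmetric role for relaxed stores (see OpStore below).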
@@ -335,7 +335,7 @@ struct OpStore {
     // Assume the access is atomic.
     // Strictly saying even relaxed store cuts off release sequence,
     // so must reset the clock.
-    if (!correctRaceDetection() && !IsReleaseOrder(mo)) {
+    if (!relaxedSupport() && !IsReleaseOrder(mo)) {
      NoTsanAtomic(mo, a, v);
      return;
    }
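The release-sequence comment above is worth a concrete illustration. A minimal sketch (my example, not from this commit; function and variable names are made up; C++20 release-sequence rules):

#include <atomic>
#include <thread>

int payload = 0;
std::atomic<int> sync_var{0};

void writer() {
  payload = 42;
  sync_var.store(1, std::memory_order_release);  // heads a release sequence
}

void spoiler() {
  // A plain (non-RMW) relaxed store from another thread is not part of
  // writer()'s release sequence; it terminates the sequence in
  // modification order.
  sync_var.store(2, std::memory_order_relaxed);
}

void reader() {
  if (sync_var.load(std::memory_order_acquire) == 2) {
    // The acquire load observed spoiler()'s store, which lies outside the
    // release sequence, so no synchronizes-with edge reaches back to
    // writer(): this read of payload is a data race.
    int r = payload;
    (void)r;
  }
}

int main() {
  std::thread t1(writer), t2(spoiler), t3(reader);
  t1.join();
  t2.join();
  t3.join();
  return 0;
}

This is why a relaxed store cannot simply be skipped when precise modeling is requested: it must replace the sync variable's clock rather than leave it intact, which, as I read it, is what the clockR.ReleaseStore branch in the next hunk does.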
@@ -345,7 +345,7 @@ struct OpStore {
       Lock lock(&s->mtx);
       if (IsReleaseOrder(mo))
         thr->clock.ReleaseStore(&s->clock);
-      else if (correctRaceDetection())
+      else if (relaxedSupport())
         thr->clockR.ReleaseStore(&s->clock);
       NoTsanAtomic(mo, a, v);
     }
@@ -471,7 +471,7 @@ struct OpCAS {
 
     MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                  kAccessWrite | kAccessAtomic);
-    if (LIKELY(!correctRaceDetection() && mo == mo_relaxed && fmo == mo_relaxed)) {
+    if (LIKELY(!relaxedSupport() && mo == mo_relaxed && fmo == mo_relaxed)) {
       T cc = *c;
       T pr = func_cas(a, cc, v);
       if (pr == cc)
@@ -484,7 +484,7 @@ struct OpCAS {
     bool success;
     {
       auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
-      bool fullLock = correctRaceDetection() || release;
+      bool fullLock = relaxedSupport() || release;
       RWLock lock(&s->mtx, fullLock);
       T cc = *c;
       T pr = func_cas(a, cc, v);
@@ -493,7 +493,7 @@ struct OpCAS {
         *c = pr;
         mo = fmo;
       }
-      if (!correctRaceDetection()) {
+      if (!relaxedSupport()) {
         if (success && IsAcqRelOrder(mo))
           thr->clock.ReleaseAcquire(&s->clock);
         else if (success && IsReleaseOrder(mo))
@@ -533,7 +533,7 @@ struct OpFence {
   static void NoTsanAtomic(morder mo) { __sync_synchronize(); }
 
   static void Atomic(ThreadState *thr, uptr pc, morder mo) {
-    if (correctRaceDetection()) {
+    if (relaxedSupport()) {
       SlotLocker locker(thr);
       if (IsAcquireOrder(mo))
         thr->clock.Acquire(&thr->clockA);
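Taken together, the clockA/clockR plumbing reads like support for fence-based synchronization over relaxed atomics: relaxed loads accumulate into clockA, relaxed stores publish from clockR, and (in the hunk above) an acquire fence folds clockA into the thread's main clock. Under that reading, the canonical pattern this mode should accept without a false report is the fence-to-fence handoff below (my sketch, not a test from this commit; producer/consumer/ready are illustrative names):

#include <atomic>
#include <thread>

int payload = 0;
std::atomic<int> ready{0};

void producer() {
  payload = 42;
  // Release fence: publishes all prior writes to any thread whose acquire
  // fence follows a read of an atomic value written after this point.
  std::atomic_thread_fence(std::memory_order_release);
  ready.store(1, std::memory_order_relaxed);
}

void consumer() {
  while (ready.load(std::memory_order_relaxed) == 0) {
  }
  // Acquire fence: synchronizes with the release fence above because the
  // relaxed load saw the relaxed store that follows it.
  std::atomic_thread_fence(std::memory_order_acquire);
  int r = payload;  // ordered by fence-to-fence synchronization; not a race
  (void)r;
}

int main() {
  std::thread t1(producer), t2(consumer);
  t1.join();
  t2.join();
  return 0;
}

Mainline TSan does not model standalone atomic_thread_fence synchronization, so it reports a false race on payload for this pattern; routing relaxed accesses through clockA/clockR gives the fence handler something to synchronize on.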