@@ -71,17 +71,23 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
 }
 
 // Monitor the lock to see if its value changes within some time period
-// (adaptive_spin_count loop iterations). The last value read from the lock
+// (adaptive_spin_count_ loop iterations). The last value read from the lock
 // is returned from the method.
+ABSL_CONST_INIT std::atomic<int> SpinLock::adaptive_spin_count_{0};
 uint32_t SpinLock::SpinLoop() {
   // We are already in the slow path of SpinLock, initialize the
   // adaptive_spin_count here.
-  ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
-  ABSL_CONST_INIT static int adaptive_spin_count = 0;
-  LowLevelCallOnce(&init_adaptive_spin_count,
-                   []() { adaptive_spin_count = NumCPUs() > 1 ? 1000 : 1; });
-
-  int c = adaptive_spin_count;
+  if (adaptive_spin_count_.load(std::memory_order_relaxed) == 0) {
+    int current_spin_count = 0;
+    int new_spin_count = NumCPUs() > 1 ? 1000 : 1;
+    // If this fails, the value will remain unchanged. We may not spin for the
+    // intended duration, but that is still safe. We will try again on the next
+    // call to SpinLoop.
+    adaptive_spin_count_.compare_exchange_weak(
+        current_spin_count, new_spin_count, std::memory_order_relaxed,
+        std::memory_order_relaxed);
+  }
+  int c = adaptive_spin_count_.load(std::memory_order_relaxed);
   uint32_t lock_value;
   do {
     lock_value = lockword_.load(std::memory_order_relaxed);
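
The change replaces a `LowLevelCallOnce` guard with a racy-but-benign lazy initialization: any thread that observes the sentinel value 0 computes the spin count and tries to install it with a relaxed compare-and-swap. Below is a minimal, standalone sketch of that pattern under the same assumptions; `num_cpus` and `GetAdaptiveSpinCount` are hypothetical names standing in for Abseil's internal `NumCPUs()` and the body of `SpinLoop()`, not part of the actual change.

```cpp
#include <atomic>
#include <thread>

// Hypothetical stand-in for Abseil's internal NumCPUs().
static int num_cpus() {
  const unsigned n = std::thread::hardware_concurrency();
  return n == 0 ? 1 : static_cast<int>(n);
}

static std::atomic<int> adaptive_spin_count{0};  // 0 means "not yet initialized".

static int GetAdaptiveSpinCount() {
  if (adaptive_spin_count.load(std::memory_order_relaxed) == 0) {
    int expected = 0;
    const int desired = num_cpus() > 1 ? 1000 : 1;
    // Relaxed ordering is sufficient because every racing thread computes the
    // same `desired`, so whichever CAS wins stores the value all of them
    // wanted. A spurious weak-CAS failure just means we retry on a later call.
    adaptive_spin_count.compare_exchange_weak(expected, desired,
                                              std::memory_order_relaxed,
                                              std::memory_order_relaxed);
  }
  return adaptive_spin_count.load(std::memory_order_relaxed);
}
```

This avoids the once-flag's synchronization entirely: the only cost in the common case is one relaxed load, and the worst case (the CAS fails or has not run yet) is one under-length spin, which the diff's comment notes is still safe.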