Skip to content

Commit 2b320cb

Browse files
Abseil Team authored and copybara-github committed
Fix includes and fuse constructors of SpinLock.
PiperOrigin-RevId: 774835829 Change-Id: I0fa7cab1b98c1b7222de0acd71b7846df693f1e2
1 parent b40953d commit 2b320cb

File tree

11 files changed

+151
-118
lines changed

11 files changed

+151
-118
lines changed

absl/base/internal/spinlock.cc

Lines changed: 15 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -16,15 +16,18 @@
1616

1717
#include <algorithm>
1818
#include <atomic>
19+
#include <cstdint>
1920
#include <limits>
2021

2122
#include "absl/base/attributes.h"
23+
#include "absl/base/call_once.h"
2224
#include "absl/base/config.h"
2325
#include "absl/base/internal/atomic_hook.h"
2426
#include "absl/base/internal/cycleclock.h"
27+
#include "absl/base/internal/scheduling_mode.h"
2528
#include "absl/base/internal/spinlock_wait.h"
2629
#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
27-
#include "absl/base/call_once.h"
30+
#include "absl/base/internal/tsan_mutex_interface.h"
2831

2932
// Description of lock-word:
3033
// 31..00: [............................3][2][1][0]
@@ -58,7 +61,7 @@ namespace absl {
5861
ABSL_NAMESPACE_BEGIN
5962
namespace base_internal {
6063

61-
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook<void (*)(
64+
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook<void (*)(
6265
const void *lock, int64_t wait_cycles)>
6366
submit_profile_data;
6467

@@ -67,12 +70,6 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
6770
submit_profile_data.Store(fn);
6871
}
6972

70-
// Uncommon constructors.
71-
SpinLock::SpinLock(base_internal::SchedulingMode mode)
72-
: lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
73-
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
74-
}
75-
7673
// Monitor the lock to see if its value changes within some time period
7774
// (adaptive_spin_count loop iterations). The last value read from the lock
7875
// is returned from the method.
@@ -81,9 +78,8 @@ uint32_t SpinLock::SpinLoop() {
8178
// adaptive_spin_count here.
8279
ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
8380
ABSL_CONST_INIT static int adaptive_spin_count = 0;
84-
base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
85-
adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
86-
});
81+
LowLevelCallOnce(&init_adaptive_spin_count,
82+
[]() { adaptive_spin_count = NumCPUs() > 1 ? 1000 : 1; });
8783

8884
int c = adaptive_spin_count;
8985
uint32_t lock_value;
@@ -100,11 +96,11 @@ void SpinLock::SlowLock() {
10096
return;
10197
}
10298

103-
base_internal::SchedulingMode scheduling_mode;
99+
SchedulingMode scheduling_mode;
104100
if ((lock_value & kSpinLockCooperative) != 0) {
105-
scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
101+
scheduling_mode = SCHEDULE_COOPERATIVE_AND_KERNEL;
106102
} else {
107-
scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
103+
scheduling_mode = SCHEDULE_KERNEL_ONLY;
108104
}
109105

110106
// The lock was not obtained initially, so this thread needs to wait for
@@ -134,7 +130,7 @@ void SpinLock::SlowLock() {
134130
// new lock state will be the number of cycles this thread waited if
135131
// this thread obtains the lock.
136132
lock_value = TryLockInternal(lock_value, wait_cycles);
137-
continue; // Skip the delay at the end of the loop.
133+
continue; // Skip the delay at the end of the loop.
138134
} else if ((lock_value & kWaitTimeMask) == 0) {
139135
// The lock is still held, without a waiter being marked, but something
140136
// else about the lock word changed, causing our CAS to fail. For
@@ -150,8 +146,8 @@ void SpinLock::SlowLock() {
150146
// synchronization there to avoid false positives.
151147
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
152148
// Wait for an OS specific delay.
153-
base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
154-
scheduling_mode);
149+
SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
150+
scheduling_mode);
155151
ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
156152
// Spin again after returning from the wait routine to give this thread
157153
// some chance of obtaining the lock.
@@ -162,8 +158,8 @@ void SpinLock::SlowLock() {
162158
}
163159

164160
void SpinLock::SlowUnlock(uint32_t lock_value) {
165-
base_internal::SpinLockWake(&lockword_,
166-
false); // wake waiter if necessary
161+
SpinLockWake(&lockword_,
162+
false); // wake waiter if necessary
167163

168164
// If our acquisition was contended, collect contentionz profile info. We
169165
// reserve a unitary wait time to represent that a waiter exists without our

absl/base/internal/spinlock.h

Lines changed: 37 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
// - for use by Abseil internal code that Mutex itself depends on
2020
// - for async signal safety (see below)
2121

22-
// SpinLock with a base_internal::SchedulingMode::SCHEDULE_KERNEL_ONLY is async
22+
// SpinLock with a SchedulingMode::SCHEDULE_KERNEL_ONLY is async
2323
// signal safe. If a spinlock is used within a signal handler, all code that
2424
// acquires the lock must ensure that the signal cannot arrive while they are
2525
// holding the lock. Typically, this is done by blocking the signal.
@@ -31,14 +31,16 @@
3131

3232
#include <atomic>
3333
#include <cstdint>
34+
#include <type_traits>
3435

3536
#include "absl/base/attributes.h"
37+
#include "absl/base/config.h"
3638
#include "absl/base/const_init.h"
37-
#include "absl/base/dynamic_annotations.h"
3839
#include "absl/base/internal/low_level_scheduling.h"
3940
#include "absl/base/internal/raw_logging.h"
4041
#include "absl/base/internal/scheduling_mode.h"
4142
#include "absl/base/internal/tsan_mutex_interface.h"
43+
#include "absl/base/macros.h"
4244
#include "absl/base/thread_annotations.h"
4345

4446
namespace tcmalloc {
@@ -55,17 +57,31 @@ namespace base_internal {
5557

5658
class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
5759
public:
58-
SpinLock() : lockword_(kSpinLockCooperative) {
59-
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
60-
}
60+
constexpr SpinLock() : lockword_(kSpinLockCooperative) { RegisterWithTsan(); }
6161

6262
// Constructors that allow non-cooperative spinlocks to be created for use
6363
// inside thread schedulers. Normal clients should not use these.
64-
explicit SpinLock(base_internal::SchedulingMode mode);
64+
constexpr explicit SpinLock(SchedulingMode mode)
65+
: lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
66+
RegisterWithTsan();
67+
}
68+
69+
#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(_WIN32)
70+
// Constructor to inline users of the default scheduling mode.
71+
//
72+
// This only needs to exists for inliner runs, but doesn't work correctly in
73+
// clang+windows builds, likely due to mangling differences.
74+
ABSL_DEPRECATE_AND_INLINE()
75+
constexpr explicit SpinLock(SchedulingMode mode)
76+
__attribute__((enable_if(mode == SCHEDULE_COOPERATIVE_AND_KERNEL,
77+
"Cooperative use default constructor")))
78+
: SpinLock() {}
79+
#endif
6580

6681
// Constructor for global SpinLock instances. See absl/base/const_init.h.
67-
constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
68-
: lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
82+
ABSL_DEPRECATE_AND_INLINE()
83+
constexpr SpinLock(absl::ConstInitType, SchedulingMode mode)
84+
: SpinLock(mode) {}
6985

7086
// For global SpinLock instances prefer trivial destructor when possible.
7187
// Default but non-trivial destructor in some build configurations causes an
@@ -106,7 +122,7 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
106122
std::memory_order_release);
107123

108124
if ((lock_value & kSpinLockDisabledScheduling) != 0) {
109-
base_internal::SchedulingGuard::EnableRescheduling(true);
125+
SchedulingGuard::EnableRescheduling(true);
110126
}
111127
if ((lock_value & kWaitTimeMask) != 0) {
112128
// Collect contentionz profile info, and speed the wakeup of any waiter.
@@ -175,9 +191,16 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
175191
~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
176192

177193
// Returns true if the provided scheduling mode is cooperative.
178-
static constexpr bool IsCooperative(
179-
base_internal::SchedulingMode scheduling_mode) {
180-
return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
194+
static constexpr bool IsCooperative(SchedulingMode scheduling_mode) {
195+
return scheduling_mode == SCHEDULE_COOPERATIVE_AND_KERNEL;
196+
}
197+
198+
constexpr void RegisterWithTsan() {
199+
#if ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated)
200+
if (!__builtin_is_constant_evaluated()) {
201+
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
202+
}
203+
#endif
181204
}
182205

183206
bool IsCooperative() const {
@@ -243,7 +266,7 @@ inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
243266
if ((lock_value & kSpinLockCooperative) == 0) {
244267
// For non-cooperative locks we must make sure we mark ourselves as
245268
// non-reschedulable before we attempt to CompareAndSwap.
246-
if (base_internal::SchedulingGuard::DisableRescheduling()) {
269+
if (SchedulingGuard::DisableRescheduling()) {
247270
sched_disabled_bit = kSpinLockDisabledScheduling;
248271
}
249272
}
@@ -252,7 +275,7 @@ inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
252275
lock_value,
253276
kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
254277
std::memory_order_acquire, std::memory_order_relaxed)) {
255-
base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
278+
SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
256279
}
257280

258281
return lock_value;

absl/base/internal/thread_identity_test.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ namespace base_internal {
3131
namespace {
3232

3333
ABSL_CONST_INIT static absl::base_internal::SpinLock map_lock(
34-
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
34+
base_internal::SCHEDULE_KERNEL_ONLY);
3535
ABSL_CONST_INIT static int num_identities_reused ABSL_GUARDED_BY(map_lock);
3636

3737
static const void* const kCheckNoIdentity = reinterpret_cast<void*>(1);

absl/base/spinlock_test_common.cc

Lines changed: 35 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -60,24 +60,41 @@ namespace {
6060
static constexpr size_t kArrayLength = 10;
6161
static uint32_t values[kArrayLength];
6262

63-
ABSL_CONST_INIT static SpinLock static_cooperative_spinlock(
64-
absl::kConstInit, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
63+
ABSL_CONST_INIT static SpinLock static_cooperative_spinlock;
6564
ABSL_CONST_INIT static SpinLock static_noncooperative_spinlock(
66-
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
65+
base_internal::SCHEDULE_KERNEL_ONLY);
6766

6867
// Simple integer hash function based on the public domain lookup2 hash.
6968
// http://burtleburtle.net/bob/c/lookup2.c
7069
static uint32_t Hash32(uint32_t a, uint32_t c) {
7170
uint32_t b = 0x9e3779b9UL; // The golden ratio; an arbitrary value.
72-
a -= b; a -= c; a ^= (c >> 13);
73-
b -= c; b -= a; b ^= (a << 8);
74-
c -= a; c -= b; c ^= (b >> 13);
75-
a -= b; a -= c; a ^= (c >> 12);
76-
b -= c; b -= a; b ^= (a << 16);
77-
c -= a; c -= b; c ^= (b >> 5);
78-
a -= b; a -= c; a ^= (c >> 3);
79-
b -= c; b -= a; b ^= (a << 10);
80-
c -= a; c -= b; c ^= (b >> 15);
71+
a -= b;
72+
a -= c;
73+
a ^= (c >> 13);
74+
b -= c;
75+
b -= a;
76+
b ^= (a << 8);
77+
c -= a;
78+
c -= b;
79+
c ^= (b >> 13);
80+
a -= b;
81+
a -= c;
82+
a ^= (c >> 12);
83+
b -= c;
84+
b -= a;
85+
b ^= (a << 16);
86+
c -= a;
87+
c -= b;
88+
c ^= (b >> 5);
89+
a -= b;
90+
a -= c;
91+
a ^= (c >> 3);
92+
b -= c;
93+
b -= a;
94+
b ^= (a << 10);
95+
c -= a;
96+
c -= b;
97+
c ^= (b >> 15);
8198
return c;
8299
}
83100

@@ -134,7 +151,7 @@ TEST(SpinLock, WaitCyclesEncoding) {
134151
// We should be able to encode up to (1^kMaxCycleBits - 1) without clamping
135152
// but the lower kProfileTimestampShift will be dropped.
136153
const int kMaxCyclesShift =
137-
32 - kLockwordReservedShift + kProfileTimestampShift;
154+
32 - kLockwordReservedShift + kProfileTimestampShift;
138155
const int64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
139156

140157
// These bits should be zero after encoding.
@@ -171,22 +188,22 @@ TEST(SpinLock, WaitCyclesEncoding) {
171188
SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask));
172189

173190
// Check that we cannot produce kSpinLockSleeper during encoding.
174-
int64_t sleeper_cycles =
175-
kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift);
191+
int64_t sleeper_cycles = kSpinLockSleeper
192+
<< (kProfileTimestampShift - kLockwordReservedShift);
176193
uint32_t sleeper_value =
177194
SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles);
178195
EXPECT_NE(sleeper_value, kSpinLockSleeper);
179196

180197
// Test clamping
181198
uint32_t max_value =
182-
SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
199+
SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
183200
int64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
184201
int64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
185202
EXPECT_EQ(expected_max_value_decoded, max_value_decoded);
186203

187204
const int64_t step = (1 << kProfileTimestampShift);
188-
uint32_t after_max_value =
189-
SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
205+
uint32_t after_max_value = SpinLockTest::EncodeWaitCycles(
206+
start_time, start_time + kMaxCycles + step);
190207
int64_t after_max_value_decoded =
191208
SpinLockTest::DecodeWaitCycles(after_max_value);
192209
EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);

absl/debugging/symbolize_elf.inc

Lines changed: 13 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -171,18 +171,18 @@ struct FileMappingHint {
171171
// is being modified (is busy), we skip all decorators, and possibly
172172
// loose some info. Sorry, that's the best we could do.
173173
ABSL_CONST_INIT absl::base_internal::SpinLock g_decorators_mu(
174-
absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
174+
absl::base_internal::SCHEDULE_KERNEL_ONLY);
175175

176176
const int kMaxFileMappingHints = 8;
177177
int g_num_file_mapping_hints;
178178
FileMappingHint g_file_mapping_hints[kMaxFileMappingHints];
179179
// Protects g_file_mapping_hints.
180180
ABSL_CONST_INIT absl::base_internal::SpinLock g_file_mapping_mu(
181-
absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
181+
absl::base_internal::SCHEDULE_KERNEL_ONLY);
182182

183183
// Async-signal-safe function to zero a buffer.
184184
// memset() is not guaranteed to be async-signal-safe.
185-
static void SafeMemZero(void* p, size_t size) {
185+
static void SafeMemZero(void *p, size_t size) {
186186
unsigned char *c = static_cast<unsigned char *>(p);
187187
while (size--) {
188188
*c++ = 0;
@@ -1469,14 +1469,15 @@ static bool MaybeInitializeObjFile(ObjFile *obj) {
14691469
constexpr int interesting = PF_X | PF_R;
14701470
#endif
14711471

1472-
if (phdr.p_type != PT_LOAD
1473-
|| (phdr.p_flags & interesting) != interesting) {
1472+
if (phdr.p_type != PT_LOAD ||
1473+
(phdr.p_flags & interesting) != interesting) {
14741474
// Not a LOAD segment, not executable code, and not a function
14751475
// descriptor.
14761476
continue;
14771477
}
14781478
if (num_interesting_load_segments < obj->phdr.size()) {
1479-
memcpy(&obj->phdr[num_interesting_load_segments++], &phdr, sizeof(phdr));
1479+
memcpy(&obj->phdr[num_interesting_load_segments++], &phdr,
1480+
sizeof(phdr));
14801481
} else {
14811482
ABSL_RAW_LOG(
14821483
WARNING, "%s: too many interesting LOAD segments: %zu >= %zu",
@@ -1525,7 +1526,8 @@ const char *Symbolizer::GetUncachedSymbol(const void *pc) {
15251526
ABSL_RAW_CHECK(p.p_type == PT_NULL, "unexpected p_type");
15261527
break;
15271528
}
1528-
if (pc < reinterpret_cast<void *>(start_addr + p.p_vaddr + p.p_memsz)) {
1529+
if (pc <
1530+
reinterpret_cast<void *>(start_addr + p.p_vaddr + p.p_memsz)) {
15291531
phdr = &p;
15301532
break;
15311533
}
@@ -1671,8 +1673,8 @@ int InstallSymbolDecorator(SymbolDecorator decorator, void *arg) {
16711673
return ret;
16721674
}
16731675

1674-
bool RegisterFileMappingHint(const void *start, const void *end, uint64_t offset,
1675-
const char *filename) {
1676+
bool RegisterFileMappingHint(const void *start, const void *end,
1677+
uint64_t offset, const char *filename) {
16761678
SAFE_ASSERT(start <= end);
16771679
SAFE_ASSERT(filename != nullptr);
16781680

@@ -1765,7 +1767,8 @@ ABSL_NAMESPACE_END
17651767
} // namespace absl
17661768

17671769
extern "C" bool AbslInternalGetFileMappingHint(const void **start,
1768-
const void **end, uint64_t *offset,
1770+
const void **end,
1771+
uint64_t *offset,
17691772
const char **filename) {
17701773
return absl::debugging_internal::GetFileMappingHint(start, end, offset,
17711774
filename);

absl/log/internal/vlog_config.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ struct VModuleInfo final {
9090
// To avoid problems with the heap checker which calls into `VLOG`, `mutex` must
9191
// be a `SpinLock` that prevents fiber scheduling instead of a `Mutex`.
9292
ABSL_CONST_INIT absl::base_internal::SpinLock mutex(
93-
absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
93+
absl::base_internal::SCHEDULE_KERNEL_ONLY);
9494

9595
// `GetUpdateSitesMutex()` serializes updates to all of the sites (i.e. those in
9696
// `site_list_head`) themselves.

0 commit comments

Comments (0)