Commit 6f79037

Tensorstore Team authored and copybara-github committed
No public description
PiperOrigin-RevId: 819158467
Change-Id: I2378c39616c5236a7f75b55a215e6879dee95edb
1 parent 9ae7381 commit 6f79037

2 files changed (+14 −14 lines)

tensorstore/internal/cache/cache.cc

Lines changed: 11 additions & 11 deletions
@@ -178,7 +178,7 @@ void MaybeEvictEntries(CachePoolImpl* pool) noexcept {
   bool evict = false;
   bool should_delete_cache = false;
   auto& shard = cache->ShardForKey(entry->key_);
-  if (absl::MutexLock lock(&shard.mutex);
+  if (absl::MutexLock lock(shard.mutex);
       entry->reference_count_.load(std::memory_order_acquire) == 0) {
     [[maybe_unused]] size_t erase_count = shard.entries.erase(entry);
     assert(erase_count == 1);
@@ -232,16 +232,16 @@ void DestroyCache(CachePoolImpl* pool,
     // been marked `ShouldDelete == true`, that the pool already contains a
     // replacement cache with the same key; the replacement cache should be
     // ignored.
-    absl::MutexLock lock(&pool->caches_mutex_);
+    absl::MutexLock lock(pool->caches_mutex_);
     auto it = pool->caches_.find(cache);
     if (it != pool->caches_.end() && *it == cache) {
       pool->caches_.erase(it);
     }
   }
   if (HasLruCache(pool)) {
-    absl::MutexLock lru_lock(&pool->lru_mutex_);
+    absl::MutexLock lru_lock(pool->lru_mutex_);
     for (auto& shard : cache->shards_) {
-      absl::MutexLock lock(&shard.mutex);
+      absl::MutexLock lock(shard.mutex);
       for (CacheEntryImpl* entry : shard.entries) {
         // Increment reference count by 2, to ensure that concurrently
         // releasing the last weak reference to `entry` does not result in a
@@ -255,7 +255,7 @@ void DestroyCache(CachePoolImpl* pool,
     // the entries can safely be destroyed without holding any locks.
   } else {
     for (auto& shard : cache->shards_) {
-      absl::MutexLock lock(&shard.mutex);
+      absl::MutexLock lock(shard.mutex);
       for (CacheEntryImpl* entry : shard.entries) {
         // Increment reference count by 2, to ensure that concurrently
         // releasing the last weak reference to `entry` does not result in a
@@ -429,7 +429,7 @@ CachePtr<Cache> GetCacheInternal(
   CachePoolImpl::CacheKey key(cache_type, cache_key);
   if (pool && !cache_key.empty()) {
     // An non-empty key indicates to look for an existing cache.
-    absl::MutexLock lock(&pool->caches_mutex_);
+    absl::MutexLock lock(pool->caches_mutex_);
     auto it = pool->caches_.find(key);
     if (it != pool->caches_.end()) {
       auto* cache = *it;
@@ -461,7 +461,7 @@ CachePtr<Cache> GetCacheInternal(
   }
   cache_impl->cache_type_ = &cache_type;
   cache_impl->cache_identifier_ = std::string(cache_key);
-  absl::MutexLock lock(&pool->caches_mutex_);
+  absl::MutexLock lock(pool->caches_mutex_);
   auto insert_result = pool->caches_.insert(cache_impl);
   if (insert_result.second ||
       !TryToAcquireCacheStrongReference(pool, *insert_result.first)) {
@@ -498,7 +498,7 @@ PinnedCacheEntry<Cache> GetCacheEntryInternal(internal::Cache* cache,
         Access::StaticCast<CacheEntry>(entry_impl), internal::adopt_object_ref);
   } else {
     auto& shard = cache_impl->ShardForKey(key);
-    absl::MutexLock lock(&shard.mutex);
+    absl::MutexLock lock(shard.mutex);
     auto it = shard.entries.find(key);
     if (it != shard.entries.end()) {
       hit_count.Increment();
@@ -731,7 +731,7 @@ void UpdateTotalBytes(CachePoolImpl& pool, ptrdiff_t change) {
       change <= 0) {
     return;
   }
-  absl::MutexLock lock(&pool.lru_mutex_);
+  absl::MutexLock lock(pool.lru_mutex_);
   MaybeEvictEntries(&pool);
 }
 
@@ -751,7 +751,7 @@ CacheEntry::~CacheEntry() {
   auto* weak_state = this->weak_state_.load(std::memory_order_relaxed);
   if (!weak_state) return;
   {
-    absl::MutexLock lock(&weak_state->mutex);
+    absl::MutexLock lock(weak_state->mutex);
     weak_state->entry = nullptr;
     if (weak_state->weak_references.load(std::memory_order_acquire) != 0) {
       // Don't destroy the weak reference state, since there are still weak
@@ -801,7 +801,7 @@ CachePool::StrongPtr::StrongPtr(const CachePool::WeakPtr& ptr)
   auto* pool =
       internal_cache::Access::StaticCast<internal_cache::CachePoolImpl>(
          ptr.get());
-  absl::MutexLock lock(&pool->caches_mutex_);
+  absl::MutexLock lock(pool->caches_mutex_);
   if (pool->strong_references_.fetch_add(1, std::memory_order_acq_rel) == 0) {
     internal_cache::AcquireWeakReference(pool);
     for (auto* cache : pool->caches_) {
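
Every hunk above makes the same mechanical change: absl::MutexLock is constructed from a Mutex reference rather than a Mutex pointer. A minimal sketch of the two spellings, assuming an Abseil release that provides the reference-taking MutexLock constructor (the Counter struct and the two functions below are hypothetical, for illustration only):

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

// Hypothetical mutex-guarded counter, used only to show the two lock forms.
struct Counter {
  absl::Mutex mutex;
  int value ABSL_GUARDED_BY(mutex) = 0;
};

void IncrementOldStyle(Counter& c) {
  absl::MutexLock lock(&c.mutex);  // pointer-taking constructor (the form removed by this commit)
  ++c.value;
}

void IncrementNewStyle(Counter& c) {
  absl::MutexLock lock(c.mutex);   // reference-taking constructor (the form adopted by this commit)
  ++c.value;
}

The first hunk also uses the lock as a C++17 if-statement initializer, so the mutex remains held while the condition is evaluated and for the body of the branch; the reference form works there in exactly the same way.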

tensorstore/internal/cache/cache_test.cc

Lines changed: 3 additions & 3 deletions
@@ -108,7 +108,7 @@ class TestCache : public Cache {
 
   ~TestCache() {
     if (log_) {
-      absl::MutexLock lock(&log_->mutex);
+      absl::MutexLock lock(log_->mutex);
       log_->cache_destroy_log.emplace_back(cache_identifier());
     }
   }
@@ -117,7 +117,7 @@ class TestCache : public Cache {
 
   Entry* DoAllocateEntry() override {
     if (log_) {
-      absl::MutexLock lock(&log_->mutex);
+      absl::MutexLock lock(log_->mutex);
       log_->entry_allocate_log.emplace_back(cache_identifier());
     }
     auto* entry = new Entry;
@@ -222,7 +222,7 @@ CachePtr<CacheType> GetTestCache(
     std::shared_ptr<TestCache::RequestLog> log = {}) {
   return GetCache<CacheType>(pool, cache_identifier, [&] {
     if (log) {
-      absl::MutexLock lock(&log->mutex);
+      absl::MutexLock lock(log->mutex);
       log->cache_allocate_log.emplace_back(cache_identifier);
     }
     return std::make_unique<CacheType>(log);
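
The test changes apply the same migration to the mutex guarding TestCache::RequestLog. As a rough usage sketch (the Log type and AppendEvent function below are hypothetical stand-ins, not part of the test), appending to a mutex-guarded log with the reference form looks like:

#include <string>
#include <utility>
#include <vector>

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

// Hypothetical stand-in for a shared request log such as TestCache::RequestLog.
struct Log {
  absl::Mutex mutex;
  std::vector<std::string> events ABSL_GUARDED_BY(mutex);
};

void AppendEvent(Log& log, std::string event) {
  absl::MutexLock lock(log.mutex);  // reference form, as adopted by this commit
  log.events.push_back(std::move(event));
}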
