Skip to content

Commit e5017fa

Browse files
Clean up MemoryCache and fix thread-safety issues (#12440)
While reviewing #12434, I noticed that the fix applied doesn't actually fix the race condition in `MemoryCache<TKey, TValue>.Compact()`. The reason is subtle! The fix was to avoid enumerating `ConcurrentDictionary` while items could be added and removed by first calling `ConcurrentDictionary<TKey, TValue>.ToArray()`, which is thread-safe. However, the receiver of the call isn't a `ConcurrentDictionary<TKey, TValue>`; it's an `IDictionary<TKey, TValue>`, which doesn't offer a `ToArray()` method. So, the new call actually goes to `Enumerable.ToArray(...)`, and it's still enumerating the dictionary while items can be added and removed -- it's just doing it earlier than before. To address this, I went ahead and implemented a comprehensive fix for the `MemoryCache<TKey, TValue>` thread-safety issues. I also updated the tests to use a test accessor instead of subclassing, which better encapsulates the `MemoryCache` implementation. Many thanks to Copilot for helping with comments, documentation, and tests! Here is a summary of the fixes and improvements made to `MemoryCache<TKey, TValue>`. - Atomic remove-then-check pattern to eliminate race conditions in compaction - Thread-safe last access time updates using volatile operations - Improved test structure using test accessors instead of subclassing - Enhanced documentation and comments - Added test coverage for concurrent and edge scenarios
2 parents c6fb31c + 3ceb688 commit e5017fa

File tree

5 files changed

+663
-89
lines changed

5 files changed

+663
-89
lines changed
Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
// Licensed to the .NET Foundation under one or more agreements.
2+
// The .NET Foundation licenses this file to you under the MIT license.
3+
4+
using System;
5+
using System.Threading;
6+
7+
namespace Microsoft.CodeAnalysis.Razor.Utilities;
8+
9+
internal sealed partial class MemoryCache<TKey, TValue> where TKey : notnull
    where TValue : class
{
    /// <summary>
    ///  A cache entry pairing a value with the time it was last accessed.
    ///  The access time is kept as raw ticks and read/written through
    ///  <see cref="Volatile"/> so concurrent readers always observe a fully
    ///  written 64-bit timestamp, even on 32-bit platforms.
    /// </summary>
    private sealed class Entry(TValue value)
    {
        // Last access time in UTC ticks; accessed only via Volatile.Read/Write.
        private long _lastAccessTicks = DateTime.UtcNow.Ticks;

        /// <summary>
        ///  Gets the time (built from UTC ticks) at which this entry was last accessed.
        /// </summary>
        public DateTime LastAccess => new(Volatile.Read(ref _lastAccessTicks));

        /// <summary>
        ///  Gets the cached value.
        /// </summary>
        public TValue Value => value;

        /// <summary>
        ///  Stamps this entry with the current UTC time.
        /// </summary>
        public void UpdateLastAccess()
            => Volatile.Write(ref _lastAccessTicks, DateTime.UtcNow.Ticks);
    }
}
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
// Licensed to the .NET Foundation under one or more agreements.
2+
// The .NET Foundation licenses this file to you under the MIT license.
3+
4+
using System;
5+
6+
namespace Microsoft.CodeAnalysis.Razor.Utilities;
7+
8+
// NOTE: `sealed` added here for consistency with the other partial
// declarations of this type, which declare `internal sealed partial class`.
internal sealed partial class MemoryCache<TKey, TValue>
    where TKey : notnull
    where TValue : class
{
    internal TestAccessor GetTestAccessor() => new(this);

    /// <summary>
    ///  Provides test-only access to the internal state of
    ///  <see cref="MemoryCache{TKey, TValue}"/> without requiring tests to
    ///  subclass the cache. Declared as a readonly struct: no member mutates
    ///  the struct itself, only the wrapped cache instance.
    /// </summary>
    internal readonly struct TestAccessor(MemoryCache<TKey, TValue> instance)
    {
        /// <summary>
        ///  Raised after a compaction pass completes on the wrapped cache.
        /// </summary>
        public event Action Compacted
        {
            add => instance._compactedHandler += value;
            remove => instance._compactedHandler -= value;
        }

        /// <summary>
        ///  Attempts to read the last access time recorded for <paramref name="key"/>.
        /// </summary>
        /// <returns><see langword="true"/> if the key is present; otherwise <see langword="false"/>.</returns>
        public bool TryGetLastAccess(TKey key, out DateTime result)
        {
            if (instance._map.TryGetValue(key, out var value))
            {
                result = value.LastAccess;
                return true;
            }

            result = default;
            return false;
        }
    }
}

src/Razor/src/Microsoft.CodeAnalysis.Razor.Workspaces/Utilities/MemoryCache`2.cs

Lines changed: 91 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -5,77 +5,126 @@
55
using System.Collections.Concurrent;
66
using System.Collections.Generic;
77
using System.Diagnostics.CodeAnalysis;
8-
using System.Linq;
98

109
namespace Microsoft.CodeAnalysis.Razor.Utilities;
1110

12-
/// <summary>
/// A thread-safe, size-limited cache with approximate LRU (Least Recently Used)
/// eviction policy. When the cache reaches its size limit, it removes approximately
/// half of the least recently used entries.
/// </summary>
/// <typeparam name="TKey">The type of keys in the cache.</typeparam>
/// <typeparam name="TValue">The type of values in the cache.</typeparam>
/// <param name="sizeLimit">The maximum number of entries the cache can hold before compaction is triggered.</param>
/// <param name="concurrencyLevel">The estimated number of threads that will update the cache concurrently.</param>
internal sealed partial class MemoryCache<TKey, TValue>(int sizeLimit = 50, int concurrencyLevel = 2)
    where TKey : notnull
    where TValue : class
{
    private readonly ConcurrentDictionary<TKey, Entry> _map = new(concurrencyLevel, capacity: sizeLimit);

    /// <summary>
    /// Lock used to serialize compaction passes so that only one thread compacts
    /// at a time. Readers and writers of <see cref="_map"/> never take this lock.
    /// </summary>
    private readonly object _compactLock = new();
    private readonly int _sizeLimit = sizeLimit;

    /// <summary>
    /// Optional callback invoked after cache compaction completes. Only used by tests.
    /// </summary>
    private Action? _compactedHandler;

    /// <summary>
    /// Attempts to retrieve a value from the cache and, on a hit, stamps the
    /// entry with a fresh access time.
    /// </summary>
    public bool TryGetValue(TKey key, [NotNullWhen(true)] out TValue? result)
    {
        if (!_map.TryGetValue(key, out var entry))
        {
            result = default;
            return false;
        }

        entry.UpdateLastAccess();
        result = entry.Value;
        return true;
    }

    /// <summary>
    /// Adds or replaces the value for <paramref name="key"/>. If the cache has
    /// reached capacity, compaction runs before the new entry is stored.
    /// </summary>
    public void Set(TKey key, TValue value)
    {
        CompactIfNeeded();

        _map[key] = new Entry(value);
    }

    /// <summary>
    /// Removes approximately half of the least recently used entries once the
    /// cache reaches its size limit.
    /// </summary>
    private void CompactIfNeeded()
    {
        // Cheap check first so the common (non-full) case never takes the lock.
        if (_map.Count < _sizeLimit)
        {
            return;
        }

        lock (_compactLock)
        {
            // Another thread may have finished compacting while we waited.
            if (_map.Count < _sizeLimit)
            {
                return;
            }

            // Snapshot the map together with each entry's access time, ordered
            // oldest-first. ConcurrentDictionary.ToArray() produces a consistent
            // snapshot even while other threads mutate the map, which is what
            // makes the approximate-LRU pass below safe.
            var snapshot = _map.ToArray().SelectAndOrderByAsArray(
                selector: static x => (x.Key, x.Value.LastAccess),
                keySelector: static x => x.LastAccess);

            var remaining = Math.Max(_sizeLimit / 2, 1);

            // Evict up to half of the oldest entries using an atomic
            // remove-then-check pattern: remove the entry first, then inspect
            // what we actually removed, so we never discard an entry that was
            // touched after the snapshot was taken.
            foreach (var (candidateKey, snapshotAccess) in snapshot)
            {
                // If TryRemove fails, another thread already removed this entry.
                if (!_map.TryRemove(candidateKey, out var removed))
                {
                    continue;
                }

                if (removed.LastAccess == snapshotAccess)
                {
                    // The entry was not accessed after the snapshot, so this
                    // counts as a successful eviction; stop once we've evicted
                    // enough.
                    if (--remaining == 0)
                    {
                        break;
                    }
                }
                else
                {
                    // The entry became "hot" after the snapshot - put it back.
                    // If TryAdd fails, another thread already stored a newer
                    // entry under this key, which is acceptable either way.
                    _map.TryAdd(candidateKey, removed);
                }
            }

            _compactedHandler?.Invoke();
        }
    }

    /// <summary>
    /// Removes all entries from the cache.
    /// </summary>
    public void Clear()
        => _map.Clear();
}
Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
// Licensed to the .NET Foundation under one or more agreements.
2+
// The .NET Foundation licenses this file to you under the MIT license.
3+
4+
using System;
5+
using System.Collections.Generic;
6+
using System.Diagnostics;
7+
using System.Linq;
8+
using System.Threading.Tasks;
9+
using Microsoft.AspNetCore.Razor.Test.Common;
10+
using Microsoft.CodeAnalysis.Razor.Utilities;
11+
using Xunit;
12+
using Xunit.Abstractions;
13+
14+
namespace Microsoft.CodeAnalysis.Razor.Workspaces.Test.Utilities;
15+
16+
/// <summary>
///  Performance smoke test for <c>MemoryCache&lt;TKey, TValue&gt;</c>.
///  The test lives in its own xUnit collection with parallelization disabled
///  so the wall-clock timing assertion below isn't skewed by other tests
///  competing for CPU. (The original declaration passed
///  <c>DisableParallelization = false</c> - the default - which made the
///  dedicated collection a no-op.)
/// </summary>
[CollectionDefinition(nameof(MemoryCachePerfTest), DisableParallelization = true)]
[Collection(nameof(MemoryCachePerfTest))]
public class MemoryCachePerfTest(ITestOutputHelper testOutput) : ToolingTestBase(testOutput)
{
    [Fact]
    public async Task HighFrequencyAccess_MaintainsPerformance()
    {
        var cache = new MemoryCache<string, IReadOnlyList<int>>();
        const string Key = "hot-key";
        cache.Set(Key, [1, 2, 3]);

        const int AccessCount = 10_000;
        var stopwatch = Stopwatch.StartNew();

        // Hammer a single hot key from one task per processor. Integer
        // division means the total may fall slightly short of AccessCount,
        // which is fine for a smoke test.
        var tasks = Enumerable.Range(0, Environment.ProcessorCount)
            .Select(x => Task.Run(() =>
            {
                for (var i = 0; i < AccessCount / Environment.ProcessorCount; i++)
                {
                    _ = cache.TryGetValue(Key, out _);
                }
            }))
            .ToArray();

        await Task.WhenAll(tasks);
        stopwatch.Stop();

        // Generous threshold: ~10k concurrent reads of one key should finish
        // well under a second even on a busy CI machine.
        Assert.True(stopwatch.ElapsedMilliseconds < 1000,
            $"High-frequency access took too long: {stopwatch.ElapsedMilliseconds}ms");
    }
}

0 commit comments

Comments
 (0)