|
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics.CodeAnalysis;
|
namespace Microsoft.CodeAnalysis.Razor.Utilities;
|
12 | | -// We've created our own MemoryCache here, ideally we would use the one in Microsoft.Extensions.Caching.Memory, |
13 | | -// but until we update O# that causes an Assembly load problem. |
14 | | -internal partial class MemoryCache<TKey, TValue> |
| 11 | +/// <summary> |
| 12 | +/// A thread-safe, size-limited cache with approximate LRU (Least Recently Used) |
| 13 | +/// eviction policy. When the cache reaches its size limit, it removes approximately |
| 14 | +/// half of the least recently used entries. |
| 15 | +/// </summary> |
| 16 | +/// <typeparam name="TKey">The type of keys in the cache.</typeparam> |
| 17 | +/// <typeparam name="TValue">The type of values in the cache.</typeparam> |
| 18 | +/// <param name="sizeLimit">The maximum number of entries the cache can hold before compaction is triggered.</param> |
| 19 | +/// <param name="concurrencyLevel">The estimated number of threads that will update the cache concurrently.</param> |
| 20 | +internal sealed partial class MemoryCache<TKey, TValue>(int sizeLimit = 50, int concurrencyLevel = 2) |
15 | 21 | where TKey : notnull |
16 | 22 | where TValue : class |
17 | 23 | { |
18 | | - private const int DefaultSizeLimit = 50; |
19 | | - private const int DefaultConcurrencyLevel = 2; |
20 | | - |
21 | | - protected IDictionary<TKey, CacheEntry> _dict; |
22 | | - |
23 | | - private readonly object _compactLock; |
24 | | - private readonly int _sizeLimit; |
25 | | - |
| 24 | + private readonly ConcurrentDictionary<TKey, Entry> _map = new(concurrencyLevel, capacity: sizeLimit); |
| 25 | + |
| 26 | + /// <summary> |
| 27 | + /// Lock used to synchronize cache compaction operations. This prevents multiple threads |
| 28 | + /// from attempting to compact the cache simultaneously while allowing concurrent reads. |
| 29 | + /// </summary> |
| 30 | + private readonly object _compactLock = new(); |
| 31 | + private readonly int _sizeLimit = sizeLimit; |
| 32 | + |
| 33 | + /// <summary> |
| 34 | + /// Optional callback invoked after cache compaction completes. Only used by tests. |
| 35 | + /// </summary> |
26 | 36 | private Action? _compactedHandler; |
27 | 37 |
|
28 | | - public MemoryCache(int sizeLimit = DefaultSizeLimit, int concurrencyLevel = DefaultConcurrencyLevel) |
29 | | - { |
30 | | - _sizeLimit = sizeLimit; |
31 | | - _dict = new ConcurrentDictionary<TKey, CacheEntry>(concurrencyLevel, capacity: _sizeLimit); |
32 | | - _compactLock = new object(); |
33 | | - } |
34 | | - |
35 | | - public bool TryGetValue(TKey key, [NotNullWhen(returnValue: true)] out TValue? result) |
| 38 | + /// <summary> |
| 39 | + /// Attempts to retrieve a value from the cache and updates its last access time if found. |
| 40 | + /// </summary> |
| 41 | + public bool TryGetValue(TKey key, [NotNullWhen(true)] out TValue? result) |
36 | 42 | { |
37 | | - if (_dict.TryGetValue(key, out var value)) |
| 43 | + if (_map.TryGetValue(key, out var entry)) |
38 | 44 | { |
39 | | - value.LastAccess = DateTime.UtcNow; |
40 | | - result = value.Value; |
| 45 | + entry.UpdateLastAccess(); |
| 46 | + result = entry.Value; |
41 | 47 | return true; |
42 | 48 | } |
43 | 49 |
|
44 | 50 | result = default; |
45 | 51 | return false; |
46 | 52 | } |
47 | 53 |
|
| 54 | + /// <summary> |
| 55 | + /// Adds or updates a value in the cache. If the cache is at capacity, triggers compaction |
| 56 | + /// before adding the new entry. |
| 57 | + /// </summary> |
48 | 58 | public void Set(TKey key, TValue value) |
49 | 59 | { |
| 60 | + CompactIfNeeded(); |
| 61 | + |
| 62 | + _map[key] = new Entry(value); |
| 63 | + } |
| 64 | + |
| 65 | + /// <summary> |
| 66 | + /// Removes approximately half of the least recently used entries when the cache reaches capacity. |
| 67 | + /// </summary> |
| 68 | + private void CompactIfNeeded() |
| 69 | + { |
| 70 | + // Fast path: check size without locking |
| 71 | + if (_map.Count < _sizeLimit) |
| 72 | + { |
| 73 | + return; |
| 74 | + } |
| 75 | + |
50 | 76 | lock (_compactLock) |
51 | 77 | { |
52 | | - if (_dict.Count >= _sizeLimit) |
| 78 | + // Double-check after acquiring lock in case another thread already compacted |
| 79 | + if (_map.Count < _sizeLimit) |
53 | 80 | { |
54 | | - Compact(); |
| 81 | + return; |
55 | 82 | } |
56 | | - } |
57 | 83 |
|
58 | | - _dict[key] = new CacheEntry |
59 | | - { |
60 | | - LastAccess = DateTime.UtcNow, |
61 | | - Value = value, |
62 | | - }; |
63 | | - } |
| 84 | + // Create a snapshot with last access times to implement approximate LRU eviction. |
| 85 | + // This captures each entry's access time to determine which entries were least recently used. |
| 86 | + var orderedItems = _map.ToArray().SelectAndOrderByAsArray( |
| 87 | + selector: static x => (x.Key, x.Value.LastAccess), |
| 88 | + keySelector: static x => x.LastAccess); |
64 | 89 |
|
65 | | - public void Clear() => _dict.Clear(); |
| 90 | + var toRemove = Math.Max(_sizeLimit / 2, 1); |
66 | 91 |
|
67 | | - protected virtual void Compact() |
68 | | - { |
69 | | - var kvps = _dict.ToArray().OrderBy(x => x.Value.LastAccess).ToArray(); |
| 92 | + // Remove up to half of the oldest entries using an atomic remove-then-check pattern. |
| 93 | + // This ensures we don't remove entries that were accessed after our snapshot was taken. |
| 94 | + foreach (var (itemKey, itemLastAccess) in orderedItems) |
| 95 | + { |
| 96 | + // Atomic remove-then-check pattern eliminates race conditions |
| 97 | + // Note: If TryRemove fails, another thread already removed this entry. |
| 98 | + if (_map.TryRemove(itemKey, out var removedEntry)) |
| 99 | + { |
| 100 | + if (removedEntry.LastAccess == itemLastAccess) |
| 101 | + { |
| 102 | + // Entry was still old when removed - successful eviction |
| 103 | + toRemove--; |
| 104 | + |
| 105 | + // Stop early if we've removed enough entries |
| 106 | + if (toRemove == 0) |
| 107 | + { |
| 108 | + break; |
| 109 | + } |
| 110 | + } |
| 111 | + else |
| 112 | + { |
| 113 | + // Entry was accessed after snapshot - try to restore it |
| 114 | + // If TryAdd fails, another thread already added a new entry with this key, |
| 115 | + // which is acceptable - we preserve the hot entry's data either way |
| 116 | + _map.TryAdd(itemKey, removedEntry); |
| 117 | + } |
| 118 | + } |
| 119 | + } |
70 | 120 |
|
71 | | - for (var i = 0; i < _sizeLimit / 2; i++) |
72 | | - { |
73 | | - _dict.Remove(kvps[i].Key); |
| 121 | + _compactedHandler?.Invoke(); |
74 | 122 | } |
75 | | - |
76 | | - _compactedHandler?.Invoke(); |
77 | 123 | } |
78 | 124 |
|
79 | | - protected class CacheEntry |
80 | | - { |
81 | | - public required TValue Value { get; init; } |
82 | | - |
83 | | - public required DateTime LastAccess { get; set; } |
84 | | - } |
| 125 | + /// <summary> |
| 126 | + /// Removes all entries from the cache. |
| 127 | + /// </summary> |
| 128 | + public void Clear() |
| 129 | + => _map.Clear(); |
85 | 130 | } |