From 288459afbecfd0cab63b2ba06eb4cd3a6f7d4e9c Mon Sep 17 00:00:00 2001
From: Jonas Kamsker <11245306+JKamsker@users.noreply.github.com>
Date: Wed, 24 Sep 2025 02:04:49 +0200
Subject: [PATCH] Fix cache reuse to limit memory growth

---
 LiteDB.Tests/Internals/Cache_Tests.cs | 30 ++++++--
 LiteDB/Engine/Disk/MemoryCache.cs     | 98 +++++++++++++++------
 2 files changed, 80 insertions(+), 48 deletions(-)

diff --git a/LiteDB.Tests/Internals/Cache_Tests.cs b/LiteDB.Tests/Internals/Cache_Tests.cs
index b5dd737e8..b8d896108 100644
--- a/LiteDB.Tests/Internals/Cache_Tests.cs
+++ b/LiteDB.Tests/Internals/Cache_Tests.cs
@@ -103,8 +103,8 @@ public void Cache_Extends()
                 pages.Add(m.NewPage());
             }
 
-            // extends must be increase
-            m.ExtendSegments.Should().Be(3);
+            // there were readable pages available, so cache should reuse instead of extending
+            m.ExtendSegments.Should().Be(2);
 
             // but if I release more than 10 pages, now I will re-use old pages
             foreach (var p in pages.Where(x => x.ShareCounter == -1).Take(10))
@@ -117,7 +117,7 @@ public void Cache_Extends()
             }
 
             m.WritablePages.Should().Be(7);
-            m.FreePages.Should().Be(8);
+            m.FreePages.Should().Be(3);
 
             // now, if I request for 10 pages, all pages will be reused (no segment extend)
             for (var i = 0; i < 10; i++)
@@ -126,7 +126,7 @@ public void Cache_Extends()
             }
 
             // keep same extends
-            m.ExtendSegments.Should().Be(3);
+            m.ExtendSegments.Should().Be(2);
 
             // discard all pages
             PageBuffer pw;
@@ -137,6 +137,28 @@ public void Cache_Extends()
             }
         }
 
+        [Fact]
+        public void Cache_Reuses_Readable_Pages_Before_Extending()
+        {
+            var cache = new MemoryCache(new int[] { 2, 4 });
+
+            var first = cache.GetReadablePage(0, FileOrigin.Data, (pos, slice) => { });
+            var second = cache.GetReadablePage(Constants.PAGE_SIZE, FileOrigin.Data, (pos, slice) => { });
+
+            cache.ExtendSegments.Should().Be(1);
+            cache.FreePages.Should().Be(0);
+
+            first.Release();
+            second.Release();
+
+            var reused = cache.GetReadablePage(Constants.PAGE_SIZE * 2, FileOrigin.Data, (pos, slice) => { });
+
+            cache.ExtendSegments.Should().Be(1);
+            cache.FreePages.Should().Be(1);
+
+            reused.Release();
+        }
+
         [Fact]
         public void Cache_UniqueIDNumbering()
         {
diff --git a/LiteDB/Engine/Disk/MemoryCache.cs b/LiteDB/Engine/Disk/MemoryCache.cs
index f82947dba..100a7bdef 100644
--- a/LiteDB/Engine/Disk/MemoryCache.cs
+++ b/LiteDB/Engine/Disk/MemoryCache.cs
@@ -303,64 +303,74 @@ private void Extend()
             var segmentSize = _segmentSizes[Math.Min(_segmentSizes.Length - 1, _extends)];
 
             // if this count is larger than MEMORY_SEGMENT_SIZE, re-use all this pages
-            if (emptyShareCounter > segmentSize)
+            if (emptyShareCounter > 0)
             {
-                // get all readable pages that can return to _free (slow way)
-                // sort by timestamp used (set as free oldest first)
-                var readables = _readable
-                    .Where(x => x.Value.ShareCounter == 0)
-                    .OrderBy(x => x.Value.Timestamp)
-                    .Select(x => x.Key)
-                    .Take(segmentSize)
-                    .ToArray();
-
-                // move pages from readable list to free list
-                foreach (var key in readables)
+                var take = Math.Min(segmentSize, emptyShareCounter);
+
+                if (take > 0)
                 {
-                    var removed = _readable.TryRemove(key, out var page);
+                    // get readable pages that can return to _free (slow way)
+                    // sort by timestamp used (set as free oldest first)
+                    var readables = _readable
+                        .Where(x => x.Value.ShareCounter == 0)
+                        .OrderBy(x => x.Value.Timestamp)
+                        .Select(x => x.Key)
+                        .Take(take)
+                        .ToArray();
+
+                    var reused = 0;
+
+                    // move pages from readable list to free list
+                    foreach (var key in readables)
+                    {
+                        var removed = _readable.TryRemove(key, out var page);
 
-                    ENSURE(removed, "page should be in readable list before moving to free list");
+                        ENSURE(removed, "page should be in readable list before moving to free list");
 
-                    // if removed page was changed between make array and now, must add back to readable list
-                    if (page.ShareCounter > 0)
-                    {
-                        // but wait: between last "remove" and now, another thread can added this page
-                        if (!_readable.TryAdd(key, page))
+                        // if removed page was changed between make array and now, must add back to readable list
+                        if (page.ShareCounter > 0)
                         {
-                            // this is a terrible situation, to avoid memory corruption I will throw expcetion for now
-                            throw new LiteException(0, "MemoryCache: removed in-use memory page. This situation has no way to fix (yet). Throwing exception to avoid database corruption. No other thread can read/write from database now.");
+                            // but wait: between last "remove" and now, another thread may have added this page
+                            if (!_readable.TryAdd(key, page))
+                            {
+                                // this is a terrible situation; to avoid memory corruption I will throw an exception for now
+                                throw new LiteException(0, "MemoryCache: removed in-use memory page. This situation has no way to fix (yet). Throwing exception to avoid database corruption. No other thread can read/write from database now.");
+                            }
                         }
-                    }
-                    else
-                    {
-                        ENSURE(page.ShareCounter == 0, "page should not be in use by anyone");
+                        else
+                        {
+                            ENSURE(page.ShareCounter == 0, "page should not be in use by anyone");
 
-                        // clean controls
-                        page.Position = long.MaxValue;
-                        page.Origin = FileOrigin.None;
+                            // clean controls
+                            page.Position = long.MaxValue;
+                            page.Origin = FileOrigin.None;
 
-                        _free.Enqueue(page);
+                            _free.Enqueue(page);
+                            reused++;
+                        }
                     }
-                }
 
-                LOG($"re-using cache pages (flushing {_free.Count} pages)", "CACHE");
-            }
-            else
-            {
-                // create big linear array in heap memory (LOH => 85Kb)
-                var buffer = new byte[PAGE_SIZE * segmentSize];
-                var uniqueID = this.ExtendPages + 1;
-
-                // split linear array into many array slices
-                for (var i = 0; i < segmentSize; i++)
-                {
-                    _free.Enqueue(new PageBuffer(buffer, i * PAGE_SIZE, uniqueID++));
+                    if (reused > 0)
+                    {
+                        LOG($"re-using cache pages (flushing {_free.Count} pages)", "CACHE");
+                        return;
+                    }
                 }
+            }
 
-                _extends++;
+            // create big linear array in heap memory (LOH => 85Kb)
+            var buffer = new byte[PAGE_SIZE * segmentSize];
+            var uniqueID = this.ExtendPages + 1;
 
-                LOG($"extending memory usage: (segments: {_extends})", "CACHE");
+            // split linear array into many array slices
+            for (var i = 0; i < segmentSize; i++)
+            {
+                _free.Enqueue(new PageBuffer(buffer, i * PAGE_SIZE, uniqueID++));
             }
+
+            _extends++;
+
+            LOG($"extending memory usage: (segments: {_extends})", "CACHE");
         }
 
 ///
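
Illustration only, not part of the patch itself: the behavioral core of the Extend() change above is that idle readable pages (ShareCounter == 0) are now recycled as soon as any exist, capped at one segment's worth per call, and a brand-new LOH segment is allocated only when nothing could be reused. Below is a minimal standalone C# sketch of that decision, with the real _readable/_free bookkeeping reduced to plain integers; the class, method, and parameter names here are hypothetical and do not exist in LiteDB.

using System;

public static class CacheExtendSketch
{
    // Returns how many idle readable pages to recycle into the free queue;
    // 0 means the caller falls through and allocates a brand-new segment.
    public static int PagesToReuse(int emptyShareCounter, int segmentSize)
    {
        // Old rule: reuse only when emptyShareCounter > segmentSize, otherwise extend.
        // New rule: reuse whenever any page is idle, at most one segment's worth per call.
        return emptyShareCounter > 0
            ? Math.Min(segmentSize, emptyShareCounter)
            : 0;
    }

    public static void Main()
    {
        Console.WriteLine(PagesToReuse(emptyShareCounter: 2, segmentSize: 1000));    // 2    -> recycle, no new segment
        Console.WriteLine(PagesToReuse(emptyShareCounter: 0, segmentSize: 1000));    // 0    -> allocate a new segment
        Console.WriteLine(PagesToReuse(emptyShareCounter: 5000, segmentSize: 1000)); // 1000 -> recycle one segment's worth
    }
}

The Math.Min cap mirrors the new take variable in the hunk above, and the early return after a successful reuse is what keeps ExtendSegments from growing in the updated Cache_Extends test and the new Cache_Reuses_Readable_Pages_Before_Extending test.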