
Commit 7bba069

Fix possible UniqueID duplicity on Extend()
Added a unit test to check these cases. Fixed some ENSURE messages.
1 parent f5cab32 · commit 7bba069

2 files changed: +71 -40 lines
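
For context on the bug behind this commit: the old Extend() computed each page's UniqueID from the size of the segment currently being allocated, (_extends * segmentSize) + i + 1, which only stays unique while every segment has the same size. Below is a minimal standalone sketch of the arithmetic (illustrative only, not LiteDB code; the { 5, 3 } segment sizes are hypothetical), comparing the old per-segment formula with a running counter:

using System;
using System.Collections.Generic;

static class UniqueIdDemo
{
    // Old scheme: the ID is computed from the CURRENT segment's size, so a
    // smaller second segment restarts inside a range that was already handed out.
    static IEnumerable<int> OldIds(int[] segmentSizes)
    {
        for (var extends = 0; extends < segmentSizes.Length; extends++)
            for (var i = 0; i < segmentSizes[extends]; i++)
                yield return (extends * segmentSizes[extends]) + i + 1;
    }

    // New scheme: seed each segment from a global count of pages already created
    // and keep incrementing, so IDs stay sequential across segments of any size.
    static IEnumerable<int> NewIds(int[] segmentSizes)
    {
        var allocated = 0;
        foreach (var size in segmentSizes)
        {
            var uniqueID = allocated + 1;
            for (var i = 0; i < size; i++) yield return uniqueID++;
            allocated += size;
        }
    }

    static void Main()
    {
        var sizes = new[] { 5, 3 };                           // hypothetical segment sizes
        Console.WriteLine(string.Join(", ", OldIds(sizes)));  // 1, 2, 3, 4, 5, 4, 5, 6 -> 4 and 5 repeat
        Console.WriteLine(string.Join(", ", NewIds(sizes)));  // 1, 2, 3, 4, 5, 6, 7, 8 -> all unique
    }
}

This mirrors the actual change in MemoryCache.Extend() below, where the loop-local (_extends * segmentSize) + i + 1 is replaced by a counter seeded once per segment from this.ExtendPages + 1.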

LiteDB.Tests/Internals/Cache_Tests.cs

Lines changed: 40 additions & 2 deletions
@@ -1,4 +1,5 @@
-using System.Linq;
+using System;
+using System.Linq;
 using System.Collections.Generic;
 using FluentAssertions;
 using LiteDB.Engine;
@@ -17,7 +18,7 @@ public void Cache_Read_Write()
 
             var p0 = m.NewPage();
 
-            // new pages are writables
+            // new pages are writable
             (p0.ShareCounter).Should().Be(-1);
 
             // simulate write operation on page
@@ -135,5 +136,42 @@ public void Cache_Extends()
                 m.DiscardPage(pw);
             }
         }
+
+        [Fact]
+        public void Cache_UniqueIDNumbering()
+        {
+            // Test case when second segment size is smaller than first
+            int[] segmentSizes = { 5, 3 };
+            ConsumeNewPages(segmentSizes);
+
+            // Test default database segment sizes
+            segmentSizes = Constants.MEMORY_SEGMENT_SIZES;
+            ConsumeNewPages(segmentSizes);
+
+            // Test random memory segment sizes
+            Random rnd = new Random(DateTime.Now.Millisecond);
+            segmentSizes = new int[rnd.Next(3, 12)];
+            for (int i = 0; i < segmentSizes.Length; i++)
+            {
+                segmentSizes[i] = rnd.Next(1, 1000);
+            }
+            ConsumeNewPages(segmentSizes);
+        }
+
+        private void ConsumeNewPages(int[] segmentSizes)
+        {
+            var m = new MemoryCache(segmentSizes);
+
+            // Test some additional segments using last segment size more than once
+            var totalSegments = segmentSizes.Sum() + 10;
+            for (int i = 1; i <= totalSegments; i++)
+            {
+                PageBuffer p = m.NewPage();
+                p.UniqueID.Should().Be(i);
+
+                // Set ShareCounter to 0 to proper disposal (not needed in this test)
+                p.ShareCounter = 0;
+            }
+        }
     }
 }
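
To run just the new test locally, something along these lines should work from the repository root (the LiteDB.Tests folder comes from the file path above; the exact project layout and filter string are assumptions):

dotnet test LiteDB.Tests --filter "FullyQualifiedName~Cache_UniqueIDNumbering"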

LiteDB/Engine/Disk/MemoryCache.cs

Lines changed: 31 additions & 38 deletions
@@ -1,29 +1,23 @@
 using System;
 using System.Collections.Concurrent;
 using System.Collections.Generic;
-using System.ComponentModel;
-using System.Diagnostics;
-using System.IO;
 using System.Linq;
-using System.Reflection;
-using System.Text;
-using System.Text.RegularExpressions;
 using System.Threading;
 using static LiteDB.Constants;
 
 namespace LiteDB.Engine
 {
     /// <summary>
-    /// Manage linear memory segments to avoid re-create array buffer in heap memory
-    /// Do not share same memory store with diferent files
+    /// Manage linear memory segments to avoid re-creating array buffer in heap memory
+    /// Do not share same memory store with different files
     /// [ThreadSafe]
     /// </summary>
     internal class MemoryCache : IDisposable
     {
         /// <summary>
         /// Contains free ready-to-use pages in memory
-        /// - All pages here MUST be ShareCounter = 0
-        /// - All pages here MUST be Position = MaxValue
+        /// - All pages here MUST have ShareCounter = 0
+        /// - All pages here MUST have Position = MaxValue
         /// </summary>
         private readonly ConcurrentQueue<PageBuffer> _free = new ConcurrentQueue<PageBuffer>();
 
@@ -32,13 +26,13 @@ internal class MemoryCache : IDisposable
         /// - MUST have defined Origin and Position
         /// - Contains only 1 instance per Position/Origin
         /// - Contains only pages with ShareCounter >= 0
-        ///    * = 0 - Page are avaiable but are not using by anyone (can be moved into _free list in next Extend())
-        ///    * >= 1 - Page are in used by 1 or more threads. Page must run "Release" when finish use
+        ///    * = 0 - Page is available but is not in use by anyone (can be moved into _free list on next Extend())
+        ///    * >= 1 - Page is in use by 1 or more threads. Page must run "Release" when finished using
         /// </summary>
         private readonly ConcurrentDictionary<long, PageBuffer> _readable = new ConcurrentDictionary<long, PageBuffer>();
 
         /// <summary>
-        /// Get how many extends was made in this store
+        /// Get how many extends were made in this store
         /// </summary>
         private int _extends = 0;
 
@@ -57,7 +51,7 @@ public MemoryCache(int[] memorySegmentSizes)
         #region Readable Pages
 
         /// <summary>
-        /// Get page from clean cache (readable). If page not exits, create this new page and load data using factory fn
+        /// Get page from clean cache (readable). If page doesn't exist, create this new page and load data using factory fn
         /// </summary>
         public PageBuffer GetReadablePage(long position, FileOrigin origin, Action<long, BufferSlice> factory)
         {
@@ -171,14 +165,14 @@ private PageBuffer NewPage(long position, FileOrigin origin)
         }
 
         /// <summary>
-        /// Try move this page to readable list (if not alrady in readable list)
-        /// Returns true if was moved
+        /// Try to move this page to readable list (if not already in readable list)
+        /// Returns true if it was moved
         /// </summary>
         public bool TryMoveToReadable(PageBuffer page)
         {
             ENSURE(page.Position != long.MaxValue, "page must have a position");
             ENSURE(page.ShareCounter == BUFFER_WRITABLE, "page must be writable");
-            ENSURE(page.Origin != FileOrigin.None, "page must has defined origin");
+            ENSURE(page.Origin != FileOrigin.None, "page must have origin defined");
 
             var key = this.GetReadableKey(page.Position, page.Origin);
 
@@ -187,7 +181,7 @@ public bool TryMoveToReadable(PageBuffer page)
 
             var added = _readable.TryAdd(key, page);
 
-            // if not added, let's back ShareCounter to writable state
+            // if not added, let's get ShareCounter back to writable state
             if (!added)
             {
                 page.ShareCounter = BUFFER_WRITABLE;
@@ -197,16 +191,16 @@ public bool TryMoveToReadable(PageBuffer page)
         }
 
         /// <summary>
-        /// Move a writable page to readable list - if already exisits, override content
-        /// Used after write operation that must mark page as readable becase page content was changed
+        /// Move a writable page to readable list - if already exists, override content
+        /// Used after write operation that must mark page as readable because page content was changed
         /// This method runs BEFORE send to write disk queue - but new page request must read this new content
         /// Returns readable page
         /// </summary>
         public PageBuffer MoveToReadable(PageBuffer page)
         {
             ENSURE(page.Position != long.MaxValue, "page must have position to be readable");
             ENSURE(page.Origin != FileOrigin.None, "page should be a source before move to readable");
-            ENSURE(page.ShareCounter == BUFFER_WRITABLE, "page must be writable before from to readable dict");
+            ENSURE(page.ShareCounter == BUFFER_WRITABLE, "page must be writable before move to readable dict");
 
             var key = this.GetReadableKey(page.Position, page.Origin);
             var added = true;
@@ -216,8 +210,8 @@ public PageBuffer MoveToReadable(PageBuffer page)
 
             var readable = _readable.AddOrUpdate(key, page, (newKey, current) =>
             {
-                // if page already exist inside readable list, should never be in-used (this will be garanteed by lock control)
-                ENSURE(current.ShareCounter == 0, "user must ensure this page are not in use when mark as read only");
+                // if page already exist inside readable list, should never be in-used (this will be guaranteed by lock control)
+                ENSURE(current.ShareCounter == 0, "user must ensure this page is not in use when marked as read only");
                 ENSURE(current.Origin == page.Origin, "origin must be same");
 
                 current.ShareCounter = 1;
@@ -231,18 +225,18 @@ public PageBuffer MoveToReadable(PageBuffer page)
                 return current;
             });
 
-            // if page was not added into readable list move page to free list
+            // if page was not added into readable list, move page to free list
             if (added == false)
             {
                 this.DiscardPage(page);
             }
 
-            // return page that are in _readble list
+            // return page that are in _readable list
             return readable;
         }
 
         /// <summary>
-        /// Complete discard a writable page - clean content and move to free list
+        /// Completely discard a writable page - clean content and move to free list
         /// </summary>
         public void DiscardPage(PageBuffer page)
         {
@@ -254,8 +248,8 @@ public void DiscardPage(PageBuffer page)
             page.Origin = FileOrigin.None;
 
             // DO NOT CLEAR CONTENT
-            // when this page will requested from free list will be clear if request was from NewPage()
-            // or will be overwrite by ReadPage
+            // when this page get requested from free list, it will be cleared if requested from NewPage()
+            // or will be overwritten by ReadPage
 
             // added into free list
             _free.Enqueue(page);
@@ -266,7 +260,7 @@ public void DiscardPage(PageBuffer page)
         #region Cache managment
 
         /// <summary>
-        /// Get a clean, re-usable page from store. If store are empty, can extend buffer segments
+        /// Get a clean, re-usable page from store. Can extend buffer segments if store is empty
         /// </summary>
         private PageBuffer GetFreePage()
         {
@@ -321,7 +315,7 @@ private void Extend()
                 {
                     var removed = _readable.TryRemove(key, out var page);
 
-                    ENSURE(removed, "page should be in readable list before move to free list");
+                    ENSURE(removed, "page should be in readable list before moving to free list");
 
                     // if removed page was changed between make array and now, must add back to readable list
                     if (page.ShareCounter > 0)
@@ -335,7 +329,7 @@ private void Extend()
                     }
                     else
                     {
-                        ENSURE(page.ShareCounter == 0, "page should be not in use by anyone");
+                        ENSURE(page.ShareCounter == 0, "page should not be in use by anyone");
 
                         // clean controls
                         page.Position = long.MaxValue;
@@ -351,13 +345,12 @@ private void Extend()
             {
                 // create big linear array in heap memory (LOH => 85Kb)
                 var buffer = new byte[PAGE_SIZE * segmentSize];
+                var uniqueID = this.ExtendPages + 1;
 
                 // split linear array into many array slices
                 for (var i = 0; i < segmentSize; i++)
                 {
-                    var uniqueID = (_extends * segmentSize) + i + 1;
-
-                    _free.Enqueue(new PageBuffer(buffer, i * PAGE_SIZE, uniqueID));
+                    _free.Enqueue(new PageBuffer(buffer, i * PAGE_SIZE, uniqueID++));
                 }
 
                 _extends++;
372365
public int PagesInUse => _readable.Values.Where(x => x.ShareCounter != 0).Count();
373366

374367
/// <summary>
375-
/// Return how many pages are available completed free
368+
/// Return how many pages are available (completely free)
376369
/// </summary>
377370
public int FreePages => _free.Count;
378371

379372
/// <summary>
380-
/// Return how many segments already loaded in memory
373+
/// Return how many segments are already loaded in memory
381374
/// </summary>
382375
public int ExtendSegments => _extends;
383376

@@ -399,13 +392,13 @@ private void Extend()
399392

400393
/// <summary>
401394
/// Clean all cache memory - moving back all readable pages into free list
402-
/// This command must be called inside a exclusive lock
395+
/// This command must be called inside an exclusive lock
403396
/// </summary>
404397
public int Clear()
405398
{
406399
var counter = 0;
407400

408-
ENSURE(this.PagesInUse == 0, "must have no pages in used when call Clear() cache");
401+
ENSURE(this.PagesInUse == 0, "must have no pages in use when call Clear() cache");
409402

410403
foreach (var page in _readable.Values)
411404
{
