diff --git a/LiteDB/Client/Database/LiteDatabase.cs b/LiteDB/Client/Database/LiteDatabase.cs
index 812ae629c..9e29a3e0d 100644
--- a/LiteDB/Client/Database/LiteDatabase.cs
+++ b/LiteDB/Client/Database/LiteDatabase.cs
@@ -25,6 +25,11 @@ public partial class LiteDatabase : ILiteDatabase
/// </summary>
public BsonMapper Mapper => _mapper;
+ /// <summary>
+ /// Get current instance of ILiteEngine used in this database instance
+ /// </summary>
+ public ILiteEngine Engine => _engine;
+
#endregion
#region Ctor
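The new Engine property makes the shutdown members added elsewhere in this patch (IsDisposed, Closed) reachable from user code. A minimal usage sketch, assuming a direct (non-shared) connection where Dispose() routes through LiteEngine.Close() as in this patch; local names are illustrative:

    using System.Diagnostics;
    using LiteDB;

    var db = new LiteDatabase("app.db");
    var engine = db.Engine;           // exposed by the property above
    var closed = engine.Closed;       // completes once the engine has closed

    db.Dispose();

    Debug.Assert(engine.IsDisposed);  // engine now reports disposed state
    Debug.Assert(closed.IsCompleted); // Close() resolved the completion source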
diff --git a/LiteDB/Client/Mapper/BsonMapper.cs b/LiteDB/Client/Mapper/BsonMapper.cs
index 1b963f560..b6211470f 100644
--- a/LiteDB/Client/Mapper/BsonMapper.cs
+++ b/LiteDB/Client/Mapper/BsonMapper.cs
@@ -179,7 +179,7 @@ public BsonExpression GetExpression<T, K>(Expression<Func<T, K>> predicate)
var expr = visitor.Resolve(typeof(K) == typeof(bool));
- LOG($"`{predicate.ToString()}` -> `{expr.Source}`", "LINQ");
+ Logging.LOG($"`{predicate.ToString()}` -> `{expr.Source}`", "LINQ");
return expr;
}
@@ -193,7 +193,7 @@ public BsonExpression GetIndexExpression<T, K>(Expression<Func<T, K>> predicate)
var expr = visitor.Resolve(false);
- LOG($"`{predicate.ToString()}` -> `{expr.Source}`", "LINQ");
+ Logging.LOG($"`{predicate.ToString()}` -> `{expr.Source}`", "LINQ");
return expr;
}
diff --git a/LiteDB/Client/Shared/SharedEngine.cs b/LiteDB/Client/Shared/SharedEngine.cs
index 62bb35a65..694892c60 100644
--- a/LiteDB/Client/Shared/SharedEngine.cs
+++ b/LiteDB/Client/Shared/SharedEngine.cs
@@ -3,6 +3,7 @@
using System.Collections.Generic;
using System.IO;
using System.Threading;
+using System.Threading.Tasks;
#if NETFRAMEWORK
using System.Security.AccessControl;
using System.Security.Principal;
@@ -86,6 +87,10 @@ private void CloseDatabase()
// Release Mutex on every call to close DB.
_mutex.ReleaseMutex();
}
+
+ public bool IsDisposed => _engine.IsDisposed;
+
+ public Task Closed => _engine.Closed;
#region Transaction Operations
diff --git a/LiteDB/Client/SqlParser/SqlParser.cs b/LiteDB/Client/SqlParser/SqlParser.cs
index 53515928e..0902f3deb 100644
--- a/LiteDB/Client/SqlParser/SqlParser.cs
+++ b/LiteDB/Client/SqlParser/SqlParser.cs
@@ -29,7 +29,7 @@ public IBsonDataReader Execute()
{
var ahead = _tokenizer.LookAhead().Expect(TokenType.Word);
- LOG($"executing `{ahead.Value.ToUpper()}`", "SQL");
+ Logging.LOG($"executing `{ahead.Value.ToUpper()}`", "SQL");
switch (ahead.Value.ToUpper())
{
diff --git a/LiteDB/Engine/Disk/DiskService.cs b/LiteDB/Engine/Disk/DiskService.cs
index d414da4e1..12b649e00 100644
--- a/LiteDB/Engine/Disk/DiskService.cs
+++ b/LiteDB/Engine/Disk/DiskService.cs
@@ -1,10 +1,7 @@
using System;
-using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
-using System.Text;
using System.Threading;
-using System.Threading.Tasks;
using static LiteDB.Constants;
namespace LiteDB.Engine
@@ -29,7 +26,7 @@ internal class DiskService : IDisposable
private long _logLength;
public DiskService(
- EngineSettings settings,
+ EngineSettings settings,
EngineState state,
int[] memorySegmentSizes)
{
@@ -52,7 +49,7 @@ public DiskService(
// create new database if not exist yet
if (isNew)
{
- LOG($"creating new database: '{Path.GetFileName(_dataFactory.Name)}'", "DISK");
+ Logging.LOG($"creating new database: '{Path.GetFileName(_dataFactory.Name)}'", "DISK");
this.Initialize(_dataPool.Writer, settings.Collation, settings.InitialSize);
}
@@ -261,7 +258,7 @@ public IEnumerable<PageBuffer> ReadFull(FileOrigin origin)
var bytesRead = stream.Read(buffer, 0, PAGE_SIZE);
- ENSURE(bytesRead == PAGE_SIZE, $"ReadFull must read PAGE_SIZE bytes [{bytesRead}]");
+ ENSURE(bytesRead == PAGE_SIZE, "ReadFull must read PAGE_SIZE bytes [{0}]", bytesRead);
yield return new PageBuffer(buffer, 0, 0)
{
diff --git a/LiteDB/Engine/Disk/DiskWriterQueue.cs b/LiteDB/Engine/Disk/DiskWriterQueue.cs
index d86474a2b..a03f236bd 100644
--- a/LiteDB/Engine/Disk/DiskWriterQueue.cs
+++ b/LiteDB/Engine/Disk/DiskWriterQueue.cs
@@ -8,6 +8,8 @@
namespace LiteDB.Engine
{
+ using LiteDB.Utils.Extensions;
+
/// <summary>
/// Implement disk write queue and async writer thread - used only for write on LOG file
/// [ThreadSafe]
@@ -23,7 +25,7 @@ internal class DiskWriterQueue : IDisposable
private readonly ConcurrentQueue<PageBuffer> _queue = new ConcurrentQueue<PageBuffer>();
private readonly object _queueSync = new object();
- private readonly AsyncManualResetEvent _queueHasItems = new AsyncManualResetEvent();
+ private readonly ManualResetEventSlim _queueHasItems = new ManualResetEventSlim();
private readonly ManualResetEventSlim _queueIsEmpty = new ManualResetEventSlim(true);
private Exception _exception = null; // store last exception in async running task
@@ -48,7 +50,7 @@ public void EnqueuePage(PageBuffer page)
ENSURE(page.Origin == FileOrigin.Log, "async writer must use only for Log file");
// throw last exception that stop running queue
- if (_exception != null) throw _exception;
+ if (_exception != null) throw new LiteException(0, _exception, "DiskWriterQueue error");
lock (_queueSync)
{
@@ -58,7 +60,7 @@ public void EnqueuePage(PageBuffer page)
if (_task == null)
{
- _task = Task.Factory.StartNew(ExecuteQueue, TaskCreationOptions.LongRunning);
+ _task = Task.Factory.StartNew(ExecuteQueue, CancellationToken.None, TaskCreationOptions.LongRunning, TaskScheduler.Default);
}
}
}
@@ -100,14 +102,23 @@ private async Task ExecuteQueue()
_stream.FlushToDisk();
- await _queueHasItems.WaitAsync();
+ await _queueHasItems.WaitHandle.WaitAsync().ConfigureAwait(false);
}
}
}
catch (Exception ex)
{
- _state.Handle(ex);
+ _state.Handle(LiteException.InvalidDatafileState(ex, "DiskWriterQueue failed"));
_exception = ex;
+ ExhaustQueue();
+ }
+ }
+
+ private void ExhaustQueue()
+ {
+ while (_queue.TryDequeue(out var page))
+ {
+ page.Release();
}
}
@@ -117,22 +128,28 @@ private void WritePageToStream(PageBuffer page)
ENSURE(page.ShareCounter > 0, "page must be shared at least 1");
- // set stream position according to page
- _stream.Position = page.Position;
+ try
+ {
+ // set stream position according to page
+ _stream.Position = page.Position;
#if DEBUG
- _state.SimulateDiskWriteFail?.Invoke(page);
+ _state.SimulateDiskWriteFail?.Invoke(page);
#endif
- _stream.Write(page.Array, page.Offset, PAGE_SIZE);
+ _stream.Write(page.Array, page.Offset, PAGE_SIZE);
- // release page here (no page use after this)
- page.Release();
+ }
+ finally
+ {
+ // release page here (no page use after this)
+ page.Release();
+ }
}
public void Dispose()
{
- LOG($"disposing disk writer queue (with {_queue.Count} pages in queue)", "DISK");
+ Logging.LOG($"disposing disk writer queue (with {_queue.Count} pages in queue)", "DISK");
_shouldClose = true;
_queueHasItems.Set(); // unblock the running loop in case there are no items
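The try/finally added to WritePageToStream pins down an invariant worth stating explicitly: every dequeued page must be released exactly once, even when SimulateDiskWriteFail or Stream.Write throws, otherwise the buffer leaks from the memory cache. Reduced to its essentials (a sketch, not the patch itself; the helper name is illustrative):

    void WriteAndRelease(Stream stream, PageBuffer page)
    {
        try
        {
            stream.Position = page.Position;
            stream.Write(page.Array, page.Offset, PAGE_SIZE);
        }
        finally
        {
            page.Release(); // runs on the failure path too
        }
    }

The new ExhaustQueue applies the same rule to pages still queued when the writer loop dies.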
diff --git a/LiteDB/Engine/Disk/MemoryCache.cs b/LiteDB/Engine/Disk/MemoryCache.cs
index f82947dba..62db25a7c 100644
--- a/LiteDB/Engine/Disk/MemoryCache.cs
+++ b/LiteDB/Engine/Disk/MemoryCache.cs
@@ -343,7 +343,7 @@ private void Extend()
}
}
- LOG($"re-using cache pages (flushing {_free.Count} pages)", "CACHE");
+ Logging.LOG($"re-using cache pages (flushing {_free.Count} pages)", "CACHE");
}
else
{
@@ -359,7 +359,7 @@ private void Extend()
_extends++;
- LOG($"extending memory usage: (segments: {_extends})", "CACHE");
+ Logging.LOG($"extending memory usage: (segments: {_extends})", "CACHE");
}
}
diff --git a/LiteDB/Engine/Disk/Streams/AesStream.cs b/LiteDB/Engine/Disk/Streams/AesStream.cs
index 90f4edc80..fce5cca3b 100644
--- a/LiteDB/Engine/Disk/Streams/AesStream.cs
+++ b/LiteDB/Engine/Disk/Streams/AesStream.cs
@@ -22,7 +22,7 @@ public class AesStream : Stream
private readonly byte[] _decryptedZeroes = new byte[16];
- private static readonly byte[] _emptyContent = new byte[PAGE_SIZE - 1 - 16]; // 1 for aes indicator + 16 for salt
+ private static readonly byte[] _emptyContent = new byte[PAGE_SIZE - 1 - 16]; // 1 for aes indicator + 16 for salt
public byte[] Salt { get; }
@@ -111,7 +111,7 @@ public AesStream(string password, Stream stream)
// check whether bytes 32 to 64 is empty. This indicates LiteDb was unable to write encrypted 1s during last attempt.
_stream.Read(checkBuffer, 0, checkBuffer.Length);
isNew = checkBuffer.All(x => x == 0);
-
+
// reset checkBuffer and stream position
Array.Clear(checkBuffer, 0, checkBuffer.Length);
_stream.Position = 32;
@@ -160,7 +160,7 @@ public AesStream(string password, Stream stream)
/// </summary>
public override int Read(byte[] array, int offset, int count)
{
- ENSURE(this.Position % PAGE_SIZE == 0, $"AesRead: position must be in PAGE_SIZE module. Position={this.Position}, File={_name}");
+ ENSURE(this.Position % PAGE_SIZE == 0, "AesRead: position must be a multiple of PAGE_SIZE. Position={0}, File={1}", this.Position, _name);
var r = _reader.Read(array, offset, count);
@@ -181,7 +181,7 @@ public override int Read(byte[] array, int offset, int count)
public override void Write(byte[] array, int offset, int count)
{
ENSURE(count == PAGE_SIZE || count == 1, "buffer size must be PAGE_SIZE");
- ENSURE(this.Position == HeaderPage.P_INVALID_DATAFILE_STATE || this.Position % PAGE_SIZE == 0, $"AesWrite: position must be in PAGE_SIZE module. Position={this.Position}, File={_name}");
+ ENSURE(this.Position == HeaderPage.P_INVALID_DATAFILE_STATE || this.Position % PAGE_SIZE == 0, "AesWrite: position must be a multiple of PAGE_SIZE. Position={0}, File={1}", this.Position, _name);
_writer.Write(array, offset, count);
}
diff --git a/LiteDB/Engine/Engine/Collection.cs b/LiteDB/Engine/Engine/Collection.cs
index b132e2e6a..70acfff73 100644
--- a/LiteDB/Engine/Engine/Collection.cs
+++ b/LiteDB/Engine/Engine/Collection.cs
@@ -33,7 +33,7 @@ public bool DropCollection(string name)
// if collection do not exist, just exit
if (snapshot.CollectionPage == null) return false;
- LOG($"drop collection `{name}`", "COMMAND");
+ Logging.LOG($"drop collection `{name}`", "COMMAND");
// call drop collection service
snapshot.DropCollection(transaction.Safepoint);
diff --git a/LiteDB/Engine/Engine/Delete.cs b/LiteDB/Engine/Engine/Delete.cs
index 1f8a5471d..34a8872af 100644
--- a/LiteDB/Engine/Engine/Delete.cs
+++ b/LiteDB/Engine/Engine/Delete.cs
@@ -24,7 +24,7 @@ public int Delete(string collection, IEnumerable<BsonValue> ids)
if (collectionPage == null) return 0;
- LOG($"delete `{collection}`", "COMMAND");
+ Logging.LOG($"delete `{collection}`", "COMMAND");
var count = 0;
var pk = collectionPage.PK;
diff --git a/LiteDB/Engine/Engine/Index.cs b/LiteDB/Engine/Engine/Index.cs
index 157542848..474fdafe8 100644
--- a/LiteDB/Engine/Engine/Index.cs
+++ b/LiteDB/Engine/Engine/Index.cs
@@ -44,7 +44,7 @@ public bool EnsureIndex(string collection, string name, BsonExpression expressio
return false;
}
- LOG($"create index `{collection}.{name}`", "COMMAND");
+ Logging.LOG($"create index `{collection}.{name}`", "COMMAND");
// create index head
var index = indexer.CreateIndex(name, expression.Source, unique);
diff --git a/LiteDB/Engine/Engine/Insert.cs b/LiteDB/Engine/Engine/Insert.cs
index 7f3097df0..d2d000279 100644
--- a/LiteDB/Engine/Engine/Insert.cs
+++ b/LiteDB/Engine/Engine/Insert.cs
@@ -24,7 +24,7 @@ public int Insert(string collection, IEnumerable<BsonDocument> docs, BsonAutoId
var indexer = new IndexService(snapshot, _header.Pragmas.Collation, _disk.MAX_ITEMS_COUNT);
var data = new DataService(snapshot, _disk.MAX_ITEMS_COUNT);
- LOG($"insert `{collection}`", "COMMAND");
+ Logging.LOG($"insert `{collection}`", "COMMAND");
foreach (var doc in docs)
{
diff --git a/LiteDB/Engine/Engine/Transaction.cs b/LiteDB/Engine/Engine/Transaction.cs
index d5f5927ff..3394ea5fe 100644
--- a/LiteDB/Engine/Engine/Transaction.cs
+++ b/LiteDB/Engine/Engine/Transaction.cs
@@ -22,7 +22,7 @@ public bool BeginTrans()
if (transacion.OpenCursors.Count > 0) throw new LiteException(0, "This thread contains an open cursors/query. Close cursors before Begin()");
- LOG(isNew, $"begin trans", "COMMAND");
+ Logging.LOG(isNew, "begin trans", "COMMAND");
return isNew;
}
diff --git a/LiteDB/Engine/Engine/Update.cs b/LiteDB/Engine/Engine/Update.cs
index 0d825beea..2bbc469e4 100644
--- a/LiteDB/Engine/Engine/Update.cs
+++ b/LiteDB/Engine/Engine/Update.cs
@@ -25,7 +25,7 @@ public int Update(string collection, IEnumerable<BsonDocument> docs)
if (collectionPage == null) return 0;
- LOG($"update `{collection}`", "COMMAND");
+ Logging.LOG($"update `{collection}`", "COMMAND");
foreach (var doc in docs)
{
diff --git a/LiteDB/Engine/Engine/Upsert.cs b/LiteDB/Engine/Engine/Upsert.cs
index e9545c0ea..72d39f050 100644
--- a/LiteDB/Engine/Engine/Upsert.cs
+++ b/LiteDB/Engine/Engine/Upsert.cs
@@ -24,7 +24,7 @@ public int Upsert(string collection, IEnumerable<BsonDocument> docs, BsonAutoId
var data = new DataService(snapshot, _disk.MAX_ITEMS_COUNT);
var count = 0;
- LOG($"upsert `{collection}`", "COMMAND");
+ Logging.LOG($"upsert `{collection}`", "COMMAND");
foreach (var doc in docs)
{
diff --git a/LiteDB/Engine/EngineState.cs b/LiteDB/Engine/EngineState.cs
index cfdad8a25..5a982ac55 100644
--- a/LiteDB/Engine/EngineState.cs
+++ b/LiteDB/Engine/EngineState.cs
@@ -37,7 +37,7 @@ public void Validate()
public bool Handle(Exception ex)
{
- LOG(ex.Message, "ERROR");
+ Logging.LOG(ex, "ERROR");
if (ex is IOException ||
(ex is LiteException lex && lex.ErrorCode == LiteException.INVALID_DATAFILE_STATE))
diff --git a/LiteDB/Engine/FileReader/FileReaderV8.cs b/LiteDB/Engine/FileReader/FileReaderV8.cs
index a53a90bcf..3230c9d57 100644
--- a/LiteDB/Engine/FileReader/FileReaderV8.cs
+++ b/LiteDB/Engine/FileReader/FileReaderV8.cs
@@ -117,7 +117,7 @@ public IEnumerable<BsonDocument> GetDocuments(string collection)
var colID = _collections[collection];
if (!_collectionsDataPages.ContainsKey(colID)) yield break;
-
+
var dataPages = _collectionsDataPages[colID];
var uniqueIDs = new HashSet<BsonValue>();
@@ -156,8 +156,8 @@ public IEnumerable<BsonDocument> GetDocuments(string collection)
// empty slot
if (position == 0) continue;
- ENSURE(position > 0 && length > 0, $"Invalid footer ref position {position} with length {length}");
- ENSURE(position + length < PAGE_SIZE, $"Invalid footer ref position {position} with length {length}");
+ ENSURE(position > 0 && length > 0, "Invalid footer ref position {0} with length {1}", position, length);
+ ENSURE(position + length < PAGE_SIZE, "Invalid footer ref position {0} with length {1}", position, length);
// get segment slice
var segment = buffer.Slice(position, length);
@@ -183,8 +183,8 @@ public IEnumerable<BsonDocument> GetDocuments(string collection)
var nextBuffer = nextPage.Value.Buffer;
// make page validations
- ENSURE(nextPage.Value.PageType == PageType.Data, $"Invalid PageType (excepted Data, get {nextPage.Value.PageType})");
- ENSURE(nextPage.Value.ColID == colID, $"Invalid ColID in this page (expected {colID}, get {nextPage.Value.ColID})");
+ ENSURE(nextPage.Value.PageType == PageType.Data, "Invalid PageType (expected Data, got {0})", nextPage.Value.PageType);
+ ENSURE(nextPage.Value.ColID == colID, "Invalid ColID in this page (expected {0}, got {1})", colID, nextPage.Value.ColID);
ENSURE(nextPage.Value.ItemsCount > 0, "Page with no items count");
// read slot address
@@ -196,7 +196,7 @@ public IEnumerable<BsonDocument> GetDocuments(string collection)
length = nextBuffer.ReadUInt16(lengthAddr);
// empty slot
- ENSURE(length > 0, $"Last DataBlock request a next extend to {nextBlock}, but this block are empty footer");
+ ENSURE(length > 0, "Last DataBlock requested a next extend at {0}, but this block has an empty footer", nextBlock);
// get segment slice
segment = nextBuffer.Slice(position, length);
@@ -204,7 +204,7 @@ public IEnumerable<BsonDocument> GetDocuments(string collection)
nextBlock = segment.ReadPageAddress(DataBlock.P_NEXT_BLOCK);
data = segment.Slice(DataBlock.P_BUFFER, segment.Count - DataBlock.P_BUFFER);
- ENSURE(extend == true, $"Next datablock always be an extend. Invalid data block {nextBlock}");
+ ENSURE(extend == true, "Next datablock must always be an extend. Invalid data block {0}", nextBlock);
// write data on memorystream
@@ -219,8 +219,8 @@ public IEnumerable<BsonDocument> GetDocuments(string collection)
var docResult = r.ReadDocument();
var id = docResult.Value["_id"];
- ENSURE(!(id == BsonValue.Null || id == BsonValue.MinValue || id == BsonValue.MaxValue), $"Invalid _id value: {id}");
- ENSURE(uniqueIDs.Contains(id) == false, $"Duplicated _id value: {id}");
+ ENSURE(!(id == BsonValue.Null || id == BsonValue.MinValue || id == BsonValue.MaxValue), "Invalid _id value: {0}", id);
+ ENSURE(uniqueIDs.Contains(id) == false, "Duplicated _id value: {0}", id);
uniqueIDs.Add(id);
@@ -279,7 +279,7 @@ private void LoadDataPages()
var header = this.ReadPage(0, out var pageInfo).GetValue();
var lastPageID = header.Buffer.ReadUInt32(HeaderPage.P_LAST_PAGE_ID); //TODO: try not to use this value as a reference (scan everything)
- ENSURE(lastPageID <= _maxPageID, $"LastPageID {lastPageID} should be less or equals to maxPageID {_maxPageID}");
+ ENSURE(lastPageID <= _maxPageID, "LastPageID {0} should be less than or equal to maxPageID {1}", lastPageID, _maxPageID);
for (uint i = 0; i <= lastPageID; i++)
{
@@ -398,8 +398,8 @@ private void LoadIndexes()
position += 15; // head 5 bytes, tail 5 bytes, reserved 1 byte, freeIndexPageList 4 bytes
- ENSURE(!string.IsNullOrEmpty(name), $"Index name can't be empty (collection {collection.Key} - index: {i})");
- ENSURE(!string.IsNullOrEmpty(expr), $"Index expression can't be empty (collection {collection.Key} - index: {i})");
+ ENSURE(!string.IsNullOrEmpty(name), "Index name can't be empty (collection {0} - index: {1})", collection.Key, i);
+ ENSURE(!string.IsNullOrEmpty(expr), "Index expression can't be empty (collection {0} - index: {1})", collection.Key, i);
var indexInfo = new IndexInfo
{
@@ -481,7 +481,7 @@ private void LoadIndexMap()
pageInfo.PageID = pageID;
pageInfo.ColID = buffer.ReadUInt32(BasePage.P_COL_ID);
- ENSURE(read == PAGE_SIZE, $"Page position {_logStream} read only than {read} bytes (instead {PAGE_SIZE})");
+ ENSURE(read == PAGE_SIZE, "Page position {0} read only {1} bytes (expected {2})", _logStream.Position, read, PAGE_SIZE);
var position = new PagePosition(pageID, currentPosition);
@@ -515,7 +515,7 @@ private void LoadIndexMap()
{
var mapIndexPages = transactions[transactionID];
- // update
+ // update
foreach (var page in mapIndexPages)
{
_logIndexMap[page.PageID] = page.Position;
@@ -532,7 +532,7 @@ private Result<BasePage> ReadPage(uint pageID, out PageInfo pageInfo)
try
{
- ENSURE(pageID <= _maxPageID, $"PageID: {pageID} should be less then or equals to maxPageID: {_maxPageID}");
+ ENSURE(pageID <= _maxPageID, "PageID: {0} should be less than or equal to maxPageID: {1}", pageID, _maxPageID);
var pageBuffer = new PageBuffer(new byte[PAGE_SIZE], 0, PAGE_SIZE);
Stream stream;
@@ -556,13 +556,13 @@ private Result<BasePage> ReadPage(uint pageID, out PageInfo pageInfo)
read = stream.Read(pageBuffer.Array, pageBuffer.Offset, pageBuffer.Count);
- ENSURE(read == PAGE_SIZE, $"Page position {stream.Position} read only than {read} bytes (instead {PAGE_SIZE})");
+ ENSURE(read == PAGE_SIZE, "Page position {0} read only {1} bytes (expected {2})", stream.Position, read, PAGE_SIZE);
var page = new BasePage(pageBuffer);
pageInfo.ColID = page.ColID;
- ENSURE(page.PageID == pageID, $"Expect read pageID: {pageID} but header contains pageID: {page.PageID}");
+ ENSURE(page.PageID == pageID, "Expected to read pageID: {0} but header contains pageID: {1}", pageID, page.PageID);
return page;
}
diff --git a/LiteDB/Engine/ILiteEngine.cs b/LiteDB/Engine/ILiteEngine.cs
index 067589999..603b150dd 100644
--- a/LiteDB/Engine/ILiteEngine.cs
+++ b/LiteDB/Engine/ILiteEngine.cs
@@ -1,5 +1,6 @@
using System;
using System.Collections.Generic;
+using System.Threading.Tasks;
namespace LiteDB.Engine
{
@@ -29,5 +30,8 @@ public interface ILiteEngine : IDisposable
BsonValue Pragma(string name);
bool Pragma(string name, BsonValue value);
+
+ bool IsDisposed { get; }    // true once the engine has been disposed
+ Task Closed { get; }        // completes when the engine finishes closing
}
}
\ No newline at end of file
diff --git a/LiteDB/Engine/LiteEngine.cs b/LiteDB/Engine/LiteEngine.cs
index ce5f1987a..ee8e502b5 100644
--- a/LiteDB/Engine/LiteEngine.cs
+++ b/LiteDB/Engine/LiteEngine.cs
@@ -1,12 +1,10 @@
using LiteDB.Utils;
-
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
-using System.Diagnostics;
using System.IO;
using System.Linq;
-using System.Threading;
+using System.Threading.Tasks;
using static LiteDB.Constants;
namespace LiteDB.Engine
@@ -81,9 +79,14 @@ public LiteEngine(EngineSettings settings)
#region Open & Close
+ public bool IsDisposed => _state.Disposed;
+
+ private readonly TaskCompletionSource<bool> _closedTask = new TaskCompletionSource<bool>();
+ public Task Closed => _closedTask.Task;
+
internal bool Open()
{
- LOG($"start initializing{(_settings.ReadOnly ? " (readonly)" : "")}", "ENGINE");
+ Logging.LOG($"start initializing{(_settings.ReadOnly ? " (readonly)" : "")}", "ENGINE");
_systemCollections = new Dictionary<string, SystemCollection>(StringComparer.OrdinalIgnoreCase);
_sequences = new ConcurrentDictionary<string, long>(StringComparer.OrdinalIgnoreCase);
@@ -154,13 +157,13 @@ internal bool Open()
// register system collections
this.InitializeSystemCollections();
- LOG("initialization completed", "ENGINE");
+ Logging.LOG("initialization completed", "ENGINE");
return true;
}
catch (Exception ex)
{
- LOG(ex.Message, "ERROR");
+ Logging.LOG(ex, "ERROR");
this.Close(ex);
throw;
@@ -207,6 +210,8 @@ internal List<Exception> Close()
// dispose lockers
tc.Catch(() => _locker?.Dispose());
+ _closedTask.TrySetResult(true);
+
return tc.Exceptions;
}
@@ -249,6 +254,8 @@ internal List<Exception> Close(Exception ex)
tc.Catch(() => _disk.MarkAsInvalidState());
}
+ _closedTask.TrySetResult(true);
+
return tc.Exceptions;
}
diff --git a/LiteDB/Engine/Pages/BasePage.cs b/LiteDB/Engine/Pages/BasePage.cs
index 92b645206..c62b145b7 100644
--- a/LiteDB/Engine/Pages/BasePage.cs
+++ b/LiteDB/Engine/Pages/BasePage.cs
@@ -102,8 +102,8 @@ internal class BasePage
/// Get how many bytes are used in footer page at this moment
/// ((HighestIndex + 1) * 4 bytes per slot: [2 for position, 2 for length])
/// </summary>
- public int FooterSize =>
- (this.HighestIndex == byte.MaxValue ?
+ public int FooterSize =>
+ (this.HighestIndex == byte.MaxValue ?
0 : // no items in page
((this.HighestIndex + 1) * SLOT_SIZE)); // 4 bytes PER item (2 to position + 2 to length) - need consider HighestIndex used
@@ -282,8 +282,8 @@ public BufferSlice Get(byte index)
var position = _buffer.ReadUInt16(positionAddr);
var length = _buffer.ReadUInt16(lengthAddr);
- ENSURE(this.IsValidPos(position), $"invalid segment position in index footer: {ToString()}/{index}");
- ENSURE(this.IsValidLen(length), $"invalid segment length in index footer: {ToString()}/{index}");
+ ENSURE(this.IsValidPos(position), "invalid segment position in index footer: {0}/{1}", this, index);
+ ENSURE(this.IsValidLen(length), "invalid segment length in index footer: {0}/{1}", this, index);
// return buffer slice with content only data
return _buffer.Slice(position, length);
@@ -408,7 +408,7 @@ public void Delete(byte index)
this.NextFreePosition = position;
}
else
- {
+ {
// if segment is in middle of the page, add this blocks as fragment block
this.FragmentedBytes += length;
}
@@ -475,7 +475,7 @@ public BufferSlice Update(byte index, ushort bytesLength)
if (isLastSegment)
{
- // if is at end of page, must get back unused blocks
+ // if is at end of page, must get back unused blocks
this.NextFreePosition -= diff;
}
else
@@ -534,7 +534,7 @@ public void Defrag()
ENSURE(_buffer.ShareCounter == BUFFER_WRITABLE, "page must be writable to support changes");
ENSURE(this.HighestIndex < byte.MaxValue, "there is no items in this page to run defrag");
- LOG($"defrag page #{this.PageID} (fragments: {this.FragmentedBytes})", "DISK");
+ Logging.LOG($"defrag page #{this.PageID} (fragments: {this.FragmentedBytes})", "DISK");
// first get all segments inside this page sorted by position (position, index)
var segments = new SortedList<ushort, byte>();
diff --git a/LiteDB/Engine/Pages/CollectionPage.cs b/LiteDB/Engine/Pages/CollectionPage.cs
index 4db992238..389ba3cc4 100644
--- a/LiteDB/Engine/Pages/CollectionPage.cs
+++ b/LiteDB/Engine/Pages/CollectionPage.cs
@@ -39,7 +39,7 @@ public CollectionPage(PageBuffer buffer, uint pageID)
public CollectionPage(PageBuffer buffer)
: base(buffer)
{
- ENSURE(this.PageType == PageType.Collection, "page type must be collection page");
+ ENSURE(this.PageType == PageType.Collection, "page type must be collection page: {0}", PageType);
if (this.PageType != PageType.Collection) throw LiteException.InvalidPageType(PageType.Collection, this);
diff --git a/LiteDB/Engine/Pages/DataPage.cs b/LiteDB/Engine/Pages/DataPage.cs
index 842bb49d4..bbda32ba9 100644
--- a/LiteDB/Engine/Pages/DataPage.cs
+++ b/LiteDB/Engine/Pages/DataPage.cs
@@ -1,6 +1,4 @@
using System.Collections.Generic;
-using System.IO;
-using System.Linq;
using static LiteDB.Constants;
namespace LiteDB.Engine
@@ -16,7 +14,7 @@ internal class DataPage : BasePage
public DataPage(PageBuffer buffer)
: base(buffer)
{
- ENSURE(this.PageType == PageType.Data, $"Page type must be data page: {PageType}");
+ ENSURE(this.PageType == PageType.Data, "Page type must be data page: {0}", PageType);
if (this.PageType != PageType.Data) throw LiteException.InvalidPageType(PageType.Data, this);
}
@@ -108,7 +106,7 @@ public IEnumerable<PageAddress> GetBlocks()
/// <returns>A slot number between 0 and 4</returns>
public static byte FreeIndexSlot(int freeBytes)
{
- ENSURE(freeBytes >= 0, $"FreeBytes must be positive: {freeBytes}");
+ ENSURE(freeBytes >= 0, "FreeBytes must be positive: {0}", freeBytes);
for (var i = 0; i < _freePageSlots.Length; i++)
{
diff --git a/LiteDB/Engine/Pages/IndexPage.cs b/LiteDB/Engine/Pages/IndexPage.cs
index b5cd7b0cb..c454fbf77 100644
--- a/LiteDB/Engine/Pages/IndexPage.cs
+++ b/LiteDB/Engine/Pages/IndexPage.cs
@@ -16,7 +16,7 @@ internal class IndexPage : BasePage
public IndexPage(PageBuffer buffer)
: base(buffer)
{
- ENSURE(this.PageType == PageType.Index, "page type must be index page");
+ ENSURE(this.PageType == PageType.Index, "page type must be index page: {0}", PageType);
if (this.PageType != PageType.Index) throw LiteException.InvalidPageType(PageType.Index, this);
}
diff --git a/LiteDB/Engine/Query/Pipeline/BasePipe.cs b/LiteDB/Engine/Query/Pipeline/BasePipe.cs
index a16bbd953..3f978448c 100644
--- a/LiteDB/Engine/Query/Pipeline/BasePipe.cs
+++ b/LiteDB/Engine/Query/Pipeline/BasePipe.cs
@@ -163,7 +163,7 @@ protected IEnumerable<BsonDocument> OrderBy(IEnumerable<BsonDocument> source, Bs
{
sorter.Insert(keyValues);
- LOG($"sort {sorter.Count} keys in {sorter.Containers.Count} containers", "SORT");
+ Logging.LOG($"sort {sorter.Count} keys in {sorter.Containers.Count} containers", "SORT");
var result = sorter.Sort().Skip(offset).Take(limit);
diff --git a/LiteDB/Engine/Query/QueryExecutor.cs b/LiteDB/Engine/Query/QueryExecutor.cs
index 10d8e034b..e181f8576 100644
--- a/LiteDB/Engine/Query/QueryExecutor.cs
+++ b/LiteDB/Engine/Query/QueryExecutor.cs
@@ -43,7 +43,7 @@ public QueryExecutor(
_cursor = new CursorInfo(collection, query);
- LOG(_query.ToSQL(_collection).Replace(Environment.NewLine, " "), "QUERY");
+ Logging.LOG(_query.ToSQL(_collection).Replace(Environment.NewLine, " "), "QUERY");
// source will be != null when query will run over external data source, like system collections or files (not user collection)
_source = source;
diff --git a/LiteDB/Engine/Services/DataService.cs b/LiteDB/Engine/Services/DataService.cs
index 499968b4e..9ef48772c 100644
--- a/LiteDB/Engine/Services/DataService.cs
+++ b/LiteDB/Engine/Services/DataService.cs
@@ -165,7 +165,7 @@ public IEnumerable<BufferSlice> Read(PageAddress address)
while (address != PageAddress.Empty)
{
- ENSURE(counter++ < _maxItemsCount, $"Detected loop in data Read({address})");
+ ENSURE(counter++ < _maxItemsCount, "Detected loop in data Read({0})", address);
var dataPage = _snapshot.GetPage(address.PageID);
diff --git a/LiteDB/Engine/Services/IndexService.cs b/LiteDB/Engine/Services/IndexService.cs
index 9dcf6b771..6b8fb230d 100644
--- a/LiteDB/Engine/Services/IndexService.cs
+++ b/LiteDB/Engine/Services/IndexService.cs
@@ -79,10 +79,10 @@ public IndexNode AddNode(CollectionIndex index, BsonValue key, PageAddress dataB
/// Insert a new node index inside an collection index.
/// </summary>
private IndexNode AddNode(
- CollectionIndex index,
- BsonValue key,
- PageAddress dataBlock,
- byte insertLevels,
+ CollectionIndex index,
+ BsonValue key,
+ PageAddress dataBlock,
+ byte insertLevels,
IndexNode last)
{
// get a free index page for head note
@@ -108,7 +108,7 @@ private IndexNode AddNode(
// while: scan from left to right
while (right.IsEmpty == false && right != index.Tail)
{
- ENSURE(counter++ < _maxItemsCount, $"Detected loop in AddNode({node.Position})");
+ ENSURE(counter++ < _maxItemsCount, "Detected loop in AddNode({0})", node.Position);
var rightNode = this.GetNode(right);
@@ -206,7 +206,7 @@ public IEnumerable<IndexNode> GetNodeList(PageAddress nodeAddress)
while (node != null)
{
- ENSURE(counter++ < _maxItemsCount, $"Detected loop in GetNodeList({nodeAddress})");
+ ENSURE(counter++ < _maxItemsCount, "Detected loop in GetNodeList({0})", nodeAddress);
yield return node;
@@ -225,7 +225,7 @@ public void DeleteAll(PageAddress pkAddress)
while (node != null)
{
- ENSURE(counter++ < _maxItemsCount, $"Detected loop in DeleteAll({pkAddress})");
+ ENSURE(counter++ < _maxItemsCount, "Detected loop in DeleteAll({0})", pkAddress);
this.DeleteSingleNode(node, indexes[node.Slot]);
@@ -246,7 +246,7 @@ public IndexNode DeleteList(PageAddress pkAddress, HashSet<PageAddress> toDelete
while (node != null)
{
- ENSURE(counter++ < _maxItemsCount, $"Detected loop in DeleteList({pkAddress})");
+ ENSURE(counter++ < _maxItemsCount, "Detected loop in DeleteList({0})", pkAddress);
if (toDelete.Contains(node.Position))
{
@@ -333,7 +333,7 @@ public void DropIndex(CollectionIndex index)
}
#region Find
-
+
/// <summary>
/// Return all index nodes from an index
/// </summary>
@@ -344,7 +344,7 @@ public IEnumerable<IndexNode> FindAll(CollectionIndex index, int order)
while (!cur.GetNextPrev(0, order).IsEmpty)
{
- ENSURE(counter++ < _maxItemsCount, $"Detected loop in FindAll({index.Name})");
+ ENSURE(counter++ < _maxItemsCount, "Detected loop in FindAll({0})", index.Name);
cur = this.GetNode(cur.GetNextPrev(0, order));
@@ -356,7 +356,7 @@ public IEnumerable<IndexNode> FindAll(CollectionIndex index, int order)
}
/// <summary>
- /// Find first node that index match with value .
+ /// Find first node that index match with value .
/// If index are unique, return unique value - if index are not unique, return first found (can start, middle or end)
/// If not found but sibling = true and key are not found, returns next value index node (if order = Asc) or prev node (if order = Desc)
/// </summary>
@@ -371,7 +371,7 @@ public IndexNode Find(CollectionIndex index, BsonValue value, bool sibling, int
while (right.IsEmpty == false)
{
- ENSURE(counter++ < _maxItemsCount, $"Detected loop in Find({index.Name}, {value})");
+ ENSURE(counter++ < _maxItemsCount, "Detected loop in Find({0}, {1})", index.Name, value);
var rightNode = this.GetNode(right);
diff --git a/LiteDB/Engine/Services/TransactionService.cs b/LiteDB/Engine/Services/TransactionService.cs
index 8163e3643..39e85920a 100644
--- a/LiteDB/Engine/Services/TransactionService.cs
+++ b/LiteDB/Engine/Services/TransactionService.cs
@@ -113,7 +113,7 @@ public Snapshot CreateSnapshot(LockMode mode, string collection, bool addIfNotEx
_snapshots[collection] = snapshot = create();
}
- // update transaction mode to write in first write snaphost request
+ // update transaction mode to write in first write snapshot request
if (mode == LockMode.Write) _mode = LockMode.Write;
return snapshot;
@@ -128,7 +128,7 @@ public void Safepoint()
if (_monitor.CheckSafepoint(this))
{
- LOG($"safepoint flushing transaction pages: {_transPages.TransactionSize}", "TRANSACTION");
+ Logging.LOG($"safepoint flushing transaction pages: {_transPages.TransactionSize}", "TRANSACTION");
// if any snapshot are writable, persist pages
if (_mode == LockMode.Write)
@@ -250,9 +250,9 @@ IEnumerable<PageBuffer> source()
/// </summary>
public void Commit()
{
- ENSURE(_state == TransactionState.Active, $"transaction must be active to commit (current state: {_state})");
+ ENSURE(_state == TransactionState.Active, "transaction must be active to commit (current state: {0})", _state);
- LOG($"commit transaction ({_transPages.TransactionSize} pages)", "TRANSACTION");
+ Logging.LOG($"commit transaction ({_transPages.TransactionSize} pages)", "TRANSACTION");
if (_mode == LockMode.Write || _transPages.HeaderChanged)
{
@@ -281,9 +281,9 @@ public void Commit()
/// </summary>
public void Rollback()
{
- ENSURE(_state == TransactionState.Active, $"transaction must be active to rollback (current state: {_state})");
+ ENSURE(_state == TransactionState.Active, "transaction must be active to rollback (current state: {0})", _state);
- LOG($"rollback transaction ({_transPages.TransactionSize} pages with {_transPages.NewPages.Count} returns)", "TRANSACTION");
+ Logging.LOG($"rollback transaction ({_transPages.TransactionSize} pages with {_transPages.NewPages.Count} returns)", "TRANSACTION");
// if transaction contains new pages, must return to database in another transaction
if (_transPages.NewPages.Count > 0)
diff --git a/LiteDB/Engine/Services/WalIndexService.cs b/LiteDB/Engine/Services/WalIndexService.cs
index b17d9da92..c0e46fe4a 100644
--- a/LiteDB/Engine/Services/WalIndexService.cs
+++ b/LiteDB/Engine/Services/WalIndexService.cs
@@ -293,7 +293,7 @@ public int TryCheckpoint()
/// </summary>
private int CheckpointInternal()
{
- LOG($"checkpoint", "WAL");
+ Logging.LOG("checkpoint", "WAL");
// wait all pages write on disk
_disk.Queue.Value.Wait();
diff --git a/LiteDB/LiteDB.csproj b/LiteDB/LiteDB.csproj
index 721395e91..cb00eee92 100644
--- a/LiteDB/LiteDB.csproj
+++ b/LiteDB/LiteDB.csproj
@@ -29,7 +29,7 @@
LiteDB.snk
true
-
+
diff --git a/LiteDB/Utils/AsyncManualResetEvent.cs b/LiteDB/Utils/AsyncManualResetEvent.cs
deleted file mode 100644
index 0cbaf3421..000000000
--- a/LiteDB/Utils/AsyncManualResetEvent.cs
+++ /dev/null
@@ -1,35 +0,0 @@
-using System.Threading;
-using System.Threading.Tasks;
-
-namespace LiteDB
-{
- /// <summary>
- /// Async implementation of ManualResetEvent
- /// https://devblogs.microsoft.com/pfxteam/building-async-coordination-primitives-part-1-asyncmanualresetevent/
- /// </summary>
- internal class AsyncManualResetEvent
- {
- private volatile TaskCompletionSource<bool> _tcs = new TaskCompletionSource<bool>();
-
- public Task WaitAsync()
- {
- return _tcs.Task;
- }
-
- public void Set()
- {
- _tcs.TrySetResult(true);
- }
-
- public void Reset()
- {
- while (true)
- {
- var tcs = _tcs;
- if (!tcs.Task.IsCompleted ||
- Interlocked.CompareExchange(ref _tcs, new TaskCompletionSource<bool>(), tcs) == tcs)
- return;
- }
- }
- }
-}
\ No newline at end of file
diff --git a/LiteDB/Utils/Constants.cs b/LiteDB/Utils/Constants.cs
index a33aa70ec..928966d0d 100644
--- a/LiteDB/Utils/Constants.cs
+++ b/LiteDB/Utils/Constants.cs
@@ -11,6 +11,8 @@
namespace LiteDB
{
+ using System.Globalization;
+
///
/// Class with all constants used in LiteDB + Debbuger HELPER
///
@@ -106,32 +108,24 @@ internal class Constants
#endif
/// <summary>
- /// Log a message using Debug.WriteLine
+ /// Ensure condition is true, otherwise throw exception (check contract)
/// </summary>
[DebuggerHidden]
- [Conditional("DEBUG")]
- public static void LOG(string message, string category)
+ public static void ENSURE(bool conditional, string message = null)
{
- //Debug.WriteLine is too slow in multi-threads
- //var threadID = Environment.CurrentManagedThreadId;
- //Debug.WriteLine(message, threadID + "|" + category);
- }
+ if (conditional == false)
+ {
+ if (Debugger.IsAttached)
+ {
+ Debugger.Break();
+ }
- /// <summary>
- /// Log a message using Debug.WriteLine only if conditional = true
- /// </summary>
- [DebuggerHidden]
- [Conditional("DEBUG")]
- public static void LOG(bool conditional, string message, string category)
- {
- if (conditional) LOG(message, category);
+ throw LiteException.InvalidDatafileState(message);
+ }
}
- /// <summary>
- /// Ensure condition is true, otherwise throw exception (check contract)
- /// </summary>
[DebuggerHidden]
- public static void ENSURE(bool conditional, string message = null)
+ public static void ENSURE(bool conditional, string format, params object[] args)
{
if (conditional == false)
{
@@ -140,7 +134,9 @@ public static void ENSURE(bool conditional, string message = null)
Debugger.Break();
}
- throw LiteException.InvalidDatafileState(message);
+ var message = string.Format(CultureInfo.InvariantCulture, format, args);
+
+ throw LiteException.InvalidDatafileState(message);
}
}
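The point of the new format/args ENSURE overload: the interpolated call sites built the message string on every call, even when the assertion passed, while the new shape defers string.Format to the failure path (only a params array is allocated on success). Side by side, with illustrative values:

    // before: message materialized on every call, pass or fail
    ENSURE(read == PAGE_SIZE, $"read only {read} bytes");

    // after: string.Format runs only when the check fails
    ENSURE(read == PAGE_SIZE, "read only {0} bytes", read);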
diff --git a/LiteDB/Utils/Extensions/WaitHandleExtensions.cs b/LiteDB/Utils/Extensions/WaitHandleExtensions.cs
new file mode 100644
index 000000000..67b28e835
--- /dev/null
+++ b/LiteDB/Utils/Extensions/WaitHandleExtensions.cs
@@ -0,0 +1,33 @@
+namespace LiteDB.Utils.Extensions
+{
+ using System;
+ using System.Threading;
+ using System.Threading.Tasks;
+
+ internal static class WaitHandleExtensions
+ {
+ public static async Task<bool> WaitAsync(this WaitHandle handle)
+ {
+ var tcs = new TaskCompletionSource<bool>();
+
+ using (new ThreadPoolRegistration(handle, tcs))
+ {
+ return await tcs.Task.ConfigureAwait(false);
+ }
+ }
+
+ private sealed class ThreadPoolRegistration : IDisposable
+ {
+ private readonly RegisteredWaitHandle _registeredWaitHandle;
+
+ public ThreadPoolRegistration(WaitHandle handle, TaskCompletionSource<bool> tcs)
+ {
+ _registeredWaitHandle = ThreadPool.RegisterWaitForSingleObject(handle,
+ (state, timedOut) => ((TaskCompletionSource<bool>)state).TrySetResult(!timedOut), tcs,
+ Timeout.InfiniteTimeSpan, executeOnlyOnce: true);
+ }
+
+ void IDisposable.Dispose() => _registeredWaitHandle.Unregister(null);
+ }
+ }
+}
\ No newline at end of file
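A usage sketch for the new extension, which turns any WaitHandle into an awaitable; this is how DiskWriterQueue now awaits its ManualResetEventSlim. Since registration uses Timeout.InfiniteTimeSpan, timedOut is always false here and the returned bool is always true; the thread-pool registration is unregistered once the await completes:

    using System.Threading;
    using System.Threading.Tasks;
    using LiteDB.Utils.Extensions;

    var gate = new ManualResetEventSlim(false);

    async Task ConsumerAsync()
    {
        // suspends without blocking a thread-pool thread
        await gate.WaitHandle.WaitAsync().ConfigureAwait(false);
    }

    var waiting = ConsumerAsync();
    gate.Set();      // producer side: releases the awaiter
    await waiting;   // completes now that the handle is signalled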
diff --git a/LiteDB/Utils/LiteException.cs b/LiteDB/Utils/LiteException.cs
index 6e9ccaa4f..c72859851 100644
--- a/LiteDB/Utils/LiteException.cs
+++ b/LiteDB/Utils/LiteException.cs
@@ -347,6 +347,11 @@ internal static LiteException InvalidDatafileState(string message)
{
return new LiteException(INVALID_DATAFILE_STATE, message);
}
+
+ internal static LiteException InvalidDatafileState(Exception innerException, string message)
+ {
+ return new LiteException(INVALID_DATAFILE_STATE, innerException, message);
+ }
#endregion
}
diff --git a/LiteDB/Utils/Logging.cs b/LiteDB/Utils/Logging.cs
new file mode 100644
index 000000000..4a23fe6ec
--- /dev/null
+++ b/LiteDB/Utils/Logging.cs
@@ -0,0 +1,44 @@
+using System;
+using System.Diagnostics;
+using System.Text;
+
+namespace LiteDB
+{
+ public static class Logging
+ {
+ public static event Action<LogEventArgs> LogCallback;
+
+ /// <summary>
+ /// Log a message by raising the LogCallback event
+ /// </summary>
+ [DebuggerHidden]
+ public static void LOG(string message, string category)
+ {
+ LogCallback?.Invoke(new LogEventArgs() { Message = message, Category = category });
+ //Debug.WriteLine is too slow in multi-threads
+ //var threadID = Environment.CurrentManagedThreadId;
+ //Debug.WriteLine(message, threadID + "|" + category);
+ }
+
+ /// <summary>
+ /// Log a message by raising the LogCallback event, only if conditional = true
+ /// </summary>
+ [DebuggerHidden]
+ public static void LOG(bool conditional, string message, string category)
+ {
+ if (conditional) LOG(message, category);
+ }
+
+ public static void LOG(Exception exception, string category)
+ {
+ LogCallback?.Invoke(new LogEventArgs() { Exception = exception, Category = category });
+ }
+ }
+
+ public class LogEventArgs
+ {
+ public string Category { get; set; }
+ public string Message { get; set; }
+ public Exception Exception { get; set; }
+ }
+}
\ No newline at end of file
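Because Logging.LOG is no longer marked [Conditional("DEBUG")], hosts opt in at runtime by subscribing to the event; with no subscriber each call reduces to a null check. A minimal subscriber sketch:

    using System;
    using LiteDB;

    Logging.LogCallback += e =>
    {
        if (e.Exception != null)
            Console.WriteLine($"[{e.Category}] {e.Exception}");
        else
            Console.WriteLine($"[{e.Category}] {e.Message}");
    };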